diff --git a/FindBugsExclude.xml b/FindBugsExclude.xml
new file mode 100644
index 0000000000000000000000000000000000000000..01186ffde0406b54eace8d2135adf36fc56ca755
--- /dev/null
+++ b/FindBugsExclude.xml
@@ -0,0 +1,458 @@
+<!-- The excluded FindBugs warnings listed below are "known" and "ok".
+     Run FindBugs with this as an exclude file.  For example:
+
+findbugs -textui -exclude FindBugsExclude.xml je.jar
+
+-->
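+<!-- A minimal Ant sketch of the same invocation (illustrative only: it
+     assumes the optional findbugs-ant task shipped with the FindBugs
+     distribution is on Ant's classpath and that findbugs.home points at a
+     FindBugs installation; neither is part of this build):
+
+<taskdef name="findbugs"
+         classname="edu.umd.cs.findbugs.anttask.FindBugsTask"/>
+<findbugs home="${findbugs.home}" output="text"
+          excludeFilter="FindBugsExclude.xml">
+  <class location="build/lib/je.jar"/>
+</findbugs>
+-->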
+<FindBugsFilter>
+     <Match class="com.sleepycat.collections.BlockIterator">
+       <BugCode name="REC" />
+       <Method name="add"/>
+     </Match>
+
+     <Match class="com.sleepycat.compat.DbCompat">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.bind.serial.SerialOutput">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.Database">
+       <BugCode name="IS" />
+       <Field name="handleLocker" />
+     </Match>
+
+     <Match class="com.sleepycat.je.DatabaseEntry">
+       <BugCode name="MS" />
+       <Field name="MAX_DUMP_BYTES" />
+     </Match>
+
+     <Match class="com.sleepycat.je.Environment">
+       <BugCode name="UG" />
+       <Method name="getMutableConfig"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.DatabaseImpl">
+       <BugCode name="MS" />
+       <Field name="forceTreeWalkForTruncateAndRemove" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="CLEANUP_DONE" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="DEBUG_ADMIN" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="DEBUG_LOCK" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="DEBUG_TXN" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="DEBUG_TREEADMIN" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.MemoryBudget">
+       <BugCode name="MS" />
+       <Field name="DEBUG_TREE" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.PersistCatalog">
+       <BugCode name="MS" />
+       <Field name="expectNoClassChanges" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.model.ClassEnhancer">
+       <BugCode name="RV" />
+       <Method name="enhanceFile"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.txn.LockType">
+       <BugCode name="IC" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.LogEntryType">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.LogUtils">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.BIN">
+       <BugCode name="Eq" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.DIN">
+       <BugCode name="Eq" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.DBIN">
+       <BugCode name="Eq" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.DBINReference">
+       <BugCode name="Eq" />
+     </Match>
+
+     <Match class="com.sleepycat.collections.StoredMapEntry">
+       <BugCode name="Eq" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.StatsFileReader$LogEntryTypeComparator" >
+       <BugCode name="Se" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FileManager">
+       <BugCode name="ST" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FileManager">
+       <BugCode name="MS" />
+       <Field name="WRITE_COUNT" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FileManager">
+       <BugCode name="MS" />
+       <Field name="STOP_ON_WRITE_COUNT" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FileManager">
+       <BugCode name="MS" />
+       <Field name="N_BAD_WRITES" />
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FileManager">
+       <BugCode name="MS" />
+       <Field name="THROW_ON_WRITE" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.Format">
+       <BugCode name="ST" />
+       <Method name="getReader"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.log.ScavengerFileReader">
+       <BugCode name="NP" />
+       <Method name="resyncReader"/>
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.Evolver">
+       <BugCode name="NP" />
+       <Method name="evolveRequiredKeyField"/>
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.Evolver">
+       <BugCode name="NP" />
+       <Method name="evolveFormatInternal"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.EnvironmentImpl">
+       <BugCode name="ST" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.EnvironmentImpl">
+       <BugCode name="ICAST" />
+       <Method name="updateBackgroundWrites"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.EnvironmentImpl">
+       <BugCode name="SWL" />
+       <Method name="sleepAfterBackgroundIO"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.TransactionStats$Active">
+       <BugCode name="SIC" />
+     </Match>
+
+     <Match class="com.sleepycat.je.evictor.Evictor">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.evictor.Evictor">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.SharedLatch">
+       <BugCode name="PS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.LatchImpl">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.SharedLatchImpl">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Latch">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Latch">
+       <BugCode name="Wa" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Java5LatchImpl">
+       <BugCode name="IMSE" />
+       <Method name="doRelease" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Latch">
+       <BugCode name="IMSE" />
+       <Method name="doRelease" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Java5SharedLatchImpl" >
+       <BugCode name="SnVI" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Java5SharedLatchImpl">
+       <BugCode name="IMSE" />
+       <Method name="release" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.SharedLatch">
+       <BugCode name="IMSE" />
+       <Method name="release" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Java5SharedLatchImpl">
+       <BugCode name="UL" />
+       <Method name="acquireExclusive" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.Java5SharedLatchImpl">
+       <BugCode name="UL" />
+       <Method name="acquireShared" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.SharedLatch">
+       <BugCode name="UL" />
+       <Method name="acquireExclusive" />
+     </Match>
+
+     <Match class="com.sleepycat.je.latch.SharedLatch">
+       <BugCode name="UL" />
+       <Method name="acquireShared" />
+     </Match>
+
+     <Match class="com.sleepycat.je.cleaner.Cleaner">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.cleaner.FileProcessor">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.cleaner.UtilizationProfile">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.recovery.Checkpointer">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.recovery.Checkpointer">
+       <BugCode name="ICAST" />
+       <Method name="isRunnable" />
+     </Match>
+
+     <Match class="com.sleepycat.je.recovery.Checkpointer">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.recovery.RecoveryManager">
+       <BugCode name="REC" />
+       <Method name="readINsAndTrackIds"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.recovery.RecoveryManager">
+       <BugCode name="REC" />
+       <Method name="redoLNs"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.txn.Txn">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.txn.Txn">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.collections.StoredMap">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.Sequence">
+       <BugCode name="IS2" />
+     </Match>
+
+     <Match class="com.sleepycat.je.Sequence">
+       <BugCode name="IS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.incomp.INCompressor">
+       <BugCode name="IS,IS2,BC" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Key">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.IN">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Tree">
+       <BugCode name="DMI" />
+       <Method name="validateINList" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Tree">
+       <BugCode name="NP" />
+       <Method name="deleteDupSubtree" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Tree">
+       <BugCode name="NP" />
+       <Method name="splitRoot" />
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Tree">
+       <BugCode name="NP" />
+       <Method name="searchSplitsAllowed" />
+     </Match>
+
+     <Match class="com.sleepycat.je.txn.Txn">
+       <BugCode name="MS" />
+     </Match>
+
+     <Match class="com.sleepycat.collections.StoredIterator">
+       <BugCode name="RV" />
+       <Method name="reset"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.utilint.DaemonThread">
+       <BugCode name="RV" />
+       <Method name="checkErrorListener"/>
+     </Match>
+
+     <Match class="com.sleepycat.collections.DataView">
+       <BugCode name="DE" />
+       <Method name="join"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.utilint.TracerFormatter">
+       <BugCode name="DE" />
+       <Method name="format"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.incomp.INCompressor">
+       <BugCode name="RCN" />
+       <Method name="searchForBIN"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.tree.Tree">
+       <BugCode name="RCN" />
+       <Method name="getNextBinInternal"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.log.FSyncManager$FSyncGroup">
+       <BugCode name="NN" />
+       <Method name="wakeupOne"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.txn.LockManager">
+       <BugCode name="NN" />
+       <Method name="release"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.utilint.DaemonThread">
+       <BugCode name="UW" />
+       <Method name="run"/>
+     </Match>
+
+     <Match class="com.sleepycat.je.util.DbRunAction">
+       <BugCode name="REC" />
+       <Method name="main"/>
+     </Match>
+
+     <Match classregex="com.sleepycat.je.util.*" >
+       <BugCode name="Dm" />
+     </Match>
+
+     <Match class="com.sleepycat.asm.signature.SignatureWriter" >
+       <BugCode name="IM" />
+       <Method name="endArguments" />
+     </Match>
+
+     <Match class="com.sleepycat.asm.ClassReader" >
+       <BugCode name="Bx" />
+       <Method name="readAnnotationValue" />
+     </Match>
+
+     <Match class="com.sleepycat.asm.ClassReader" >
+       <BugCode name="Bx" />
+       <Method name="readConst" />
+     </Match>
+
+     <Match class="com.sleepycat.bind.serial.StoredClassCatalog$ClassInfo" >
+       <BugCode name="SnVI" />
+     </Match>
+
+     <Match classregex="com.sleepycat.persist.impl.*" >
+       <BugCode name="SnVI" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.ComplexFormat$SkipFieldReader">
+       <BugCode name="Se" />
+       <Field name="endField" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.EnumFormat" >
+       <BugCode name="Se" />
+       <Field name="values" />
+     </Match>
+
+     <Match class="com.sleepycat.persist.impl.FieldInfo" >
+       <BugCode name="Se" />
+       <Field name="cls" />
+     </Match>
+
+     <Match classregex=".*" >
+       <BugCode name="EI,EI2,CD" />
+     </Match>
+
+     <Match class="com.sleepycat.asm.Handler" >
+       <BugCode name="UrF" />
+       <Field name="desc" />
+     </Match>
+
+     <Match class="com.sleepycat.je.dbi.EnvironmentImpl" >
+       <BugCode name="UrF" />
+       <Field name="lockoutTimeout" />
+     </Match>
+
+     <!-- Match all doublecheck violations in these methods of "AnotherClass". 
+     <Match class="com.foobar.AnotherClass">
+       <Or>
+         <Method name="nonOverloadedMethod" />
+         <Method name="frob" params="int,java.lang.String" returns="void" />
+         <Method name="blat" params="" returns="boolean" />
+       </Or>
+       <BugCode name="DC" />
+     </Match>
+     -->
+</FindBugsFilter>
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..df8fe6f5b8256145e2d83f7beb049b32d3f3dd01
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,75 @@
+/*-
+ * $Id: LICENSE,v 1.12.2.2 2010/01/04 15:30:18 cwl Exp $
+ */
+
+The following is the license that applies to this copy of the Berkeley
+DB Java Edition software.  For a license to use the Berkeley DB Java
+Edition software under conditions other than those described here, or
+to purchase support for this software, please contact Oracle at
+berkeleydb-info_us@oracle.com.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+/*
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ *    how to obtain complete source code for the DB software and any
+ *    accompanying software that uses the DB software.  The source code
+ *    must either be included in the distribution or be available for no
+ *    more than the cost of distribution plus a nominal fee, and must be
+ *    freely redistributable under reasonable conditions.  For an
+ *    executable file, complete source code means the source code for all
+ *    modules it contains.  It does not include source code for modules or
+ *    files that typically accompany the major components of the operating
+ *    system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ORACLE ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED.  IN NO EVENT SHALL ORACLE BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/README b/README
new file mode 100644
index 0000000000000000000000000000000000000000..f2811cb51d311d0aa73a12fd2aeea4a6ae2e7521
--- /dev/null
+++ b/README
@@ -0,0 +1,5 @@
+Oracle: Berkeley DB, Java Edition 3.3.98: March 26, 2010
+
+This is Berkeley DB, Java Edition, version 3.3.98 from
+Oracle.  To view the release and installation documentation, load
+the distribution file docs/index.html into your web browser.
\ No newline at end of file
diff --git a/ant/compile.xml b/ant/compile.xml
new file mode 100644
index 0000000000000000000000000000000000000000..f1b228ea31c5d19dd18d6fb9ae7ca3fcb86a87eb
--- /dev/null
+++ b/ant/compile.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+
+<!-- ======================================================= -->
+<!-- Ant Build File for Berkeley DB Java Edition compilation -->
+<!-- ======================================================= -->
+
+<project name="compile" default="compile" basedir=".">
+    <description>Compile java code.</description>
+
+    <property name="srcdir" value="${basedir}/src"/>
+    <property name="builddir" value="${basedir}/build"/>
+    <property name="destdir" value="${builddir}/classes"/>
+    <property name="build.debug" value="on"/>
+    <property name="build.deprecation" value="off"/>
+    <property name="include" value="com/sleepycat/**/*.java"/>
+    <property name="exclude1" value="com/sleepycat/**/jca/**/*.java"/>
+    <property name="exclude2" value="com/sleepycat/**/jmx/**/*.java"/>
+
+    <target name="compile">
+        <javac srcdir="${srcdir}"
+               destdir="${destdir}" 
+               debug="${build.debug}"
+               deprecation="${build.deprecation}" 
+               optimize="on"
+               source="1.5"
+               target="1.5">
+            <classpath refid="compile.classpath"/>
+            <include name="${include}"/>
+	    <exclude name="${exclude1}"/>
+	    <exclude name="${exclude2}"/>
+            <!--
+            <compilerarg value="-Xlint:unchecked"/>
+            -->
+        </javac>
+    </target>
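+
+    <!--
+    A sketch of how this file is typically invoked: callers use an <ant> task,
+    override the properties above, and must supply a compile.classpath
+    reference, since the javac task above expects it.  This mirrors the <ant>
+    calls in ant/internal.xml and build.xml; "class.path" stands for whatever
+    path the caller has defined:
+
+    <ant antfile="ant/compile.xml" dir="." inheritall="false">
+        <property name="srcdir" value="${example.srcdir}"/>
+        <property name="include" value="**/*.java"/>
+        <reference refid="class.path" torefid="compile.classpath"/>
+    </ant>
+    -->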
+</project>
diff --git a/ant/internal.xml b/ant/internal.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a20d2d7f5153b27a4b309408980e69ea5b12b281
--- /dev/null
+++ b/ant/internal.xml
@@ -0,0 +1,459 @@
+<?xml version="1.0"?>
+
+<!-- =================================================== -->
+<!-- Internal build targets for Berkeley DB Java Edition -->
+<!-- All targets here are for internal use only and are  -->
+<!-- not part of the published package.                  -->
+<!-- =================================================== -->
+
+<project name="JEInternal" default="jar" basedir="..">
+<description>Internal targets for JE</description>
+
+    <!--
+    Properties that are intended to be passed via -D on the command line.
+    -->
+    <!-- Arguments to be passed on the command line of the <java> task. -->
+    <property name="args" value=""/>
+    <!-- Default jar file for testing is the je.jar built here. -->
+    <property name="testjar" value="${jarfile}"/>
+
+    <!--
+    Common standalone test properties.
+    -->
+    <property name="standalone.srcdir" location="${basedir}/test/standalone"/>
+    <property name="standalone.dir" location="${builddir}/test/standalone"/>
+    <property name="standalone.destdir" location="${standalone.dir}/classes"/>
+    <property name="standalone.datadir" location="${standalone.dir}/data"/>
+    <property name="standalone.logfile" location="${standalone.dir}/log"/>
+    <property name="standalone.propfile"
+              location="${standalone.datadir}/je.properties"/>
+
+    <path id="standalone.compile.classpath">
+      <pathelement location="${destdir}"/>
+      <pathelement location="${standalone.destdir}"/>
+      <path refid="clover.classpath"/>
+    </path>
+
+    <path id="standalone.test.classpath">
+      <pathelement location="${testjar}"/>
+      <pathelement location="${standalone.destdir}"/>
+      <path refid="clover.classpath"/>
+    </path>
+
+    <!-- ============================================================ -->
+    <!-- Global Targets                                               -->
+    <!-- ============================================================ -->
+
+    <target name="init">
+        <mkdir dir="${standalone.dir}"/>
+        <mkdir dir="${standalone.destdir}"/>
+    </target>
+
+    <target name="clean" depends="clean-standalone"/>
+
+    <target name="compile" depends="compile-standalone"/>
+
+    <target name="init-standalone" depends="init">
+        <delete dir="${standalone.datadir}"/>
+        <mkdir dir="${standalone.datadir}"/>
+    </target>
+
+    <target name="clean-standalone" depends="init">
+        <delete dir="${standalone.destdir}"/>
+        <mkdir dir="${standalone.destdir}"/>
+    </target>
+
+    <target name="compile-standalone" depends="init-standalone">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <property name="srcdir" value="${standalone.srcdir}"/>
+            <property name="destdir" value="${standalone.destdir}"/>
+            <property name="include" value="**/*.java"/>
+            <reference refid="standalone.compile.classpath"
+	               torefid="compile.classpath"/>
+        </ant>
+    </target>
+
+
+    <!-- Outputs the version of the ${jvm} so it can be visually verified. -->
+    <target name="javaversion">
+      <java classname="xxNotUsed" fork="yes" jvm="${jvm}">
+        <jvmarg value="-version"/>
+      </java>
+    </target>
+
+    <!-- =================== Standalone regression tests ======================
+    Tests that have a standard Java main and are run separately via <java>
+    rather than via <junit>.
+
+    Specific test targets (e.g., MemoryStress) are also invoked via the
+    'standalone' target in build.xml, using -Dtestcase=TestName where TestName
+    is the name of the one of the targets below.  Additional program args and
+    JVM args can be specified using -Dargs= and -Djvmargs=.
+    
+    Each test program should:
+     + fail (System.exit with non-zero value) if the test fails
+     + write log information and errors to System.out and System.err
+     + take command line arguments for environment home and other parameters 
+     + allow command line arguments to be repeated, so that the last specified
+       argument takes precedence; this allows specifying -Dargs="..." to
+       override arguments specified explicitly in the <java> task
+     + Print the full command line at the start of the test
+     + Check for data correctness, as well as for exceptions; if a
+       correctness check fails, the test should fail
+     + As part of correctness checking, check statistics to ensure that
+       activities relevant to the test are occurring (cache misses,
+       eviction, cleaning, etc)
+
+    Each test target below should (see the minimal skeleton sketch just after
+    this comment):
+     + run the <java> task to invoke the standalone test program
+     + use fork="yes" 
+     + use failonerror="yes"
+     + use output="${standalone.logfile}"
+     + use the predefined standalone.test.classpath
+     + pass ${standalone.datadir} as the environment directory argument
+     + pass other program args using <arg>, followed by <arg line="${args}"/>
+       to allow overriding program args using -Dargs="..."
+     + pass JVM args using <jvmarg>, followed by <jvmarg line="${jvmargs}"/>
+       to allow overriding JVM args using -Djvmargs="..."
+     + for stress tests, use <jvmarg value="-ea"/> to enable assertions
+
+    Each test may have any number of properties that can be passed via
+    -Dargs="...".  However, a default regression test should run without
+    specifying any such properties.
+
+    Some tests (namely performance regressions such as BigRandom) are not meant
+    to fail or pass in an automated way, and instead they output data that must
+    be manually examined.
+
+    By default a test is run against the je.jar built here.  -Dtestjar=JARFILE
+    may be used to run the test against a different release of JE, as long as
+    the test only uses JE APIs that exist in that release, of course.  For
+    example, this can be used to run BigRandom against an earlier released
+    version of JE.
+
+    Miscellaneous:
+     + For long-running tests, you can specify
+       "-Djvmargs=-Xloggc:<file>, -XX:+PrintGCTimeStamps, -XX:+PrintGCDetails"
+       to monitor GC behavior, which is useful for performance tests.
+     + For performance-tuning standalone tests, it is helpful to print a full
+       list of JE's performance tuning parameters (e.g. je.maxMemory,
+       je.cleaner.threads, etc) to the logs and then compare them across runs.
+    ======================================================================= -->
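+    <!--
+    A minimal skeleton of a target that follows the conventions above (the
+    "ExampleStress" class and its program args are hypothetical; the targets
+    below are the real tests):
+
+    <target name="ExampleStress">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="ExampleStress"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-ea"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg line="${args}"/>
+      </java>
+    </target>
+    -->
+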
+    <target name="standalone" depends="javaversion">
+      <echo message="Running: ${testcase}"/>
+      <echo message="Using JE: ${testjar}"/>
+      <echo message="Overriding: args=${args} jvmargs=${jvmargs}"/>
+      <echo message="See results in: (${standalone.logfile})"/>
+      <antcall target="${testcase}" inheritrefs="true"/>
+    </target>
+
+    <!--
+    MemoryStress: Runs a stress test under low memory conditions to force an
+    OutOfMemoryError to occur if a memory cache budget bug exists.  Also
+    outputs a detailed log.
+    
+    Regression test:  Test should pass when run as follows:
+
+      ant -Dtestcase=MemoryStress standalone
+      ant -Dtestcase=MemoryStress -Dargs="-dups" standalone
+    -->
+    <target name="MemoryStress">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="MemoryStress"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx42m"/>
+	<jvmarg value="-ea"/>
+	<jvmarg value="-DsetErrorListener=true"/>
+	<jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-nThreads"/>
+        <arg value="20"/>
+        <arg value="-nOps"/>
+        <arg value="500000"/>
+        <arg line="${args}"/>
+      </java>
+    </target>
+
+    <!--
+    EnvSharedCache: Runs a stress test comparing EnvironmentStats with/without
+    setting EnvironmentConfig.setSharedCache(true) to see if the shared env
+    cache works efficiently.  Also outputs a detailed log.
+    
+    Regression test:  Test should pass when run as follows:
+
+      ant -Dtestcase=EnvSharedCache standalone
+      ant -Dtestcase=EnvSharedCache -Dargs="-opentest" standalone
+      ant -Dtestcase=EnvSharedCache -Dargs="-eventest" standalone
+    -->
+    <target name="EnvSharedCache">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="EnvSharedCache"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx128m"/>
+	<jvmarg value="-ea"/>
+	<jvmarg value="-DsetErrorListener=true"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-envs"/>
+        <arg value="4"/>
+        <arg value="-threads"/>
+        <arg value="4"/>
+        <arg value="-keys"/>
+        <arg value="25000"/>
+        <arg value="-initonly"/>
+      </java>
+      <!-- If it's opentest, use a larger number of txns for a 12-hour run. -->
+      <condition property="num.txns" value="600000">
+        <contains string="${args}" substring="-opentest"/>
+      </condition>
+      <condition property="num.txns" value="25000">
+        <not>
+          <isset property="num.txns"/>
+        </not>
+      </condition>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="EnvSharedCache"
+            output="${standalone.logfile}" append="true">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx128m"/>
+	<jvmarg value="-ea"/>
+	<jvmarg value="-DsetErrorListener=true"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-envs"/>
+        <arg value="4"/>
+        <arg value="-threads"/>
+        <arg value="4"/>
+        <arg value="-txns"/>
+        <arg value="${num.txns}"/>
+        <arg value="-cachetest"/>
+        <arg value="-shared"/>
+        <arg line="${args}"/>
+      </java>
+    </target>
+
+    <!--
+    BigRandom: Outputs a detailed log and final throughput value.
+    
+    Regression test:  The final throughput (printed as the last line of the
+    log) should be roughly equal to or greater than that of the prior release.
+    The test is normally run twice as follows, for each version of JE being
+    tested; four times in all.  Each run takes approximately 12 to 24 hours.
+
+      ant -Dtestcase=BigRandom standalone
+      ant -Dtestcase=BigRandom -Dargs="-nosync" standalone
+
+    After each test is run, be sure to save the log file for comparing it to
+    subsequent runs and for generating graphs later.  For example:
+
+      cp build/test/standalone/log bigrandom-je3.2.23-nosync-log
+
+    Not yet done:
+    + move maxMemory to a command line argument so it can be overridden using
+      the args property.
+    + automate running of gnuplot to produce graphs for comparing
+      multiple runs.
+    -->
+    <target name="BigRandom">
+      <echo message="je.maxMemory=268435456"
+            file="${standalone.propfile}"/>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="BigRandom"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+	<jvmarg value="-Xmx360m"/>
+	<jvmarg value="-DsetErrorListener=true"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-initonly"/>
+        <arg value="-locality"/>
+        <arg value="5"/>
+        <arg value="-keys"/>
+        <arg value="10000000"/>
+        <arg line="${args}"/>
+      </java>
+      <echo message="result ${test.result}"/>
+    </target>
+
+    <!--
+    RemoveDbStress: Verifies that no bugs occur when database remove/truncate
+    is called while the log cleaner and checkpointer threads are interacting
+    with the db.  Also outputs a detailed log and final throughput value.
+    
+    Regression test:  The final throughput (printed as the last line of the
+    log) should be roughly equal to or greater than that of the prior release.
+    The test is normally run once, as follows, when needed.
+    Each run takes approximately 1 to 2 hours.
+
+      ant -Dtestcase=RemoveDbStress standalone
+
+    After each test is run, the log file is automatically copied to
+    RemoveDbStress-<yyMMdd_HH>-log for comparing it to subsequent runs.
+
+    -->
+    <target name="RemoveDbStress">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="RemoveDbStress"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-ea"/>
+        <jvmarg value="-DsetErrorListener=true"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-iter"/>
+        <arg value="200000"/>
+        <arg line="${args}"/>
+      </java>
+      <tstamp>
+        <format property="log.time" pattern="yyMMdd_HH"/>
+      </tstamp>
+      <copy file="${standalone.logfile}"
+            tofile="${standalone.dir}/RemoveDbStress-${log.time}-log"/>
+    </target>
+
+    <!--
+    BigDW: Runs a stress test to check that a deferred-write database behaves
+    as correctly as a normal one, by performing insertion, dup-deletion and
+    read operations against both a normal database and a deferred-write
+    database.  The reported throughput (printed as the last line of the log)
+    should be around 30 ops/sec, and 20000 txns should be enough for an
+    approximately 10 minute run.
+
+    Regression test:  Test should pass when run as follows:
+
+      ant -Dtestcase=BigDW standalone
+      ant -Dtestcase=BigDW -Dargs="-txns 100000" standalone
+    
+    After each test is run, the log file is automatically copied to
+    BigDW-<yyMMdd_HH>-log for comparing it to subsequent runs.
+    -->
+    <target name="BigDW">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="BigDW"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx32m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-init"/>
+        <arg value="-dupdel"/>
+        <arg line="${args}"/>
+      </java>
+      <tstamp>
+        <format property="log.time" pattern="yyMMdd_HH"/>
+      </tstamp>
+      <copy file="${standalone.logfile}"
+            tofile="${standalone.dir}/BigDW-${log.time}-log"/>
+    </target>
+
+    <!--
+    ClosedDbEviction: Runs a stress test of the eviction of closed database
+    roots, both to measure its efficiency and to make sure that eviction does
+    not cause corruption or concurrency bugs.  Also prints a detailed log.
+
+    Regression test:  Test should pass when run as follows:
+
+      ant -Dtestcase=ClosedDbEviction standalone
+      ant -Dtestcase=ClosedDbEviction -Dargs="-recovery 10000000" standalone
+
+    After each test is run, the log file is automatically copied to
+    ClosedDbEviction-<yyMMdd_HH>-log for comparing it to subsequent runs.
+    -->
+    <target name="ClosedDbEviction">
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="ClosedDbEviction"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx512m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-init"/>
+        <arg value="100000"/>
+      </java>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="ClosedDbEviction"
+            output="${standalone.logfile}" append="true">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx32m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-contention"/>
+        <arg value="100000000"/>
+        <arg line="${args}"/>
+      </java>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="ClosedDbEviction"
+            output="${standalone.logfile}" append="true">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx32m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-v"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg value="-evict"/>
+        <arg value="20000000"/>
+        <arg line="${args}"/>
+      </java>
+      <tstamp>
+        <format property="log.time" pattern="yyMMdd_HH"/>
+      </tstamp>
+      <copy file="${standalone.logfile}"
+            tofile="${standalone.dir}/ClosedDbEviction-${log.time}-log"/>
+    </target>
+
+    <!--
+    CleanWithSmallCache: Runs a stress test to test ...
+
+    Regression test:  Test should pass when run as follows:
+
+      ant -Dtestcase=CleanWithSmallCache standalone
+
+    After each test is run, the log file is automatically copied to
+    CleanWithSmallCache-<yyMMdd_HH>-log for comparing it to subsequent runs.
+    -->
+    <target name="CleanWithSmallCache">
+      <echo message="CleanWithSmallCache: write"/>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="CleanWithSmallCache"
+            output="${standalone.logfile}">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx6m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg line="-records 40000 -key 48 -data 10 -random -cache 250k -seconds 2000 -write 10000"/>
+      </java>
+      <echo message="CleanWithSmallCache: clean"/>
+      <java fork="yes" jvm="${jvm}" failonerror="yes"
+            classname="CleanWithSmallCache"
+            output="${standalone.logfile}" append="true">
+        <classpath refid="standalone.test.classpath"/>
+        <jvmarg value="-Xmx15m"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg value="-h"/>
+        <arg value="${standalone.datadir}"/>
+        <arg line="-records 40000 -key 48 -data 10 -random -cache 250k -seconds 22000 -read 10 -clean"/>
+        <arg line="${args}"/>
+      </java>
+      <tstamp>
+        <format property="log.time" pattern="yyMMdd_HH"/>
+      </tstamp>
+      <copy file="${standalone.logfile}"
+            tofile="${standalone.dir}/CleanWithSmallCache-${log.time}-log"/>
+    </target>
+</project>
diff --git a/build.properties b/build.properties
new file mode 100644
index 0000000000000000000000000000000000000000..54c13acf0ab67feba9930dfcc7ba9c4e4046e2d1
--- /dev/null
+++ b/build.properties
@@ -0,0 +1,54 @@
+##########################################################################
+# To compile the jca, jca-examples, jmx, and jmx-examples targets,
+# set j2ee.jarfile to a j2ee.jar.
+##########################################################################
+# Typical j2ee jar for JBOSS
+#j2ee.jarfile = c:/jboss-3.2.6/client/jbossall-client.jar
+
+# Typical j2ee jar for Sun Java Application Server
+#j2ee.jarfile = c:/j2ee1.4AppServer/lib/j2ee.jar
+
+# Typical j2ee jar for OC4J
+#j2ee.jarfile = <OC4J_HOME>/j2ee/home/lib/ejb.jar:<OC4J_HOME>/oc4j/j2ee/home/lib/connector.jar:<OC4J_HOME>/oc4j/j2ee/home/lib/oc4j-internal.jar
+
+##########################################################################
+# Set example.resources to run the JCA examples.
+##########################################################################
+# SJSAS
+#example.resources = c:/j2ee1.4AppServer/lib/appserv-rt.jar
+# JBOSS
+#example.resources = <jehome>/examples/resources/jboss
+# OC4J
+#example.resources = <jehome>/examples/resources/oc4j/oc4j.jar
+
+##########################################################################
+# Set example.jca.srcdir to run the JCA examples.
+##########################################################################
+# JBOSS
+#example.jca.srcdir = <jehome>/examples/jca/jboss
+# SJSAS
+#example.jca.srcdir = <jehome>/examples/jca/sjsas8_1
+# OC4J
+#example.jca.srcdir = <jehome>/examples/jca/oc4j
+
+##########################################################################
+# Set example.jca.descriptorname to run the JCA examples.
+##########################################################################
+# JBOSS
+#example.jca.descriptorname = jboss.xml
+# SJSAS
+#example.jca.descriptorname = sun-ejb-jar.xml
+# OC4J
+#example.jca.descriptorname = orion-ejb-jar.xml
+
+##########################################################################
+# Set clover.ignorefailure=true to cause unit test failures to be ignored
+# when doing a full Clover run of all tests.
+##########################################################################
+#clover.ignorefailure=true
+
+##########################################################################
+# Set logging.config.file to a configuration file which can change the 
+# default logging configuration for all logging operations.
+##########################################################################
+#logging.config.file=rep.properties
diff --git a/build.xml b/build.xml
new file mode 100644
index 0000000000000000000000000000000000000000..48ba0e8cbbfaa49fae7c4d3ad40c8261f18d71d6
--- /dev/null
+++ b/build.xml
@@ -0,0 +1,1581 @@
+<?xml version="1.0"?>
+
+<!-- ============================================ -->
+<!-- Ant Build File for Berkeley DB Java Database -->
+<!-- ============================================ -->
+
+<project name="JE" default="jar" basedir=".">
+<description>Compile and test JE</description>
+
+    <fail message="Ant 1.7.0 or greater is required">
+      <condition><not><antversion atleast="1.7.0"/></not></condition>
+    </fail>
+
+    <!-- Modify build.properties to point to the proper j2ee paths for
+         JCA and JMX support  -->
+    <property file="build.properties"/>
+
+    <property name="srcdir" value="${basedir}/src"/>
+    <property name="builddir" value="${basedir}/build"/>
+    <property name="destdir" value="${builddir}/classes"/>
+    <condition property="specificjar" value="${testjar}">
+      <isset property="testjar"/>
+    </condition>
+    <condition property="specificjar" value="${destdir}">
+      <not>
+        <isset property="testjar"/>
+      </not>
+    </condition>
+    <property name="libdir" value="${builddir}/lib"/>
+    <property name="extdir" value="${basedir}/ext"/>
+
+    <property name="dist.srcdir" value="${basedir}/dist"/>
+    <property name="dist.destdir" value="${builddir}/dist"/>
+
+    <property name="unittest.srcdir" value="${basedir}/test"/>
+    <property name="unittest.dir" value="${builddir}/test"/>
+    <property name="unittest.destdir" value="${unittest.dir}/classes"/>
+    <property name="unittest.datadir" value="${unittest.dir}/data"/>
+    <property name="unittest.extraenvdir" value="${unittest.destdir}/propTest"/>
+    <property name="unittest.extraenvdir2" value="${unittest.destdir}/propTest2"/>
+    <property name="unittest.reportsdir" value="${unittest.dir}/reports"/>
+    <property name="unittest.testserialdir" value="${unittest.dir}/testserial"/>
+    <property name="unittest.testevolvedir" value="${unittest.dir}/testevolve"/>
+
+    <property name="doc.dir" value="${basedir}/docs"/>
+    <property name="doc.javadir" value="${doc.dir}/java"/>
+    <property name="docsrc.dir" value="${basedir}/docs_src"/>
+    <property name="doclet.classes.dir"  
+              value="${doc.dir}/doclet/Classes"/>
+    <property name="doclet.src.dir" value="${doc.dir}/doclet"/>
+    <property name="doclet.jar" value="${doc.dir}/HidingDoclet.jar"/>
+
+    <property name="jarfile" value="${libdir}/je.jar"/>
+    <property name="jcararfile" value="${libdir}/jejca.rar"/>
+    <property name="srczipfile" value="${basedir}/src.zip"/>
+    <property name="buildzipfile" value="${basedir}/build.zip"/>
+    <property name="zipfile" value="${basedir}/je.zip"/>
+    <property name="plugin.id" value="com.sleepycat.je"/>
+
+    <property name="clover.tmpdir" value="${builddir}/tmp"/>
+    <property name="clover.initstring"
+              location="${clover.tmpdir}/jecoverage.db"/>
+      <property name="clover.libdir" value="/clover/lib"/>
+    <property name="clover.excludes" value="**/rpcserver/** **/je/examples/** **/je/junit/** **/*Test.java **/*TestBase.java **/compat/** **/je/util/**"/>
+
+    <property name="example.srcdir" value="${basedir}/examples"/>
+    <property name="example.destdir" value="${destdir}"/>
+
+    <property name="packages" value="com.sleepycat.*"/>
+    <property file="${user.home}/ant.properties"/>
+    <property name="build.propertyfile"
+              value="${dist.destdir}/build.properties"/>
+    <property name="installdestdir" value="/usr/local"/>
+
+    <!--
+    Can be used to specify a different JVM for <java> tasks, for example:
+      ant "-Djvm=/usr/local/jdk1.6.0_01/bin/java" ...
+    -->
+    <property name="jvm" value="java"/>
+
+    <!--
+    Can be used to override JVM args for <java> tasks, for example:
+      ant "-Djvmargs=-Xmx32M -client" ...
+    -->
+    <property name="jvmargs" value=""/>
+
+    <!--
+    For unit testing using different default isolation levels.  May be:
+     empty string (repeatableRead)
+     readCommitted (degree 2)
+     serializable (degree 3)
+    -->
+    <property name="isolationLevel" value=""/>
+
+    <!-- For long and short versions of the unit tests.  May be:
+         empty string (false)
+         true
+         false
+    -->
+    <property name="longtest" value="false"/>
+
+    <path id="empty.classpath"/>
+
+    <path id="class.path">
+      <pathelement location="${specificjar}"/>
+      <pathelement location="."/>
+    </path>
+
+    <path id="unittest.classpath">
+      <pathelement location="${specificjar}"/>
+      <pathelement location="."/>
+      <pathelement location="${unittest.destdir}"/>
+    </path>
+
+    <path id="clover.classpath">
+      <pathelement path="${clover.libdir}/clover.jar"/>
+      <pathelement path="${clover.libdir}/velocity.jar"/>
+    </path>
+
+    <path id="unittest-j2ee.classpath">
+      <pathelement location="${destdir}"/>
+      <pathelement location="."/>
+      <pathelement location="${unittest.destdir}"/>
+      <pathelement location="${examples.destdir}"/>
+      <pathelement path="${j2ee.jarfile}"/>
+    </path>
+
+    <path id="jca.classpath">
+      <pathelement location="${destdir}"/>
+      <pathelement location="."/>
+      <pathelement path="${j2ee.jarfile}"/>
+      <pathelement path="${example.resources}"/>
+    </path>
+
+    <path id="j2ee.classpath">
+      <pathelement location="${destdir}"/>
+      <pathelement location="."/>
+      <pathelement path="${j2ee.jarfile}"/>
+    </path>
+
+    <fileset id="jarclasses" dir="${destdir}"
+             includes="com/sleepycat/asm/**/*.class,
+                       com/sleepycat/bind/**/*.class,
+                       com/sleepycat/collections/**/*.class,
+                       com/sleepycat/compat/**/*.class,
+                       com/sleepycat/je/**/*.class,
+                       com/sleepycat/persist/**/*.class,
+                       com/sleepycat/util/**/*.class"
+             excludes="com/sleepycat/je/rep/**/*.class"/>
+
+    <target name="buildDbg">
+    <description>debug the build file itself</description>
+      <property name="classpath.string" refid="class.path"/>
+      <property name="unittestclasspath.string" refid="unittest.classpath"/>
+      <echoproperties/>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Global Targets                                               -->
+    <!-- ============================================================ -->
+    <target name="init">
+        <mkdir dir="${builddir}"/>
+        <mkdir dir="${destdir}"/>
+        <mkdir dir="${libdir}"/>
+        <mkdir dir="${dist.destdir}"/>
+        <mkdir dir="${doc.dir}"/>
+        <mkdir dir="${doc.javadir}"/>
+        <mkdir dir="${clover.tmpdir}"/>
+        <mkdir dir="${unittest.dir}"/>
+        <mkdir dir="${unittest.destdir}"/>
+        <mkdir dir="${unittest.extraenvdir}"/>
+        <mkdir dir="${unittest.extraenvdir2}"/>
+        <mkdir dir="${unittest.testserialdir}"/>
+        <mkdir dir="${unittest.testevolvedir}"/>
+        <tstamp/>
+	<antcall target="do-internal">
+	   <param name="target" value="init"/>
+        </antcall>
+    </target>
+
+    <target name="compile"
+            depends="compile-src,
+	             compile-examples,
+		     compile-unittest">
+	<antcall target="do-internal">
+	   <param name="target" value="compile"/>
+        </antcall>
+    </target>
+
+    <target name="compile-j2ee"
+            depends="check-j2ee-present,
+	             compile-jmx,
+	             compile-jca"/>
+       
+    <target name="check-j2ee-present">
+        <available classname="javax.resource.ResourceException"
+	           property="j2eelib.present">
+	  <classpath refid="j2ee.classpath"/>
+	</available>
+
+	<fail message="You need to set the 'j2ee.jarfile' property in
+	the build.properties file"
+	      unless="j2eelib.present"/>
+
+    </target>
+
+    <!-- ====== Internal (non-public) targets delegated to internal.xml =======
+
+    For each top level target in internal.xml, a target here is used to compile
+    all JE and internal sources and then invoke the target via internal.xml.
+    Any other parameters for each target are passed using properties (-D
+    propName=propValue).
+    ======================================================================= -->
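+    <!--
+    For example, the standalone tests defined in ant/internal.xml are reached
+    through this delegation; the commands below are taken from the comments in
+    that file:
+
+      ant -Dtestcase=MemoryStress standalone
+      ant -Dtestcase=MemoryStress -Dargs="-dups" standalone
+    -->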
+    <target name="do-internal">
+       <ant antfile="ant/internal.xml" target="${target}"
+            inheritAll="true" inheritrefs="true"/>
+    </target>
+    
+    <!-- ============================================================ -->
+    <!-- Clean                                                        -->
+    <!-- ============================================================ -->
+    <target name="clean" depends="clean-src,
+                                  clean-unittest,
+                                  clean-clover,
+				  clean-jar,
+                                  clean-package">
+	<antcall target="do-internal">
+	   <param name="target" value="clean"/>
+        </antcall>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Compiles                                                     -->
+    <!-- ============================================================ -->
+    <target name="clean-src" depends="init">
+        <delete>
+            <fileset dir="${destdir}"
+             includes="**/*.class,**/package.html,**/*.xml"/>
+        </delete>
+        <delete dir="${dist.destdir}"/>
+    </target>
+
+    <target name="compile-src" depends="init">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <reference refid="class.path" torefid="compile.classpath"/>
+        </ant>
+        <!-- Enhance the built-in persistent proxy classes. -->
+        <taskdef name="enhancer"
+                 classname="com.sleepycat.persist.model.ClassEnhancerTask">
+          <classpath refid="class.path"/>
+        </taskdef>
+        <enhancer verbose="off">
+          <fileset dir="${destdir}"
+                   includes="com/sleepycat/persist/impl/*Proxy.class"/>
+        </enhancer>
+    </target>
+
+    <target name="compile-examples" depends="compile-src">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <property name="srcdir" value="${example.srcdir}"/>
+            <property name="include" value="**/*.java"/>
+            <property name="exclude1" value="jmx/**/*.java"/>
+            <property name="exclude2" value="jca/**/*.java"/>
+            <reference refid="class.path" torefid="compile.classpath"/>
+        </ant>
+    </target>
+
+    <target name="compile-jmx" depends="check-j2ee-present,compile-src, init">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <reference refid="j2ee.classpath" torefid="compile.classpath"/>
+            <property name="include" value="com/sleepycat/je/jmx/**/*.java"/>
+            <property name="exclude2" value=""/>
+        </ant>
+    </target>
+
+    <target name="compile-jmxexamples" depends="compile-jmx">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <reference refid="j2ee.classpath" torefid="compile.classpath"/>
+            <property name="srcdir" value="${example.srcdir}"/>
+            <property name="include" value="jmx/**/*.java"/>
+            <property name="exclude2" value=""/>
+        </ant>
+    </target>
+
+    <target name="compile-jca" depends="check-j2ee-present,compile-src">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <reference refid="j2ee.classpath" torefid="compile.classpath"/>
+            <property name="include" value="com/sleepycat/je/jca/**/*.java"/>
+            <property name="exclude1" value=""/>
+        </ant>
+    </target>
+
+    <target name="compile-jcaexamples" depends="compile-jca">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <reference refid="j2ee.classpath" torefid="compile.classpath"/>
+            <property name="srcdir" value="${example.srcdir}"/>
+            <property name="include" value="jca/**/*.java"/>
+            <property name="exclude1" value=""/>
+        </ant>
+    </target>
+
+    <!-- for jar packaging -->
+    <target name="compile-dist" depends="init">
+        <javac srcdir="${dist.srcdir}"
+               destdir="${dist.destdir}" 
+               source="1.5">
+            <classpath refid="class.path"/>
+        </javac>
+    </target>
+
+
+    <!-- ============================================================ -->
+    <!-- Jars                                                         -->
+    <!-- ============================================================ -->
+    <target name="clean-jar">
+        <delete dir="${libdir}"/>
+    </target>
+
+    <!-- JE jar -->
+    <target name="jar" depends="compile, clean-package">
+        <jar jarfile="${jarfile}">
+	<fileset refid="jarclasses"/>
+        <manifest>
+            <attribute name="Main-Class"
+                       value="com.sleepycat.je.utilint.JarMain"/>
+            <attribute name="Premain-Class"
+                       value="com.sleepycat.persist.model.ClassEnhancer"/>
+        </manifest>
+        </jar>
+    </target>
+
+    <!-- jca jar -->
+    <target name="jcajar" depends="compile-jca">
+        <jar jarfile="${libdir}/jejca.jar">
+	<fileset dir="${destdir}"
+	         includes="com/sleepycat/je/jca/ra/**"/>
+       </jar>
+    </target>
+
+    <target name="clean-jca" depends="init">
+         <delete>
+              <fileset dir="${destdir}"
+               includes="**/jca/**/*.class,package"/>
+         </delete>
+         <delete dir="${dist.destdir}/**/jca"/>
+    </target>
+
+    <target name="jca" depends="jcajar,jar">
+       <jar destfile="${jcararfile}">
+	  <metainf dir="${srcdir}/com/sleepycat/je/jca/ra" includes="ra.xml"/>
+	  <fileset dir="${libdir}" includes="jejca.jar"/>
+	  <fileset dir="${libdir}" includes="je.jar"/>
+       </jar>
+    </target>
+
+    <!-- JCA examples jar -->
+    <target name="jca-examples" depends="compile-jcaexamples,jca">
+
+	<jar jarfile="${libdir}/jejca-example.jar">
+	   <metainf dir="${example.srcdir}/jca/simple"
+	            includes="ejb-jar.xml"/>
+	   <metainf dir="${example.jca.srcdir}"
+	             includes="${example.jca.descriptorname}"/>
+	   <fileset dir="${destdir}"
+	      includes="jca/simple/*"
+	      excludes="jca/simple/ExClient.class"
+	   />
+	</jar>
+    </target>
+
+    <!-- jmx jar -->
+    <target name="jmx" depends="compile-jmx,jar">
+        <jar jarfile="${libdir}/jejmx.jar">
+	<fileset dir="${destdir}"
+	         includes="com/sleepycat/je/jmx/**"/>
+        </jar>
+    </target>
+
+    <!-- jmx examples jar -->
+    <target name="jmx-examples" depends="compile-jmxexamples,jmx">
+        <jar jarfile="${libdir}/jejmx-example.jar">
+	<fileset dir="${destdir}" includes="jmx/**"/>
+        </jar>
+    </target>
+
+    <!-- create hidingdoclet jar. -->
+    <target name="jar-hidingdoclet" depends="init, copy-doc-materials">
+        <delete dir="${doclet.classes.dir}"/>
+	<delete file="${doclet.jar}"/>
+
+	<mkdir dir="${doclet.classes.dir}"/>
+	<javac srcdir="${doclet.src.dir}" destdir="${doclet.classes.dir}" source="1.5">
+            <include name="*.java"/>
+        </javac>	    
+
+	<jar jarfile="${doclet.jar}">
+            <fileset dir="${doclet.classes.dir}" includes="*.class"/>
+	</jar>
+    </target>
+
+
+    <!-- Eclipse "wrapper plug-in" jar -->
+    <target name="je-version" depends="compile-src">
+       <java fork="yes" 
+             classname="com.sleepycat.je.util.DbDump"
+             outputproperty="version.result"
+             failonerror="true">
+	  <arg value="-V"/>
+          <classpath refid="class.path"/>
+       </java>
+
+    </target>
+
+    <target name="eclipsejar" depends="je-version,compile,clean-package">
+        <property name="eclipsejarfile"
+                  value="${libdir}/${plugin.id}_${version.result}.jar"/>
+        <jar jarfile="${eclipsejarfile}">
+	<fileset refid="jarclasses"/>
+        <manifest>
+            <attribute name="Export-Package"
+                       value="com.sleepycat.bind,com.sleepycat.bind.serial,com.sleepycat.bind.tuple,com.sleepycat.collections,com.sleepycat.je,com.sleepycat.je.util,com.sleepycat.persist,com.sleepycat.persist.evolve,com.sleepycat.persist.model,com.sleepycat.persist.raw,com.sleepycat.util"/>
+            <attribute name="Bundle-Vendor"
+                       value="Oracle"/>
+            <attribute name="Bundle-Version"
+                       value="${version.result}"/>
+            <attribute name="Eclipse-BuddyPolicy"
+                       value="registered"/>
+            <attribute name="Bundle-Name"
+                       value="Berkeley DB Java Edition"/>
+            <attribute name="Bundle-ManifestVersion"
+                       value="2"/>
+            <attribute name="Bundle-SymbolicName"
+                       value="${plugin.id}"/>
+        </manifest>
+        </jar>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Package .jar and other files into a .zip                     -->
+    <!-- ============================================================ -->
+    <target name="clean-package">
+        <delete file="${zipfile}"/>
+        <delete file="${buildzipfile}"/>
+        <delete file="${srczipfile}"/>
+        <mkdir dir="${libdir}"/>
+    </target>
+
+    <target name="package" depends="jar, javadoc">
+        <description>
+            The package target builds the distribution package.
+        </description>
+
+	<!-- copy the README in, adding the current release number and date-->
+        <copy overwrite="true" file="${dist.srcdir}/README" todir="${basedir}">
+	   <filterset>
+	       <filter token="RELEASE_VERSION" value="${release.version}"/>
+	       <filter token="DATE" value="${release.date}"/>
+	   </filterset>	
+        </copy>	   
+
+        <copy overwrite="true" file="${dist.srcdir}/example.properties"
+            todir="${basedir}"/>
+
+        <zip basedir="${builddir}"
+             destfile="build.zip"
+             includes="bin/**,lib/**,dist/build.properties"/>
+        <zip basedir="${basedir}"
+             destfile="src.zip"
+             excludes="test/rpcserver/**,test/experiments/**,test/examples/**,test/regress/*/**,test/**/MiniStress.java,test/**/AbortStress.java,dist/,**/jca/README.txt,examples/com/**,src/com/sleepycat/je/rep/**,examples/je/rep/**,test/com/sleepycat/je/rep/**,test/standalone/scalability/**"
+             includes="src/**,examples/**,test/**,docs/**,ant/**,regress/,build.xml,build.properties,example.properties,README,LICENSE,FindBugsExclude.xml"/>
+        <zip basedir="${basedir}"
+             destfile="${zipfile}"
+             excludes="**/">
+             <zipfileset src="build.zip"
+                         prefix="je/"/>
+             <zipfileset src="src.zip"
+                         prefix="je/"/>
+        </zip>
+    </target>
+
+
+    <!-- ============================================================ -->
+    <!-- Testing                                                      -->
+    <!-- ============================================================ -->
+
+    <!-- ============================================================ -->
+    <!-- JUnit unit tests                                             -->
+    <!-- ============================================================ -->
+
+    <target name="init-unittest" depends="init">
+        <delete dir="${unittest.datadir}"/>
+        <delete dir="${unittest.reportsdir}"/>
+        <mkdir dir="${unittest.datadir}"/>
+        <mkdir dir="${unittest.reportsdir}"/>
+    </target>
+
+    <target name="clean-unittest" depends="init-unittest">
+        <delete dir="${unittest.destdir}"/>
+        <mkdir dir="${unittest.destdir}"/>
+        <delete dir="${unittest.testserialdir}"/>
+        <mkdir dir="${unittest.testserialdir}"/>
+        <delete dir="${unittest.testevolvedir}"/>
+        <mkdir dir="${unittest.testevolvedir}"/>
+    </target>
+
+    <target name="compile-unittest" depends="compile-src">
+      <ant antfile="ant/compile.xml" dir="." inheritall="false">
+        <property name="srcdir" value="${unittest.srcdir}"/>
+        <property name="destdir" value="${unittest.destdir}"/>
+        <reference refid="unittest.classpath" torefid="compile.classpath"/>
+      </ant>
+
+      <copy todir="${unittest.destdir}">
+        <fileset dir="${unittest.srcdir}" 
+	         excludes="**/*.java,experiments/**,examples/**"/>
+      </copy>
+
+      <!-- for testing je.properties -->
+      <copy file="${unittest.destdir}/com/sleepycat/je/je.properties"
+            todir="${unittest.extraenvdir}"/>
+      <!-- for testing rep.properties -->
+      <copy file="${unittest.destdir}/com/sleepycat/je/rep.properties"
+            tofile="${unittest.extraenvdir2}/je.properties"/>
+
+      <!-- Compile original version of TestSerial class separately. -->
+      <property name="testserialpath"
+                value="com/sleepycat/collections/test/serial/TestSerial"/>
+      <copy file="${unittest.srcdir}/${testserialpath}.java.original"
+            tofile="${unittest.testserialdir}/${testserialpath}.java"/>
+      <ant antfile="ant/compile.xml" dir="." inheritall="false">
+          <property name="srcdir" value="${unittest.testserialdir}"/>
+          <property name="destdir" value="${unittest.testserialdir}"/>
+          <reference refid="class.path" torefid="compile.classpath"/>
+      </ant>
+
+      <!-- Compile original version of EvolveClasses separately. -->
+      <property name="testevolvepath"
+                value="com/sleepycat/persist/test/EvolveClasses"/>
+      <copy file="${unittest.srcdir}/${testevolvepath}.java.original"
+            tofile="${unittest.testevolvedir}/${testevolvepath}.java"/>
+      <copy file=
+             "${unittest.srcdir}/com/sleepycat/persist/test/EvolveCase.java"
+            tofile=
+             "${unittest.testevolvedir}/com/sleepycat/persist/test/EvolveCase.java"/>
+      <copy file=
+             "${unittest.srcdir}/com/sleepycat/persist/test/PersistTestUtils.java"
+            tofile=
+             "${unittest.testevolvedir}/com/sleepycat/persist/test/PersistTestUtils.java"/>
+      <ant antfile="ant/compile.xml" dir="." inheritall="false">
+          <property name="srcdir" value="${unittest.testevolvedir}"/>
+          <property name="destdir" value="${unittest.testevolvedir}"/>
+          <reference refid="class.path" torefid="compile.classpath"/>
+      </ant>
+    </target>
+
+    <target name="compile-unittest-j2ee"
+            depends="compile, compile-j2ee, compile-jmxexamples">
+        <ant antfile="ant/compile.xml" dir="." inheritall="false">
+            <property name="srcdir" value="${unittest.srcdir}"/>
+            <property name="destdir" value="${unittest.destdir}"/>
+            <property name="include" value="com/sleepycat/je/jmx/**/*.java"/>
+            <property name="exclude2" value=""/>
+            <reference refid="unittest-j2ee.classpath"
+	               torefid="compile.classpath"/>
+        </ant>
+    </target>
+
+
+    <!-- Do one of the following:
+
+         ant test -Dtestcase=com.sleepycat.je.db.DbTest   (run one test)
+         ant test -Dsuite=db                           (run one package)
+         ant test                       (run all tests except rep tests)
+         ant test -Dtestcase=com.sleepycat.je.db.DbTest -Dtestjar=je.jar
+                                      (run one test with a specific jar)
+         ant test -Dsuite=db -Dtestjar=je.jar 
+                                   (run one package with a specific jar)
+         ant test -Dtestjar=je.jar   (run all tests with a specific jar) 
+         ant reptest              (run all com.sleepycat.je.rep.* tests)
+     -->
+
+    <target name="test-j2ee"
+            depends="compile-unittest-j2ee,test"/>
+
+    <target name="reptest">
+	 <antcall target="test">
+             <param name="param-rep" value="true"/>
+	 </antcall>
+    </target>
+
+    <path id="testserial-classpath">
+      <pathelement location="${unittest.testserialdir}"/>
+    </path>
+
+    <path id="testevolve-classpath">
+      <pathelement location="${unittest.testevolvedir}"/>
+    </path>
+
+    <target name="test"
+            depends="compile-unittest, init-unittest, jar">
+
+      <!-- Determine which tests to run. -->
+      <condition property="alltests">
+        <not>
+          <or>
+            <isset property="suite"/>
+            <isset property="testcase"/>
+            <isset property="param-rep"/>
+          </or>
+        </not>
+      </condition>
+      <condition property="testserial">
+        <or>
+          <isset property="alltests"/>
+          <contains string="${suite}" substring="serial"/>
+          <equals arg1="${testcase}" arg2=
+           "com.sleepycat.collections.test.serial.StoredClassCatalogTest"/>
+        </or>
+      </condition>
+      <condition property="testevolve">
+        <or>
+          <isset property="alltests"/>
+          <contains string="${suite}" substring="persist"/>
+          <equals arg1="${testcase}"
+                  arg2="com.sleepycat.persist.test.EvolveTest"/>
+        </or>
+      </condition>
+      <condition property="testpersist">
+        <or>
+          <isset property="alltests"/>
+          <contains string="${suite}" substring="persist"/>
+        </or>
+      </condition>
+
+      <!-- Performs initialization needed before StoredClassCatalogTest. -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run testserial initialization"/>
+        <param name="param-if" value="testserial"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="testserial-classpath"/>
+        <param name="param-testcase" value=
+         "com.sleepycat.collections.test.serial.StoredClassCatalogTestInit"/>
+      </antcall>
+
+      <!-- Performs initialization needed before persist evolve tests. -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run testevolve initialization"/>
+        <param name="param-if" value="testevolve"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="testevolve-classpath"/>
+        <param name="param-testcase"
+               value="com.sleepycat.persist.test.EvolveTestInit"/>
+      </antcall>
+
+      <!-- Run all unit tests except for replication.  -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run all unit tests"/>
+        <param name="param-if" value="alltests"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="empty.classpath"/>
+        <param name="param-includes" value="**/*Test.class"/>
+	<param name="param-excludes" value="**/rep/**/*Test.class"/>
+      </antcall>
+
+      <!-- Run a test suite.  -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run a test suite: ${suite}"/>
+        <param name="param-if" value="suite"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="empty.classpath"/>
+        <param name="param-includes" value="**/${suite}/*Test.class"/>
+      </antcall>
+
+      <!-- Run a single test case.  -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run a test case: ${testcase}"/>
+        <param name="param-if" value="testcase"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="empty.classpath"/>
+        <param name="param-testcase" value="${testcase}"/>
+      </antcall>
+
+      <!-- Run the persist test variants. -->
+      <antcall target="test-persist-enhance-javaagent"/>
+      <antcall target="test-persist-enhance-task"/>
+      <antcall target="test-persist-enhance-main"/>
+
+      <!-- Run the replication tests. -->
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run replication tests only"/>
+        <param name="param-if" value="param-rep"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="empty.classpath"/>
+        <param name="param-includes" value="**/rep/**/*Test.class"/>
+      </antcall>
+
+      <!-- Generate test reports -->
+
+      <junitreport todir="${unittest.datadir}">
+        <fileset dir="${unittest.datadir}">
+          <include name="TEST-*.xml"/>
+        </fileset>
+        <report format="frames"
+          todir="${unittest.reportsdir}"/>
+      </junitreport>
+    </target>
+
+    <!-- Run the persist tests with -javaagent to enhance classes. -->
+    <target name="test-persist-enhance-javaagent" if="testpersist">
+      <antcall target="do-junit">
+        <param name="param-msg" value="Run persist tests with -javaagent"/>
+        <param name="param-if" value="testpersist"/>
+        <param name="param-sysprop-key" value="expectEnhanced"/>
+        <param name="param-sysprop-value" value="true"/>
+        <param name="param-jvmarg"
+               value="-javaagent:${jarfile}=enhance:-v,com.sleepycat.persist"/>
+        <param name="param-classpath" value="empty.classpath"/>
+        <param name="param-includes"
+               value="com/sleepycat/persist/**/*Test.class"/>
+      </antcall>
+    </target>
+
+    <!-- Enhance persist test classes and run the persist tests again,
+         setting the expectEnhanced system property to cause the test to
+         fail if classes are not enhanced.  -->
+    <property name="enhance-task.tmpdir" value="${builddir}/enhance.task"/>
+    <path id="enhanced-task.classpath">
+      <pathelement location="${enhance-task.tmpdir}"/>
+    </path>
+    <target name="test-persist-enhance-task" if="testpersist">
+      <delete dir="${enhance-task.tmpdir}"/>
+      <mkdir dir="${enhance-task.tmpdir}"/>
+      <copy todir="${enhance-task.tmpdir}">
+        <fileset dir="${unittest.destdir}"
+                 includes="com/sleepycat/persist/**/*.class"/>
+      </copy>
+      <enhancer verbose="on">
+        <fileset dir="${enhance-task.tmpdir}"/>
+      </enhancer>
+      <antcall target="do-junit">
+        <param name="param-msg"
+               value="Run persist tests with ClassEnhancer ant task"/>
+        <param name="param-if" value="testpersist"/>
+        <param name="param-sysprop-key" value="expectEnhanced"/>
+        <param name="param-sysprop-value" value="true"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="enhanced-task.classpath"/>
+        <param name="param-includes"
+               value="com/sleepycat/persist/**/*Test.class"/>
+      </antcall>
+    </target>
+
+    <!-- Same as above but use the ClassEnhancer main program. -->
+    <property name="enhance-main.tmpdir" value="${builddir}/enhance.main"/>
+    <path id="enhanced-main.classpath">
+      <pathelement location="${enhance-main.tmpdir}"/>
+    </path>
+    <path id="enhanced-main.classpath">
+      <pathelement location="${enhance-main.tmpdir}"/>
+      <path refid="class.path"/>
+      <path refid="clover.classpath"/>
+    </path> 
+    <target name="test-persist-enhance-main" if="testpersist">
+      <delete dir="${enhance-main.tmpdir}"/>
+      <mkdir dir="${enhance-main.tmpdir}"/>
+      <copy todir="${enhance-main.tmpdir}">
+        <fileset dir="${unittest.destdir}"
+                 includes="com/sleepycat/persist/**/*.class"/>
+      </copy>
+      <java fork="yes" jvm="${jvm}"
+            classname="com.sleepycat.persist.model.ClassEnhancer">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-v ${enhance-main.tmpdir}"/>
+        <classpath refid="enhanced-main.classpath"/>
+      </java>
+      <antcall target="do-junit">
+        <param name="param-msg"
+               value="Run persist tests with ClassEnhancer main program"/>
+        <param name="param-if" value="testpersist"/>
+        <param name="param-sysprop-key" value="expectEnhanced"/>
+        <param name="param-sysprop-value" value="true"/>
+        <param name="param-jvmarg" value=""/>
+        <param name="param-classpath" value="enhanced-main.classpath"/>
+        <param name="param-includes"
+               value="com/sleepycat/persist/**/*Test.class"/>
+      </antcall>
+    </target>
+
+    <!-- Called via antcall above to run the junit task.
+     Specify one of these params:
+      param-includes: include pattern of class files to run as a batchtest
+      param-testcase: class name of a single test case to run
+     Required params:
+      param-msg: message printed to identify this test run
+      param-if: variable name that must be set to run tests
+      param-jvmarg: value of additional jvmarg, may be empty
+      param-classpath: refid of classpath to insert, may be empty.classpath
+     Optional params:
+      param-sysprop-key: key of additional sysproperty
+      param-sysprop-value: value of additional sysproperty
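+
+     See the antcall blocks in the "test" and "test-persist-enhance-*" targets
+     above for concrete invocations.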
+      -->
+    <target name="do-junit" if="${param-if}">
+      <echo message="${param-msg}"/>
+      <junit printsummary="false"
+             errorProperty="unittest.failed"
+             failureProperty="unittest.failed"
+	     haltOnFailure="false"
+             showoutput="true"
+             fork="yes">
+        <jvmarg value="-ea"/>
+	<!--
+        <jvmarg value="-d64"/>
+	-->
+        <jvmarg value="-Xmx256M"/>
+        <jvmarg line="${param-jvmarg}"/>
+        <classpath refid="${param-classpath}"/>
+        <classpath refid="unittest.classpath"/>
+        <classpath refid="clover.classpath"/>
+        <sysproperty key="testdestdir" value="${unittest.destdir}"/>
+        <sysproperty key="testevolvedir" value="${unittest.testevolvedir}"/>
+        <sysproperty key="txnnosync" value="true"/>
+	<sysproperty key="isolationLevel" value="${isolationLevel}"/>
+	<sysproperty key="setErrorListener" value="true"/>
+        <sysproperty key="longtest" value="${longtest}"/>
+        <sysproperty key="${param-sysprop-key}"
+                     value="${param-sysprop-value}"/>
+        <sysproperty key="java.util.logging.config.file"
+            value="${logging.config.file}"/>
+        <formatter type="plain" usefile="false"/>
+        <formatter type="xml"/>
+        <test todir="${unittest.datadir}"
+              if="param-testcase"
+              name="${param-testcase}"/>
+        <batchtest todir="${unittest.datadir}"
+                   if="param-includes">
+          <fileset dir="${unittest.destdir}" 
+	           includes="${param-includes}"
+                   excludes="${param-excludes}"/>
+        </batchtest>
+      </junit>
+      <fail message="UnitTests failed.Check log and/or reports.">
+        <condition>
+          <and>
+            <istrue value="${unittest.failed}"/>
+	    <isfalse value="${clover.ignorefailure}"/>
+	  </and>
+	</condition>
+      </fail>
+    </target>
+
+    <!-- Runs XxxTestMain test programs and other miscellaneous non-JUnit tests. -->
+    <target name="misctest" depends="compile-unittest, init-unittest">
+
+      <antcall target="do-testmain">
+        <param name="cls" value="je.test.SecondarySplitTestMain"/>
+      </antcall>
+
+      <!-- Special case JUnit test that only failed when run separately. -->
+      <junit printsummary="false"
+             showoutput="on"
+	     haltOnFailure="true"
+             fork="yes">
+        <jvmarg value="-ea"/>
+        <jvmarg value="-Xmx256M"/>
+        <classpath refid="unittest-j2ee.classpath"/>
+        <classpath refid="clover.classpath"/>
+        <sysproperty key="testdestdir" value="${unittest.destdir}"/>
+        <test name="com.sleepycat.je.dbi.SR12641"/>
+      </junit>
+
+    </target>
+
+    <target name="do-testmain">
+      <echo message="Running: ${cls}"/>
+      <java fork="yes" failonerror="true" jvm="${jvm}"
+            classname="com.sleepycat.${cls}">
+        <jvmarg value="-Xmx256M"/>
+        <jvmarg line="${jvmargs}"/>
+        <sysproperty key="testdestdir" value="${unittest.destdir}"/>
+        <classpath refid="unittest-j2ee.classpath"/>
+        <classpath refid="clover.classpath"/>
+      </java>
+    </target>
+
+    <!-- logversiondata is run at the end of each major release.  After running,
+         add the generated je-x.y.z.* files to CVS.  Be sure to use
+         -kb to add the .jdb file.  Then add a test_x_y_z() method to
+         com/sleepycat/je/logversion/LogEntryVersionTest.java.
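+         (For the release in dist/build.properties this would be test_3_3_98().)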
+    -->
+    <target name="logversiondata" depends="compile-unittest">
+      <java fork="yes" jvm="${jvm}"
+            classname="com.sleepycat.je.logversion.MakeLogEntryVersionData">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="${unittest.destdir}"/>
+        <classpath refid="unittest.classpath"/>
+      </java>
+      <copy todir="test/com/sleepycat/je/logversion">
+        <fileset dir="${unittest.destdir}">
+          <include name="je-*.jdb"/>
+          <include name="je-*.txt"/>
+        </fileset>
+      </copy>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Test CLI utils -->
+    <!-- ============================================================ -->
+
+    <target name="testcli"
+        depends="testcli-dbdump"/>
+
+    <target name="testcli-dbdump" depends="compile-src,compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SimpleExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp insert 5"/>
+        <classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="com.sleepycat.je.util.DbDump">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-f ./dump.out -h tmp -s simpleDb"/>
+        <classpath refid="class.path"/>
+      </java>
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="com.sleepycat.je.util.DbLoad">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-f ./dump.out -h tmp -s simpleDb"/>
+        <classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="com.sleepycat.je.util.DbDump">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp -r"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <!-- ========= Standalone test targets delegated to internal.xml ==========
+
+    Standalone tests are run by passing the testcase property as the name of
+    the target in internal.xml.  Other parameters for each target are passed
+    using the args property, for example:
+
+      ant -Dtestcase=MemoryStress -Dargs=-dups standalone
+
+    See the individual standalone targets in ant/internal.xml for details.
+    ======================================================================= -->
+    <target name="standalone" depends="jar">
+	<antcall target="do-internal">
+	   <param name="target" value="standalone"/>
+        </antcall>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Internal Use Only: Test examples -->
+    <!-- ============================================================ -->
+
+    <!-- testex runs all examples, but does not include testex-access because
+         it is interactive.  Does not test JCA examples.
+    -->
+    <target name="testex"
+        depends="testex-simple, testex-binding, testex-secondary,
+        testex-sequence, testex-tomany, testex-measureinsertsize,
+        testex-persist-person, testex-persist-dpldump,
+        testex-persist-customkeyorder, testex-persist-eventexample,
+        testex-persist-eventexampledpl, testex-persist-sqlapp,
+        testex-hello, testex-ship-basic, testex-ship-index, testex-ship-entity,
+        testex-ship-tuple, testex-ship-sentity, testex-ship-marshal,
+        testex-ship-factory"/>
+
+    <target name="testex-simple" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SimpleExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp insert 5"/>
+        <classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SimpleExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp retrieve 5"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-binding" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.BindingExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp insert 5"/>
+	<classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.BindingExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp retrieve 5"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-secondary" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SecondaryExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp insert 5"/>
+        <classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SecondaryExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp retrieve 5"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-sequence" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SequenceExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.SequenceExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-tomany" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.ToManyExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-measureinsertsize" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="je.MeasureInsertSize">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp -records 100 -key 10 -data 100"/>
+        <classpath refid="class.path"/>
+      </java>
+      <echo>MeasureInsertSize ran OK</echo>
+    </target>
+
+    <!-- Using fork="yes" does not work for AccessExample, apparently because
+         it is interactive and the input stream of the forked process isn't
+         functional; therefore this sample writes to the base directory.
+    -->
+    <target name="testex-access" depends="compile-examples">
+      <java fork="yes" jvm="${jvm}"
+            classname="collections.access.AccessExample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-persist-person" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.PersonExample">
+        <jvmarg value="-ea"/>
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <!--
+    Before running this example, first run another DPL example that writes to
+    the tmp directory, such as testex-persist-person.
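+    For example: ant testex-persist-person testex-persist-dpldump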
+    -->
+    <target name="testex-persist-dpldump" depends="compile-examples">
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.DplDump">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-persist-customkeyorder" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.CustomKeyOrderExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-persist-eventexample" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.EventExample">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-persist-eventexampledpl" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.EventExampleDPL">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-persist-sqlapp" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="persist.sqlapp.SQLApp">
+        <jvmarg line="${jvmargs}"/>
+        <arg line="-h tmp"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-hello" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.hello.HelloDatabaseWorld">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-basic" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.basic.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-index" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.index.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-entity" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.entity.Sample">
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-tuple" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.tuple.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-sentity" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.sentity.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-marshal" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.marshal.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+
+    <target name="testex-ship-factory" depends="compile-examples">
+      <delete dir="${builddir}/tmp"/>
+      <mkdir dir="${builddir}/tmp"/>
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="collections.ship.factory.Sample">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="class.path"/>
+      </java>
+    </target>
+        
+    <!-- testex-jejca runs all JCA examples.
+    -->
+    <target name="testex-jejca"
+        depends="testex-jejcasimple"/>
+
+    <target name="testex-jejcasimple" depends="compile-jcaexamples">
+      <java fork="yes" dir="${builddir}" jvm="${jvm}"
+            classname="jca.simple.SimpleClient">
+        <jvmarg line="${jvmargs}"/>
+        <classpath refid="jca.classpath"/>
+	<arg value="${key}"/>
+	<arg value="${data}"/>
+      </java>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Internal Use Only: Javadocs for Entire Source Tree           -->
+    <!-- ============================================================ -->
+    <target name="javadoc-all" depends="init">
+        <mkdir dir="${doc.dir}"/>
+        <javadoc packagenames="${packages}"
+                maxmemory="128M"
+                sourcepath="${srcdir}"
+                destdir="${doc.dir}"
+                classpathref="class.path"
+                version="true"
+                nodeprecated="false"
+		windowtitle="${name} Classes"
+                doctitle="${name}"
+                package="true"
+                source="1.5"
+                bottom="Copyright (c) 2004,2008 Oracle.  All rights reserved.">
+
+          <link href="http://java.sun.com/j2se/1.5/docs/api"/>
+        </javadoc>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Javadocs for the public api for distribution                 -->
+    <!-- This is a complex process that incorporates M4 processing of -->
+    <!-- documentation template files.                                -->
+    <!-- ============================================================ -->
+
+    <!-- Make sure we update the version in the build properties file -->
+    <path id="dist.classpath">
+        <path refid="class.path"/>
+	<pathelement location="${dist.destdir}"/>
+    </path>
+
+    <target name="update-version" depends="compile-src, compile-dist">
+       <!-- Create an up to date property file for the build -->
+       <java fork="yes" jvm="${jvm}"
+             classname="CreateRELEASEFile"
+	     classpathref="dist.classpath">
+          <jvmarg line="${jvmargs}"/>
+	  <arg value="${build.propertyfile}"/>
+       </java>
+
+       <!-- read it in -->
+       <property file="${build.propertyfile}"/>
+    </target>
+
+    <target name="clean-javadoc" depends="init">
+        <delete dir="${doc.dir}"/>
+    </target>
+
+    <!-- create javadoc -->
+    <target name="javadoc" depends="init, update-version, clean-javadoc">
+
+        <!-- create fresh directories --> 
+        <mkdir dir="${doc.dir}"/>
+        <mkdir dir="${doc.javadir}"/>
+
+        <!-- set the time -->
+        <tstamp>
+	   <format property="release.date" pattern="MMMMMMMM dd, yyyy"/>
+	</tstamp>
+
+	<!-- Copy the main page, images, and release notes to the
+             docs directory, putting in the release version where
+	     appropriate -->
+        <copy overwrite="true" todir="${doc.dir}">
+	   <filterset>
+	       <filter token="RELEASE_VERSION" value="${release.version}"/>
+	       <filter token="RELEASE_NUMERIC_VERSION"
+	               value="${release.numeric.version}"/>
+	       <filter token="DATE" value="${release.date}"/>
+	   </filterset>
+	   <fileset dir="${docsrc.dir}">
+               <include name="*.html"/>
+               <include name="*.css"/>
+           </fileset>
+        </copy>
+        <copy todir="${doc.dir}/images">
+	    <fileset dir="${docsrc.dir}/images"/>
+	</copy>
+        <copy todir="${doc.dir}/sleepycat">
+	    <fileset dir="${docsrc.dir}/sleepycat"/>
+	</copy>
+
+	<!-- Copy everything for the Getting Started Guide and the
+             Writing Transactional Applications Guide to the docs 
+             directory -->
+        <copy todir="${doc.dir}/GettingStartedGuide">
+	    <fileset dir="${docsrc.dir}/GettingStartedGuide">
+	    </fileset>
+	</copy>
+
+        <copy todir="${doc.dir}/TransactionGettingStarted">
+	    <fileset dir="${docsrc.dir}/TransactionGettingStarted">
+	    </fileset>
+	</copy>
+
+	<!-- Copy everything for the collections tutorial to the docs 
+             directory -->
+        <copy todir="${doc.dir}/collections">
+	    <fileset dir="${docsrc.dir}/collections">
+	    </fileset>
+        </copy>
+	
+	<antcall target="javadoc-src"/>
+    </target>
+
+    <available file="${docsrc.dir}" type="dir" property="UserOrMember"/>
+
+    <target name="copy-doc-materials" if="UserOrMember">
+        <copy overwrite="true" todir="${doclet.src.dir}">
+            <fileset dir="${docsrc.dir}/doclet"/>
+	</copy>
+
+	<copy overwrite="true" todir="${doc.dir}" file="${docsrc.dir}/style.css"/>
+    </target>
+
+    <target name="javadoc-src" depends="init, copy-doc-materials">
+	<antcall target="jar-hidingdoclet"/>
+	
+	<delete dir="${doc.dir}/java"/>
+	<mkdir dir="${doc.dir}/java"/>
+
+        <!-- Run javadoc on public API -->
+	<echo>Javadoc public api</echo>
+        <javadoc  sourcepath="${srcdir}"
+                destdir="${doc.javadir}"
+		version="true"
+                nodeprecated="false"
+		protected="true"
+                source="1.5"
+		use="true"
+		docletpath="docs/HidingDoclet.jar"
+		stylesheetfile="${doc.dir}/style.css"
+		windowtitle="Oracle - Berkeley DB Java Edition API">
+	  <classpath refid="j2ee.classpath"/>
+	  <classpath path="${ant.library.dir}/ant.jar"/>
+	  <arg value="-author"/>
+	  <doclet name="HidingDoclet"/>
+          <group title="Berkeley DB Java Edition Packages">
+              <package name="com.sleepycat.je*"/>
+              <package name="com.sleepycat.je.util"/>
+              <package name="com.sleepycat.je.jmx"/>
+              <package name="com.sleepycat.je.jca.ra"/>
+	  </group>
+          <group title="Berkeley DB Direct Persistence Layer (DPL) Packages">
+              <package name="com.sleepycat.persist"/>
+              <package name="com.sleepycat.persist.model"/>
+              <package name="com.sleepycat.persist.evolve"/>
+              <package name="com.sleepycat.persist.raw"/>
+	  </group>
+          <group title="Berkeley DB Bind and Collections Packages">
+              <package name="com.sleepycat.bind*"/>
+              <package name="com.sleepycat.collections*"/>
+              <package name="com.sleepycat.util*"/>
+	  </group>
+	  <header><![CDATA[<b>Berkeley DB Java Edition</b><br><font size="-1"> version ${release.version}</font>]]>
+	  </header>
+	  <bottom><![CDATA[<font size=1>Copyright (c) 2004,2008 Oracle.  All rights reserved.</font>]]> </bottom> 
+          <package name="com.sleepycat.je"/>
+          <package name="com.sleepycat.je.jmx"/>
+         <!-- We'd like to include the com.sleepycat.je.jca.ra package
+              and use the @hidden tag, but the doclet seems to have some
+              problems there. -->
+          <fileset dir="${srcdir}">
+	        <include name="com/sleepycat/je/jca/ra/JEConnection.java"/>
+	        <include name="com/sleepycat/je/jca/ra/JEConnectionFactory.java"/>	        
+	        <include name="com/sleepycat/je/jca/ra/JEException.java"/>
+          </fileset>
+          <package name="com.sleepycat.je.util"/>
+          <package name="com.sleepycat.collections"/>
+          <package name="com.sleepycat.bind.*"/>
+          <package name="com.sleepycat.persist"/>
+          <package name="com.sleepycat.persist.model"/>
+          <package name="com.sleepycat.persist.evolve"/>
+          <package name="com.sleepycat.persist.raw"/>
+          <package name="com.sleepycat.util"/>
+          <link href="http://java.sun.com/j2se/1.5/docs/api"/>
+          <link href="http://java.sun.com/javase/6/docs/api"/>
+        </javadoc>
+    </target>
+
+    <target name="install">
+    <description>Install JE into destdir</description>
+      <condition property="isWindows">
+         <os family="windows"/>
+      </condition>
+      <property file="dist/build.properties"/>
+      <echo message="Installing JE into ${installdestdir}"/>
+      <fail message="Install manually on Windows" if="isWindows"/>
+      <property name="installdir"
+             value="${installdestdir}/JE.${release.major}.${release.minor}"/>
+      <mkdir dir="${installdir}"/>
+      <mkdir dir="${installdir}/docs"/>
+      <mkdir dir="${installdir}/bin"/>
+      <mkdir dir="${installdir}/lib"/>
+      <copy todir="${installdir}/docs">
+         <fileset dir="docs">
+	   <include name="**/*"/>
+	 </fileset>
+      </copy>
+      <copy todir="${installdir}/lib">
+	 <fileset dir="lib"/>
+      </copy>
+      <copy todir="${installdir}/bin">
+	 <fileset dir="bin"/>
+      </copy>
+    </target>
+
+    <!-- ============================================================ -->
+    <!-- Clover, code coverage. To run with clover                    -->
+    <!--        set the clover.libdir property above.                 -->
+    <!--        make sure the clover.jar and clover.license are in    -->
+    <!--           the ant/lib directory                              -->
+    <!--        change inheritall to "true" for the compile-src      -->
+    <!--           target above,                                     -->
+    <!--        and change inheritall="false" in ant/internal.xml    -->
+    <!--               to inheritall="true".                         -->
+    <!--                                                              -->
+    <!--  To run a single test pass and generate a report, type      -->
+    <!--           ant clover.singletestdone                         -->
+    <!--  By default this runs the "test" target with no parameters; -->
+    <!--  to run a different configuration, change the second entry  -->
+    <!--  of the depends attribute of the "clover.singletestdone"    -->
+    <!--  target.                                                    -->
+    <!--                                                             -->
+    <!--  To run all the tests and generate a report, type           -->
+    <!--           ant clover.alltestsdone                           -->
+    <!-- ============================================================ -->
+    <target name="init-clover">
+      <delete dir="${clover.tmpdir}"/>
+      <mkdir dir="${clover.tmpdir}"/>
+    </target>
+
+    <target name="clean-clover" depends="init-clover">
+      <delete dir="${clover.tmpdir}"/>
+      <mkdir dir="${clover.tmpdir}"/>
+    </target>
+
+    <target name="clover.setup">
+      <antcall target="clean"/>
+      <antcall target="compile"/>
+      <antcall target="clean-src"/>
+      <taskdef resource="clovertasks"/>
+      <clover-setup initString="${clover.tmpdir}/jecoverage.db"/>
+      <antcall target="compile-src"/>
+    </target>
+
+    <target name="clover.runtest">
+      <taskdef resource="clovertasks"/>
+      <clover-setup initString="${clover.tmpdir}/jecoverage.db"/>
+      <antcall target="test"/>
+    </target>
+
+    <target name="clover.runtest.LruOnly">
+      <echo file="${unittest.srcdir}/je.properties" 
+            message="je.evictor.lruOnly=true"/>
+      <antcall target="clover.runtest"/>
+      <echo file="${unittest.srcdir}/je.properties" message=""/>
+    </target>
+
+    <target name="clover.runtest.SharedLatches">
+      <echo file="${unittest.srcdir}/je.properties" 
+            message="je.env.sharedLatches=true"/>
+      <antcall target="clover.runtest"/>
+      <echo file="${unittest.srcdir}/je.properties" message=""/>
+    </target>
+
+    <target name="clover.runtest.Isolation">
+      <echo file="${unittest.srcdir}/je.properties" 
+            message="je.txn.serializableIsolation=true"/>
+      <antcall target="clover.runtest"/>
+      <echo file="${unittest.srcdir}/je.properties" message=""/>
+    </target>
+
+    <target name="clover.runtest.DirectNIO">
+      <echo file="${unittest.srcdir}/je.properties">
+        je.log.useNIO=true
+        je.log.directNIO=true
+      </echo>
+      <antcall target="clover.runtest"/>
+      <echo file="${unittest.srcdir}/je.properties" message=""/>  
+    </target>
+
+    <target name="clover.runtest.ChunkNIO">
+      <echo file="${unittest.srcdir}/je.properties">
+        je.log.useNIO=true
+        je.log.chunkedNIO=4096
+      </echo>
+      <antcall target="clover.runtest"/>
+      <echo file="${unittest.srcdir}/je.properties" message=""/>
+    </target>
+	
+    <target name="clover.alltestsrun" 
+            depends="clover.setup, clover.runtest, 
+            clover.runtest.DirectNIO, clover.runtest.SharedLatches, 
+            clover.runtest.LruOnly, clover.runtest.Isolation,
+            clover.runtest.ChunkNIO">
+    </target>
+
+    <target name="clover.swing" depends="clover.alltestsrun">
+      <taskdef resource="clovertasks"/>
+      <clover-view/>
+    </target>
+
+    <target name="clover.log" depends="clover.alltestsrun">
+      <taskdef resource="clovertasks"/>
+      <clover-log/>
+    </target>
+
+    <target name="clover.pdf" depends="clover.alltestsrun">
+      <taskdef resource="clovertasks"/>
+      <clover-report>
+      	<current outfile="jecoverage.pdf">
+      	  <format type="pdf"/>
+        </current>
+      </clover-report>
+    </target>
+
+    <target name="clover.alltestsdone" depends="clover.alltestsrun">
+      <taskdef resource="clovertasks"/>
+      <clover-setup initString="${clover.tmpdir}/jecoverage.db"/>
+      <clover-report>
+        <current outfile="clover_html">
+          <format type="html"/>
+        </current>
+      </clover-report>
+    </target>
+
+    <target name="clover.singletestdone" depends="clover.setup,clover.runtest">
+      <taskdef resource="clovertasks"/>
+      <clover-setup initString="${clover.tmpdir}/jecoverage.db"/>
+      <clover-report>
+        <current outfile="clover_html">
+          <format type="html"/>
+        </current>
+      </clover-report>
+    </target>
+
+</project>
diff --git a/dist/build.properties b/dist/build.properties
new file mode 100644
index 0000000000000000000000000000000000000000..45d020d5f8bfda65a4a54527665c105ca7c4aeab
--- /dev/null
+++ b/dist/build.properties
@@ -0,0 +1,4 @@
+release.version=3.3.98
+release.numeric.version=3.3.98
+release.major=3
+release.minor=3
diff --git a/example.properties b/example.properties
new file mode 100644
index 0000000000000000000000000000000000000000..d0b0d0031163951d12c1c10f4db964a47809ff2e
--- /dev/null
+++ b/example.properties
@@ -0,0 +1,5 @@
+The example.properties file is no longer used to document the configuration
+parameters that are used with EnvironmentConfig and EnvironmentMutableConfig.
+Instead, String constants have been added to EnvironmentConfig that include this
+documentation.  When calling setConfigParam, these constants can be used to
+avoid hard-coding the parameter name.  See EnvironmentConfig for details.
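+
+For example, a fragment such as the following avoids hard-coding "je.maxMemory"
+(a sketch; it assumes the MAX_MEMORY constant, so consult the EnvironmentConfig
+javadoc for the actual constant names):
+
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    // MAX_MEMORY is assumed here; it corresponds to the je.maxMemory parameter.
+    envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, "10000000");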
diff --git a/examples/collections/access/AccessExample.java b/examples/collections/access/AccessExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..6b053abaf21d69d0167fcf3e217da90ed80baaa1
--- /dev/null
+++ b/examples/collections/access/AccessExample.java
@@ -0,0 +1,278 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997,2008 Oracle.  All rights reserved.
+ *
+ * $Id: AccessExample.java,v 1.26 2008/05/27 15:30:30 mark Exp $
+ */
+
+package collections.access;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ *  AccessExample mirrors the functionality of a class by the same name
+ * used to demonstrate the com.sleepycat.je Java API. This version makes
+ * use of the new com.sleepycat.collections.* collections style classes to make
+ * life easier.
+ *
+ *@author     Gregory Burd <gburd@sleepycat.com>
+ *@created    October 22, 2002
+ */
+public class AccessExample
+         implements Runnable {
+
+    // Class Variables of AccessExample class
+    private static boolean create = true;
+    private static final int EXIT_FAILURE = 1;
+
+    public static void usage() {
+
+	System.out.println("usage: java " + AccessExample.class.getName() +
+            " [-r] [database]\n");
+	System.exit(EXIT_FAILURE);
+    }
+
+    /**
+     *  The main program for the AccessExample class
+     *
+     *@param  argv  The command line arguments
+     */
+    public static void main(String[] argv) {
+
+	boolean removeExistingDatabase = false;
+	String databaseName = "access.db";
+
+	for (int i = 0; i < argv.length; i++) {
+	    if (argv[i].equals("-r")) {
+		removeExistingDatabase = true;
+	    } else if (argv[i].equals("-?")) {
+		usage();
+	    } else if (argv[i].startsWith("-")) {
+		usage();
+	    } else {
+		if ((argv.length - i) != 1)
+		    usage();
+		databaseName = argv[i];
+		break;
+	    }
+	}
+
+        try {
+
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+            envConfig.setTransactional(true);
+            if (create) {
+                envConfig.setAllowCreate(true);
+            }
+            Environment env = new Environment(new File("."), envConfig);
+	    // Remove the previous database.
+	    if (removeExistingDatabase) {
+                env.removeDatabase(null, databaseName);
+            }
+
+            // create the app and run it
+            AccessExample app = new AccessExample(env, databaseName);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(1);
+        } catch (FileNotFoundException e) {
+            e.printStackTrace();
+            System.exit(1);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+        System.exit(0);
+    }
+
+
+    private Database db;
+    private SortedMap<byte[], byte[]> map;
+    private Environment env;
+
+
+    /**
+     *  Constructor for the AccessExample object
+     *
+     *@param  env            Description of the Parameter
+     *@exception  Exception  Description of the Exception
+     */
+    public AccessExample(Environment env, String databaseName)
+	throws Exception {
+
+        this.env = env;
+
+        //
+        // Let's mimic the db.AccessExample 100%
+        // and use plain old byte arrays to store the key and data strings.
+        //
+        ByteArrayBinding keyBinding = new ByteArrayBinding();
+        ByteArrayBinding dataBinding = new ByteArrayBinding();
+
+        //
+        // Open a data store.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        if (create) {
+            dbConfig.setAllowCreate(true);
+        }
+        this.db = env.openDatabase(null, databaseName, dbConfig);
+
+        //
+        // Now create a collection style map view of the data store
+        // so that it is easy to work with the data in the database.
+        //
+        this.map = new StoredSortedMap<byte[], byte[]>
+            (db, keyBinding, dataBinding, true);
+    }
+
+
+    /**
+     *  Main processing method for the AccessExample object
+     */
+    public void run() {
+        //
+        // Insert records into a Stored Sorted Map DatabaseImpl, where
+        // the key is the user input and the data is the user input
+        // in reverse order.
+        //
+        final InputStreamReader reader = new InputStreamReader(System.in);
+
+        for (; ; ) {
+            final String line = askForLine(reader, System.out, "input> ");
+            if (line == null) {
+                break;
+            }
+
+            final String reversed =
+		(new StringBuffer(line)).reverse().toString();
+
+            log("adding: \"" +
+		line + "\" : \"" +
+		reversed + "\"");
+
+            // Do the work to add the key/data to the HashMap here.
+            TransactionRunner tr = new TransactionRunner(env);
+            try {
+                tr.run(
+		       new TransactionWorker() {
+			   public void doWork() {
+                               try {
+                                   if (!map.containsKey(line.getBytes("UTF-8")))
+                                       map.put(line.getBytes("UTF-8"),
+                                               reversed.getBytes("UTF-8"));
+                                   else
+                                       System.out.println("Key " + line +
+                                                          " already exists.");
+                               } catch (Exception e) {
+                                   System.err.println("doWork: " + e);
+                               }
+			   }
+		       });
+            } catch (com.sleepycat.je.DatabaseException e) {
+                System.err.println("AccessExample: " + e);
+                System.exit(1);
+            } catch (java.lang.Exception e) {
+                System.err.println("AccessExample: " + e);
+                System.exit(1);
+            }
+        }
+        System.out.println("");
+
+        // Do the work to traverse and print the HashMap key/data
+        // pairs here: get an iterator over the map entries.
+        Iterator<Map.Entry<byte[], byte[]>> iter = map.entrySet().iterator();
+        System.out.println("Reading data");
+        while (iter.hasNext()) {
+            Map.Entry<byte[], byte[]> entry = iter.next();
+            log("found \"" +
+                new String(entry.getKey()) +
+                "\" key with data \"" +
+                new String(entry.getValue()) + "\"");
+        }
+    }
+
+
+    /**
+     *  Prompts for a line, and keeps prompting until a non-blank line is
+     *  returned. Returns null on error.
+     *
+     *@param  reader  stream from which to read user input
+     *@param  out     stream on which to prompt for user input
+     *@param  prompt  prompt to use to solicit input
+     *@return         the string supplied by the user
+     */
+    String askForLine(InputStreamReader reader, PrintStream out,
+                      String prompt) {
+
+        String result = "";
+        while (result != null && result.length() == 0) {
+            out.print(prompt);
+            out.flush();
+            result = getLine(reader);
+        }
+        return result;
+    }
+
+
+    /**
+     *  Reads a single line. Not terribly efficient, but does the job. Works
+     *  for reading a line from stdin or a file.
+     *
+     *@param  reader  stream from which to read the line
+     *@return         either a String, or null on EOF; if EOF appears in the
+     *      middle of a line, returns that line, then null on the next call.
+     */
+    String getLine(InputStreamReader reader) {
+
+        StringBuffer b = new StringBuffer();
+        int c;
+        try {
+            while ((c = reader.read()) != -1 && c != '\n') {
+                if (c != '\r') {
+                    b.append((char) c);
+                }
+            }
+        } catch (IOException ioe) {
+            c = -1;
+        }
+
+        if (c == -1 && b.length() == 0) {
+            return null;
+        } else {
+            return b.toString();
+        }
+    }
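+
+    /*
+     * Illustrative sketch, not part of the original example: the same thing
+     * can be done with a java.io.BufferedReader, which reads more efficiently
+     * than one character at a time.  The method name is hypothetical and the
+     * EOF-in-mid-line behavior of getLine() above is not reproduced exactly.
+     */
+    String getLineBuffered(java.io.BufferedReader reader) {
+
+        try {
+            // readLine() strips the terminator and returns null at EOF.
+            return reader.readLine();
+        } catch (IOException ioe) {
+            return null;
+        }
+    }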
+
+
+    /**
+     *  A simple log method.
+     *
+     *@param  s  The string to be logged.
+     */
+    private void log(String s) {
+
+        System.out.println(s);
+        System.out.flush();
+    }
+}
diff --git a/examples/collections/hello/HelloDatabaseWorld.java b/examples/collections/hello/HelloDatabaseWorld.java
new file mode 100644
index 0000000000000000000000000000000000000000..8c7184ad8a58073ebcbc4cbcac881c4e4ae8f0de
--- /dev/null
+++ b/examples/collections/hello/HelloDatabaseWorld.java
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: HelloDatabaseWorld.java,v 1.27.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.hello;
+
+import java.io.File;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * @author Mark Hayes
+ */
+public class HelloDatabaseWorld implements TransactionWorker {
+
+    private static final String[] INT_NAMES = {
+        "Hello", "Database", "World",
+    };
+    private static boolean create = true;
+
+    private Environment env;
+    private ClassCatalog catalog;
+    private Database db;
+    private SortedMap<Integer, String> map;
+
+    /** Creates the environment and runs a transaction */
+    public static void main(String[] argv)
+        throws Exception {
+
+        String dir = "./tmp";
+
+        // environment is transactional
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        if (create) {
+            envConfig.setAllowCreate(true);
+        }
+        Environment env = new Environment(new File(dir), envConfig);
+
+        // create the application and run a transaction
+        HelloDatabaseWorld worker = new HelloDatabaseWorld(env);
+        TransactionRunner runner = new TransactionRunner(env);
+        try {
+            // open and access the database within a transaction
+            runner.run(worker);
+        } finally {
+            // close the database outside the transaction
+            worker.close();
+        }
+    }
+
+    /** Creates the database for this application */
+    private HelloDatabaseWorld(Environment env)
+        throws Exception {
+
+        this.env = env;
+        open();
+    }
+
+    /** Performs work within a transaction. */
+    public void doWork()
+        throws Exception {
+
+        writeAndRead();
+    }
+
+    /** Opens the database and creates the Map. */
+    private void open()
+        throws Exception {
+
+        // use a generic database configuration
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        if (create) {
+            dbConfig.setAllowCreate(true);
+        }
+
+        // catalog is needed for serial bindings (java serialization)
+        Database catalogDb = env.openDatabase(null, "catalog", dbConfig);
+        catalog = new StoredClassCatalog(catalogDb);
+
+        // use Integer tuple binding for key entries
+        TupleBinding<Integer> keyBinding =
+            TupleBinding.getPrimitiveBinding(Integer.class);
+
+        // use String serial binding for data entries
+        SerialBinding<String> dataBinding =
+            new SerialBinding<String>(catalog, String.class);
+
+        this.db = env.openDatabase(null, "helloworld", dbConfig);
+
+        // create a map view of the database
+        this.map = new StoredSortedMap<Integer, String>
+            (db, keyBinding, dataBinding, true);
+    }
+
+    /** Closes the database. */
+    private void close()
+        throws Exception {
+
+        if (catalog != null) {
+            catalog.close();
+            catalog = null;
+        }
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /** Writes and reads the database via the Map. */
+    private void writeAndRead() {
+
+        // check for existing data
+        Integer key = new Integer(0);
+        String val = map.get(key);
+        if (val == null) {
+            System.out.println("Writing data");
+            // write in reverse order to show that keys are sorted
+            for (int i = INT_NAMES.length - 1; i >= 0; i -= 1) {
+                map.put(new Integer(i), INT_NAMES[i]);
+            }
+        }
+        // get iterator over map entries
+        Iterator<Map.Entry<Integer, String>> iter = map.entrySet().iterator();
+        System.out.println("Reading data");
+        while (iter.hasNext()) {
+            Map.Entry<Integer, String> entry = iter.next();
+            System.out.println(entry.getKey().toString() + ' ' +
+                               entry.getValue());
+        }
+    }
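+
+    /*
+     * Illustrative sketch, not part of the original example: because the key
+     * binding is an Integer tuple binding, the StoredSortedMap view is sorted
+     * by key value, so the usual SortedMap operations also apply.  The method
+     * name is hypothetical and is not called by the sample.
+     */
+    private void printRange() {
+
+        System.out.println("First key: " + map.firstKey());
+        System.out.println("Last key: " + map.lastKey());
+        // Sub-range view of all entries with keys less than 2.
+        for (Map.Entry<Integer, String> entry : map.headMap(2).entrySet()) {
+            System.out.println(entry.getKey() + " " + entry.getValue());
+        }
+    }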
+}
diff --git a/examples/collections/ship/basic/PartData.java b/examples/collections/ship/basic/PartData.java
new file mode 100644
index 0000000000000000000000000000000000000000..13cecaf7e95089737a5bd935ad3a59298b806c31
--- /dev/null
+++ b/examples/collections/ship/basic/PartData.java
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartData.java,v 1.15.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A PartData serves as the data in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartData is used both as the storage entry for the
+ * data as well as the object binding to the data.  Because it is used
+ * directly as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartData implements Serializable {
+
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city) {
+
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[PartData: name=" + name +
+	    " color=" + color +
+	    " weight=" + weight +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/PartKey.java b/examples/collections/ship/basic/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..e8e88edf64b9aa899b83487cfb1d9d3c9c7d1e9b
--- /dev/null
+++ b/examples/collections/ship/basic/PartKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.13.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is used both as the storage entry for the key as
+ * well as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey implements Serializable {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/Sample.java b/examples/collections/ship/basic/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..5f1c64782899ae1d3dac95bc65a1dbfafb6ed807
--- /dev/null
+++ b/examples/collections/ship/basic/Sample.java
@@ -0,0 +1,254 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.19.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.basic.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. The home directory must exist
+ * before running the sample.  To recreate the sample database from scratch,
+ * delete all files in the home directory before running the sample.  </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
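+
+    /*
+     * Illustrative sketch, not part of the original example: a one-off unit
+     * of work can also be passed to TransactionRunner as an anonymous
+     * TransactionWorker.  run() begins a transaction, calls doWork(), commits,
+     * and retries the transaction if a deadlock is detected.  The method name
+     * is hypothetical and is not called by the sample.
+     */
+    private void removePart(final PartKey partKey) throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new TransactionWorker() {
+            public void doWork() {
+                views.getPartMap().remove(partKey);
+            }
+        });
+    }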
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed.
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printEntries("Parts",
+			 views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+			 views.getSupplierEntrySet().iterator());
+            printEntries("Shipments",
+			 views.getShipmentEntrySet().iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part map is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Map parts = views.getPartMap();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.put(new PartKey("P1"),
+                      new PartData("Nut", "Red",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P2"),
+                      new PartData("Bolt", "Green",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P3"),
+                      new PartData("Screw", "Blue",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Rome"));
+            parts.put(new PartKey("P4"),
+                      new PartData("Screw", "Red",
+                                    new Weight(14.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P5"),
+                      new PartData("Cam", "Blue",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P6"),
+                      new PartData("Cog", "Red",
+                                    new Weight(19.0, Weight.GRAMS),
+                                    "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier map is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Map suppliers = views.getSupplierMap();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.put(new SupplierKey("S1"),
+                          new SupplierData("Smith", 20, "London"));
+            suppliers.put(new SupplierKey("S2"),
+                          new SupplierData("Jones", 10, "Paris"));
+            suppliers.put(new SupplierKey("S3"),
+                          new SupplierData("Blake", 30, "Paris"));
+            suppliers.put(new SupplierKey("S4"),
+                          new SupplierData("Clark", 20, "London"));
+            suppliers.put(new SupplierKey("S5"),
+                          new SupplierData("Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment map
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Map shipments = views.getShipmentMap();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.put(new ShipmentKey("P1", "S1"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P3", "S1"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P4", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P5", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P6", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P1", "S2"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S2"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P2", "S3"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P2", "S4"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P4", "S4"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P5", "S4"),
+                          new ShipmentData(400));
+        }
+    }
+
+    /**
+     * Print the key/value objects returned by an iterator of Map.Entry
+     * objects.
+     */
+    private void printEntries(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            Map.Entry entry = (Map.Entry) iterator.next();
+            System.out.println(entry.getKey().toString());
+            System.out.println(entry.getValue().toString());
+        }
+    }
+}
diff --git a/examples/collections/ship/basic/SampleDatabase.java b/examples/collections/ship/basic/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2fa8e55b54ba43c1422c5dee755de4006efff87
--- /dev/null
+++ b/examples/collections/ship/basic/SampleDatabase.java
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.26.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * SampleDatabase defines the storage containers and the class catalog for
+ * the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+    }
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Close all databases and the environment.
+     */
+    public void close()
+        throws DatabaseException {
+
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+}
diff --git a/examples/collections/ship/basic/SampleViews.java b/examples/collections/ship/basic/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..1d53225d1d888e7dddedf4bebe9e74cbfa993dd5
--- /dev/null
+++ b/examples/collections/ship/basic/SampleViews.java
@@ -0,0 +1,122 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.16.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredMap;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredMap partMap;
+    private StoredMap supplierMap;
+    private StoredMap shipmentMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // In this sample, the stored key and data entries are used directly
+        // rather than mapping them to separate objects. Therefore, no binding
+        // classes are defined here and the SerialBinding class is used.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntryBinding partDataBinding =
+            new SerialBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntryBinding supplierDataBinding =
+            new SerialBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntryBinding shipmentDataBinding =
+            new SerialBinding(catalog, ShipmentData.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is not used since the stores and indices are
+        // ordered by serialized key objects, which do not provide a very
+        // useful ordering.
+        //
+        partMap =
+            new StoredMap(db.getPartDatabase(),
+                          partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredMap(db.getSupplierDatabase(),
+                          supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredMap(db.getShipmentDatabase(),
+                          shipmentKeyBinding, shipmentDataBinding, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredMap and StoredEntrySet
+    // classes, which provide additional methods.  The entry sets could be
+    // obtained directly from the Map.entrySet() method, but convenience
+    // methods are provided here to return them in order to avoid down-casting
+    // elsewhere.
+
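+    /*
+     * Illustrative sketch, not part of the original example: a single entity
+     * can be looked up through the plain java.util.Map interface, since a
+     * serialized key with equal field values maps to the same stored key
+     * bytes.  The method name is hypothetical.
+     */
+    public final PartData getPart(String partNumber) {
+
+        return (PartData) partMap.get(new PartKey(partNumber));
+    }
+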
+    /**
+     * Return a map view of the part storage container.
+     */
+    public final StoredMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public final StoredMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public final StoredMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entry set view of the part storage container.
+     */
+    public final StoredEntrySet getPartEntrySet() {
+
+        return (StoredEntrySet) partMap.entrySet();
+    }
+
+    /**
+     * Return an entry set view of the supplier storage container.
+     */
+    public final StoredEntrySet getSupplierEntrySet() {
+
+        return (StoredEntrySet) supplierMap.entrySet();
+    }
+
+    /**
+     * Return an entry set view of the shipment storage container.
+     */
+    public final StoredEntrySet getShipmentEntrySet() {
+
+        return (StoredEntrySet) shipmentMap.entrySet();
+    }
+}
diff --git a/examples/collections/ship/basic/ShipmentData.java b/examples/collections/ship/basic/ShipmentData.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6436f17c33dfa8b886e95037fe0f13d5af43b38
--- /dev/null
+++ b/examples/collections/ship/basic/ShipmentData.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentData.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentData serves as the data in the key/data pair for a shipment
+ * entity.
+ *
+ * <p> In this sample, ShipmentData is used both as the storage entry for the
+ * data as well as the object binding to the data.  Because it is used
+ * directly as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentData implements Serializable {
+
+    private int quantity;
+
+    public ShipmentData(int quantity) {
+
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/ShipmentKey.java b/examples/collections/ship/basic/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..47e6887c878b1008c5e5597f621f3aac993138e8
--- /dev/null
+++ b/examples/collections/ship/basic/ShipmentKey.java
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is used both as the storage entry for the
+ * key as well as the object binding to the key.  Because it is used directly
+ * as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey implements Serializable {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+	    " part=" + partNumber + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/SupplierData.java b/examples/collections/ship/basic/SupplierData.java
new file mode 100644
index 0000000000000000000000000000000000000000..7051737b35c88bf6a691e98fc55b3785a9c7d410
--- /dev/null
+++ b/examples/collections/ship/basic/SupplierData.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierData.java,v 1.15.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierData serves as the data in the key/data pair for a supplier
+ * entity.
+ *
+ * <p> In this sample, SupplierData is used both as the storage entry for the
+ * data as well as the object binding to the data.  Because it is used
+ * directly as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierData implements Serializable {
+
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city) {
+
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[SupplierData: name=" + name +
+	    " status=" + status +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/SupplierKey.java b/examples/collections/ship/basic/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..1479a95522a1b3f6350ca4b941c1ba6c9aaeae5a
--- /dev/null
+++ b/examples/collections/ship/basic/SupplierKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p>In this sample, SupplierKey is used both as the storage entry for the key
+ * as well as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey implements Serializable {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/basic/Weight.java b/examples/collections/ship/basic/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..ae207a1de223dea2fe668b7b2e264b8b6cf1f852
--- /dev/null
+++ b/examples/collections/ship/basic/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Serial serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/Part.java b/examples/collections/ship/entity/Part.java
new file mode 100644
index 0000000000000000000000000000000000000000..95c46cf3bcd3aa738427cb0c5d3ecf14823f2833
--- /dev/null
+++ b/examples/collections/ship/entity/Part.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Part.java,v 1.13.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+/**
+ * A Part represents the combined key/data pair for a part entity.
+ *
+ * <p>In this sample, Part is created from the stored key/data entry using a
+ * SerialSerialBinding.  See {@link SampleViews.PartBinding} for details.
+ * Since this class is not used directly for data storage, it does not need to
+ * be Serializable.</p>
+ *
+ * @author Mark Hayes
+ */
+public class Part {
+
+    private String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city) {
+
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/PartData.java b/examples/collections/ship/entity/PartData.java
new file mode 100644
index 0000000000000000000000000000000000000000..d659f865a340a6908dad5440501e0929b0831df5
--- /dev/null
+++ b/examples/collections/ship/entity/PartData.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartData.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A PartData serves as the value in the key/value pair for a part entity.
+ *
+ * <p> In this sample, PartData is used only as the storage data for the
+ * value, while the Part object is used as the value's object representation.
+ * Because it is used directly as storage data using serial format, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartData implements Serializable {
+
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city) {
+
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[PartData: name=" + name +
+	    " color=" + color +
+	    " weight=" + weight +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/PartKey.java b/examples/collections/ship/entity/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..54d8fbe1263c94b72a4ba20a5b95f7182480c307
--- /dev/null
+++ b/examples/collections/ship/entity/PartKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.13.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is used both as the storage entry for the key as
+ * well as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey implements Serializable {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/Sample.java b/examples/collections/ship/entity/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..3a8f470838c78a4c63c65c5f92b0d8b13586b3b6
--- /dev/null
+++ b/examples/collections/ship/entity/Sample.java
@@ -0,0 +1,236 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.20.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.entity.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. The home directory must exist
+ * before running the sample.  To recreate the sample database from scratch,
+ * delete all files in the home directory before running the sample.  </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of the duplicates() method on the index maps.  It is
+     * used here to find the suppliers and shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printValues("Parts",
+			views.getPartSet().iterator());
+            printValues("Suppliers",
+			views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                        views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+			views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                        views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                        views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
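+
+    /*
+     * Illustrative sketch, not part of the original example: the duplicates()
+     * view returned by an index map is an ordinary java.util.Collection, so
+     * it can be queried as well as iterated.  The method name is hypothetical
+     * and is not called by the sample.
+     */
+    private int countShipmentsForPart(String partNumber) {
+
+        return views.getShipmentByPartMap()
+                    .duplicates(new PartKey(partNumber)).size();
+    }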
+
+    /**
+     * Populate the part entities in the database.  If the part set is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Set parts = views.getPartSet();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+			       new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+			       new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+			       new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+			       new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+			       new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+			       new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier set is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment set
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of entity value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
diff --git a/examples/collections/ship/entity/SampleDatabase.java b/examples/collections/ship/entity/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..cda34e489431f7fed9756de8579a8a71ef02980c
--- /dev/null
+++ b/examples/collections/ship/entity/SampleDatabase.java
@@ -0,0 +1,322 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.28.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+	"shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(
+            new SupplierByCityKeyCreator(javaCatalog,
+                                         SupplierKey.class,
+                                         SupplierData.class,
+                                         String.class));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentByPartKeyCreator(javaCatalog,
+                                         ShipmentKey.class,
+                                         ShipmentData.class,
+                                         PartKey.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentBySupplierKeyCreator(javaCatalog,
+                                             ShipmentKey.class,
+                                             ShipmentData.class,
+                                             SupplierKey.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
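+
+    /*
+     * Illustrative sketch, not part of the original example: because the
+     * shipment indices above use ForeignKeyDeleteAction.CASCADE, deleting a
+     * part or supplier record from its primary store also deletes the
+     * shipment records that refer to it.  The helper below is hypothetical;
+     * the key entry would be built with the same serial key binding used
+     * elsewhere in the sample.
+     */
+    private void deletePartAndItsShipments(com.sleepycat.je.DatabaseEntry key)
+        throws DatabaseException {
+
+        // Shipments referencing this part are removed by the CASCADE action.
+        partDb.delete(null, key);
+    }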
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all databases and the environment.  Secondary index databases
+     * are closed before their associated primary databases.
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+
+    /**
+     * The SecondaryKeyCreator for the SupplierByCity index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys and value are all
+     * of the serial format.
+     */
+    private static class SupplierByCityKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the city key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the supplier key class.
+         * @param valueClass is the supplier value class.
+         * @param indexKeyClass is the city key class.
+         */
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class<SupplierKey> primaryKeyClass,
+                                         Class<SupplierData> valueClass,
+                                         Class<String> indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the city key from a supplier key/value pair.  The city key
+         * is stored in the supplier value, so the supplier key is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            SupplierData supplierData = (SupplierData) valueInput;
+            return supplierData.getCity();
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentByPart index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys and value are all
+     * of the serial format.
+     */
+    private static class ShipmentByPartKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the part key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the shipment key class.
+         * @param valueClass is the shipment value class.
+         * @param indexKeyClass is the part key class.
+         */
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class<ShipmentKey> primaryKeyClass,
+                                         Class<ShipmentData> valueClass,
+                                         Class<PartKey> indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the part key from a shipment key/value pair.  The part key
+         * is stored in the shipment key, so the shipment value is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new PartKey(shipmentKey.getPartNumber());
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentBySupplier index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which
+     * implements SecondaryKeyCreator for the case where the primary key,
+     * value, and index key all use the serial format.
+     */
+    private static class ShipmentBySupplierKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the supplier key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the shipment key class.
+         * @param valueClass is the shipment value class.
+         * @param indexKeyClass is the supplier key class.
+         */
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class<ShipmentKey> primaryKeyClass,
+                                             Class<ShipmentData> valueClass,
+                                             Class<SupplierKey> indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the supplier key from a shipment key/value pair.  The
+         * supplier key is stored in the shipment key, so the shipment value
+         * is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new SupplierKey(shipmentKey.getSupplierNumber());
+        }
+    }
+}
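
The SerialSerialKeyCreator pattern used by the three key creators above can be summarized in a minimal sketch. The class below is hypothetical (a part-by-city index that the sample does not define) and only illustrates the division of labor: the abstract superclass handles all serialization, and a subclass merely maps the deserialized key/value objects to an index key object.

    package collections.ship.entity;

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialSerialKeyCreator;

    // Hypothetical key creator for a part-by-city index (illustration only).
    class PartByCityKeyCreator extends SerialSerialKeyCreator {

        PartByCityKeyCreator(ClassCatalog catalog) {
            super(catalog, PartKey.class, PartData.class, String.class);
        }

        // The city is stored in the part value, so the primary key is unused.
        public Object createSecondaryKey(Object primaryKeyInput,
                                         Object valueInput) {
            return ((PartData) valueInput).getCity();
        }
    }
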
diff --git a/examples/collections/ship/entity/SampleViews.java b/examples/collections/ship/entity/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..4a511d8d4b516c1a4fb42adde8f65ad091e0c8ca
--- /dev/null
+++ b/examples/collections/ship/entity/SampleViews.java
@@ -0,0 +1,306 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.20.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredValueSet;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Create the data bindings.
+        // In this sample, EntityBinding classes are used to bind the stored
+        // key/data entry pair to a combined data object.  For keys, however,
+        // the stored entry is used directly via a SerialBinding and no
+        // special binding class is needed.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        SerialBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, PartKey.class, PartData.class);
+        SerialBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, SupplierKey.class,
+                                SupplierData.class);
+        SerialBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, ShipmentKey.class,
+                                ShipmentData.class);
+        SerialBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used here, but the stores and indices are
+        // ordered by serialized key objects, which do not provide a very
+        // useful ordering, so the sorted-map range methods are of limited
+        // use.
+        //
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+                                partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+                                supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+                                shipmentKeyBinding, shipmentDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                                partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                                supplierKeyBinding, shipmentDataBinding, true);
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                                cityKeyBinding, supplierDataBinding, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredValueSet classes, which provide additional methods.  The entity
+    // sets could be obtained directly from the Map.values() method but
+    // convenience methods are provided here to return them in order to avoid
+    // down-casting elsewhere.
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entity set view of the part storage container.
+     */
+    public StoredValueSet getPartSet() {
+
+        return (StoredValueSet) partMap.values();
+    }
+
+    /**
+     * Return an entity set view of the supplier storage container.
+     */
+    public StoredValueSet getSupplierSet() {
+
+        return (StoredValueSet) supplierMap.values();
+    }
+
+    /**
+     * Return an entity set view of the shipment storage container.
+     */
+    public StoredValueSet getShipmentSet() {
+
+        return (StoredValueSet) shipmentMap.values();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+
+    /**
+     * PartBinding is used to bind the stored key/data entry pair for a part
+     * to a combined data object (entity).
+     */
+    private static class PartBinding extends SerialSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private PartBinding(ClassCatalog classCatalog,
+                            Class keyClass,
+                            Class dataClass) {
+
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(Object keyInput, Object dataInput) {
+
+            PartKey key = (PartKey) keyInput;
+            PartData data = (PartData) dataInput;
+            return new Part(key.getNumber(), data.getName(), data.getColor(),
+                            data.getWeight(), data.getCity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public Object objectToKey(Object object) {
+
+            Part part = (Part) object;
+            return new PartKey(part.getNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                 part.getWeight(), part.getCity());
+        }
+    }
+
+    /**
+     * SupplierBinding is used to bind the stored key/data entry pair for a
+     * supplier to a combined data object (entity).
+     */
+    private static class SupplierBinding extends SerialSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private SupplierBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass) {
+
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(Object keyInput, Object dataInput) {
+
+            SupplierKey key = (SupplierKey) keyInput;
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(key.getNumber(), data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public Object objectToKey(Object object) {
+
+            Supplier supplier = (Supplier) object;
+            return new SupplierKey(supplier.getNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(), supplier.getStatus(),
+                                     supplier.getCity());
+        }
+    }
+
+    /**
+     * ShipmentBinding is used to bind the stored key/data entry pair for a
+     * shipment to a combined data object (entity).
+     */
+    private static class ShipmentBinding extends SerialSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private ShipmentBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass) {
+
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(Object keyInput, Object dataInput) {
+
+            ShipmentKey key = (ShipmentKey) keyInput;
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(key.getPartNumber(), key.getSupplierNumber(),
+                                data.getQuantity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public Object objectToKey(Object object) {
+
+            Shipment shipment = (Shipment) object;
+            return new ShipmentKey(shipment.getPartNumber(),
+                                   shipment.getSupplierNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+}
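
Taken together, the database and view classes let application code work purely with entity objects. A minimal driver sketch (assuming, as with the factory example later in this patch, that SampleDatabase in this package is constructed with a home directory; the driver class and data below are illustrative):

    package collections.ship.entity;

    public class ViewsUsageSketch {
        public static void main(String[] args) throws Exception {
            SampleDatabase db = new SampleDatabase("./tmp");
            try {
                SampleViews views = new SampleViews(db);
                // add() runs PartBinding to produce the key and value
                // entries; on a transactional store, a single operation
                // outside a transaction is auto-committed.
                views.getPartSet().add(
                    new Part("P1", "Nut", "Red",
                             new Weight(12.0, Weight.GRAMS), "London"));
                // get() deserializes the stored pair back into a Part.
                Part part = (Part) views.getPartMap().get(new PartKey("P1"));
                System.out.println(part);
            } finally {
                db.close();
            }
        }
    }
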
diff --git a/examples/collections/ship/entity/Shipment.java b/examples/collections/ship/entity/Shipment.java
new file mode 100644
index 0000000000000000000000000000000000000000..f56894c3510dc47c8d0e47399ff388646cd4e58f
--- /dev/null
+++ b/examples/collections/ship/entity/Shipment.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Shipment.java,v 1.12.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+/**
+ * A Shipment represents the combined key/data pair for a shipment entity.
+ *
+ * <p> In this sample, Shipment is created from the stored key/data entry
+ * using a SerialSerialBinding.  See {@link SampleViews.ShipmentBinding} for
+ * details.  Since this class is not used directly for data storage, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Shipment {
+
+    private String partNumber;
+    private String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/ShipmentData.java b/examples/collections/ship/entity/ShipmentData.java
new file mode 100644
index 0000000000000000000000000000000000000000..1319eeb1c2ce9d6fe662c768253a31e83a476cc7
--- /dev/null
+++ b/examples/collections/ship/entity/ShipmentData.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentData.java,v 1.13.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentData serves as the value in the key/value pair for a shipment
+ * entity.
+ *
+ * <p> In this sample, ShipmentData is used only as the storage data for the
+ * value, while the Shipment object is used as the value's object
+ * representation.  Because it is used directly as storage data using
+ * serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentData implements Serializable {
+
+    private int quantity;
+
+    public ShipmentData(int quantity) {
+
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/ShipmentKey.java b/examples/collections/ship/entity/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..5037f04ebb56e58f744fb6b2ebca064a283b447d
--- /dev/null
+++ b/examples/collections/ship/entity/ShipmentKey.java
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is used both as the storage entry for the
+ * key as well as the object binding to the key.  Because it is used directly
+ * as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey implements Serializable {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+	    " part=" + partNumber + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/Supplier.java b/examples/collections/ship/entity/Supplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..c6dc0eb1eb21e2eb838ec837d0df380959afc1da
--- /dev/null
+++ b/examples/collections/ship/entity/Supplier.java
@@ -0,0 +1,63 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Supplier.java,v 1.12.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+/**
+ * A Supplier represents the combined key/data pair for a supplier entity.
+ *
+ * <p> In this sample, Supplier is created from the stored key/data entry
+ * using a SerialSerialBinding.  See {@link SampleViews.SupplierBinding} for
+ * details.  Since this class is not used directly for data storage, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Supplier {
+
+    private String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city) {
+
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/SupplierData.java b/examples/collections/ship/entity/SupplierData.java
new file mode 100644
index 0000000000000000000000000000000000000000..4437be2a44ccdba4af44377ef95e03178e0de51b
--- /dev/null
+++ b/examples/collections/ship/entity/SupplierData.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierData.java,v 1.14.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierData serves as the value in the key/value pair for a supplier
+ * entity.
+ *
+ * <p> In this sample, SupplierData is used only as the storage data for the
+ * value, while the Supplier object is used as the value's object
+ * representation.  Because it is used directly as storage data using
+ * serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierData implements Serializable {
+
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city) {
+
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[SupplierData: name=" + name +
+	    " status=" + status +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/SupplierKey.java b/examples/collections/ship/entity/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..c95d81bc68deb55ca06dfe663b1aba713bd87399
--- /dev/null
+++ b/examples/collections/ship/entity/SupplierKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.13.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is used both as the storage entry for the
+ * key as well as the object binding to the key.  Because it is used directly
+ * as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey implements Serializable {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/entity/Weight.java b/examples/collections/ship/entity/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..0924d18e92e47b8f1774d73cfef17bc56ad23894
--- /dev/null
+++ b/examples/collections/ship/entity/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:24 cwl Exp $
+ */
+
+package collections.ship.entity;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/factory/Part.java b/examples/collections/ship/factory/Part.java
new file mode 100644
index 0000000000000000000000000000000000000000..c608dc50ff1fabd4ef7ca717161789d60919b942
--- /dev/null
+++ b/examples/collections/ship/factory/Part.java
@@ -0,0 +1,106 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Part.java,v 1.18.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Part represents the combined key/data pair for a part entity.
+ *
+ * <p> In this sample, Part is bound to the stored key/data entry by
+ * implementing the MarshalledTupleKeyEntity interface. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a PartData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Part implements Serializable, MarshalledTupleKeyEntity {
+
+    private transient String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city) {
+
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Part: number=" + number +
+	    " name=" + name +
+	    " color=" + color +
+	    " weight=" + weight +
+	    " city=" + city + ']';
+    }
+
+    // --- MarshalledTupleKeyEntity implementation ---
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        throw new UnsupportedOperationException(keyName);
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        throw new UnsupportedOperationException(keyName);
+    }
+}
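
The transient-key trick described in the class comment can be observed directly: Java serialization of the value entry drops the transient number field, and the binding restores it from the key tuple through unmarshalPrimaryKey(). A sketch (the driver class is illustrative, not part of the patch):

    package collections.ship.factory;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TransientKeySketch {
        public static void main(String[] args) throws Exception {
            Part part = new Part("P1", "Nut", "Red",
                                 new Weight(12.0, Weight.GRAMS), "London");

            // The value entry: serialization omits the transient key field.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(bytes);
            oos.writeObject(part);
            oos.flush();
            Part stored = (Part) new ObjectInputStream(
                new ByteArrayInputStream(bytes.toByteArray())).readObject();
            System.out.println(stored.getNumber());   // null

            // The key entry: the binding re-applies the key from the tuple.
            TupleOutput keyOutput = new TupleOutput();
            part.marshalPrimaryKey(keyOutput);
            stored.unmarshalPrimaryKey(new TupleInput(keyOutput));
            System.out.println(stored.getNumber());   // P1
        }
    }
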
diff --git a/examples/collections/ship/factory/PartKey.java b/examples/collections/ship/factory/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..474166337d890181bec26250ca88b76b430a73ad
--- /dev/null
+++ b/examples/collections/ship/factory/PartKey.java
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.16.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is bound to the stored key tuple entry by
+ * implementing the MarshalledTupleEntry interface, which is called by the
+ * key bindings that TupleSerialFactory creates. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey implements MarshalledTupleEntry {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+
+    // --- MarshalledTupleEntry implementation ---
+
+    public PartKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void marshalEntry(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public void unmarshalEntry(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+}
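
In miniature, this is what the factory-created key binding does with the class: write the key with marshalEntry(), then rebuild it through the no-argument constructor and unmarshalEntry(). A sketch (the driver class is illustrative):

    package collections.ship.factory;

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class KeyRoundTripSketch {
        public static void main(String[] args) {
            TupleOutput out = new TupleOutput();
            new PartKey("P1").marshalEntry(out);

            PartKey copy = new PartKey();  // why the no-arg constructor exists
            copy.unmarshalEntry(new TupleInput(out));
            System.out.println(copy);      // [PartKey: number=P1]
        }
    }
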
diff --git a/examples/collections/ship/factory/Sample.java b/examples/collections/ship/factory/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..0ee44491169034cb0951fdba13ff20c4c021776c
--- /dev/null
+++ b/examples/collections/ship/factory/Sample.java
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.19.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.factory.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. To specify a different home
+ * directory, use the -h option. The home directory must exist before
+ * running the sample.  To recreate the sample database from scratch, delete
+ * all files in the home directory before running the sample. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws Exception {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws Exception {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of special iterator() methods.  These are used here
+     * with indices to find the shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printValues("Parts",
+			views.getPartSet().iterator());
+            printValues("Suppliers",
+			views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                        views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+			views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                        views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                        views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part set is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Set parts = views.getPartSet();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+			       new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+			       new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+			       new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+			       new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+			       new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+			       new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier set is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment set
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of entity value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
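
TransactionRunner wraps each doWork() call in its own transaction: it commits when doWork() returns, aborts when it throws, and retries a bounded number of times when the failure is a deadlock. A sketch of using it with an ad hoc worker (the driver class and records are illustrative; it assumes the sample data has been loaded so that part P1 exists to satisfy the shipment's foreign key):

    package collections.ship.factory;

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;

    public class RunnerSketch {
        public static void main(String[] args) throws Exception {
            final SampleDatabase db = new SampleDatabase("./tmp");
            try {
                final SampleViews views = new SampleViews(db);
                TransactionRunner runner =
                    new TransactionRunner(db.getEnvironment());
                runner.setMaxRetries(5);  // bound the deadlock retries
                runner.run(new TransactionWorker() {
                    public void doWork() throws Exception {
                        // Both writes share one transaction; it commits if
                        // doWork() returns, aborts if it throws.
                        views.getSupplierSet().add(
                            new Supplier("S9", "Doe", 10, "Oslo"));
                        views.getShipmentSet().add(
                            new Shipment("P1", "S9", 50));
                    }
                });
            } finally {
                db.close();
            }
        }
    }
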
diff --git a/examples/collections/ship/factory/SampleDatabase.java b/examples/collections/ship/factory/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..737d329b8c132b19bd5838fea2e85ce75ba6944b
--- /dev/null
+++ b/examples/collections/ship/factory/SampleDatabase.java
@@ -0,0 +1,217 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.27.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+                                    "shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+    private TupleSerialFactory factory;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Use the TupleSerialFactory for a Serial/Tuple-based database
+        // where marshalling interfaces are used.
+        //
+        factory = new TupleSerialFactory(javaCatalog);
+
+        // Open the Berkeley DB databases for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(factory.getKeyCreator(Supplier.class,
+                                                      Supplier.CITY_KEY));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class,
+                                                      Shipment.PART_KEY));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class,
+                                                      Shipment.SUPPLIER_KEY));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
+
+    /**
+     * Return the tuple-serial factory.
+     */
+    public final TupleSerialFactory getFactory() {
+
+        return factory;
+    }
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all databases and the environment.
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+}
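
The CASCADE configuration above is observable from the collections views: deleting a part deletes its shipments, because shipmentByPartDb names partDb as its foreign key database. A sketch (illustrative driver code; it assumes the sample data has already been loaded):

    package collections.ship.factory;

    public class CascadeSketch {
        public static void main(String[] args) throws Exception {
            SampleDatabase db = new SampleDatabase("./tmp");
            try {
                SampleViews views = new SampleViews(db);
                views.getPartMap().remove(new PartKey("P1"));
                // The shipments for part P1 were deleted by the cascade.
                System.out.println(views.getShipmentByPartMap()
                                        .duplicates(new PartKey("P1")));
            } finally {
                db.close();
            }
        }
    }
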
diff --git a/examples/collections/ship/factory/SampleViews.java b/examples/collections/ship/factory/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..6a393721049f24e2ce724cec9217dab099be14a8
--- /dev/null
+++ b/examples/collections/ship/factory/SampleViews.java
@@ -0,0 +1,142 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.16.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+import com.sleepycat.collections.TupleSerialFactory;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Use the TupleSerialFactory for a Serial/Tuple-based database
+        // where marshalling interfaces are used.
+        //
+        TupleSerialFactory factory = db.getFactory();
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used since the stores and indices are ordered
+        // (they use the DB_BTREE access method).
+        //
+        partMap =
+            factory.newSortedMap(db.getPartDatabase(),
+                                 PartKey.class, Part.class, true);
+        supplierMap =
+            factory.newSortedMap(db.getSupplierDatabase(),
+                                 SupplierKey.class, Supplier.class, true);
+        shipmentMap =
+            factory.newSortedMap(db.getShipmentDatabase(),
+                                 ShipmentKey.class, Shipment.class, true);
+        shipmentByPartMap =
+            factory.newSortedMap(db.getShipmentByPartDatabase(),
+                                 PartKey.class, Shipment.class, true);
+        shipmentBySupplierMap =
+            factory.newSortedMap(db.getShipmentBySupplierDatabase(),
+                                 SupplierKey.class, Shipment.class, true);
+        supplierByCityMap =
+            factory.newSortedMap(db.getSupplierByCityDatabase(),
+                                 String.class, Supplier.class, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredSortedValueSet classes, which provide additional methods.  The
+    // entity sets could be
+    // obtained directly from the Map.values() method but convenience methods
+    // are provided here to return them in order to avoid down-casting
+    // elsewhere.
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entity set view of the part storage container.
+     */
+    public StoredSortedValueSet getPartSet() {
+
+        return (StoredSortedValueSet) partMap.values();
+    }
+
+    /**
+     * Return an entity set view of the supplier storage container.
+     */
+    public StoredSortedValueSet getSupplierSet() {
+
+        return (StoredSortedValueSet) supplierMap.values();
+    }
+
+    /**
+     * Return an entity set view of the shipment storage container.
+     */
+    public StoredSortedValueSet getShipmentSet() {
+
+        return (StoredSortedValueSet) shipmentMap.values();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+}
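
Because every view is a StoredSortedMap over a btree-ordered store, the java.util.SortedMap range operations execute directly against the database. A sketch (illustrative driver code; it assumes the sample data has already been loaded):

    package collections.ship.factory;

    import java.util.Iterator;
    import java.util.SortedMap;

    public class RangeQuerySketch {
        public static void main(String[] args) throws Exception {
            SampleDatabase db = new SampleDatabase("./tmp");
            try {
                SampleViews views = new SampleViews(db);
                // All parts with key >= "P3", in btree key order.
                SortedMap tail = views.getPartMap().tailMap(new PartKey("P3"));
                for (Iterator i = tail.values().iterator(); i.hasNext();) {
                    System.out.println(i.next());
                }
            } finally {
                db.close();
            }
        }
    }
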
diff --git a/examples/collections/ship/factory/Shipment.java b/examples/collections/ship/factory/Shipment.java
new file mode 100644
index 0000000000000000000000000000000000000000..d7efd60483cc2aa58001690ab6684affc8e96f13
--- /dev/null
+++ b/examples/collections/ship/factory/Shipment.java
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Shipment.java,v 1.19.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Shipment represents the combined key/data pair for a shipment entity.
+ *
+ * <p> In this sample, Shipment is bound to the stored key/data entry by
+ * implementing the MarshalledTupleKeyEntity interface. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a ShipmentData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Shipment implements Serializable, MarshalledTupleKeyEntity {
+
+    static final String PART_KEY = "part";
+    static final String SUPPLIER_KEY = "supplier";
+
+    private transient String partNumber;
+    private transient String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[Shipment: part=" + partNumber +
+	    " supplier=" + supplierNumber +
+	    " quantity=" + quantity + ']';
+    }
+
+    // --- MarshalledTupleKeyEntity implementation ---
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.partNumber);
+        keyOutput.writeString(this.supplierNumber);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.partNumber = keyInput.readString();
+        this.supplierNumber = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if (keyName.equals(PART_KEY)) {
+            keyOutput.writeString(this.partNumber);
+            return true;
+        } else if (keyName.equals(SUPPLIER_KEY)) {
+            keyOutput.writeString(this.supplierNumber);
+            return true;
+        } else {
+            throw new UnsupportedOperationException(keyName);
+        }
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        throw new UnsupportedOperationException(keyName);
+    }
+}
diff --git a/examples/collections/ship/factory/ShipmentKey.java b/examples/collections/ship/factory/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..3361ad6a7b7e0e30c85f0b3dae5f47794562d7fa
--- /dev/null
+++ b/examples/collections/ship/factory/ShipmentKey.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.16.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is bound to the stored key tuple entry by
+ * implementing the MarshalledTupleEntry interface, which is called by the
+ * key bindings that TupleSerialFactory creates. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey implements MarshalledTupleEntry {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+                " part=" + partNumber + ']';
+    }
+
+    // --- MarshalledTupleEntry implementation ---
+
+    public ShipmentKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void marshalEntry(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.partNumber);
+        keyOutput.writeString(this.supplierNumber);
+    }
+
+    public void unmarshalEntry(TupleInput keyInput) {
+
+        this.partNumber = keyInput.readString();
+        this.supplierNumber = keyInput.readString();
+    }
+}
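
Tuple keys compare bytewise in the order their fields are written, so shipments sort by part number first and supplier number second, following marshalEntry() rather than the display order of toString(). A sketch (illustrative driver code; the bytewise comparison is written out for clarity):

    package collections.ship.factory;

    import com.sleepycat.bind.tuple.TupleOutput;

    public class KeyOrderSketch {
        public static void main(String[] args) {
            TupleOutput a = new TupleOutput();
            new ShipmentKey("P1", "S2").marshalEntry(a);
            TupleOutput b = new TupleOutput();
            new ShipmentKey("P2", "S1").marshalEntry(b);
            // true: ("P1","S2") precedes ("P2","S1") in the shipment btree.
            System.out.println(compare(a.toByteArray(), b.toByteArray()) < 0);
        }

        private static int compare(byte[] x, byte[] y) {
            int n = Math.min(x.length, y.length);
            for (int i = 0; i < n; i++) {
                int d = (x[i] & 0xff) - (y[i] & 0xff);
                if (d != 0) {
                    return d;
                }
            }
            return x.length - y.length;
        }
    }
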
diff --git a/examples/collections/ship/factory/Supplier.java b/examples/collections/ship/factory/Supplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..b165bdb4b3317e3a9c79cc3c0ef05de73bedca73
--- /dev/null
+++ b/examples/collections/ship/factory/Supplier.java
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Supplier.java,v 1.18.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Supplier represents the combined key/data pair for a supplier entity.
+ *
+ * <p> In this sample, Supplier is bound to the stored key/data entry by
+ * implementing the MarshalledTupleKeyEntity interface. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a SupplierData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Supplier implements Serializable, MarshalledTupleKeyEntity {
+
+    static final String CITY_KEY = "city";
+
+    private transient String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city) {
+
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Supplier: number=" + number +
+	    " name=" + name +
+	    " status=" + status +
+	    " city=" + city + ']';
+    }
+
+    // --- MarshalledTupleKeyEntity implementation ---
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if (keyName.equals(CITY_KEY)) {
+            if (this.city != null) {
+                keyOutput.writeString(this.city);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new UnsupportedOperationException(keyName);
+        }
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        throw new UnsupportedOperationException(keyName);
+    }
+}
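
The transient-key trick above depends on the binding to complete the entity
after deserialization. JE's com.sleepycat.bind.serial.TupleSerialMarshalledBinding
does exactly that by delegating to the MarshalledTupleKeyEntity methods. A
minimal sketch (not part of this patch; assumes an open ClassCatalog named
catalog):

    import com.sleepycat.bind.EntityBinding;
    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
    import com.sleepycat.je.DatabaseEntry;

    import collections.ship.factory.Supplier;

    public class SupplierBindingSketch {
        public static Supplier roundTrip(ClassCatalog catalog) {
            EntityBinding binding =
                new TupleSerialMarshalledBinding(catalog, Supplier.class);
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            Supplier s = new Supplier("S9", "Doe", 10, "Oslo");
            binding.objectToKey(s, key);   // writes number via marshalPrimaryKey
            binding.objectToData(s, data); // serializes s; number is transient
            // Deserializes the data entry, then restores the transient number
            // field from the key entry via unmarshalPrimaryKey.
            return (Supplier) binding.entryToObject(key, data);
        }
    }
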
diff --git a/examples/collections/ship/factory/SupplierKey.java b/examples/collections/ship/factory/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..12eef9572cd9a6daff6a538235c20cb39886fc33
--- /dev/null
+++ b/examples/collections/ship/factory/SupplierKey.java
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.16.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is bound to the stored key tuple entry by
+ * implementing the MarshalledTupleEntry interface, which is called by {@link
+ * SampleViews.MarshalledKeyBinding}. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey implements MarshalledTupleEntry {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+
+    // --- MarshalledTupleEntry implementation ---
+
+    public SupplierKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void marshalEntry(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public void unmarshalEntry(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+}
diff --git a/examples/collections/ship/factory/Weight.java b/examples/collections/ship/factory/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..21efb538bf5024307e04f824a327adf05123bebb
--- /dev/null
+++ b/examples/collections/ship/factory/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.factory;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/index/PartData.java b/examples/collections/ship/index/PartData.java
new file mode 100644
index 0000000000000000000000000000000000000000..923df73a13bc45eb1635a933e4df5283d8e3bf61
--- /dev/null
+++ b/examples/collections/ship/index/PartData.java
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartData.java,v 1.15.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A PartData serves as the data in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartData is used both as the storage data for the data
+ * and as the object binding to the data.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartData implements Serializable {
+
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city) {
+
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[PartData: name=" + name +
+	    " color=" + color +
+	    " weight=" + weight +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/index/PartKey.java b/examples/collections/ship/index/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..f49cfeb77b53dd54751b74fcf23246ba9bac8f14
--- /dev/null
+++ b/examples/collections/ship/index/PartKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.13.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is used both as the storage data for the key
+ * and as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey implements Serializable {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/index/Sample.java b/examples/collections/ship/index/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..4482f6fcd1269ef4f40adc44c08f1282d4b3274e
--- /dev/null
+++ b/examples/collections/ship/index/Sample.java
@@ -0,0 +1,278 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.22.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.index.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. The home directory must exist
+ * before running the sample.  To recreate the sample database from scratch,
+ * delete all files in the home directory before running the sample.  </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of special iterator() methods.  These are used here
+     * with indices to find the shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printEntries("Parts",
+			 views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+			 views.getSupplierEntrySet().iterator());
+            printValues("Suppliers for City Paris",
+			views.getSupplierByCityMap().duplicates(
+							"Paris").iterator());
+            printEntries("Shipments",
+			 views.getShipmentEntrySet().iterator());
+            printValues("Shipments for Part P1",
+			views.getShipmentByPartMap().duplicates(
+						new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+			views.getShipmentBySupplierMap().duplicates(
+					    new SupplierKey("S1")).iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part map is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Map parts = views.getPartMap();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.put(new PartKey("P1"),
+                      new PartData("Nut", "Red",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P2"),
+                      new PartData("Bolt", "Green",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P3"),
+                      new PartData("Screw", "Blue",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Rome"));
+            parts.put(new PartKey("P4"),
+                      new PartData("Screw", "Red",
+                                    new Weight(14.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P5"),
+                      new PartData("Cam", "Blue",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P6"),
+                      new PartData("Cog", "Red",
+                                    new Weight(19.0, Weight.GRAMS),
+                                    "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier map is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Map suppliers = views.getSupplierMap();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.put(new SupplierKey("S1"),
+                          new SupplierData("Smith", 20, "London"));
+            suppliers.put(new SupplierKey("S2"),
+                          new SupplierData("Jones", 10, "Paris"));
+            suppliers.put(new SupplierKey("S3"),
+                          new SupplierData("Blake", 30, "Paris"));
+            suppliers.put(new SupplierKey("S4"),
+                          new SupplierData("Clark", 20, "London"));
+            suppliers.put(new SupplierKey("S5"),
+                          new SupplierData("Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment map
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Map shipments = views.getShipmentMap();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.put(new ShipmentKey("P1", "S1"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P3", "S1"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P4", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P5", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P6", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P1", "S2"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S2"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P2", "S3"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P2", "S4"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P4", "S4"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P5", "S4"),
+                          new ShipmentData(400));
+        }
+    }
+
+    /**
+     * Print the key/value objects returned by an iterator of Map.Entry
+     * objects.
+     */
+    private void printEntries(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            Map.Entry entry = (Map.Entry) iterator.next();
+            System.out.println(entry.getKey().toString());
+            System.out.println(entry.getValue().toString());
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
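
The TransactionRunner used in run() above re-executes a worker whose
transaction deadlocks. A small sketch of tuning that behavior (not part of
this patch; the retry count shown is illustrative only):

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;

    import collections.ship.index.SampleDatabase;

    public class RunnerSketch {
        public static void runOnce(SampleDatabase db) throws Exception {
            TransactionRunner runner =
                new TransactionRunner(db.getEnvironment());
            // Retry a deadlocked transaction up to 3 times before rethrowing.
            runner.setMaxRetries(3);
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // All work here executes in one transaction and is
                    // re-executed from the start if it deadlocks.
                }
            });
        }
    }
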
diff --git a/examples/collections/ship/index/SampleDatabase.java b/examples/collections/ship/index/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..48315e0eb073264bc072cec5d14df17a24bd3482
--- /dev/null
+++ b/examples/collections/ship/index/SampleDatabase.java
@@ -0,0 +1,322 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.29.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+        "shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(
+            new SupplierByCityKeyCreator(javaCatalog,
+                                         SupplierKey.class,
+                                         SupplierData.class,
+                                         String.class));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentByPartKeyCreator(javaCatalog,
+                                         ShipmentKey.class,
+                                         ShipmentData.class,
+                                         PartKey.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentBySupplierKeyCreator(javaCatalog,
+                                             ShipmentKey.class,
+                                             ShipmentData.class,
+                                             SupplierKey.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all stores (closing a store automatically closes its indices).
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+
+    /**
+     * The SecondaryKeyCreator for the SupplierByCity index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys and value are all
+     * of the serial format.
+     */
+    private static class SupplierByCityKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the city key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the supplier key class.
+         * @param valueClass is the supplier value class.
+         * @param indexKeyClass is the city key class.
+         */
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class primaryKeyClass,
+                                         Class valueClass,
+                                         Class indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the city key from a supplier key/value pair.  The city key
+         * is stored in the supplier value, so the supplier key is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            SupplierData supplierData = (SupplierData) valueInput;
+            return supplierData.getCity();
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentByPart index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the keys and the value are all
+     * of the serial format.
+     */
+    private static class ShipmentByPartKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the part key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the shipment key class.
+         * @param valueClass is the shipment value class.
+         * @param indexKeyClass is the part key class.
+         */
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class primaryKeyClass,
+                                         Class valueClass,
+                                         Class indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the part key from a shipment key/value pair.  The part key
+         * is stored in the shipment key, so the shipment value is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new PartKey(shipmentKey.getPartNumber());
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentBySupplier index.  This is an
+     * extension of the abstract class SerialSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the keys and the value are all
+     * of the serial format.
+     */
+    private static class ShipmentBySupplierKeyCreator
+        extends SerialSerialKeyCreator {
+
+        /**
+         * Construct the supplier key extractor.
+         * @param catalog is the class catalog.
+         * @param primaryKeyClass is the shipment key class.
+         * @param valueClass is the shipment value class.
+         * @param indexKeyClass is the supplier key class.
+         */
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class primaryKeyClass,
+                                             Class valueClass,
+                                             Class indexKeyClass) {
+
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        /**
+         * Extract the supplier key from a shipment key/value pair.  The
+         * supplier key is stored in the shipment key, so the shipment value
+         * is not used.
+         */
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput) {
+
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new SupplierKey(shipmentKey.getSupplierNumber());
+        }
+    }
+}
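
A short usage sketch of the foreign key constraints configured above (not part
of this patch): because the shipment indices are opened with
ForeignKeyDeleteAction.CASCADE, deleting a supplier from the primary store
also deletes every shipment that refers to it.

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.je.DatabaseEntry;

    import collections.ship.index.SampleDatabase;
    import collections.ship.index.SupplierKey;

    public class CascadeSketch {
        public static void deleteSupplier(SampleDatabase db) throws Exception {
            EntryBinding keyBinding =
                new SerialBinding(db.getClassCatalog(), SupplierKey.class);
            DatabaseEntry keyEntry = new DatabaseEntry();
            keyBinding.objectToEntry(new SupplierKey("S1"), keyEntry);
            // Deleting S1 cascades through shipment_supplier_index and
            // removes the shipments for supplier S1 as well.
            db.getSupplierDatabase().delete(null, keyEntry);
        }
    }
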
diff --git a/examples/collections/ship/index/SampleViews.java b/examples/collections/ship/index/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..279f2fbf8753eec6bcb316c0725854302f32aa5a
--- /dev/null
+++ b/examples/collections/ship/index/SampleViews.java
@@ -0,0 +1,161 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.17.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredSortedMap;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Create the data bindings.
+        // In this sample, the stored key and data entries are used directly
+        // rather than mapping them to separate objects. Therefore, no binding
+        // classes are defined here and the SerialBinding class is used.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntryBinding partDataBinding =
+            new SerialBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntryBinding supplierDataBinding =
+            new SerialBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntryBinding shipmentDataBinding =
+            new SerialBinding(catalog, ShipmentData.class);
+        EntryBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used even though the stores and indices are
+        // ordered by serialized key objects, which do not provide a very
+        // useful ordering.
+        //
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+                                partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+                                supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+                                shipmentKeyBinding, shipmentDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                                partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                                supplierKeyBinding, shipmentDataBinding, true);
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                                cityKeyBinding, supplierDataBinding, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredEntrySet classes, which provide additional methods.  The entry
+    // sets could be obtained directly from the Map.entrySet() method, but
+    // convenience methods are provided here to return them in order to avoid
+    // down-casting elsewhere.
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public final StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public final StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public final StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entry set view of the part storage container.
+     */
+    public final StoredEntrySet getPartEntrySet() {
+
+        return (StoredEntrySet) partMap.entrySet();
+    }
+
+    /**
+     * Return an entry set view of the supplier storage container.
+     */
+    public final StoredEntrySet getSupplierEntrySet() {
+
+        return (StoredEntrySet) supplierMap.entrySet();
+    }
+
+    /**
+     * Return an entry set view of the shipment storage container.
+     */
+    public final StoredEntrySet getShipmentEntrySet() {
+
+        return (StoredEntrySet) shipmentMap.entrySet();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public final StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+}
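
Because the views above are live java.util.Map implementations, single records
can be read without cursors. A brief sketch (not part of this patch; assumes
an open SampleDatabase and SampleViews as created in Sample):

    import java.util.Map;

    import collections.ship.index.PartData;
    import collections.ship.index.PartKey;
    import collections.ship.index.SampleViews;

    public class ViewReadSketch {
        public static void printPart(SampleViews views) {
            Map parts = views.getPartMap();
            // Lookup by a newly constructed key works because equal PartKey
            // objects serialize to the same key bytes under SerialBinding.
            PartData data = (PartData) parts.get(new PartKey("P1"));
            System.out.println(data); // null if part P1 has not been added
        }
    }
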
diff --git a/examples/collections/ship/index/ShipmentData.java b/examples/collections/ship/index/ShipmentData.java
new file mode 100644
index 0000000000000000000000000000000000000000..cd7c23298722097b4bdf7a026980d7c9fdac373f
--- /dev/null
+++ b/examples/collections/ship/index/ShipmentData.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentData.java,v 1.14.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentData serves as the data in the key/data pair for a shipment
+ * entity.
+ *
+ * <p> In this sample, ShipmentData is used both as the storage data for the
+ * data and as the object binding to the data.  Because it is used
+ * directly as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentData implements Serializable {
+
+    private int quantity;
+
+    public ShipmentData(int quantity) {
+
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/index/ShipmentKey.java b/examples/collections/ship/index/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..0031a22d4ee6feeb18ef360bfb9555026bb6807c
--- /dev/null
+++ b/examples/collections/ship/index/ShipmentKey.java
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.14.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is used both as the storage data for the key
+ * and as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey implements Serializable {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+	    " part=" + partNumber + ']';
+    }
+}
diff --git a/examples/collections/ship/index/SupplierData.java b/examples/collections/ship/index/SupplierData.java
new file mode 100644
index 0000000000000000000000000000000000000000..80e15c47be84715a3bb32837e780129d1b8eed39
--- /dev/null
+++ b/examples/collections/ship/index/SupplierData.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierData.java,v 1.15.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierData serves as the data in the key/data pair for a supplier
+ * entity.
+ *
+ * <p> In this sample, SupplierData is used both as the storage data for the
+ * data and as the object binding to the data.  Because it is used
+ * directly as storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierData implements Serializable {
+
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city) {
+
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[SupplierData: name=" + name +
+	    " status=" + status +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/index/SupplierKey.java b/examples/collections/ship/index/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1ad226e53c81e325b46114d782f7777de42d883
--- /dev/null
+++ b/examples/collections/ship/index/SupplierKey.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.13.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is used both as the storage data for the key
+ * and as the object binding to the key.  Because it is used directly as
+ * storage data using serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey implements Serializable {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/index/Weight.java b/examples/collections/ship/index/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..0ba116535e62537e31c465a3cc9dfac6fb26fc91
--- /dev/null
+++ b/examples/collections/ship/index/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.index;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/marshal/MarshalledEntity.java b/examples/collections/ship/marshal/MarshalledEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..b6bb17a9d0ae816b54b7a3611cd7dbd7ed446434
--- /dev/null
+++ b/examples/collections/ship/marshal/MarshalledEntity.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledEntity.java,v 1.17.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * MarshalledEntity is implemented by entity (combined key/data) objects and
+ * called by {@link SampleViews.MarshalledEntityBinding}.  In this sample,
+ * MarshalledEntity is implemented by {@link Part}, {@link Supplier}, and
+ * {@link Shipment}.  This interface is package-protected rather than public
+ * to hide the marshalling interface from other users of the data objects.
+ * Note that a MarshalledEntity must also have a no-argument constructor so
+ * that it can be instantiated by the binding.
+ *
+ * @author Mark Hayes
+ */
+interface MarshalledEntity {
+
+    /**
+     * Extracts the entity's primary key and writes it to the key output.
+     */
+    void marshalPrimaryKey(TupleOutput keyOutput);
+
+    /**
+     * Completes construction of the entity by setting its primary key from the
+     * stored primary key.
+     */
+    void unmarshalPrimaryKey(TupleInput keyInput);
+
+    /**
+     * Extracts the entity's index key and writes it to the key output.
+     */
+    boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput);
+}
diff --git a/examples/collections/ship/marshal/MarshalledKey.java b/examples/collections/ship/marshal/MarshalledKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..156d3c440183d492b9f49f783fff4bc6290de894
--- /dev/null
+++ b/examples/collections/ship/marshal/MarshalledKey.java
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledKey.java,v 1.15.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * MarshalledKey is implemented by key objects and called by {@link
+ * SampleViews.MarshalledKeyBinding}.  In this sample, MarshalledKey is
+ * implemented by {@link PartKey}, {@link SupplierKey}, and {@link
+ * ShipmentKey}.  This interface is package-protected rather than public to
+ * hide the marshalling interface from other users of the data objects.  Note
+ * that a MarshalledKey must also have a no-argument constructor so
+ * that it can be instantiated by the binding.
+ *
+ * @author Mark Hayes
+ */
+interface MarshalledKey {
+
+    /**
+     * Construct the key tuple entry from the key object.
+     */
+    void marshalKey(TupleOutput keyOutput);
+
+    /**
+     * Construct the key object from the key tuple entry.
+     */
+    void unmarshalKey(TupleInput keyInput);
+}
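
SampleViews.MarshalledKeyBinding is not included in this patch, so the
following is only a plausible sketch (an assumption, not the sample's actual
code) of a TupleBinding that drives the two MarshalledKey methods above; it
must live in the collections.ship.marshal package since MarshalledKey is
package-private:

    package collections.ship.marshal;

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    class MarshalledKeyBinding extends TupleBinding {

        // Must implement MarshalledKey and have a no-argument constructor.
        private final Class keyClass;

        MarshalledKeyBinding(Class keyClass) {
            this.keyClass = keyClass;
        }

        public Object entryToObject(TupleInput input) {
            try {
                // Construct the key object from the key tuple entry.
                MarshalledKey key = (MarshalledKey) keyClass.newInstance();
                key.unmarshalKey(input);
                return key;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        public void objectToEntry(Object object, TupleOutput output) {
            // Construct the key tuple entry from the key object.
            ((MarshalledKey) object).marshalKey(output);
        }
    }
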
diff --git a/examples/collections/ship/marshal/Part.java b/examples/collections/ship/marshal/Part.java
new file mode 100644
index 0000000000000000000000000000000000000000..c461b980cd41752b3813ef9a8265f4b93cc838f6
--- /dev/null
+++ b/examples/collections/ship/marshal/Part.java
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Part.java,v 1.17.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Part represents the combined key/data pair for a part entity.
+ *
+ * <p> In this sample, Part is bound to the stored key/data entry by
+ * implementing the MarshalledEntity interface, which is called by {@link
+ * SampleViews.MarshalledEntityBinding}. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a PartData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Part implements Serializable, MarshalledEntity {
+
+    private transient String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city) {
+
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    final void setKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+
+    // --- MarshalledEntity implementation ---
+
+    Part() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        throw new UnsupportedOperationException(keyName);
+    }
+}
diff --git a/examples/collections/ship/marshal/PartKey.java b/examples/collections/ship/marshal/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..c536354ec30e5d753b8fff094fe7418665f97f92
--- /dev/null
+++ b/examples/collections/ship/marshal/PartKey.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.14.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is bound to the stored key tuple entry by
+ * implementing the MarshalledKey interface, which is called by {@link
+ * SampleViews.MarshalledKeyBinding}. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey implements MarshalledKey {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+
+    // --- MarshalledKey implementation ---
+
+    PartKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public void marshalKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+}
diff --git a/examples/collections/ship/marshal/Sample.java b/examples/collections/ship/marshal/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..cc91ba21926d68738e36c1a3992d07bc26c33468
--- /dev/null
+++ b/examples/collections/ship/marshal/Sample.java
@@ -0,0 +1,236 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.20.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.marshal.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. To specify a different home
+ * directory, use the -home option. The home directory must exist before
+ * running the sample.  To recreate the sample database from scratch, delete
+ * all files in the home directory before running the sample. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of special iterator() methods.  These are used here
+     * with indices to find the shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printValues("Parts",
+			views.getPartSet().iterator());
+            printValues("Suppliers",
+			views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                        views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+			views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                        views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                        views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part set is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Set parts = views.getPartSet();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+			       new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+			       new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+			       new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+			       new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+			       new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+			       new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier set is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment set
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of entity value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
diff --git a/examples/collections/ship/marshal/SampleDatabase.java b/examples/collections/ship/marshal/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..a8674dde556d7464bd40729b88f3bab050e1d35c
--- /dev/null
+++ b/examples/collections/ship/marshal/SampleDatabase.java
@@ -0,0 +1,251 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.27.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialKeyCreator;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+	"shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog,
+                                                         Supplier.class,
+                                                         Supplier.CITY_KEY));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog,
+                                                         Shipment.class,
+                                                         Shipment.PART_KEY));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog,
+                                                         Shipment.class,
+                                                     Shipment.SUPPLIER_KEY));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
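+
+    // Illustrative sketch of the CASCADE delete action configured above
+    // (hypothetical usage, not part of this class): deleting a part also
+    // deletes the shipments that refer to it through the part index.
+    //
+    //   // partKeyEntry: a hypothetical DatabaseEntry holding a part key
+    //   partDb.delete(null, partKeyEntry);  // its shipments go away too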
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all stores (closing a store automatically closes its indices).
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+
+    /**
+     * The SecondaryKeyCreator for MarshalledEntity objects.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class MarshalledKeyCreator
+        extends TupleSerialKeyCreator {
+
+        private String keyName;
+
+        /**
+         * Construct the key creator.
+         * @param catalog is the class catalog.
+         * @param valueClass is the entity value class.
+         * @param keyName is the key name passed to the marshalling methods.
+         */
+        private MarshalledKeyCreator(ClassCatalog catalog,
+                                     Class valueClass,
+                                     String keyName) {
+
+            super(catalog, valueClass);
+            this.keyName = keyName;
+        }
+
+        /**
+         * Extract the named secondary key from an entity key/value pair.
+         * The entity itself marshals the key, so this method simply
+         * delegates to the MarshalledEntity interface.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            // The primary key is unmarshalled before marshalling the index
+            // key, to account for cases where the index key is composed of
+            // data elements from the primary key.
+            MarshalledEntity entity = (MarshalledEntity) valueInput;
+            entity.unmarshalPrimaryKey(primaryKeyInput);
+            return entity.marshalSecondaryKey(keyName, indexKeyOutput);
+        }
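+
+        // For example, when this creator is constructed with
+        // Shipment.PART_KEY, a Shipment entity writes its partNumber as the
+        // index key (see Shipment.marshalSecondaryKey), producing the
+        // shipment-by-part index.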
+    }
+}
diff --git a/examples/collections/ship/marshal/SampleViews.java b/examples/collections/ship/marshal/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..92e741c896f7a0573bc97331e7b76d8dd1181421
--- /dev/null
+++ b/examples/collections/ship/marshal/SampleViews.java
@@ -0,0 +1,276 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.23.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Create the data bindings.
+        // In this sample, EntityBinding classes are used to bind the stored
+        // key/data entry pair to a combined data object; a "tricky" binding
+        // that uses transient fields is used--see MarshalledEntityBinding
+        // below for details.  For keys, a one-to-one binding is implemented
+        // with EntryBinding classes (MarshalledKeyBinding) to bind the
+        // stored tuple entry to a key Object.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new MarshalledKeyBinding(PartKey.class);
+        EntityBinding partDataBinding =
+            new MarshalledEntityBinding(catalog, Part.class);
+        EntryBinding supplierKeyBinding =
+            new MarshalledKeyBinding(SupplierKey.class);
+        EntityBinding supplierDataBinding =
+            new MarshalledEntityBinding(catalog, Supplier.class);
+        EntryBinding shipmentKeyBinding =
+            new MarshalledKeyBinding(ShipmentKey.class);
+        EntityBinding shipmentDataBinding =
+            new MarshalledEntityBinding(catalog, Shipment.class);
+        EntryBinding cityKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used since the stores and indices are ordered
+        // (they use the DB_BTREE access method).
+        //
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+				partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+				supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+				shipmentKeyBinding, shipmentDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                                partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                                supplierKeyBinding, shipmentDataBinding, true);
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                                cityKeyBinding, supplierDataBinding, true);
+    }
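+
+    // A hypothetical usage sketch of the views created above, as seen from
+    // application code (illustration only):
+    //
+    //   SampleViews views = new SampleViews(db);
+    //   Part part = (Part) views.getPartMap().get(new PartKey("P1"));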
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredSortedValueSet classes, which provide additional methods.  The
+    // entity sets could be obtained directly from the Map.values() method,
+    // but convenience methods are provided here so that callers need not
+    // down-cast the result.
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entity set view of the part storage container.
+     */
+    public StoredSortedValueSet getPartSet() {
+
+        return (StoredSortedValueSet) partMap.values();
+    }
+
+    /**
+     * Return an entity set view of the supplier storage container.
+     */
+    public StoredSortedValueSet getSupplierSet() {
+
+        return (StoredSortedValueSet) supplierMap.values();
+    }
+
+    /**
+     * Return an entity set view of the shipment storage container.
+     */
+    public StoredSortedValueSet getShipmentSet() {
+
+        return (StoredSortedValueSet) shipmentMap.values();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public final StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+
+    /**
+     * MarshalledKeyBinding is used to bind the stored key tuple entry to a key
+     * object representation.  To do this, it calls the MarshalledKey interface
+     * implemented by the key class.
+     */
+    private static class MarshalledKeyBinding extends TupleBinding {
+
+        private Class keyClass;
+
+        /**
+         * Construct the binding object.
+         */
+        private MarshalledKeyBinding(Class keyClass) {
+
+            // The key class will be used to instantiate the key object.
+            //
+            if (!MarshalledKey.class.isAssignableFrom(keyClass)) {
+                throw new IllegalArgumentException(keyClass.toString() +
+                                       " does not implement MarshalledKey");
+            }
+            this.keyClass = keyClass;
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            try {
+                MarshalledKey key = (MarshalledKey) keyClass.newInstance();
+                key.unmarshalKey(input);
+                return key;
+            } catch (IllegalAccessException e) {
+                throw new RuntimeExceptionWrapper(e);
+            } catch (InstantiationException e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            MarshalledKey key = (MarshalledKey) object;
+            key.marshalKey(output);
+        }
+    }
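+
+    // A hypothetical round trip through this binding (illustration only):
+    //
+    //   TupleBinding binding = new MarshalledKeyBinding(ShipmentKey.class);
+    //   TupleOutput out = new TupleOutput();
+    //   new ShipmentKey("P1", "S1").marshalKey(out);      // key -> tuple
+    //   ShipmentKey key = (ShipmentKey)
+    //       binding.entryToObject(new TupleInput(out));   // tuple -> key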
+
+    /**
+     * MarshalledEntityBinding is used to bind the stored key/data entry pair
+     * to a combined entity object representation.  To do this, it calls the
+     * MarshalledEntity interface implemented by the entity class.
+     *
+     * <p> The binding is "tricky" in that it uses the entity class for both
+     * the stored data entry and the combined entity object.  To do this, the
+     * entity's key field(s) are transient and are set by the binding after
+     * the data object has been deserialized.  This avoids the use of a
+     * separate "data" class completely. </p>
+     */
+    private static class MarshalledEntityBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private MarshalledEntityBinding(ClassCatalog classCatalog,
+                                        Class entityClass) {
+
+            super(classCatalog, entityClass);
+
+            // The entity class will be used to instantiate the entity object.
+            //
+            if (!MarshalledEntity.class.isAssignableFrom(entityClass)) {
+                throw new IllegalArgumentException(entityClass.toString() +
+                                       " does not implement MarshalledEntity");
+            }
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        public Object entryToObject(TupleInput tupleInput, Object javaInput) {
+
+            MarshalledEntity entity = (MarshalledEntity) javaInput;
+            entity.unmarshalPrimaryKey(tupleInput);
+            return entity;
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            MarshalledEntity entity = (MarshalledEntity) object;
+            entity.marshalPrimaryKey(output);
+        }
+
+        /**
+         * Return the entity as the stored data.  There is nothing to do here
+         * since the entity's key fields are transient.
+         */
+        public Object objectToData(Object object) {
+
+            return object;
+        }
+    }
+}
diff --git a/examples/collections/ship/marshal/Shipment.java b/examples/collections/ship/marshal/Shipment.java
new file mode 100644
index 0000000000000000000000000000000000000000..f43405eaf2ac891c98d9d9d67babaaf84bd9d96a
--- /dev/null
+++ b/examples/collections/ship/marshal/Shipment.java
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Shipment.java,v 1.17.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Shipment represents the combined key/data pair for a shipment entity.
+ *
+ * <p> In this sample, Shipment is bound to the stored key/data entry by
+ * implementing the MarshalledEntity interface, which is called by {@link
+ * SampleViews.MarshalledEntityBinding}. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a ShipmentData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Shipment implements Serializable, MarshalledEntity {
+
+    static final String PART_KEY = "part";
+    static final String SUPPLIER_KEY = "supplier";
+
+    private transient String partNumber;
+    private transient String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    void setKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + ']';
+    }
+
+    // --- MarshalledEntity implementation ---
+
+    Shipment() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.partNumber = keyInput.readString();
+        this.supplierNumber = keyInput.readString();
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.partNumber);
+        keyOutput.writeString(this.supplierNumber);
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if (keyName.equals(PART_KEY)) {
+            keyOutput.writeString(this.partNumber);
+            return true;
+        } else if (keyName.equals(SUPPLIER_KEY)) {
+            keyOutput.writeString(this.supplierNumber);
+            return true;
+        } else {
+            throw new UnsupportedOperationException(keyName);
+        }
+    }
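+
+    // SampleDatabase's MarshalledKeyCreator calls this method with either
+    // PART_KEY or SUPPLIER_KEY, so each secondary index extracts a different
+    // field of the same stored entity.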
+}
diff --git a/examples/collections/ship/marshal/ShipmentKey.java b/examples/collections/ship/marshal/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e370b19fe6bdf1098b9fd2a4d070af99f1a5ed1
--- /dev/null
+++ b/examples/collections/ship/marshal/ShipmentKey.java
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.14.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is bound to the stored key tuple entry by
+ * implementing the MarshalledKey interface, which is called by {@link
+ * SampleViews.MarshalledKeyBinding}. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey implements MarshalledKey {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+                " part=" + partNumber + ']';
+    }
+
+    // --- MarshalledKey implementation ---
+
+    ShipmentKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalKey(TupleInput keyInput) {
+
+        this.partNumber = keyInput.readString();
+        this.supplierNumber = keyInput.readString();
+    }
+
+    public void marshalKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.partNumber);
+        keyOutput.writeString(this.supplierNumber);
+    }
+}
diff --git a/examples/collections/ship/marshal/Supplier.java b/examples/collections/ship/marshal/Supplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..69e2100dd53c2a94f703265a57313337dc032bdf
--- /dev/null
+++ b/examples/collections/ship/marshal/Supplier.java
@@ -0,0 +1,118 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Supplier.java,v 1.17.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A Supplier represents the combined key/data pair for a supplier entity.
+ *
+ * <p> In this sample, Supplier is bound to the stored key/data entry by
+ * implementing the MarshalledEntity interface, which is called by {@link
+ * SampleViews.MarshalledEntityBinding}. </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a SupplierData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Supplier implements Serializable, MarshalledEntity {
+
+    static final String CITY_KEY = "city";
+
+    private transient String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city) {
+
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    void setKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+
+    // --- MarshalledEntity implementation ---
+
+    Supplier() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if (keyName.equals(CITY_KEY)) {
+            if (this.city != null) {
+                keyOutput.writeString(this.city);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new UnsupportedOperationException(keyName);
+        }
+    }
+}
diff --git a/examples/collections/ship/marshal/SupplierKey.java b/examples/collections/ship/marshal/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..45378056c335a35f119b10fd52b10ce8db331365
--- /dev/null
+++ b/examples/collections/ship/marshal/SupplierKey.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.14.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is bound to the stored key tuple entry by
+ * implementing the MarshalledKey interface, which is called by {@link
+ * SampleViews.MarshalledKeyBinding}. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey implements MarshalledKey {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+
+    // --- MarshalledKey implementation ---
+
+    SupplierKey() {
+
+        // A no-argument constructor is necessary only to allow the binding to
+        // instantiate objects of this class.
+    }
+
+    public void unmarshalKey(TupleInput keyInput) {
+
+        this.number = keyInput.readString();
+    }
+
+    public void marshalKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(this.number);
+    }
+}
diff --git a/examples/collections/ship/marshal/Weight.java b/examples/collections/ship/marshal/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..0bb56a6de5f56730b3877f2af89c23ebba58b3ab
--- /dev/null
+++ b/examples/collections/ship/marshal/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:25 cwl Exp $
+ */
+
+package collections.ship.marshal;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/Part.java b/examples/collections/ship/sentity/Part.java
new file mode 100644
index 0000000000000000000000000000000000000000..c3a99fbd6e5189610900ce989babdf516c702b0e
--- /dev/null
+++ b/examples/collections/ship/sentity/Part.java
@@ -0,0 +1,90 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Part.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.Serializable;
+
+/**
+ * A Part represents the combined key/data pair for a part entity.
+ *
+ * <p> In this sample, Part is created from the stored key/data entry using a
+ * TupleSerialEntityBinding.  See {@link SampleViews.PartBinding} for details.
+ * </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a PartData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Part implements Serializable {
+
+    private transient String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city) {
+
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    final void setKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/PartKey.java b/examples/collections/ship/sentity/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..04c3846dd344fdb4ed2f6f091c86e7dfc78c5e30
--- /dev/null
+++ b/examples/collections/ship/sentity/PartKey.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is bound to the key's tuple storage entry using
+ * a TupleBinding.  Because it is not used directly as storage data, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/Sample.java b/examples/collections/ship/sentity/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..82517e07318c5c4ee07106ac75937ed1463f0153
--- /dev/null
+++ b/examples/collections/ship/sentity/Sample.java
@@ -0,0 +1,236 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.20.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.sentity.Sample
+ *      [-h <home-directory>]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run.  To specify a different
+ * home directory, use the -h option.  The home directory must exist before
+ * running the sample.  To recreate the sample database from scratch, delete
+ * all files in the home directory before running the sample. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
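+
+    // TransactionRunner retries a transaction that deadlocks, up to a
+    // maximum retry count.  A sketch of tuning that bound:
+    //
+    //   runner.setMaxRetries(3);  // allow at most three deadlock retries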
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of the duplicates() method on the index maps.  It
+     * returns the collection of entities for a given index key, which is
+     * then iterated to print the shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printValues("Parts",
+			views.getPartSet().iterator());
+            printValues("Suppliers",
+			views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                        views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+			views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                        views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                        views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part set is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Set parts = views.getPartSet();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+			       new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+			       new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+			       new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+			       new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+			       new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+			       new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier set is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment set
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of entity value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
diff --git a/examples/collections/ship/sentity/SampleDatabase.java b/examples/collections/ship/sentity/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c4d90bd1e64a4c1ab696c4a4408694722e64e83
--- /dev/null
+++ b/examples/collections/ship/sentity/SampleDatabase.java
@@ -0,0 +1,314 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.27.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialKeyCreator;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+	"shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog,
+                                                             Supplier.class));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog,
+                                                             Shipment.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog,
+                                                              Shipment.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all stores (closing a store automatically closes its indices).
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+
+    /**
+     * The SecondaryKeyCreator for the SupplierByCity index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the city key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the supplier value class.
+         */
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class valueClass) {
+
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the city key from a supplier key/value pair.  The city key
+         * is stored in the supplier value, so the supplier key is not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            Supplier supplier = (Supplier) valueInput;
+            String city = supplier.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(supplier.getCity());
+                return true;
+            } else {
+                return false;
+            }
+        }
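+
+        // For example, new Supplier("S1", "Smith", 20, "London") yields the
+        // index key "London".  Returning false for a null city simply omits
+        // that record from the index.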
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentByPart index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class ShipmentByPartKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the part key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the shipment value class.
+         */
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class valueClass) {
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the part key from a shipment key/value pair.  The part key
+         * is stored in the shipment key, so the shipment value is not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            String partNumber = primaryKeyInput.readString();
+            // don't bother reading the supplierNumber
+            indexKeyOutput.writeString(partNumber);
+            return true;
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentBySupplier index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class ShipmentBySupplierKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the supplier key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the shipment value class.
+         */
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class valueClass) {
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the supplier key from a shipment key/value pair.  The
+         * supplier key is stored in the shipment key, so the shipment value is
+         * not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            primaryKeyInput.readString(); // skip the partNumber
+            String supplierNumber = primaryKeyInput.readString();
+            indexKeyOutput.writeString(supplierNumber);
+            return true;
+        }
+    }
+}
diff --git a/examples/collections/ship/sentity/SampleViews.java b/examples/collections/ship/sentity/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..1c9aab827a2dd466e50605f087a410dc48efd7f7
--- /dev/null
+++ b/examples/collections/ship/sentity/SampleViews.java
@@ -0,0 +1,419 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.22.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Create the data bindings.
+        // In this sample, EntityBinding classes are used to bind the stored
+        // key/data entry pair to a combined data object; a "tricky" binding
+        // that uses transient key fields is used--see PartBinding, etc., for
+        // details.  For keys, a one-to-one binding is implemented with
+        // EntryBinding classes to bind the stored tuple entry to a key Object.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new PartKeyBinding();
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, Part.class);
+        EntryBinding supplierKeyBinding =
+            new SupplierKeyBinding();
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, Supplier.class);
+        EntryBinding shipmentKeyBinding =
+            new ShipmentKeyBinding();
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, Shipment.class);
+        EntryBinding cityKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used since the stores and indices are ordered
+        // (they use the DB_BTREE access method).
+        //
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+				partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+				supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+				shipmentKeyBinding, shipmentDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                                partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                                supplierKeyBinding, shipmentDataBinding, true);
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                                cityKeyBinding, supplierDataBinding, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredValueSet classes, which provide additional methods.  The entity
+    // sets could be obtained directly from the Map.values() method but
+    // convenience methods are provided here to return them in order to avoid
+    // down-casting elsewhere.
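+    //
+    // For illustration only (not part of the original sample): given a
+    // SampleViews instance named "views", the views can be used like
+    // ordinary java.util collections, for example:
+    //
+    //     Iterator parts = views.getPartSet().iterator();
+    //     while (parts.hasNext()) {
+    //         System.out.println(parts.next());    // prints each Part entity
+    //     }
+    //     Object part = views.getPartMap().get(new PartKey("P1"));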
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entity set view of the part storage container.
+     */
+    public StoredSortedValueSet getPartSet() {
+
+        return (StoredSortedValueSet) partMap.values();
+    }
+
+    /**
+     * Return an entity set view of the supplier storage container.
+     */
+    public StoredSortedValueSet getSupplierSet() {
+
+        return (StoredSortedValueSet) supplierMap.values();
+    }
+
+    /**
+     * Return an entity set view of the shipment storage container.
+     */
+    public StoredSortedValueSet getShipmentSet() {
+
+        return (StoredSortedValueSet) shipmentMap.values();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public final StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+
+    /**
+     * PartKeyBinding is used to bind the stored key tuple entry for a part to
+     * a key object representation.
+     */
+    private static class PartKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private PartKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String number = input.readString();
+            return new PartKey(number);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            PartKey key = (PartKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+
+    /**
+     * PartBinding is used to bind the stored key/data entry pair for a part
+     * to a combined data object (entity).
+     *
+     * <p> The binding is "tricky" in that it uses the Part class for both the
+     * stored data entry and the combined entity object.  To do this, Part's
+     * key field(s) are transient and are set by the binding after the data
+     * object has been deserialized. This avoids the use of a PartData class
+     * completely. </p>
+     */
+    private static class PartBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private PartBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String number = keyInput.readString();
+            Part part = (Part) dataInput;
+            part.setKey(number);
+            return part;
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+
+        /**
+         * Return the entity as the stored data.  There is nothing to do here
+         * since the entity's key fields are transient.
+         */
+        public Object objectToData(Object object) {
+
+            return object;
+        }
+    }
+
+    /**
+     * SupplierKeyBinding is used to bind the stored key tuple entry for a
+     * supplier to a key object representation.
+     */
+    private static class SupplierKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private SupplierKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String number = input.readString();
+            return new SupplierKey(number);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            SupplierKey key = (SupplierKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+
+    /**
+     * SupplierBinding is used to bind the stored key/data entry pair for a
+     * supplier to a combined data object (entity).
+     *
+     * <p> The binding is "tricky" in that it uses the Supplier class for both
+     * the stored data entry and the combined entity object.  To do this,
+     * Supplier's key field(s) are transient and are set by the binding after
+     * the data object has been deserialized. This avoids the use of a
+     * SupplierData class completely. </p>
+     */
+    private static class SupplierBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String number = keyInput.readString();
+            Supplier supplier = (Supplier) dataInput;
+            supplier.setKey(number);
+            return supplier;
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+
+        /**
+         * Return the entity as the stored data.  There is nothing to do here
+         * since the entity's key fields are transient.
+         */
+        public Object objectToData(Object object) {
+
+            return object;
+        }
+    }
+
+    /**
+     * ShipmentKeyBinding is used to bind the stored key tuple entry for a
+     * shipment to a key object representation.
+     */
+    private static class ShipmentKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private ShipmentKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String partNumber = input.readString();
+            String supplierNumber = input.readString();
+            return new ShipmentKey(partNumber, supplierNumber);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            ShipmentKey key = (ShipmentKey) object;
+            output.writeString(key.getPartNumber());
+            output.writeString(key.getSupplierNumber());
+        }
+    }
+
+    /**
+     * ShipmentBinding is used to bind the stored key/data entry pair for a
+     * shipment to a combined data object (entity).
+     *
+     * <p> The binding is "tricky" in that it uses the Shipment class for both
+     * the stored data entry and the combined entity object.  To do this,
+     * Shipment's key field(s) are transient and are set by the binding after
+     * the data object has been deserialized. This avoids the use of a
+     * ShipmentData class completely. </p>
+     */
+    private static class ShipmentBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            Shipment shipment = (Shipment) dataInput;
+            shipment.setKey(partNumber, supplierNumber);
+            return shipment;
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+
+        /**
+         * Return the entity as the stored data.  There is nothing to do here
+         * since the entity's key fields are transient.
+         */
+        public Object objectToData(Object object) {
+
+            return object;
+        }
+    }
+}
diff --git a/examples/collections/ship/sentity/Shipment.java b/examples/collections/ship/sentity/Shipment.java
new file mode 100644
index 0000000000000000000000000000000000000000..533baf504e2d6daf1d7750d5a0cc5ad064a9146b
--- /dev/null
+++ b/examples/collections/ship/sentity/Shipment.java
@@ -0,0 +1,75 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Shipment.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.Serializable;
+
+/**
+ * A Shipment represents the combined key/data pair for a shipment entity.
+ *
+ * <p> In this sample, Shipment is created from the stored key/data entry
+ * using a TupleSerialBinding.  See {@link SampleViews.ShipmentBinding} for
+ * details.
+ * </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a ShipmentData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Shipment implements Serializable {
+
+    private transient String partNumber;
+    private transient String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    void setKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/ShipmentKey.java b/examples/collections/ship/sentity/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..bb0636132b400145e1ca4245e186800eb673f7e9
--- /dev/null
+++ b/examples/collections/ship/sentity/ShipmentKey.java
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is bound to the key's tuple storage entry
+ * using a TupleBinding.  Because it is not used directly as storage data, it
+ * does not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+	    " part=" + partNumber + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/Supplier.java b/examples/collections/ship/sentity/Supplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..c1b0d316508d016a029821a3053131ae7e5769c0
--- /dev/null
+++ b/examples/collections/ship/sentity/Supplier.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Supplier.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.Serializable;
+
+/**
+ * A Supplier represents the combined key/data pair for a supplier entity.
+ *
+ * <p> In this sample, Supplier is created from the stored key/data entry
+ * using a TupleSerialBinding.  See {@link SampleViews.SupplierBinding} for
+ * details.
+ * </p>
+ *
+ * <p> The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object.  To do this, the key field(s) are
+ * transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a SupplierData class completely. </p>
+ *
+ * <p> Since this class is used directly for data storage, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Supplier implements Serializable {
+
+    private transient String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city) {
+
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    /**
+     * Set the transient key fields after deserializing.  This method is only
+     * called by data bindings.
+     */
+    void setKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/SupplierKey.java b/examples/collections/ship/sentity/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..c05abffd4c8e6480b3e663e79b555497ec4428d3
--- /dev/null
+++ b/examples/collections/ship/sentity/SupplierKey.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is bound to the key's tuple storage entry
+ * using a TupleBinding.  Because it is not used directly as storage data, it
+ * does not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/sentity/Weight.java b/examples/collections/ship/sentity/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..fe62a1b7c2556e3b4ac0c4644c4a4db1feb476e4
--- /dev/null
+++ b/examples/collections/ship/sentity/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.sentity;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/Part.java b/examples/collections/ship/tuple/Part.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ab8f0062948f05276fc0391265fcca74e53b595
--- /dev/null
+++ b/examples/collections/ship/tuple/Part.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Part.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A Part represents the combined key/data pair for a part entity.
+ *
+ * <p> In this sample, Part is created from the stored key/data entry using a
+ * TupleSerialBinding.  See {@link SampleViews.PartBinding} for details.
+ * Since this class is not directly used for data storage, it does not need to
+ * be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Part {
+
+    private String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city) {
+
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/PartData.java b/examples/collections/ship/tuple/PartData.java
new file mode 100644
index 0000000000000000000000000000000000000000..173689cf5af6a9ea46da1a8b32488fd866134985
--- /dev/null
+++ b/examples/collections/ship/tuple/PartData.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartData.java,v 1.14.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.Serializable;
+
+/**
+ * A PartData serves as the value in the key/value pair for a part entity.
+ *
+ * <p> In this sample, PartData is used only as the storage data for the
+ * value, while the Part object is used as the value's object representation.
+ * Because it is used directly as storage data using serial format, it must be
+ * Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartData implements Serializable {
+
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city) {
+
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final String getColor() {
+
+        return color;
+    }
+
+    public final Weight getWeight() {
+
+        return weight;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[PartData: name=" + name +
+	    " color=" + color +
+	    " weight=" + weight +
+	    " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/PartKey.java b/examples/collections/ship/tuple/PartKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..2fc0dbf81d6d76ac98eaa655e53301d95a3f6bd4
--- /dev/null
+++ b/examples/collections/ship/tuple/PartKey.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PartKey.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A PartKey serves as the key in the key/data pair for a part entity.
+ *
+ * <p> In this sample, PartKey is bound to the key's tuple storage entry using
+ * a TupleBinding.  Because it is not used directly as storage data, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class PartKey {
+
+    private String number;
+
+    public PartKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[PartKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/Sample.java b/examples/collections/ship/tuple/Sample.java
new file mode 100644
index 0000000000000000000000000000000000000000..df257d72bf1361c528ca0bc57864d186c5129503
--- /dev/null
+++ b/examples/collections/ship/tuple/Sample.java
@@ -0,0 +1,235 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Sample.java,v 1.20.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Sample is the main entry point for the sample program and may be run as
+ * follows:
+ *
+ * <pre>
+ * java collections.ship.tuple.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * <p> The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. The home directory must exist
+ * before running the sample.  To recreate the sample database from scratch,
+ * delete all files in the home directory before running the sample.  </p>
+ *
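+ * <p> For example (illustration only; this assumes je.jar and the compiled
+ * example classes are already on the classpath): </p>
+ * <pre>
+ * mkdir ./tmp
+ * java collections.ship.tuple.Sample -h ./tmp
+ * </pre>
+ *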
+ * @author Mark Hayes
+ */
+public class Sample {
+
+    private SampleDatabase db;
+    private SampleViews views;
+
+    /**
+     * Run the sample program.
+     */
+    public static void main(String[] args) {
+
+        System.out.println("\nRunning sample: " + Sample.class);
+
+        // Parse the command line arguments.
+        //
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h") && i < args.length - 1) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                System.err.println("Usage:\n java " + Sample.class.getName() +
+				   "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+
+        // Run the sample.
+        //
+        Sample sample = null;
+        try {
+            sample = new Sample(homeDir);
+            sample.run();
+        } catch (Exception e) {
+            // If an exception reaches this point, the last transaction did not
+            // complete.  If the exception is RunRecoveryException, follow
+            // the Berkeley DB recovery procedures before running again.
+            e.printStackTrace();
+        } finally {
+            if (sample != null) {
+                try {
+                    // Always attempt to close the database cleanly.
+                    sample.close();
+                } catch (Exception e) {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    /**
+     * Open the database and views.
+     */
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException {
+
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    }
+
+    /**
+     * Close the database cleanly.
+     */
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+    }
+
+    /**
+     * Run two transactions to populate and print the database.  A
+     * TransactionRunner is used to ensure consistent handling of transactions,
+     * including deadlock retries.  But the best transaction handling mechanism
+     * to use depends on the application.
+     */
+    private void run()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+
+    /**
+     * Populate the database in a single transaction.
+     */
+    private class PopulateDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+
+    /**
+     * Print the database in a single transaction.  All entities are printed
+     * and the indices are used to print the entities for certain keys.
+     *
+     * <p> Note the use of special iterator() methods.  These are used here
+     * with indices to find the shipments for certain keys.</p>
+     */
+    private class PrintDatabase implements TransactionWorker {
+
+        public void doWork()
+            throws Exception {
+            printValues("Parts",
+			views.getPartSet().iterator());
+            printValues("Suppliers",
+			views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                        views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+			views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                        views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                        views.getShipmentBySupplierMap().duplicates(
+                                            new SupplierKey("S1")).iterator());
+        }
+    }
+
+    /**
+     * Populate the part entities in the database.  If the part set is not
+     * empty, assume that this has already been done.
+     */
+    private void addParts() {
+
+        Set parts = views.getPartSet();
+        if (parts.isEmpty()) {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+			       new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+			       new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+			       new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+			       new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+			       new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+			       new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    /**
+     * Populate the supplier entities in the database.  If the supplier set is
+     * not empty, assume that this has already been done.
+     */
+    private void addSuppliers() {
+
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty()) {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    /**
+     * Populate the shipment entities in the database.  If the shipment set
+     * is not empty, assume that this has already been done.
+     */
+    private void addShipments() {
+
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty()) {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    }
+
+    /**
+     * Print the objects returned by an iterator of entity value objects.
+     */
+    private void printValues(String label, Iterator iterator) {
+
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext()) {
+            System.out.println(iterator.next().toString());
+        }
+    }
+}
diff --git a/examples/collections/ship/tuple/SampleDatabase.java b/examples/collections/ship/tuple/SampleDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..e608f84cbd0d8c0f7b4ef5f81020eeb034aee23a
--- /dev/null
+++ b/examples/collections/ship/tuple/SampleDatabase.java
@@ -0,0 +1,314 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleDatabase.java,v 1.27.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialKeyCreator;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+/**
+ * SampleDatabase defines the storage containers, indices and foreign keys
+ * for the sample database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleDatabase {
+
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    private static final String SHIPMENT_PART_INDEX = "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX =
+	"shipment_supplier_index";
+    private static final String SUPPLIER_CITY_INDEX = "supplier_city_index";
+
+    private Environment env;
+    private Database partDb;
+    private Database supplierDb;
+    private Database shipmentDb;
+    private SecondaryDatabase supplierByCityDb;
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    private StoredClassCatalog javaCatalog;
+
+    /**
+     * Open all storage containers, indices, and catalogs.
+     */
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException {
+
+        // Open the Berkeley DB environment in transactional mode.
+        //
+        System.out.println("Opening environment in: " + homeDirectory);
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(new File(homeDirectory), envConfig);
+
+        // Set the Berkeley DB config for opening all stores.
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        // Create the Serial class catalog.  This holds the serialized class
+        // format for all database records of serial format.
+        //
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig);
+        javaCatalog = new StoredClassCatalog(catalogDb);
+
+        // Open the Berkeley DB database for the part, supplier and shipment
+        // stores.  The stores are opened with no duplicate keys allowed.
+        //
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+
+        // Open the SecondaryDatabase for the city index of the supplier store,
+        // and for the part and supplier indices of the shipment store.
+        // Duplicate keys are allowed since more than one supplier may be in
+        // the same city, and more than one shipment may exist for the same
+        // supplier or part.  A foreign key constraint is defined for the
+        // supplier and part indices to ensure that a shipment only refers to
+        // existing part and supplier keys.  The CASCADE delete action means
+        // that shipments will be deleted if their associated part or supplier
+        // is deleted.
+        //
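+        // For illustration only (hypothetical names, not part of this class):
+        // with CASCADE in effect, deleting a part also removes the shipments
+        // that refer to it, e.g.
+        //
+        //     partDb.delete(txn, partKeyEntry);  // associated shipment
+        //                                        // records are deleted too
+        //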
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog,
+                                                     SupplierData.class));
+        supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX,
+                                                     supplierDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog,
+                                                     ShipmentData.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX,
+                                                     shipmentDb, secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog,
+                                                     ShipmentData.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                     SHIPMENT_SUPPLIER_INDEX,
+                                                     shipmentDb, secConfig);
+    }
+
+    /**
+     * Return the storage environment for the database.
+     */
+    public final Environment getEnvironment() {
+
+        return env;
+    }
+
+    /**
+     * Return the class catalog.
+     */
+    public final StoredClassCatalog getClassCatalog() {
+
+        return javaCatalog;
+    }
+
+    /**
+     * Return the part storage container.
+     */
+    public final Database getPartDatabase() {
+
+        return partDb;
+    }
+
+    /**
+     * Return the supplier storage container.
+     */
+    public final Database getSupplierDatabase() {
+
+        return supplierDb;
+    }
+
+    /**
+     * Return the shipment storage container.
+     */
+    public final Database getShipmentDatabase() {
+
+        return shipmentDb;
+    }
+
+    /**
+     * Return the shipment-by-part index.
+     */
+    public final SecondaryDatabase getShipmentByPartDatabase() {
+
+        return shipmentByPartDb;
+    }
+
+    /**
+     * Return the shipment-by-supplier index.
+     */
+    public final SecondaryDatabase getShipmentBySupplierDatabase() {
+
+        return shipmentBySupplierDb;
+    }
+
+    /**
+     * Return the supplier-by-city index.
+     */
+    public final SecondaryDatabase getSupplierByCityDatabase() {
+
+        return supplierByCityDb;
+    }
+
+    /**
+     * Close all stores.  Secondary index databases are closed before their
+     * primary databases.
+     */
+    public void close()
+        throws DatabaseException {
+
+        // Close secondary databases, then primary databases.
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        // And don't forget to close the catalog and the environment.
+        javaCatalog.close();
+        env.close();
+    }
+
+    /**
+     * The SecondaryKeyCreator for the SupplierByCity index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the city key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the supplier value class.
+         */
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class valueClass) {
+
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the city key from a supplier key/value pair.  The city key
+         * is stored in the supplier value, so the supplier key is not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            SupplierData supplierData = (SupplierData) valueInput;
+            String city = supplierData.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(city);
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentByPart index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class ShipmentByPartKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the part key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the shipment value class.
+         */
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class valueClass) {
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the part key from a shipment key/value pair.  The part key
+         * is stored in the shipment key, so the shipment value is not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            String partNumber = primaryKeyInput.readString();
+            // don't bother reading the supplierNumber
+            indexKeyOutput.writeString(partNumber);
+            return true;
+        }
+    }
+
+    /**
+     * The SecondaryKeyCreator for the ShipmentBySupplier index.  This is an
+     * extension of the abstract class TupleSerialKeyCreator, which implements
+     * SecondaryKeyCreator for the case where the data keys are of the format
+     * TupleFormat and the data values are of the format SerialFormat.
+     */
+    private static class ShipmentBySupplierKeyCreator
+        extends TupleSerialKeyCreator {
+
+        /**
+         * Construct the supplier key extractor.
+         * @param catalog is the class catalog.
+         * @param valueClass is the shipment value class.
+         */
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class valueClass) {
+            super(catalog, valueClass);
+        }
+
+        /**
+         * Extract the supplier key from a shipment key/value pair.  The
+         * supplier key is stored in the shipment key, so the shipment value is
+         * not used.
+         */
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput) {
+
+            primaryKeyInput.readString(); // skip the partNumber
+            String supplierNumber = primaryKeyInput.readString();
+            indexKeyOutput.writeString(supplierNumber);
+            return true;
+        }
+    }
+}
diff --git a/examples/collections/ship/tuple/SampleViews.java b/examples/collections/ship/tuple/SampleViews.java
new file mode 100644
index 0000000000000000000000000000000000000000..7afd908e193b2cdd4fa081653f2afa4ee044e97a
--- /dev/null
+++ b/examples/collections/ship/tuple/SampleViews.java
@@ -0,0 +1,396 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SampleViews.java,v 1.22.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+
+/**
+ * SampleViews defines the data bindings and collection views for the sample
+ * database.
+ *
+ * @author Mark Hayes
+ */
+public class SampleViews {
+
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    private StoredSortedMap supplierByCityMap;
+
+    /**
+     * Create the data bindings and collection views.
+     */
+    public SampleViews(SampleDatabase db) {
+
+        // Create the data bindings.
+        // In this sample, EntityBinding classes are used to bind the stored
+        // key/data entry pair to a combined data object.  For keys, a
+        // one-to-one binding is implemented with EntryBinding classes to bind
+        // the stored tuple entry to a key Object.
+        //
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new PartKeyBinding();
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SupplierKeyBinding();
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new ShipmentKeyBinding();
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, ShipmentData.class);
+        EntryBinding cityKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+
+        // Create map views for all stores and indices.
+        // StoredSortedMap is used since the stores and indices are ordered
+        // (they use the DB_BTREE access method).
+        //
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+				partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+				supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+				shipmentKeyBinding, shipmentDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                                partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                                supplierKeyBinding, shipmentDataBinding, true);
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                                cityKeyBinding, supplierDataBinding, true);
+    }
+
+    // The views returned below can be accessed using the java.util.Map or
+    // java.util.Set interfaces, or using the StoredSortedMap and
+    // StoredValueSet classes, which provide additional methods.  The entity
+    // sets could be obtained directly from the Map.values() method but
+    // convenience methods are provided here to return them in order to avoid
+    // down-casting elsewhere.
+
+    /**
+     * Return a map view of the part storage container.
+     */
+    public StoredSortedMap getPartMap() {
+
+        return partMap;
+    }
+
+    /**
+     * Return a map view of the supplier storage container.
+     */
+    public StoredSortedMap getSupplierMap() {
+
+        return supplierMap;
+    }
+
+    /**
+     * Return a map view of the shipment storage container.
+     */
+    public StoredSortedMap getShipmentMap() {
+
+        return shipmentMap;
+    }
+
+    /**
+     * Return an entity set view of the part storage container.
+     */
+    public StoredSortedValueSet getPartSet() {
+
+        return (StoredSortedValueSet) partMap.values();
+    }
+
+    /**
+     * Return an entity set view of the supplier storage container.
+     */
+    public StoredSortedValueSet getSupplierSet() {
+
+        return (StoredSortedValueSet) supplierMap.values();
+    }
+
+    /**
+     * Return an entity set view of the shipment storage container.
+     */
+    public StoredSortedValueSet getShipmentSet() {
+
+        return (StoredSortedValueSet) shipmentMap.values();
+    }
+
+    /**
+     * Return a map view of the shipment-by-part index.
+     */
+    public StoredSortedMap getShipmentByPartMap() {
+
+        return shipmentByPartMap;
+    }
+
+    /**
+     * Return a map view of the shipment-by-supplier index.
+     */
+    public StoredSortedMap getShipmentBySupplierMap() {
+
+        return shipmentBySupplierMap;
+    }
+
+    /**
+     * Return a map view of the supplier-by-city index.
+     */
+    public final StoredSortedMap getSupplierByCityMap() {
+
+        return supplierByCityMap;
+    }
+
+    /**
+     * PartKeyBinding is used to bind the stored key tuple entry for a part to
+     * a key object representation.
+     */
+    private static class PartKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private PartKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String number = input.readString();
+            return new PartKey(number);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            PartKey key = (PartKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+
+    /**
+     * PartBinding is used to bind the stored key/data entry pair for a part
+     * to a combined data object (entity).
+     */
+    private static class PartBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private PartBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String number = keyInput.readString();
+            PartData data = (PartData) dataInput;
+            return new Part(number, data.getName(), data.getColor(),
+                            data.getWeight(), data.getCity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                 part.getWeight(), part.getCity());
+        }
+    }
+
+    /**
+     * SupplierKeyBinding is used to bind the stored key tuple entry for a
+     * supplier to a key object representation.
+     */
+    private static class SupplierKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private SupplierKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String number = input.readString();
+            return new SupplierKey(number);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            SupplierKey key = (SupplierKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+
+    /**
+     * SupplierBinding is used to bind the stored key/data entry pair for a
+     * supplier to a combined data object (entity).
+     */
+    private static class SupplierBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String number = keyInput.readString();
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(number, data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(), supplier.getStatus(),
+                                     supplier.getCity());
+        }
+    }
+
+    /**
+     * ShipmentKeyBinding is used to bind the stored key tuple entry for a
+     * shipment to a key object representation.
+     */
+    private static class ShipmentKeyBinding extends TupleBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private ShipmentKeyBinding() {
+        }
+
+        /**
+         * Create the key object from the stored key tuple entry.
+         */
+        public Object entryToObject(TupleInput input) {
+
+            String partNumber = input.readString();
+            String supplierNumber = input.readString();
+            return new ShipmentKey(partNumber, supplierNumber);
+        }
+
+        /**
+         * Create the stored key tuple entry from the key object.
+         */
+        public void objectToEntry(Object object, TupleOutput output) {
+
+            ShipmentKey key = (ShipmentKey) object;
+            output.writeString(key.getPartNumber());
+            output.writeString(key.getSupplierNumber());
+        }
+    }
+
+    /**
+     * ShipmentBinding is used to bind the stored key/data entry pair for a
+     * shipment to a combined data object (entity).
+     */
+    private static class ShipmentBinding extends TupleSerialBinding {
+
+        /**
+         * Construct the binding object.
+         */
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) {
+
+            super(classCatalog, dataClass);
+        }
+
+        /**
+         * Create the entity by combining the stored key and data.
+         */
+        public Object entryToObject(TupleInput keyInput, Object dataInput) {
+
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(partNumber, supplierNumber,
+                                data.getQuantity());
+        }
+
+        /**
+         * Create the stored key from the entity.
+         */
+        public void objectToKey(Object object, TupleOutput output) {
+
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+
+        /**
+         * Create the stored data from the entity.
+         */
+        public Object objectToData(Object object) {
+
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+}
diff --git a/examples/collections/ship/tuple/Shipment.java b/examples/collections/ship/tuple/Shipment.java
new file mode 100644
index 0000000000000000000000000000000000000000..c34cd7d3f6d6fc9985da008c1643748395a33fc4
--- /dev/null
+++ b/examples/collections/ship/tuple/Shipment.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Shipment.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A Shipment represents the combined key/data pair for a shipment entity.
+ *
+ * <p> In this sample, Shipment is created from the stored key/data entry
+ * using a TupleSerialBinding.  See {@link SampleViews.ShipmentBinding} for
+ * details.  Since this class is not used directly for data storage, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Shipment {
+
+    private String partNumber;
+    private String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/ShipmentData.java b/examples/collections/ship/tuple/ShipmentData.java
new file mode 100644
index 0000000000000000000000000000000000000000..22af46bd728e24261769ee370964dc8d5cc7367c
--- /dev/null
+++ b/examples/collections/ship/tuple/ShipmentData.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentData.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.Serializable;
+
+/**
+ * A ShipmentData serves as the value in the key/value pair for a shipment
+ * entity.
+ *
+ * <p> In this sample, ShipmentData is used only as the storage data for the
+ * value, while the Shipment object is used as the value's object
+ * representation.  Because it is used directly as storage data using
+ * serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentData implements Serializable {
+
+    private int quantity;
+
+    public ShipmentData(int quantity) {
+
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity() {
+
+        return quantity;
+    }
+
+    public String toString() {
+
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/ShipmentKey.java b/examples/collections/ship/tuple/ShipmentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..821b3818595cedb0f2af81495e3e8f05b4ed4f5e
--- /dev/null
+++ b/examples/collections/ship/tuple/ShipmentKey.java
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ShipmentKey.java,v 1.13.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A ShipmentKey serves as the key in the key/data pair for a shipment entity.
+ *
+ * <p> In this sample, ShipmentKey is bound to the key's tuple storage entry
+ * using a TupleBinding.  Because it is not used directly as storage data, it
+ * does not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class ShipmentKey {
+
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber) {
+
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber() {
+
+        return partNumber;
+    }
+
+    public final String getSupplierNumber() {
+
+        return supplierNumber;
+    }
+
+    public String toString() {
+
+        return "[ShipmentKey: supplier=" + supplierNumber +
+               " part=" + partNumber + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/Supplier.java b/examples/collections/ship/tuple/Supplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..46a60e03387e91b9d236b79f6d52b145b50aba53
--- /dev/null
+++ b/examples/collections/ship/tuple/Supplier.java
@@ -0,0 +1,63 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Supplier.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A Supplier represents the combined key/data pair for a supplier entity.
+ *
+ * <p> In this sample, Supplier is created from the stored key/data entry
+ * using a TupleSerialBinding.  See {@link SampleViews.SupplierBinding} for
+ * details.  Since this class is not used directly for data storage, it does
+ * not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Supplier {
+
+    private String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city) {
+
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/SupplierData.java b/examples/collections/ship/tuple/SupplierData.java
new file mode 100644
index 0000000000000000000000000000000000000000..ec0f7f7da4fab8d717a4372dd92d0841fd2c4676
--- /dev/null
+++ b/examples/collections/ship/tuple/SupplierData.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierData.java,v 1.14.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.Serializable;
+
+/**
+ * A SupplierData serves as the value in the key/value pair for a supplier
+ * entity.
+ *
+ * <p> In this sample, SupplierData is used only as the storage data for the
+ * value, while the Supplier object is used as the value's object
+ * representation.  Because it is used directly as storage data using
+ * serial format, it must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierData implements Serializable {
+
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city) {
+
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName() {
+
+        return name;
+    }
+
+    public final int getStatus() {
+
+        return status;
+    }
+
+    public final String getCity() {
+
+        return city;
+    }
+
+    public String toString() {
+
+        return "[SupplierData: name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/SupplierKey.java b/examples/collections/ship/tuple/SupplierKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2f35e40c3160797f7b631beaef080193224ca3a
--- /dev/null
+++ b/examples/collections/ship/tuple/SupplierKey.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SupplierKey.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+/**
+ * A SupplierKey serves as the key in the key/data pair for a supplier entity.
+ *
+ * <p> In this sample, SupplierKey is bound to the key's tuple storage entry
+ * using a TupleBinding.  Because it is not used directly as storage data, it
+ * does not need to be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class SupplierKey {
+
+    private String number;
+
+    public SupplierKey(String number) {
+
+        this.number = number;
+    }
+
+    public final String getNumber() {
+
+        return number;
+    }
+
+    public String toString() {
+
+        return "[SupplierKey: number=" + number + ']';
+    }
+}
diff --git a/examples/collections/ship/tuple/Weight.java b/examples/collections/ship/tuple/Weight.java
new file mode 100644
index 0000000000000000000000000000000000000000..d45d777343cb83d6eb561c603e0f40e43d9f9f70
--- /dev/null
+++ b/examples/collections/ship/tuple/Weight.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Weight.java,v 1.11.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package collections.ship.tuple;
+
+import java.io.Serializable;
+
+/**
+ * Weight represents a weight amount and unit of measure.
+ *
+ * <p> In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable. </p>
+ *
+ * @author Mark Hayes
+ */
+public class Weight implements Serializable {
+
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units) {
+
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount() {
+
+        return amount;
+    }
+
+    public final String getUnits() {
+
+        return units;
+    }
+
+    public String toString() {
+
+        return "[" + amount + ' ' + units + ']';
+    }
+}
diff --git a/examples/jca/HOWTO-jboss.txt b/examples/jca/HOWTO-jboss.txt
new file mode 100644
index 0000000000000000000000000000000000000000..391020b8bcf5ee6fc21fd9d4974b429e8d03d2c1
--- /dev/null
+++ b/examples/jca/HOWTO-jboss.txt
@@ -0,0 +1,217 @@
+How to use the Berkeley DB Java Edition JCA Resource Adapter in JBoss 3.2.6
+
+Prerequisites:
+
+JBoss 3.2.6
+ant 1.5.4 or later
+J2EE jar files (available in the JBoss distribution)
+
+This HOWTO describes: 
+
+    (1) how to build and deploy the Berkeley DB Java Edition JCA Resource
+        Adapter under the JBoss Application Server (v3.2.6).
+
+    (2) how to run a simple smoke test to test that the RA has been
+        deployed correctly. 
+        
+    (3) some notes on writing applications that use the RA.
+
+The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries,
+but the regular JE code does not require these libraries in order
+to build.  Therefore, the "ant compile" target only builds the
+non-J2EE based code.  To build the JE JCA libraries and examples, it is
+necessary to have the appropriate J2EE jar files available and to use
+additional ant targets.
+
+Building the Resource Adapter
+-----------------------------
+
+- Edit <JE_HOME>/src/com/sleepycat/je/jca/ra/ra.xml.  
+
+  (1) Search for "<transaction-support>"
+
+  (2) Select the appropriate value (LocalTransaction, NoTransaction, or
+      XATransaction), and comment out or delete the other two.  Don't use
+      multiple values of <transaction-support>.  
+
+  (3) Change the value of the <security-permission-spec> to refer to the JE
+  environment directory.  JBoss needs this to grant access permission
+  to JE; otherwise security exceptions will result.
+
+  Note:
+
+  If you use XATransaction, all your databases must be transactional.
+
+- Edit <JE-HOME>/build.properties:
+
+  (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example,
+
+          j2ee.jarfile = <JBOSS-HOME>/client/jbossall-client.jar
+
+       The value specified for j2ee.jarfile should contain all the classes
+       necessary for proper execution of the JCA Resource Adapter (for
+       example, JNDI).  The jbossall-client.jar is sufficient.
+
+  (2) Set example.resources to an appropriate value, e.g.
+
+         example.resources = <JE-HOME>/examples/resources/jboss
+
+      The example.resources property should contain a jndi.properties file
+      that is correct for the target environment.  If you are using the
+      jndi.properties supplied in the {example.resources} directory,
+      review it to make sure it has correct values.
+
+- With the current directory set to <JE-HOME>, execute
+
+     ant jca
+
+  This creates a jejca.rar Resource Adapter Archive in <JE_HOME>/build/lib.
+  The jejca.rar archive contains a je.jar file.
+
+- Deploy the JE Resource Adapter (<JE_HOME>/build/lib/jejca.rar),
+  using an appropriate JBoss deployment tool or by simply copying it
+  to the JBoss deployment directory.  For example,
+
+    copy <JE_HOME>/build/lib/jejca.rar <JBOSS>/server/default/deploy
+
+- If the JBoss server is not already running, start it now.
+
+Building the "SimpleBean" Example:
+----------------------------------
+
+The SimpleBean example is an EJB that has two methods, get() and
+put(), which get and put data using the JE Resource Adapter on the
+JBoss server.  You can use this example to test the JE Resource
+Adapter that you just deployed.
+
+- Edit <JE_HOME>/build.properties:
+
+  (1) Set example.jca.srcdir to <JE_HOME>/examples/jca/jboss
+
+        example.jca.srcdir = <JE_HOME>/examples/jca/jboss
+
+      This is the directory where the JBoss specific deployment descriptor
+      for the "simple" EJB resides.
+
+  (2) Set example.jca.descriptorname to jboss.xml.
+
+        example.jca.descriptorname = jboss.xml
+
+      This is the name of the jboss specific deployment descriptor for the
+      "simple" EJB.
+
+- Edit the source code for SimpleBean to refer to the correct
+  directory for the JE Environment.  The JE Environment directory is
+  the same one that was specified in the ra.xml file under the
+  <security-permission-spec> tag.  This directory should exist and
+  the JBoss server should have write permission to that directory.
+  The source code for SimpleBean is in
+
+     <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+  To set the directory, change the value of JE_ENV at the top of the
+  class.  For example,
+
+     private final String JE_ENV = "/tmp/je_store";
+
+- Edit the jboss.xml descriptor in
+
+     <JE_HOME>/examples/jca/jboss/jboss.xml
+
+  to use the jndi-name that corresponds to the transaction-support
+  value in the ra.xml file above.  That is, select one of the
+  following three lines and comment out or remove the other two:
+
+            <jndi-name>java:/LocalTransJE</jndi-name>
+            <jndi-name>java:/NoTransJE</jndi-name>
+            <jndi-name>java:/XATransJE</jndi-name>
+
+- Build the SimpleBean example and jar file.
+
+     ant jca-examples
+
+  This builds a jejca-example.jar file and places it in the
+  <JE_HOME>/build/lib directory.  The jar file contains the SimpleBean
+  classes, and the ejb-jar.xml and jboss.xml descriptor files.
+
+- Deploy the jejca-example jar by copying it to a deployment directory
+  (or use an appropriate deployment tool).  For example,
+
+    copy <JE_HOME>/build/lib/jejca-example.jar <JBOSS>/server/default/deploy
+
+- Depending on which transaction support you have selected, examine the
+  corresponding RA service configuration file in
+
+     <JE_HOME>/examples/jca/jboss
+
+  (e.g. je-localtx-ds.xml).  Ensure that the jndi-name matches the
+  name that you selected in the jboss.xml file in the same directory.
+
+- Deploy the RA service configuration file (e.g. je-localtx-ds.xml) by
+  copying it to the JBoss server deployment directory or using an
+  appropriate deployment tool.  For example,
+
+     copy <JE_HOME>/examples/jca/jboss/je-localtx-ds.xml
+          <JBOSS>/server/default/deploy
+
+Running the "SimpleBean" Example:
+---------------------------------
+
+- Verify that the JBoss server has been started.
+
+- Run the client:
+
+    ant testex-jejcasimple -Dkey=foo -Ddata=bar
+
+  This should produce:
+
+    Buildfile: build.xml
+
+    testex-jejcasimple:
+     [java] Created Simple
+     [java] Simple.get('foo') = bar
+
+    BUILD SUCCESSFUL
+    Total time: 3 seconds
+
+If you don't see
+
+     [java] Simple.get('foo') = bar
+
+printed (for example, you see Simple.get('foo') = null), there may be
+a configuration problem.  Check the server logfile for details.
+
+Implementation Notes for Applications Using the RA
+--------------------------------------------------
+
+Please refer to the SimpleBean example in
+
+    <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+- Obtain a JEConnection using the
+
+     JEConnectionFactory.getConnection()
+
+  method and passing it an environment home directory and
+  EnvironmentConfig object. Once the JEConnection has been obtained,
+  you can obtain the Environment handle by calling
+
+     JEConnection.getEnvironment();
+
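+  As a minimal sketch (drawn from the SimpleBean example; the environment
+  path and the "ra/JEConnectionFactory" reference name are the values used
+  by the example descriptors, so adjust them for your deployment):
+
+     EnvironmentConfig envConfig = new EnvironmentConfig();
+     envConfig.setTransactional(true);
+     envConfig.setAllowCreate(true);
+
+     Context enc = (Context) new InitialContext().lookup("java:comp/env");
+     JEConnectionFactory dcf =
+         (JEConnectionFactory) enc.lookup("ra/JEConnectionFactory");
+     JEConnection dc = dcf.getConnection("/tmp/je_store", envConfig);
+     Environment env = dc.getEnvironment();
+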
+- Database handle cache available
+
+Because bean business methods may be relatively short, the underlying
+ManagedConnection object for JE provides a Database handle cache.
+This speeds up the Database open operation since the handle
+(generally) already exists in the cache.  Normally, a program opens a
+database using the Environment.openDatabase() method.  In the EJB
+environment, the program should call JEConnection.openDatabase()
+instead.  Database handles obtained using this method should not be
+close()'d as the ManagedConnection code does that when the
+ManagedConnection is closed.
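+
+For example, a minimal sketch (following SimpleBean; here key and data are
+assumed to be String variables, and dc is the JEConnection obtained as shown
+above):
+
+     DatabaseConfig dbConfig = new DatabaseConfig();
+     dbConfig.setAllowCreate(true);
+     dbConfig.setTransactional(true);
+
+     Database db = dc.openDatabase("db", dbConfig);
+     db.put(null, new DatabaseEntry(key.getBytes("UTF-8")),
+            new DatabaseEntry(data.getBytes("UTF-8")));
+     dc.close();    // return the connection; do not call db.close(), the
+                    // ManagedConnection manages the cached Database handle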
+
+- Databases under XA must be transactional
+
+If you are using the XATransaction environment (as specified in the
+ra.xml file), all JE Databases used in that environment must be
+transactional.
diff --git a/examples/jca/HOWTO-oc4j.txt b/examples/jca/HOWTO-oc4j.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2456a03a8646d3a2f62de95f13c7650044436171
--- /dev/null
+++ b/examples/jca/HOWTO-oc4j.txt
@@ -0,0 +1,266 @@
+How to use the Berkeley DB Java Edition JCA Resource Adapter in the
+Oracle Containers for J2EE version 10.1.3.2.0.
+
+Prerequisites:
+
+- OC4J version 10.1.3.2.0
+- ant 1.5.4 or later
+- J2EE jar files (available in the OC4J distribution)
+
+This HOWTO describes: 
+
+    (1) how to build and deploy the Berkeley DB Java Edition JCA Resource
+        Adapter under OC4J version 10.1.3.2.0
+
+    (2) how to run a simple smoke test to test that the RA has been
+        deployed correctly.
+
+    (3) some notes on writing applications that use the RA.
+
+The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries,
+but the regular JE code does not require these libraries in order to
+be built.  Therefore, the "ant compile" target only builds the
+non-J2EE based code.  To build the JE JCA libraries and examples, it
+is necessary to have the appropriate J2EE jar files available and to
+use additional ant targets.
+
+Building the Resource Adapter
+-----------------------------
+
+- Edit <JE_HOME>/src/com/sleepycat/je/jca/ra/ra.xml.  
+
+  (1) Search for "<transaction-support>"
+
+  (2) Select the appropriate value (LocalTransaction, NoTransaction, or
+      XATransaction) and comment out or delete the other two.  Don't use
+      multiple values of <transaction-support>.  
+  
+  (3) Change the value of the <security-permission-spec> to refer to the
+      JE environment directory.  OC4J needs this to grant file access
+      permission to JE; otherwise security exceptions will result.
+
+- Edit <JE_HOME>/build.properties:
+
+  (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example,
+
+         j2ee.jarfile = <OC4J-HOME>/j2ee/home/lib/ejb.jar:
+	                <OC4J-HOME>/oc4j/j2ee/home/lib/connector.jar:
+			<OC4J-HOME>/oc4j/j2ee/home/lib/oc4j-internal.jar
+
+       The value specified for j2ee.jarfile should contain all the classes
+       necessary for proper execution of the JCA Resource Adapter (for
+       example, JNDI).
+
+  (2) Set example.resources to an appropriate value, e.g.
+
+         example.resources = <JE-HOME>/examples/resources/oc4j/oc4j.jar
+
+      The oc4j.jar file contains an application-client.xml file which looks
+      like this:
+
+bash-3.00$ cat examples/resources/oc4j/META-INF/application-client.xml
+<application-client>
+<ejb-ref>
+  <ejb-ref-name>SimpleBean</ejb-ref-name>
+  <ejb-ref-type>Session</ejb-ref-type>
+  <home>jca.simple.SimpleHome</home>
+  <remote>jca.simple.Simple</remote>
+</ejb-ref>
+</application-client>
+
+- With the current directory set to <JE_HOME>, execute
+
+     ant jca
+
+  This creates a jejca.rar Resource Adapter Archive in
+  <JE_HOME>/build/lib.  The jejca.rar contains a je.jar file.
+
+- If OC4J is not already started, start it now.
+
+     oc4j -start
+
+  Note:
+
+  The server can be stopped with the oc4j -shutdown command or with the
+  admin_client.jar tool.  For example,
+
+     oc4j -shutdown -port 23791 -password <your admin password>
+
+or
+
+     java -jar <OC4J-HOME>/j2ee/home/admin_client.jar
+           deployer:oc4j:localhost:23791 oc4jadmin <password> -shutdown
+
+- Deploy the JE Resource Adapter (<JE_HOME>/build/lib/jejca.rar),
+  using the Oracle Application Server Control web tool:
+
+    http://<hostname>:8888
+    login if necessary
+    Applications tab
+    In the "View" pulldown, select 'Standalone Resource Adapters'
+    Select "Deploy"
+    In the "Archive" section of the screen, enter the file name of the
+    jejca.rar file (<JE-HOME>/build/lib/jejca.rar).
+    Select "Next"
+    Enter "JEConnection" for Resource Adapter Name
+    Select "Next"
+    Select "Deploy"
+
+- Create the connector connection pool and connector resource:
+
+    After the RA has been successfully deployed, select "Return"
+    On the Home | Applications | Stand Alone Resource Adapters page,
+      select "Berkeley DB Java Edition JCA Adapter" link
+    Select "Connection Factories"
+    Under "Shared Connection Pools", select "Create"
+    Enter "JEConnectionPool" for Connection Pool Name and Select "OK"
+    Above "JNDI Location" select "Create"
+    Make sure that com.sleepycat.je.jca.ra.JEConnectionFactory is selected
+      in the pull down menu and select "Continue"
+    Enter "ra/JEConnectionPool" for "JNDI Location"
+    Select "Use Shared Connection Pool" and chose "JEConnectionPool" in the
+      menu.  Select "Finish"
+
+- If you change or rebuild the jejca.rar file, you must redeploy the Resource
+  Adapter using the same steps as above.
+
+Building the "SimpleBean" Example:
+----------------------------------
+
+The SimpleBean example is an EJB that has two methods, get() and
+put(), which get and put data using the JE Resource Adapter on the
+OC4J server.  You can use this example to test the JE RA that you
+just deployed.
+
+- Edit <JE_HOME>/build.properties:
+
+  (1) Set example.jca.srcdir to <JE_HOME>/examples/jca/oc4j
+
+         example.jca.srcdir = <JE_HOME>/examples/jca/oc4j
+
+    This is the directory where the OC4J specific deployment descriptor
+    for the "simple" EJB resides.
+
+  (2) Set example.jca.descriptorname to orion-ejb-jar.xml.
+
+        example.jca.descriptorname = orion-ejb-jar.xml
+
+      This is the name of the OC4J specific deployment descriptor for the
+      "simple" EJB.
+
+- Edit <JE_HOME>/examples/jca/simple/ejb-jar.xml
+  Uncomment the <assembly-descriptor> section at the end of the file.
+
+- Edit the source code for SimpleBean to refer to the correct
+  directory for the JE Environment.  The JE Environment directory is
+  the same one that was specified in the ra.xml file under the
+  <security-permission-spec> tag.  This directory should exist and
+  the OC4J server should have write permission to that directory.
+  The source code for SimpleBean is in
+
+     <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+  To set the directory, change the value of JE_ENV at the top of the
+  class.  For example,
+
+     private final String JE_ENV = "/tmp/je_store";
+
+- Edit the source code for SimpleBean to have the correct value for
+  TRANSACTIONAL.  If you set it to true, you should also set the
+  proper value in the ra.xml for <transaction-support> (either
+  LocalTransaction or XATransaction).  
+
+- Edit the SimpleClient.java file to have correct values for the JNDI lookup
+  properties (java.naming.*).
+
+- Edit the SimpleClient.java file to change the value of OC4J to true.
+
+- Build the SimpleBean example and jar file.
+
+     ant jca-examples
+
+  This builds a jejca-example.jar file and places it in the
+  <JE_HOME>/build/lib directory.  The jar file contains the SimpleBean
+  classes, and the ejb-jar.xml and orion-ejb-jar.xml descriptor files.
+
+- Deploy the jejca-example jar using the Oracle Application Server
+  Control web tool:
+
+    http://<hostname>:8888
+    login if necessary
+    Applications tab
+    Select "Applications" from the "View" pulldown.
+    Select "Deploy"
+    Enter the location of the jejca-example.jar file
+    (<JE-HOME>/build/lib/jejca-example.jar) in the
+    "Location on Server" box in the "Archive" section.
+    Select "Next".
+    Enter "Simple" in the "Application Name" box.  Select "Next".
+    On the "Deploy: Deployment Settings" page, click the pencil next to
+    "Map Environment References".
+    In the "Map Resource References" section, enter "ra/JEConnectionFactory"
+    in the form box for the "ra/JEConnectionFactory" Resource Reference.
+    Select "OK".
+    Select "Deploy".
+
+
+Running the "SimpleBean" Example:
+---------------------------------
+
+- Verify that the OC4J server has been started.
+
+- Run the client:
+
+    ant testex-jejcasimple -Dkey=foo -Ddata=bar
+
+  This should produce:
+
+    Buildfile: build.xml
+
+    testex-jejcasimple:
+     [java] Created Simple
+     [java] Simple.get('foo') = bar
+
+    BUILD SUCCESSFUL
+    Total time: 3 seconds
+
+If you don't see
+
+     [java] Simple.get('foo') = bar
+
+printed (for example, you see Simple.get('foo') = null), there may be
+a configuration problem.  Check the server.log for details.
+
+Implementation Notes for Applications Using the RA
+--------------------------------------------------
+
+Please refer to the SimpleBean example in
+
+    <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+- Obtain a JEConnection using the
+
+     JEConnectionFactory.getConnection()
+
+  method and passing it an environment home directory and
+  EnvironmentConfig object. Once the JEConnection has been obtained,
+  you can obtain the Environment handle by calling
+
+     JEConnection.getEnvironment();
+
+- Database handle cache available
+
+Because bean business methods may be relatively short, the underlying
+ManagedConnection object for JE provides a Database handle cache.
+This speeds up the Database open operation since the handle
+(generally) already exists in the cache.  Normally, a program opens a
+database using the Environment.openDatabase() method.  In the EJB
+environment, the program should call JEConnection.openDatabase()
+instead.  Database handles obtained using this method should not be
+close()'d as the ManagedConnection code does that when the
+ManagedConnection is closed.
+
+- Databases under XA must be transactional
+
+If you are using the XATransaction environment (as specified in the
+ra.xml file), all JE Databases used in that environment must be
+transactional.
diff --git a/examples/jca/HOWTO-sjsas.txt b/examples/jca/HOWTO-sjsas.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2b0643716337fb8f68bf233bd185daefe12c2f46
--- /dev/null
+++ b/examples/jca/HOWTO-sjsas.txt
@@ -0,0 +1,243 @@
+How to use the Berkeley DB Java Edition JCA Resource Adapter in the
+Sun Java System Application Server 8.1
+
+Prerequisites:
+
+- Sun Java System Application Server 8.1
+- ant 1.5.4 or later
+- J2EE jar files (available in the SJSAS distribution)
+
+This HOWTO describes: 
+
+    (1) how to build and deploy the Berkeley DB Java Edition JCA Resource
+        Adapter under the Sun Java System Application Server (v8.1).
+
+    (2) how to run a simple smoke test to test that the RA has been
+        deployed correctly.
+    
+    (3) some notes on writing applications that use the RA.
+
+The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries,
+but the regular JE code does not require these libraries in order to
+be built.  Therefore, the "ant compile" target only builds the
+non-J2EE based code.  To build the JE JCA libraries and examples, it
+is necessary to have the appropriate J2EE jar files available and to
+use additional ant targets.
+
+Building the Resource Adapter
+-----------------------------
+
+- Edit <JE_HOME>/src/com/sleepycat/je/jca/ra/ra.xml.  
+
+  (1) Search for "<transaction-support>"
+
+  (2) Select the appropriate value (LocalTransaction, NoTransaction, or
+      XATransaction) and comment out or delete the other two.  Don't use
+      multiple values of <transaction-support>.  
+  
+  (3) Change the value of the <security-permission-spec> to refer to the
+      JE environment directory.  SJSAS needs this to grant file access
+      permission to JE; otherwise security exceptions will result.
+
+- Edit <SJSAS-HOME>/domains/domain1/config/server.policy to include
+
+	permission java.io.FilePermission
+         "/tmp/je_store/*", "read,write";
+
+	permission java.util.logging.LoggingPermission "control";
+ 
+  in the section with the comment:
+
+   // Basic set of required permissions granted to all remaining code
+
+  You should grant java.io.FilePermission to the server for the
+  directory where your JE environment will reside (i.e. /tmp/je_store,
+  or whatever directory you are using).
+
+- Edit <JE_HOME>/build.properties:
+
+  (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example,
+
+         j2ee.jarfile = <SJSAS-HOME>/lib/j2ee.jar
+
+       The value specified for j2ee.jarfile should contain all the classes
+       necessary for proper execution of the JCA Resource Adapter (for
+       example, JNDI).  The j2ee.jar file noted here meets all the
+       necessary requirements.
+
+  (2) Set example.resources to an appropriate value, e.g.
+
+         example.resources = <SJSAS-HOME>/lib/appserv-rt.jar
+
+      The example.resources property should contain a jndi.properties file
+      that is correct for the target environment.  appserv-rt.jar contains
+      an appropriate jndi.properties.
+
+- With the current directory set to <JE_HOME>, execute
+
+     ant jca
+
+  This creates a jejca.rar Resource Adapter Archive in
+  <JE_HOME>/build/lib.  The jejca.rar contains a je.jar file.
+
+- If SJSAS is not already started, start it now.
+
+     asadmin start-domain domain1
+
+  Note:
+
+  The server can be stopped with the asadmin stop-domain command.  e.g.
+
+     asadmin stop-domain
+
+- Deploy the JE Resource Adapter (<JE_HOME>/build/lib/jejca.rar),
+  using the asadmin tool:
+
+     asadmin deploy --user administrator --host localhost --port 4848 \
+             --force=true --name JEConnection --upload=true \
+	     build/lib/jejca.rar
+
+- Create the connector connection pool and connector resource:
+
+     asadmin create-connector-connection-pool --user administrator \
+             --host localhost --port 4848 --raname JEConnection \
+	     --connectiondefinition \
+	     com.sleepycat.je.jca.ra.JEConnectionFactory \
+	     JEConnectionPool
+
+     asadmin create-connector-resource --user administrator --host localhost \
+             --port 4848 --poolname JEConnectionPool ra/JEConnectionFactory
+
+  Note:
+
+  The connector resource and connection pool can be deleted with the
+  delete-connector-resource and delete-connector-connection-pool options
+  to asadmin.  For example,
+
+      asadmin delete-connector-resource --user administrator --host localhost \
+              --port 4848 ra/JEConnectionFactory
+
+      asadmin delete-connector-connection-pool --user administrator \
+              --host localhost --port 4848 --cascade=true JEConnectionPool
+
+Building the "SimpleBean" Example:
+----------------------------------
+
+The SimpleBean example is an EJB that has two methods, get() and
+put(), which get and put data using the JE Resource Adapter on the
+SJSAS server.  You can use this example to test the JE RA that you
+just deployed.
+
+- Edit <JE_HOME>/build.properties:
+
+  (1) Set example.jca.srcdir to <JE_HOME>/examples/jca/sjsas8_1
+
+         example.jca.srcdir = <JE_HOME>/examples/jca/sjsas8_1
+
+    This is the directory where the SJSAS specific deployment descriptor
+    for the "simple" EJB resides.
+
+  (2) Set example.jca.descriptorname to sun-ejb-jar.xml.
+
+        example.jca.descriptorname = sun-ejb-jar.xml
+
+      This is the name of the SJSAS specific deployment descriptor for the
+      "simple" EJB.
+
+- Edit the source code for SimpleBean to refer to the correct
+  directory for the JE Environment.  The JE Environment directory is
+  the same one that was specified in the ra.xml file under the
+  <security-permission-spec> tag.  This directory should exist and
+  the SJSAS server should have write permission to that directory.
+  The source code for SimpleBean is in
+
+     <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+  To set the directory, change the value of JE_ENV at the top of the
+  class.  For example,
+
+     private final String JE_ENV = "/tmp/je_store";
+
+- Edit the sun-ejb-jar.xml descriptor in
+
+     <JE_HOME>/examples/jca/sjsas8_1/sun-ejb-jar.xml
+
+  and ensure that the jndi-name and res-ref-name correspond to the
+  name of the connector resource that was created above during the RA
+  deployment.  It should be "ra/JEConnectionFactory".
+
+- Build the SimpleBean example and jar file.
+
+     ant jca-examples
+
+  This builds a jejca-example.jar file and places it in the
+  <JE_HOME>/build/lib directory.  The jar file contains the SimpleBean
+  classes, and the ejb-jar.xml and sun-ejb-jar.xml descriptor files.
+
+- Deploy the jejca-example jar using the asadmin tool.
+
+     asadmin deploy --user administrator --host localhost --port 4848 \
+             --force=true --name Simple --upload=true \
+	     build/lib/jejca-example.jar
+
+Running the "SimpleBean" Example:
+---------------------------------
+
+- Verify that the SJSAS server has been started.
+
+- Run the client:
+
+    ant testex-jejcasimple -Dkey=foo -Ddata=bar
+
+  This should produce:
+
+    Buildfile: build.xml
+
+    testex-jejcasimple:
+     [java] Created Simple
+     [java] Simple.get('foo') = bar
+
+    BUILD SUCCESSFUL
+    Total time: 3 seconds
+
+If you don't see
+
+     [java] Simple.get('foo') = bar
+
+printed (for example, you see Simple.get('foo') = null), there may be
+a configuration problem.  Check the server.log for details.
+
+Implementation Notes for Applications Using the RA
+--------------------------------------------------
+
+Please refer to the SimpleBean example in
+
+    <JE_HOME>/examples/jca/simple/SimpleBean.java
+
+- Obtain a JEConnection using the
+
+     JEConnectionFactory.getConnection()
+
+  method and passing it an environment home directory and
+  EnvironmentConfig object. Once the JEConnection has been obtained,
+  you can obtain the Environment handle by calling
+
+     JEConnection.getEnvironment();
+
+- Database handle cache available
+
+Because bean business methods may be relatively short, the underlying
+ManagedConnection object for JE provides a Database handle cache.
+This speeds up the Database open operation since the handle
+(generally) already exists in the cache.  Normally, a program opens a
+database using the Environment.openDatabase() method.  In the EJB
+environment, the program should call JEConnection.openDatabase()
+instead.  Database handles obtained using this method should not be
+close()'d as the ManagedConnection code does that when the
+ManagedConnection is closed.
+
+- Databases under XA must be transactional
+
+If you are using the XATransaction environment (as specified in the
+ra.xml file), all JE Databases used in that environment must be
+transactional.
diff --git a/examples/jca/jboss/jboss.xml b/examples/jca/jboss/jboss.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e83bb28edd78eb5a88dd99c892c71070350766
--- /dev/null
+++ b/examples/jca/jboss/jboss.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+<jboss>
+   <enterprise-beans>
+      <session>
+         <ejb-name>SimpleBean</ejb-name>
+         <resource-ref>
+            <res-ref-name>ra/JEConnectionFactory</res-ref-name>
+            <jndi-name>java:/LocalTransJE</jndi-name>
+	    <!--
+            <jndi-name>java:/NoTransJE</jndi-name>
+            <jndi-name>java:/XATransJE</jndi-name>
+	    -->
+         </resource-ref>
+      </session>
+   </enterprise-beans>
+</jboss>
diff --git a/examples/jca/jboss/je-localtx-ds.xml b/examples/jca/jboss/je-localtx-ds.xml
new file mode 100644
index 0000000000000000000000000000000000000000..384fb1bd654956cfa8735e5c4d1b6f8df6ccd15a
--- /dev/null
+++ b/examples/jca/jboss/je-localtx-ds.xml
@@ -0,0 +1,9 @@
+<!-- The Berkeley DB Java Edition Local transaction Resource
+     Adaptor service configuration. -->
+<connection-factories>
+   <tx-connection-factory>
+      <jndi-name>LocalTransJE</jndi-name>
+      <local-transaction/>
+      <adapter-display-name>Berkeley DB Java Edition JCA Adapter</adapter-display-name>
+   </tx-connection-factory>
+</connection-factories>
diff --git a/examples/jca/jboss/je-no-tx-ds.xml b/examples/jca/jboss/je-no-tx-ds.xml
new file mode 100644
index 0000000000000000000000000000000000000000..924029dca9af5f6f873a1531da132f630a266048
--- /dev/null
+++ b/examples/jca/jboss/je-no-tx-ds.xml
@@ -0,0 +1,8 @@
+<!-- The Berkeley DB Java Edition No transaction Resource
+     Adaptor service configuration. -->
+<connection-factories>
+   <no-tx-connection-factory>
+      <jndi-name>NoTransJE</jndi-name>
+      <adapter-display-name>Berkeley DB Java Edition JCA Adapter</adapter-display-name>
+   </no-tx-connection-factory>
+</connection-factories>
diff --git a/examples/jca/jboss/je-xa-ds.xml b/examples/jca/jboss/je-xa-ds.xml
new file mode 100644
index 0000000000000000000000000000000000000000..07a5f98fd472c0fbedef122b92f482a82aa4c7d2
--- /dev/null
+++ b/examples/jca/jboss/je-xa-ds.xml
@@ -0,0 +1,9 @@
+<!-- The Berkeley DB Java Edition XA transaction Resource
+     Adaptor service configuration. -->
+<connection-factories>
+   <tx-connection-factory>
+      <jndi-name>XATransJE</jndi-name>
+      <xa-transaction/>
+      <adapter-display-name>Berkeley DB Java Edition JCA Adapter</adapter-display-name>
+   </tx-connection-factory>
+</connection-factories>
diff --git a/examples/jca/oc4j/orion-ejb-jar.xml b/examples/jca/oc4j/orion-ejb-jar.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a1f54b20d07261b1105008dd53e406b70f629a5b
--- /dev/null
+++ b/examples/jca/oc4j/orion-ejb-jar.xml
@@ -0,0 +1,2 @@
+<orion-ejb-jar>
+</orion-ejb-jar>
diff --git a/examples/jca/simple/Simple.java b/examples/jca/simple/Simple.java
new file mode 100644
index 0000000000000000000000000000000000000000..69e75df68bdd27942d25ddbcd75e1399d0f7b8d9
--- /dev/null
+++ b/examples/jca/simple/Simple.java
@@ -0,0 +1,16 @@
+package jca.simple;
+
+import java.rmi.RemoteException;
+import javax.ejb.EJBObject;
+
+public interface Simple extends EJBObject {
+
+    public void put(String key, String data)
+        throws RemoteException;
+
+    public String get(String key)
+        throws RemoteException;
+
+    public void removeDatabase()
+        throws RemoteException;
+}
diff --git a/examples/jca/simple/SimpleBean.java b/examples/jca/simple/SimpleBean.java
new file mode 100644
index 0000000000000000000000000000000000000000..6c814ff43f81a2d922cea6931e748468b30d4967
--- /dev/null
+++ b/examples/jca/simple/SimpleBean.java
@@ -0,0 +1,208 @@
+package jca.simple;
+
+import java.rmi.RemoteException;
+
+import javax.ejb.SessionBean;
+import javax.ejb.SessionContext;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.jca.ra.JEConnection;
+import com.sleepycat.je.jca.ra.JEConnectionFactory;
+
+public class SimpleBean implements SessionBean {
+
+    /*
+     * Set this to something appropriate for your environment.  Make sure it
+     * matches the ra.xml.
+     */
+    private final String JE_ENV = "/tmp/je_store";
+    private final boolean TRANSACTIONAL = true;
+
+    private SessionContext sessionCtx;
+
+    public void ejbCreate() {
+    }
+
+    public void ejbRemove() {
+    }
+
+    public void setSessionContext(SessionContext context) {
+	sessionCtx = context;
+    }
+
+    public void unsetSessionContext() {
+	sessionCtx = null;
+    }
+
+    public void ejbActivate() {
+    }
+
+    public void ejbPassivate() {
+    }
+
+    public void put(String key, String data)
+	throws RemoteException {
+
+	try {
+	    @SuppressWarnings("unused")
+            Environment env = null;
+	    @SuppressWarnings("unused")
+            Transaction txn = null;
+	    Database db = null;
+	    @SuppressWarnings("unused")
+            SecondaryDatabase secDb = null;
+	    Cursor cursor = null;
+	    JEConnection dc = null;
+	    try {
+		dc = getConnection(JE_ENV);
+
+		env = dc.getEnvironment();
+		DatabaseConfig dbConfig = new DatabaseConfig();
+		SecondaryConfig secDbConfig = new SecondaryConfig();
+		dbConfig.setAllowCreate(true);
+		dbConfig.setTransactional(TRANSACTIONAL);
+		secDbConfig.setAllowCreate(true);
+		secDbConfig.setTransactional(TRANSACTIONAL);
+		secDbConfig.setKeyCreator(new MyKeyCreator());
+
+		/*
+		 * Use JEConnection.openDatabase() to obtain a cached Database
+		 * handle.  Do not call close() on Database handles obtained
+		 * using this method.
+		 */
+		db = dc.openDatabase("db", dbConfig);
+		secDb = dc.openSecondaryDatabase("secDb", db, secDbConfig);
+		cursor = db.openCursor(null, null);
+		cursor.put(new DatabaseEntry(key.getBytes("UTF-8")),
+			   new DatabaseEntry(data.getBytes("UTF-8")));
+	    } finally {
+		if (cursor != null) {
+		    cursor.close();
+		}
+		if (dc != null) {
+		    dc.close();
+		}
+	    }
+	} catch (Exception e) {
+	    System.err.println("Failure in put" + e);
+	}
+    }
+
+    public void removeDatabase()
+	throws RemoteException {
+
+	try {
+	    JEConnection dc = null;
+	    try {
+		dc = getConnection(JE_ENV);
+
+		DatabaseConfig dbConfig = new DatabaseConfig();
+		dbConfig.setAllowCreate(true);
+		dbConfig.setTransactional(TRANSACTIONAL);
+
+		/*
+		 * Once you have removed a database from the environment,
+		 * do not try to open it anymore.
+		 */
+		dc.removeDatabase("db");
+	    } finally {
+		if (dc != null) {
+		    dc.close();
+		}
+	    }
+	} catch (Exception e) {
+	    System.err.println("Failure in remove " + e);
+	    e.printStackTrace();
+	}
+    }
+
+    public String get(String key)
+	throws RemoteException {
+
+	try {
+	    @SuppressWarnings("unused")
+            Environment env = null;
+	    @SuppressWarnings("unused")
+            Transaction txn = null;
+	    Database db = null;
+	    Cursor cursor = null;
+	    JEConnection dc = null;
+	    try {
+		dc = getConnection(JE_ENV);
+
+		env = dc.getEnvironment();
+		DatabaseConfig dbConfig = new DatabaseConfig();
+		dbConfig.setAllowCreate(true);
+		dbConfig.setTransactional(TRANSACTIONAL);
+
+		/*
+		 * Use JEConnection.openDatabase() to obtain a cached Database
+		 * handle.  Do not call close() on Database handles obtained
+		 * using this method.
+		 */
+		db = dc.openDatabase("db", dbConfig);
+		cursor = db.openCursor(null, null);
+		DatabaseEntry data = new DatabaseEntry();
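+
+		/*
+		 * Note: the OperationStatus from getSearchKey() is not
+		 * checked here.  If the key is absent, data.getData() stays
+		 * null, the String constructor below throws a
+		 * NullPointerException, and the method falls through to
+		 * return null.
+		 */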
+		cursor.getSearchKey(new DatabaseEntry(key.getBytes("UTF-8")),
+				    data,
+				    null);
+		return new String(data.getData(), "UTF-8");
+	    } finally {
+		if (cursor != null) {
+		    cursor.close();
+		}
+		if (dc != null) {
+		    dc.close();
+		}
+	    }
+	} catch (Exception e) {
+	    System.err.println("Failure in get" + e);
+	    e.printStackTrace();
+	}
+	return null;
+    }
+
+    private JEConnection getConnection(String envDir) {
+	try {
+	    EnvironmentConfig envConfig = new EnvironmentConfig();
+	    envConfig.setTransactional(true);
+	    envConfig.setAllowCreate(true);
+	    InitialContext iniCtx = new InitialContext();
+	    Context enc = (Context) iniCtx.lookup("java:comp/env");
+	    Object ref = enc.lookup("ra/JEConnectionFactory");
+	    JEConnectionFactory dcf = (JEConnectionFactory) ref;
+	    JEConnection dc = dcf.getConnection(envDir, envConfig);
+	    return dc;
+	} catch(Exception e) {
+	    System.err.println("Failure in getConnection " + e);
+	}
+	return null;
+    }
+
+    private static class MyKeyCreator implements SecondaryKeyCreator {
+
+        MyKeyCreator() {
+        }
+
+        public boolean createSecondaryKey(SecondaryDatabase secondaryDb,
+                                          DatabaseEntry keyEntry,
+                                          DatabaseEntry dataEntry,
+                                          DatabaseEntry resultEntry)
+            throws DatabaseException {
+
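+	    /*
+	     * Returning false tells JE not to create a secondary index
+	     * entry for this record, so this example never populates the
+	     * secondary database.
+	     */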
+	    return false;
+        }
+    }
+}
diff --git a/examples/jca/simple/SimpleClient.java b/examples/jca/simple/SimpleClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..e58bd1f4890ff223a798ff3c2e6ec83a57805694
--- /dev/null
+++ b/examples/jca/simple/SimpleClient.java
@@ -0,0 +1,35 @@
+package jca.simple;
+
+import javax.naming.InitialContext;
+
+import java.util.Hashtable;
+
+public class SimpleClient {
+
+    public static void main(String args[])
+	throws Exception {
+
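+	/*
+	 * When true, use the OC4J-specific JNDI bootstrap properties below
+	 * (see HOWTO-oc4j.txt); when false, rely on the jndi.properties
+	 * supplied via example.resources (see the JBoss and SJSAS HOWTOs).
+	 */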
+	final boolean OC4J = true;
+
+	InitialContext iniCtx = null;
+	Hashtable env = new Hashtable();
+	if (OC4J) {
+	    env.put("java.naming.factory.initial",
+		    "com.evermind.server.ApplicationClientInitialContextFactory");
+	    env.put("java.naming.provider.url","ormi://localhost:23791/Simple");
+	    env.put("java.naming.security.principal","oc4jadmin");
+	    env.put("java.naming.security.credentials","oc4jadmin");
+	    iniCtx = new InitialContext(env);
+	} else {
+	    iniCtx = new InitialContext();
+	}
+
+	Object ref = iniCtx.lookup("SimpleBean");
+	SimpleHome home = (SimpleHome) ref;
+	Simple simple = home.create();
+	System.out.println("Created Simple");
+	simple.put(args[0], args[1]);
+	System.out.println("Simple.get('" + args[0] + "') = " +
+			   simple.get(args[0]));
+    }
+}
diff --git a/examples/jca/simple/SimpleHome.java b/examples/jca/simple/SimpleHome.java
new file mode 100644
index 0000000000000000000000000000000000000000..ba8d9630e3fe25213848a7bcc482558016edd3d6
--- /dev/null
+++ b/examples/jca/simple/SimpleHome.java
@@ -0,0 +1,11 @@
+package jca.simple;
+
+import java.rmi.RemoteException;
+import javax.ejb.CreateException;
+import javax.ejb.EJBHome;
+
+public interface SimpleHome extends EJBHome {
+
+   public Simple create()
+      throws RemoteException, CreateException;
+}
diff --git a/examples/jca/simple/ejb-jar.xml b/examples/jca/simple/ejb-jar.xml
new file mode 100644
index 0000000000000000000000000000000000000000..e9950b270edddf926b65d6b7c06483c120f9c34f
--- /dev/null
+++ b/examples/jca/simple/ejb-jar.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!DOCTYPE ejb-jar
+   PUBLIC "-//Sun Microsystems, Inc.//DTD Enterprise JavaBeans 2.0//EN"
+   "http://java.sun.com/dtd/ejb-jar_2_0.dtd"
+>
+
+<ejb-jar>
+   <enterprise-beans>
+      <session>
+        <display-name>SimpleBean</display-name>
+	<ejb-name>SimpleBean</ejb-name>
+	<home>jca.simple.SimpleHome</home>
+	<remote>jca.simple.Simple</remote>
+	<ejb-class>jca.simple.SimpleBean</ejb-class>
+        <session-type>Stateless</session-type>
+        <transaction-type>Container</transaction-type>
+        <resource-ref>
+         <res-ref-name>ra/JEConnectionFactory</res-ref-name>
+         <res-type>com.sleepycat.je.jca.ra.JEConnectionFactory</res-type>
+         <res-auth>Container</res-auth>
+	 <res-sharing-scope>Shareable</res-sharing-scope>
+        </resource-ref>
+      </session>
+
+   </enterprise-beans>
+   <!--
+   <assembly-descriptor>
+     <container-transaction>
+       <method>
+         <ejb-name>SimpleBean</ejb-name>
+         <method-name>*</method-name>
+       </method>
+       <trans-attribute>Supports</trans-attribute>
+     </container-transaction>
+   </assembly-descriptor>
+   -->
+</ejb-jar>
diff --git a/examples/jca/sjsas8_1/sun-ejb-jar.xml b/examples/jca/sjsas8_1/sun-ejb-jar.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1ac8552ff76cf85f0f2598f74547679006f73818
--- /dev/null
+++ b/examples/jca/sjsas8_1/sun-ejb-jar.xml
@@ -0,0 +1,21 @@
+<sun-ejb-jar>
+<enterprise-beans>
+<name>EBJAR</name>
+<unique-id>1</unique-id>
+<ejb>
+     <ejb-name>SimpleBean</ejb-name>
+     <jndi-name>SimpleBean</jndi-name>
+     <pass-by-reference>false</pass-by-reference>
+     <resource-ref>
+       <res-ref-name>ra/JEConnectionFactory</res-ref-name>
+       <jndi-name>ra/JEConnectionFactory</jndi-name>
+       <!--
+       <res-type>com.sleepycat.je.jca.ra.JEConnectionFactory</res-type>
+       <res-auth>Container</res-auth>
+       -->
+     </resource-ref>
+     <is-read-only-bean>false</is-read-only-bean>
+     <gen-classes/>
+</ejb>
+</enterprise-beans>
+</sun-ejb-jar>
diff --git a/examples/je/BindingExample.java b/examples/je/BindingExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..bd0ac082a3c346d90d57ab46d7496534966b52db
--- /dev/null
+++ b/examples/je/BindingExample.java
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: BindingExample.java,v 1.22 2008/05/27 15:30:31 mark Exp $
+ */
+
+package je;
+
+import java.io.File;
+import java.io.Serializable;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+/**
+ * BindingExample operates in the same way as SimpleExample, but uses an
+ * IntegerBinding and a SerialBinding to map between Java objects and stored
+ * DatabaseEntry objects.
+ */
+class BindingExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+
+    private int numRecords;   // num records to insert or retrieve
+    private int offset;       // where we want to start inserting
+    private boolean doInsert; // if true, insert, else retrieve
+    private File envDir;
+
+    public BindingExample(int numRecords,
+                          boolean doInsert,
+                          File envDir,
+                          int offset) {
+        this.numRecords = numRecords;
+        this.doInsert = doInsert;
+        this.envDir = envDir;
+        this.offset = offset;
+    }
+
+    /**
+     * Usage string
+     */
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.BindingExample " +
+                           "<envHomeDirectory> " +
+                           "<insert|retrieve> <numRecords> [offset]");
+        System.exit(EXIT_FAILURE);
+    }
+
+    /**
+     * Main
+     */
+    public static void main(String argv[]) {
+
+        if (argv.length < 2) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        boolean doInsertArg = false;
+        if (argv[1].equalsIgnoreCase("insert")) {
+            doInsertArg = true;
+        } else if (argv[1].equalsIgnoreCase("retrieve")) {
+            doInsertArg = false;
+        } else {
+            usage();
+        }
+
+        int startOffset = 0;
+        int numRecordsVal = 0;
+
+        if (doInsertArg) {
+
+            if (argv.length > 2) {
+                numRecordsVal = Integer.parseInt(argv[2]);
+            } else {
+                usage();
+                return;
+            }
+
+            if (argv.length > 3) {
+                startOffset = Integer.parseInt(argv[3]);
+            }
+        }
+
+        try {
+            BindingExample app = new BindingExample(numRecordsVal,
+                                                    doInsertArg,
+                                                    envHomeDirectory,
+                                                    startOffset);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    /**
+     * Insert or retrieve data
+     */
+    public void run() throws DatabaseException {
+        /* Create a new, transactional database environment */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        Environment exampleEnv = new Environment(envDir, envConfig);
+
+        /* Make a database within that environment */
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database exampleDb = exampleEnv.openDatabase(txn,
+                                                     "bindingsDb",
+                                                     dbConfig);
+
+
+        /*
+         * In our example, the database record is composed of an integer
+         * key and an instance of the MyData class as data.
+         *
+         * A class catalog database is needed for storing class descriptions
+         * for the serial binding used below.  This avoids storing class
+         * descriptions redundantly in each record.
+         */
+        DatabaseConfig catalogConfig = new DatabaseConfig();
+        catalogConfig.setTransactional(true);
+        catalogConfig.setAllowCreate(true);
+        Database catalogDb = exampleEnv.openDatabase(txn,
+                                                     "catalogDb",
+                                                     catalogConfig);
+        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+
+        /*
+         * Create a serial binding for MyData data objects.  Serial bindings
+         * can be used to store any Serializable object.
+         */
+        EntryBinding<MyData> dataBinding =
+            new SerialBinding<MyData>(catalog, MyData.class);
+
+        txn.commit();
+
+        /*
+         * Further below we'll use a tuple binding (IntegerBinding
+         * specifically) for integer keys.  Tuples, unlike serialized Java
+         * objects, have a well defined sort order.
+         */
+
+        /* DatabaseEntry represents the key and data of each record */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        if (doInsert) {
+
+            /* put some data in */
+            for (int i = offset; i < numRecords + offset; i++) {
+
+                StringBuffer stars = new StringBuffer();
+                for (int j = 0; j < i; j++) {
+                    stars.append('*');
+                }
+                MyData data = new MyData(i, stars.toString());
+
+                IntegerBinding.intToEntry(i, keyEntry);
+                dataBinding.objectToEntry(data, dataEntry);
+
+                txn = exampleEnv.beginTransaction(null, null);
+                OperationStatus status =
+                    exampleDb.put(txn, keyEntry, dataEntry);
+
+                /*
+                 * Note that put will throw a DatabaseException when
+                 * error conditions are found such as deadlock.
+                 * However, the status return conveys a variety of
+                 * information. For example, the put might succeed,
+                 * or it might not succeed if the record already exists
+                 * and the database was not configured for duplicate records.
+                 */
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DatabaseException("Data insertion got status " +
+                                                status);
+                }
+                txn.commit();
+            }
+        } else {
+
+            /* retrieve the data */
+            Cursor cursor = exampleDb.openCursor(null, null);
+
+            while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+
+                int key = IntegerBinding.entryToInt(keyEntry);
+                MyData data = dataBinding.entryToObject(dataEntry);
+
+                System.out.println("key=" + key + " data=" + data);
+            }
+            cursor.close();
+        }
+
+        catalogDb.close();
+        exampleDb.close();
+        exampleEnv.close();
+    }
+
+    @SuppressWarnings("serial")
+    private static class MyData implements Serializable {
+
+        private int num;
+        private String msg;
+
+        MyData(int number, String message) {
+            this.num = number;
+            this.msg = message;
+        }
+
+        public String toString() {
+            return String.valueOf(num) + ' ' + msg;
+        }
+    }
+}
diff --git a/examples/je/MeasureInsertSize.java b/examples/je/MeasureInsertSize.java
new file mode 100644
index 0000000000000000000000000000000000000000..b38c0856730b7ff72a09a159c78388265173efe6
--- /dev/null
+++ b/examples/je/MeasureInsertSize.java
@@ -0,0 +1,231 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MeasureInsertSize.java,v 1.3 2008/06/02 16:53:23 mark Exp $
+ */
+
+package je;
+
+import java.io.File;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+/**
+ * MeasureInsertSize inserts a given set of key/value pairs in order to measure
+ * the disk space consumed by a given data set.
+ *
+ * To see how much disk space is consumed, simply add up the size of the log
+ * files or, for a rough estimate, multiply the number of files by 10 MB.
+ *
+ * This program does sequential inserts.  For random inserts, more disk space
+ * will be consumed, especially if the entire data set does not fit in the
+ * cache.
+ *
+ * This program does not insert into secondary databases, but can be used to
+ * measure the size of a secondary by specifying the key and data sizes of the
+ * secondary records.  The data size for a secondary record should be specified
+ * as the size of the primary key.
+ *
+ * Checkpoints are performed by this program as usual, and checkpoints will
+ * add to the size of the log.  This is realistic for a typical application,
+ * but note that a smaller disk size can be obtained using a bulk load.
+ *
+ * For command line parameters see the usage() method.
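+ *
+ * A typical invocation might look like this (the directory and sizes are
+ * illustrative only, not defaults):
+ *
+ *    java je.MeasureInsertSize -h /tmp/measure-env -records 100000 \
+ *         -key 8 -data 100 -txn 100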
+ */
+public class MeasureInsertSize {
+
+    private File home;
+    private int records;
+    private int keySize = -1;
+    private int dataSize = -1;
+    private int insertsPerTxn;
+    private boolean deferredWrite;
+    private Environment env;
+    private Database db;
+
+    public static void main(String args[]) {
+        try {
+            MeasureInsertSize example = new MeasureInsertSize(args);
+            example.open();
+            example.doInserts();
+            example.close();
+            System.exit(0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    public MeasureInsertSize(String[] args) {
+        for (int i = 0; i < args.length; i += 1) {
+            String name = args[i];
+            String val = null;
+            if (i < args.length - 1 && !args[i + 1].startsWith("-")) {
+                i += 1;
+                val = args[i];
+            }
+            if (name.equals("-h")) {
+                if (val == null) {
+                    usage("No value after -h");
+                }
+                home = new File(val);
+            } else if (name.equals("-records")) {
+                if (val == null) {
+                    usage("No value after -records");
+                }
+                try {
+                    records = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (records <= 0) {
+                    usage(val + " is not a positive integer");
+                }
+            } else if (name.equals("-key")) {
+                if (val == null) {
+                    usage("No value after -key");
+                }
+                try {
+                    keySize = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (keySize < 4) {
+                    usage(val + " is not four or greater");
+                }
+            } else if (name.equals("-data")) {
+                if (val == null) {
+                    usage("No value after -data");
+                }
+                try {
+                    dataSize = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (dataSize < 0) {
+                    usage(val + " is not a positive integer");
+                }
+            } else if (name.equals("-txn")) {
+                if (val == null) {
+                    usage("No value after -txn");
+                }
+                try {
+                    insertsPerTxn = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+            } else if (name.equals("-deferredwrite")) {
+                deferredWrite = true;
+            } else {
+                usage("Unknown arg: " + name);
+            }
+        }
+
+        if (home == null) {
+            usage("-h not specified");
+        }
+
+        if (records == 0) {
+            usage("-records not specified");
+        }
+
+        if (keySize == -1) {
+            usage("-key not specified");
+        }
+
+        if (dataSize == -1) {
+            usage("-data not specified");
+        }
+    }
+
+    private void usage(String msg) {
+
+        if (msg != null) {
+            System.out.println(msg);
+        }
+
+        System.out.println
+            ("usage:" +
+             "\njava "  + MeasureInsertSize.class.getName() +
+             "\n   -h <directory>" +
+             "\n      # Environment home directory; required" +
+             "\n   -records <count>" +
+             "\n      # Total records (key/data pairs); required" +
+             "\n   -key <bytes> " +
+             "\n      # Average key bytes per record; required" +
+             "\n   -data <bytes>" +
+             "\n      # Average data bytes per record; required" +
+             "\n  [-txn <insertsPerTransaction>]" +
+             "\n      # Inserts per txn; default: 0 (non-transactional)" +
+             "\n  [-deferredwrite]" +
+             "\n      # Use a Deferred Write database");
+
+        System.exit(2);
+    }
+
+    private boolean isTransactional() {
+        return insertsPerTxn > 0;
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(isTransactional());
+        env = new Environment(home, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(isTransactional());
+        dbConfig.setDeferredWrite(deferredWrite);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+        env.close();
+    }
+
+    public void doInserts()
+        throws DatabaseException {
+
+        DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+        DatabaseEntry key = new DatabaseEntry();
+        byte[] keyBuffer = new byte[keySize];
+        byte[] keyPadding = new byte[keySize - 4];
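+
+        /*
+         * Each key is a 4-byte record number followed by zero padding up to
+         * the requested key size, which is why -key must be at least 4.
+         */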
+
+        Transaction txn = null;
+
+        for (int i = 1; i <= records; i += 1) {
+
+            TupleOutput keyOutput = new TupleOutput(keyBuffer);
+            keyOutput.writeInt(i);
+            keyOutput.writeFast(keyPadding);
+            TupleBinding.outputToEntry(keyOutput, key);
+
+            if (isTransactional() && txn == null) {
+                txn = env.beginTransaction(null, null);
+            }
+
+            db.put(txn, key, data);
+
+            if (txn != null && i % insertsPerTxn == 0) {
+                txn.commit();
+                txn = null;
+            }
+        }
+    }
+}
diff --git a/examples/je/SecondaryExample.java b/examples/je/SecondaryExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..77ed45d02cd3438932dd785ef82495d0ed61e443
--- /dev/null
+++ b/examples/je/SecondaryExample.java
@@ -0,0 +1,325 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryExample.java,v 1.26 2008/05/30 14:04:14 mark Exp $
+ */
+
+package je;
+
+import java.io.File;
+import java.io.Serializable;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+
+/**
+ * SecondaryExample operates in the same way as BindingExample, but adds a
+ * SecondaryDatabase for accessing the primary database by a secondary key.
+ */
+class SecondaryExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+
+    private int numRecords;   // num records to insert or retrieve
+    private int offset;       // where we want to start inserting
+    private boolean doInsert; // if true, insert, else retrieve
+    private File envDir;
+
+    public SecondaryExample(int numRecords,
+                          boolean doInsert,
+                          File envDir,
+                          int offset) {
+        this.numRecords = numRecords;
+        this.doInsert = doInsert;
+        this.envDir = envDir;
+        this.offset = offset;
+    }
+
+    /**
+     * Usage string
+     */
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SecondaryExample " +
+                           "<dbEnvHomeDirectory> " +
+                           "<insert|retrieve> <numRecords> [offset]");
+        System.exit(EXIT_FAILURE);
+    }
+
+    public static void main(String argv[]) {
+
+        if (argv.length < 2) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        boolean doInsertArg = false;
+        if (argv[1].equalsIgnoreCase("insert")) {
+            doInsertArg = true;
+        } else if (argv[1].equalsIgnoreCase("retrieve")) {
+            doInsertArg = false;
+        } else {
+            usage();
+        }
+
+        int startOffset = 0;
+        int numRecordsVal = 0;
+
+        if (doInsertArg) {
+
+            if (argv.length > 2) {
+                numRecordsVal = Integer.parseInt(argv[2]);
+            } else {
+                usage();
+                return;
+            }
+
+            if (argv.length > 3) {
+                startOffset = Integer.parseInt(argv[3]);
+            }
+        }
+
+        try {
+            SecondaryExample app = new SecondaryExample(numRecordsVal,
+                                                        doInsertArg,
+                                                        envHomeDirectory,
+                                                        startOffset);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    /**
+     * Insert or retrieve data.
+     */
+    public void run() throws DatabaseException {
+
+        /* Create a new, transactional database environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        Environment exampleEnv = new Environment(envDir, envConfig);
+
+        /*
+         * Make a database within that environment. Because this will be used
+         * as a primary database, it must not allow duplicates. The primary key
+         * of a primary database must be unique.
+         */
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database exampleDb =
+	    exampleEnv.openDatabase(txn, "bindingsDb", dbConfig);
+
+        /*
+         * In our example, the database record is composed of an integer key
+         * and an instance of the MyData class as data.
+         *
+         * A class catalog database is needed for storing class descriptions
+         * for the serial binding used below.  This avoids storing class
+         * descriptions redundantly in each record.
+         */
+        DatabaseConfig catalogConfig = new DatabaseConfig();
+        catalogConfig.setTransactional(true);
+        catalogConfig.setAllowCreate(true);
+        Database catalogDb =
+	    exampleEnv.openDatabase(txn, "catalogDb", catalogConfig);
+        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+
+        /*
+         * Create a serial binding for MyData data objects.  Serial
+         * bindings can be used to store any Serializable object.
+         */
+        EntryBinding<MyData> dataBinding =
+            new SerialBinding<MyData>(catalog, MyData.class);
+
+        /*
+         * Further below we'll use a tuple binding (IntegerBinding
+         * specifically) for integer keys.  Tuples, unlike serialized
+         * Java objects, have a well defined sort order.
+         */
+
+        /*
+         * Define a String tuple binding for a secondary key.  The
+         * secondary key is the msg field of the MyData object.
+         */
+        EntryBinding<String> secKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+
+        /*
+         * Open a secondary database to allow accessing the primary
+         * database by the secondary key value.
+         */
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+        secConfig.setKeyCreator(new MyKeyCreator(secKeyBinding, dataBinding));
+        SecondaryDatabase exampleSecDb =
+	    exampleEnv.openSecondaryDatabase(txn, "bindingsSecDb",
+					     exampleDb, secConfig);
+        txn.commit();
+
+        /* DatabaseEntry represents the key and data of each record. */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        if (doInsert) {
+
+            /*
+             * Put some data in.  Note that the primary database is always used
+             * to add data.  Adding or changing data in the secondary database
+             * is not allowed; however, deleting through the secondary database
+             * is allowed.
+             */
+            for (int i = offset; i < numRecords + offset; i++) {
+                txn = exampleEnv.beginTransaction(null, null);
+                StringBuffer stars = new StringBuffer();
+                for (int j = 0; j < i; j++) {
+                    stars.append('*');
+                }
+                MyData data = new MyData(i, stars.toString());
+
+                IntegerBinding.intToEntry(i, keyEntry);
+                dataBinding.objectToEntry(data, dataEntry);
+
+                OperationStatus status =
+                    exampleDb.put(txn, keyEntry, dataEntry);
+
+                /*
+                 * Note that put will throw a DatabaseException when error
+                 * conditions are found such as deadlock.  However, the status
+                 * return conveys a variety of information. For example, the
+                 * put might succeed, or it might not succeed if the record
+                 * already exists and the database was not configured for
+                 * duplicate records.
+                 */
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DatabaseException
+			("Data insertion got status " + status);
+                }
+                txn.commit();
+            }
+        } else {
+
+            /*
+	     * Retrieve the data by secondary key by opening a cursor on the
+	     * secondary database.  The key parameter for a secondary cursor is
+	     * always the secondary key, but the data parameter is always the
+	     * data of the primary database.  You can cast the cursor to a
+	     * SecondaryCursor and use additional method signatures for
+	     * retrieving the primary key also.  Or you can call
+	     * openSecondaryCursor() to avoid casting.
+	     */
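+
+            /*
+             * A sketch of the SecondaryCursor alternative mentioned above
+             * (not executed here); its getNext() also returns the primary
+             * key:
+             *
+             *   SecondaryCursor secCursor =
+             *       exampleSecDb.openSecondaryCursor(txn, null);
+             *   DatabaseEntry priKeyEntry = new DatabaseEntry();
+             *   while (secCursor.getNext(keyEntry, priKeyEntry, dataEntry,
+             *                            LockMode.DEFAULT) ==
+             *          OperationStatus.SUCCESS) {
+             *       // priKeyEntry holds the primary (integer) key.
+             *   }
+             *   secCursor.close();
+             */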
+            txn = exampleEnv.beginTransaction(null, null);
+            Cursor cursor = exampleSecDb.openCursor(txn, null);
+
+            while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+
+                String key = secKeyBinding.entryToObject(keyEntry);
+                MyData data = dataBinding.entryToObject(dataEntry);
+
+                System.out.println("key=" + key + " data=" + data);
+            }
+            cursor.close();
+            txn.commit();
+        }
+
+        /*
+         * Always close secondary databases before closing their associated
+         * primary database.
+         */
+        catalogDb.close();
+        exampleSecDb.close();
+        exampleDb.close();
+        exampleEnv.close();
+    }
+
+    @SuppressWarnings("serial")
+    private static class MyData implements Serializable {
+
+        private int num;
+        private String msg;
+
+        MyData(int number, String message) {
+            this.num = number;
+            this.msg = message;
+        }
+
+        String getMessage() {
+            return msg;
+        }
+
+        public String toString() {
+            return String.valueOf(num) + ' ' + msg;
+        }
+    }
+
+    /**
+     * A key creator that knows how to extract the secondary key from the data
+     * entry of the primary database.  To do so, it uses both the dataBinding
+     * of the primary database and the secKeyBinding.
+     */
+    private static class MyKeyCreator implements SecondaryKeyCreator {
+
+        private EntryBinding<String> secKeyBinding;
+        private EntryBinding<MyData> dataBinding;
+
+        MyKeyCreator(EntryBinding<String> secKeyBinding,
+                     EntryBinding<MyData> dataBinding) {
+            this.secKeyBinding = secKeyBinding;
+            this.dataBinding = dataBinding;
+        }
+
+        public boolean createSecondaryKey(SecondaryDatabase secondaryDb,
+                                          DatabaseEntry keyEntry,
+                                          DatabaseEntry dataEntry,
+                                          DatabaseEntry resultEntry)
+            throws DatabaseException {
+
+            /*
+             * Convert the data entry to a MyData object, extract the secondary
+             * key value from it, and then convert it to the resulting
+             * secondary key entry.
+             */
+            MyData data = dataBinding.entryToObject(dataEntry);
+            String key = data.getMessage();
+            if (key != null) {
+                secKeyBinding.objectToEntry(key, resultEntry);
+                return true;
+            } else {
+
+                /*
+                 * The message property of MyData is optional, so if it is null
+                 * then return false to prevent it from being indexed.  Note
+                 * that if a required key is missing or an error occurs, an
+                 * exception should be thrown by this method.
+                 */
+                return false;
+            }
+        }
+    }
+}
diff --git a/examples/je/SequenceExample.java b/examples/je/SequenceExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..9b092aa47ccc07a1100fe4328815396e5c64b06f
--- /dev/null
+++ b/examples/je/SequenceExample.java
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SequenceExample.java,v 1.10 2008/01/07 14:28:41 cwl Exp $
+ */
+
+package je;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Sequence;
+import com.sleepycat.je.SequenceConfig;
+
+public class SequenceExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+    private static final String DB_NAME = "sequence.db";
+    private static final String KEY_NAME = "my_sequence";
+
+    public SequenceExample() {
+    }
+
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SequenceExample " +
+                           "<dbEnvHomeDirectory>");
+        System.exit(EXIT_FAILURE);
+    }
+
+    public static void main(String[] argv) {
+
+        if (argv.length != 1) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        try {
+            SequenceExample app = new SequenceExample();
+            app.run(envHomeDirectory);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    public void run(File envHomeDirectory)
+        throws DatabaseException, IOException {
+
+        /* Create the environment object. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHomeDirectory, envConfig);
+
+        /* Create the database object. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, DB_NAME, dbConfig);
+
+        /* Create the sequence object. */
+        SequenceConfig config = new SequenceConfig();
+        config.setAllowCreate(true);
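+
+        /*
+         * Optional tuning (sketch only; the values here are arbitrary
+         * examples, not defaults used by this program): a cache reduces how
+         * often get() must write to the database, and an initial value
+         * controls where numbering starts.
+         *
+         *   config.setCacheSize(20);
+         *   config.setInitialValue(1000);
+         */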
+        DatabaseEntry key =
+            new DatabaseEntry(KEY_NAME.getBytes("UTF-8"));
+        Sequence seq = db.openSequence(null, key, config);
+
+        /* Allocate a few sequence numbers. */
+        for (int i = 0; i < 10; i++) {
+            long seqnum = seq.get(null, 1);
+            System.out.println("Got sequence number: " + seqnum);
+        }
+
+        /* Close all. */
+        seq.close();
+        db.close();
+        env.close();
+    }
+}
diff --git a/examples/je/SimpleExample.java b/examples/je/SimpleExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..05a73c695526eeeecdabd7bd4f8c419e808a9b26
--- /dev/null
+++ b/examples/je/SimpleExample.java
@@ -0,0 +1,202 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SimpleExample.java,v 1.51 2008/01/07 14:28:41 cwl Exp $
+ */
+
+package je;
+
+import java.io.File;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+/**
+ * SimpleExample creates a database environment, a database, and a database
+ * cursor, then inserts and retrieves data.
+ */
+class SimpleExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+
+    private int numRecords;   // num records to insert or retrieve
+    private int offset;       // where we want to start inserting
+    private boolean doInsert; // if true, insert, else retrieve
+    private File envDir;
+
+    public SimpleExample(int numRecords,
+                         boolean doInsert,
+                         File envDir,
+                         int offset) {
+        this.numRecords = numRecords;
+        this.doInsert = doInsert;
+        this.envDir = envDir;
+        this.offset = offset;
+    }
+
+    /**
+     * Usage string
+     */
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SimpleExample " +
+                           "<dbEnvHomeDirectory> " +
+                           "<insert|retrieve> <numRecords> [offset]");
+        System.exit(EXIT_FAILURE);
+    }
+
+    /**
+     * Main
+     */
+    public static void main(String argv[]) {
+
+        if (argv.length < 2) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        boolean doInsertArg = false;
+        if (argv[1].equalsIgnoreCase("insert")) {
+            doInsertArg = true;
+        } else if (argv[1].equalsIgnoreCase("retrieve")) {
+            doInsertArg = false;
+        } else {
+            usage();
+        }
+
+        int startOffset = 0;
+        int numRecordsVal = 0;
+
+        if (doInsertArg) {
+
+            if (argv.length > 2) {
+                numRecordsVal = Integer.parseInt(argv[2]);
+            } else {
+                usage();
+                return;
+            }
+
+            if (argv.length > 3) {
+                startOffset = Integer.parseInt(argv[3]);
+            }
+        }
+
+        try {
+            SimpleExample app = new SimpleExample(numRecordsVal,
+                                                  doInsertArg,
+                                                  envHomeDirectory,
+                                                  startOffset);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    /**
+     * Insert or retrieve data
+     */
+    public void run() throws DatabaseException {
+        /* Create a new, transactional database environment */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        Environment exampleEnv = new Environment(envDir, envConfig);
+
+        /*
+         * Make a database within that environment
+         *
+         * Notice that we use an explicit transaction to
+         * perform this database open, and that we
+         * immediately commit the transaction once the
+         * database is opened. This is required if we
+         * want transactional support for the database.
+         * However, we could have used autocommit to
+         * perform the same thing by simply passing a
+         * null txn handle to openDatabase().
+         */
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database exampleDb = exampleEnv.openDatabase(txn,
+                                                     "simpleDb",
+                                                     dbConfig);
+        txn.commit();
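+
+        /*
+         * The autocommit alternative mentioned above would look roughly
+         * like this (sketch only, not executed here):
+         *
+         *   Database exampleDb =
+         *       exampleEnv.openDatabase(null, "simpleDb", dbConfig);
+         */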
+
+        /*
+         * Insert or retrieve data. In our example, database records are
+         * integer pairs.
+         */
+
+        /* DatabaseEntry represents the key and data of each record */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        if (doInsert) {
+
+            /* put some data in */
+            for (int i = offset; i < numRecords + offset; i++) {
+                /*
+                 * Note that autocommit mode, described in the Getting
+                 * Started Guide, is an alternative to explicitly
+                 * creating the transaction object.
+                 */
+                txn = exampleEnv.beginTransaction(null, null);
+
+                /* Use a binding to convert the int into a DatabaseEntry. */
+
+                IntegerBinding.intToEntry(i, keyEntry);
+                IntegerBinding.intToEntry(i+1, dataEntry);
+                OperationStatus status =
+                    exampleDb.put(txn, keyEntry, dataEntry);
+
+                /*
+                 * Note that put will throw a DatabaseException when
+                 * error conditions are found such as deadlock.
+                 * However, the status return conveys a variety of
+                 * information. For example, the put might succeed,
+                 * or it might not succeed if the record already exists
+                 * and the database was not configured for duplicate
+                 * records.
+                 */
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DatabaseException("Data insertion got status " +
+                                                status);
+                }
+                txn.commit();
+            }
+        } else {
+            /* retrieve the data */
+            Cursor cursor = exampleDb.openCursor(null, null);
+
+            while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                System.out.println("key=" +
+                                   IntegerBinding.entryToInt(keyEntry) +
+                                   " data=" +
+                                   IntegerBinding.entryToInt(dataEntry));
+
+            }
+            cursor.close();
+        }
+
+        exampleDb.close();
+        exampleEnv.close();
+
+    }
+}
diff --git a/examples/je/ToManyExample.java b/examples/je/ToManyExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..1618f958cab62873fb1da0d5069b24240803f2b1
--- /dev/null
+++ b/examples/je/ToManyExample.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ToManyExample.java,v 1.10 2008/05/27 15:30:31 mark Exp $
+ */
+
+package je;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.ForeignMultiKeyNullifier;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.je.Transaction;
+
+/**
+ * An example of using many-to-many and one-to-many secondary indices.
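+ *
+ * Two secondary indices are built on the person database: personByEmail is
+ * a one-to-many index (duplicates not allowed), and personByAnimal is a
+ * many-to-many index (duplicates allowed, with a foreign key constraint on
+ * the animal database).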
+ */
+public class ToManyExample {
+
+    private Environment env;
+    private Database catalogDb;
+    private Database animalDb;
+    private Database personDb;
+    private SecondaryDatabase personByEmail;
+    private SecondaryDatabase personByAnimal;
+    private EntryBinding<String> keyBinding;
+    private EntryBinding<Person> personBinding;
+    private EntryBinding<Animal> animalBinding;
+
+    /**
+     * Runs the example program, given a single "-h HOME" argument.
+     */
+    public static void main(String[] args) {
+
+        if (args.length != 2 || !"-h".equals(args[0])) {
+            System.out.println("Usage: java " +
+                               ToManyExample.class.getName() +
+                               " -h ENV_HOME");
+            System.exit(1);
+        }
+        String homeDir = args[1];
+
+        try {
+            ToManyExample example = new ToManyExample(homeDir);
+            example.exec();
+            example.close();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * Opens the environment and all databases.
+     */
+    private ToManyExample(String homeDir) throws DatabaseException {
+
+        /* Open the environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(new File(homeDir), envConfig);
+
+        /* Open/create all databases in a transaction. */
+        Transaction txn = env.beginTransaction(null, null);
+        try {
+            /* A standard (no duplicates) database config. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+
+            /* The catalog is used for the serial binding. */
+            catalogDb = env.openDatabase(txn, "catalog", dbConfig);
+            StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+            personBinding = new SerialBinding(catalog, null);
+            animalBinding = new SerialBinding(catalog, null);
+            keyBinding = new StringBinding();
+
+            /* Open the person and animal primary DBs. */
+            animalDb = env.openDatabase(txn, "animal", dbConfig);
+            personDb = env.openDatabase(txn, "person", dbConfig);
+
+            /*
+             * A standard secondary config; duplicates, key creators and key
+             * nullifiers are specified below.
+             */
+            SecondaryConfig secConfig = new SecondaryConfig();
+            secConfig.setAllowCreate(true);
+            secConfig.setTransactional(true);
+
+            /*
+             * Open the secondary database for personByEmail.  This is a
+             * one-to-many index because duplicates are not configured.
+             */
+            secConfig.setSortedDuplicates(false);
+            secConfig.setMultiKeyCreator(new EmailKeyCreator());
+            personByEmail = env.openSecondaryDatabase(txn, "personByEmail",
+                                                      personDb, secConfig);
+
+            /*
+             * Open the secondary database for personByAnimal.  This is a
+             * many-to-many index because duplicates are configured.  Foreign
+             * key constraints are specified to ensure that all animal keys
+             * exist in the animal database.
+             */
+            secConfig.setSortedDuplicates(true);
+            secConfig.setMultiKeyCreator(new AnimalKeyCreator());
+            secConfig.setForeignMultiKeyNullifier(new AnimalKeyNullifier());
+            secConfig.setForeignKeyDatabase(animalDb);
+            secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+            personByAnimal = env.openSecondaryDatabase(txn, "personByAnimal",
+                                                       personDb, secConfig);
+
+            txn.commit();
+        } catch (DatabaseException e) {
+            txn.abort();
+            throw e;
+        } catch (RuntimeException e) {
+            txn.abort();
+            throw e;
+        }
+    }
+
+    /**
+     * Closes all databases and the environment.
+     */
+    private void close() throws DatabaseException {
+
+        if (personByEmail != null) {
+            personByEmail.close();
+        }
+        if (personByAnimal != null) {
+            personByAnimal.close();
+        }
+        if (catalogDb != null) {
+            catalogDb.close();
+        }
+        if (personDb != null) {
+            personDb.close();
+        }
+        if (animalDb != null) {
+            animalDb.close();
+        }
+        if (env != null) {
+            env.close();
+        }
+    }
+
+    /**
+     * Adds, updates, prints and deletes Person records with many-to-many and
+     * one-to-many secondary indices.
+     */
+    private void exec()
+        throws DatabaseException {
+
+        System.out.println
+            ("\nInsert some animals.");
+        Animal dogs = insertAndPrintAnimal("dogs", true);
+        Animal fish = insertAndPrintAnimal("fish", false);
+        Animal horses = insertAndPrintAnimal("horses", true);
+        Animal donkeys = insertAndPrintAnimal("donkeys", true);
+
+        System.out.println
+            ("\nInsert a new empty person.");
+        Person kathy = new Person();
+        kathy.name = "Kathy";
+        putPerson(kathy);
+        printPerson("Kathy");
+
+        System.out.println
+            ("\nAdd favorites/addresses and update the record.");
+        kathy.favoriteAnimals.add(horses.name);
+        kathy.favoriteAnimals.add(dogs.name);
+        kathy.favoriteAnimals.add(fish.name);
+        kathy.emailAddresses.add("kathy@kathy.com");
+        kathy.emailAddresses.add("kathy@yahoo.com");
+        putPerson(kathy);
+        printPerson("Kathy");
+
+        System.out.println
+            ("\nChange favorites and addresses and update the person record.");
+        kathy.favoriteAnimals.remove(fish.name);
+        kathy.favoriteAnimals.add(donkeys.name);
+        kathy.emailAddresses.add("kathy@gmail.com");
+        kathy.emailAddresses.remove("kathy@yahoo.com");
+        putPerson(kathy);
+        printPerson("Kathy");
+
+        System.out.println
+            ("\nInsert another person with some of the same favorites.");
+        Person mark = new Person();
+        mark.favoriteAnimals.add(dogs.name);
+        mark.favoriteAnimals.add(horses.name);
+        mark.name = "Mark";
+        putPerson(mark);
+        printPerson("Mark");
+
+        System.out.println
+            ("\nPrint by favorite animal index.");
+        printByIndex(personByAnimal);
+
+        System.out.println
+            ("\nPrint by email address index.");
+        printByIndex(personByEmail);
+
+        System.out.println
+            ("\nDelete 'dogs' and print again by favorite animal index.");
+        deleteAnimal(dogs.name);
+        printPerson("Kathy");
+        printPerson("Mark");
+        printByIndex(personByAnimal);
+
+        System.out.println
+            ("\nDelete both records and print again (should print nothing).");
+        deletePerson("Kathy");
+        deletePerson("Mark");
+        printPerson("Kathy");
+        printPerson("Mark");
+        printByIndex(personByAnimal);
+        printByIndex(personByEmail);
+    }
+
+    /**
+     * Inserts an animal record and prints it.  Uses auto-commit.
+     */
+    private Animal insertAndPrintAnimal(String name, boolean furry)
+        throws DatabaseException {
+
+        Animal animal = new Animal();
+        animal.name = name;
+        animal.furry = furry;
+
+        DatabaseEntry key = new DatabaseEntry();
+        keyBinding.objectToEntry(name, key);
+
+        DatabaseEntry data = new DatabaseEntry();
+        animalBinding.objectToEntry(animal, data);
+
+        OperationStatus status = animalDb.putNoOverwrite(null, key, data);
+        if (status == OperationStatus.SUCCESS) {
+            System.out.println(animal);
+        } else {
+            System.out.println("Animal was not inserted: " + name +
+                               " (" + status + ')');
+        }
+
+        return animal;
+    }
+
+    /**
+     * Deletes an animal.  Uses auto-commit.
+     */
+    private boolean deleteAnimal(String name)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        keyBinding.objectToEntry(name, key);
+
+        OperationStatus status = animalDb.delete(null, key);
+        return status == OperationStatus.SUCCESS;
+    }
+
+    /**
+     * Gets a person by name and prints it.
+     */
+    private void printPerson(String name)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        keyBinding.objectToEntry(name, key);
+
+        DatabaseEntry data = new DatabaseEntry();
+
+        OperationStatus status = personDb.get(null, key, data, null);
+        if (status == OperationStatus.SUCCESS) {
+            Person person = personBinding.entryToObject(data);
+            person.name = keyBinding.entryToObject(key);
+            System.out.println(person);
+        } else {
+            System.out.println("Person not found: " + name);
+        }
+    }
+
+    /**
+     * Prints all person records by a given secondary index.
+     */
+    private void printByIndex(SecondaryDatabase secDb)
+        throws DatabaseException {
+
+        DatabaseEntry secKey = new DatabaseEntry();
+        DatabaseEntry priKey = new DatabaseEntry();
+        DatabaseEntry priData = new DatabaseEntry();
+
+        SecondaryCursor cursor = secDb.openSecondaryCursor(null, null);
+        try {
+            while (cursor.getNext(secKey, priKey, priData, null) ==
+                   OperationStatus.SUCCESS) {
+                Person person = personBinding.entryToObject(priData);
+                person.name = keyBinding.entryToObject(priKey);
+                System.out.println("Index key [" +
+                                   keyBinding.entryToObject(secKey) +
+                                   "] maps to primary key [" +
+                                   person.name + ']');
+            }
+        } finally {
+            cursor.close();
+        }
+    }
+
+    /**
+     * Inserts or updates a person.  Uses auto-commit.
+     */
+    private void putPerson(Person person)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        keyBinding.objectToEntry(person.name, key);
+
+        DatabaseEntry data = new DatabaseEntry();
+        personBinding.objectToEntry(person, data);
+
+        personDb.put(null, key, data);
+    }
+
+    /**
+     * Deletes a person.  Uses auto-commit.
+     */
+    private boolean deletePerson(String name)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        keyBinding.objectToEntry(name, key);
+
+        OperationStatus status = personDb.delete(null, key);
+        return status == OperationStatus.SUCCESS;
+    }
+
+    /**
+     * A person object.
+     */
+    @SuppressWarnings("serial")
+    private static class Person implements Serializable {
+
+        /** The primary key. */
+        private transient String name;
+
+        /** A many-to-many set of keys. */
+        private Set<String> favoriteAnimals = new HashSet<String>();
+
+        /** A one-to-many set of keys. */
+        private Set<String> emailAddresses = new HashSet<String>();
+
+        public String toString() {
+            return "Person {" +
+                   "\n  Name: " + name +
+                   "\n  FavoriteAnimals: " + favoriteAnimals +
+                   "\n  EmailAddresses: " + emailAddresses +
+                   "\n}";
+        }
+    }
+
+    /**
+     * An animal object.
+     */
+    @SuppressWarnings("serial")
+    private static class Animal implements Serializable {
+
+        /** The primary key. */
+        private transient String name;
+
+        /** A non-indexed property. */
+        private boolean furry;
+
+        public String toString() {
+            return "Animal {" +
+                   "\n  Name: " + name +
+                   "\n  Furry: " + furry +
+                   "\n}";
+        }
+    }
+
+    /**
+     * Adds the email addresses of a person to the secondary key results.
+     * This is an example of a multi-key creator for a to-many index.
+     */
+    private class EmailKeyCreator implements SecondaryMultiKeyCreator {
+
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry primaryKey,
+                                        DatabaseEntry primaryData,
+                                        Set<DatabaseEntry> results)
+            throws DatabaseException {
+
+            Person person = personBinding.entryToObject(primaryData);
+            copyKeysToEntries(person.emailAddresses, results);
+        }
+    }
+
+    /**
+     * Adds the favorite animals of a person to the secondary key results.
+     * This is an example of a multi-key creator for a to-many index.
+     */
+    private class AnimalKeyCreator implements SecondaryMultiKeyCreator {
+
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry primaryKey,
+                                        DatabaseEntry primaryData,
+                                        Set<DatabaseEntry> results)
+            throws DatabaseException {
+
+            Person person = personBinding.entryToObject(primaryData);
+            copyKeysToEntries(person.favoriteAnimals, results);
+        }
+    }
+
+    /**
+     * A utility method to copy a set of keys (Strings) into a set of
+     * DatabaseEntry objects.
+     */
+    private void copyKeysToEntries(Set<String> keys,
+                                   Set<DatabaseEntry> entries) {
+
+        for (Iterator<String> i = keys.iterator(); i.hasNext();) {
+            DatabaseEntry entry = new DatabaseEntry();
+            keyBinding.objectToEntry(i.next(), entry);
+            entries.add(entry);
+        }
+    }
+
+    /**
+     * Removes a given key from the set of favorite animals for a person.  This
+     * is an example of a nullifier for a to-many index.  The nullifier is
+     * called when an animal record is deleted because we configured this
+     * secondary with ForeignKeyDeleteAction.NULLIFY.
+     */
+    private class AnimalKeyNullifier implements ForeignMultiKeyNullifier {
+
+        public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                         DatabaseEntry primaryKey,
+                                         DatabaseEntry primaryData,
+                                         DatabaseEntry secKey)
+            throws DatabaseException {
+
+            Person person = personBinding.entryToObject(primaryData);
+            String key = keyBinding.entryToObject(secKey);
+            if (person.favoriteAnimals.remove(key)) {
+                personBinding.objectToEntry(person, primaryData);
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+}
diff --git a/examples/je/gettingStarted/ExampleDatabasePut.java b/examples/je/gettingStarted/ExampleDatabasePut.java
new file mode 100644
index 0000000000000000000000000000000000000000..0773ded3625a16a5c8db65478058e0bfd9f2411a
--- /dev/null
+++ b/examples/je/gettingStarted/ExampleDatabasePut.java
@@ -0,0 +1,229 @@
+// file: ExampleDatabasePut.java
+// $Id: ExampleDatabasePut.java,v 1.10 2008/05/15 01:47:58 linda Exp $
+
+package je.gettingStarted;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+
+public class ExampleDatabasePut {
+
+    private static File myDbEnvPath = new File("/tmp/JEDB");
+    private static File inventoryFile = new File("./inventory.txt");
+    private static File vendorsFile = new File("./vendors.txt");
+
+    // DatabaseEntries used for loading records
+    private static DatabaseEntry theKey = new DatabaseEntry();
+    private static DatabaseEntry theData = new DatabaseEntry();
+
+    // Encapsulates the environment and databases.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    private static void usage() {
+        System.out.println("ExampleDatabasePut [-h <env directory>]");
+        System.out.println("      [-s <selections file>] [-v <vendors file>]");
+        System.exit(-1);
+    }
+
+
+    public static void main(String args[]) {
+        ExampleDatabasePut edp = new ExampleDatabasePut();
+        try {
+            edp.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabasePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      false);      // is this environment read-only?
+
+        System.out.println("loading vendors db....");
+        loadVendorsDb();
+
+        System.out.println("loading inventory db....");
+        loadInventoryDb();
+    }
+
+
+    private void loadVendorsDb()
+            throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> vendors = loadFile(vendorsFile, 8);
+
+        // Now load the data into the database. The vendor's name is the
+        // key, and the data is a Vendor class object.
+
+        // Need a serial binding for the data
+        EntryBinding dataBinding =
+            new SerialBinding(myDbEnv.getClassCatalog(), Vendor.class);
+
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+
+            // The key is the vendor's name.
+            // ASSUMES THE VENDOR'S NAME IS UNIQUE!
+            String vendorName = theVendor.getVendorName();
+            try {
+                theKey = new DatabaseEntry(vendorName.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            // Convert the Vendor object to a DatabaseEntry object
+            // using our SerialBinding
+            dataBinding.objectToEntry(theVendor, theData);
+
+            // Put it in the database. These puts are transactionally protected
+            // (we're using autocommit).
+            myDbEnv.getVendorDB().put(null, theKey, theData);
+        }
+    }
+
+    private void loadInventoryDb()
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> inventoryArray = loadFile(inventoryFile, 6);
+
+        // Now load the data into the database. The item's sku is the
+        // key, and the data is an Inventory class object.
+
+        // Need a tuple binding for the Inventory class.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        // Start a transaction. All inventory items get loaded using a
+        // single transaction.
+        Transaction txn = myDbEnv.getEnv().beginTransaction(null, null);
+
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = inventoryArray.get(i);
+            String sku = sArray[1];
+            try {
+                theKey = new DatabaseEntry(sku.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            theInventory.setVendorPrice(Float.parseFloat(sArray[2]));
+            theInventory.setVendorInventory(Integer.parseInt(sArray[3]));
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Place the Inventory object on the DatabaseEntry object using
+            // the tuple binding we implemented in InventoryBinding.java
+            inventoryBinding.objectToEntry(theInventory, theData);
+
+            // Put it in the database. Note that this causes our secondary database
+            // to be automatically updated for us.
+            try {
+                myDbEnv.getInventoryDB().put(txn, theKey, theData);
+            } catch (DatabaseException dbe) {
+                System.err.println("Error putting entry " + sku);
+                txn.abort();
+                throw dbe;
+            }
+        }
+        // Commit the transaction. The data is now safely written to the
+        // inventory database.
+        txn.commit();
+    }
+
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                  case 'h':
+                    myDbEnvPath = new File(args[++i]);
+                    break;
+                  case 'i':
+                    inventoryFile = new File(args[++i]);
+                    break;
+                  case 'v':
+                    vendorsFile = new File(args[++i]);
+                    break;
+                  default:
+                    usage();
+                }
+            }
+        }
+    }
+
+    private List<String[]> loadFile(File theFile, int numFields) {
+        List<String[]> records = new ArrayList<String[]>();
+        try {
+            String theLine = null;
+            FileInputStream fis = new FileInputStream(theFile);
+            BufferedReader br = new BufferedReader(new InputStreamReader(fis));
+            while((theLine=br.readLine()) != null) {
+                String[] theLineArray = theLine.split("#");
+                if (theLineArray.length != numFields) {
+                    System.out.println("Malformed line found in " + theFile.getPath());
+                    System.out.println("Line was: '" + theLine);
+                    System.out.println("length found was: " + theLineArray.length);
+                    System.exit(-1);
+                }
+                records.add(theLineArray);
+            }
+            // Close the input stream handle
+            fis.close();
+        } catch (FileNotFoundException e) {
+            System.err.println(theFile.getPath() + " does not exist.");
+            e.printStackTrace();
+            usage();
+        } catch (IOException e)  {
+            System.err.println("IO Exception: " + e.toString());
+            e.printStackTrace();
+            System.exit(-1);
+        }
+        return records;
+    }
+
+    protected ExampleDatabasePut() {}
+}
diff --git a/examples/je/gettingStarted/ExampleInventoryRead.java b/examples/je/gettingStarted/ExampleInventoryRead.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f9afab73b0aa9584a230f762955fb133dc26db0
--- /dev/null
+++ b/examples/je/gettingStarted/ExampleInventoryRead.java
@@ -0,0 +1,202 @@
+// file ExampleInventoryRead
+// $Id: ExampleInventoryRead.java,v 1.9 2007/11/12 18:29:42 cwl Exp $
+
+package je.gettingStarted;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+
+public class ExampleInventoryRead {
+
+    private static File myDbEnvPath =
+        new File("/tmp/JEDB");
+
+    // Encapsulates the database environment and databases.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    private static TupleBinding inventoryBinding;
+    private static EntryBinding vendorBinding;
+
+    // The item to locate if the -s switch is used
+    private static String locateItem;
+
+    private static void usage() {
+        System.out.println("ExampleInventoryRead [-h <env directory>]" +
+                           "[-s <item to locate>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleInventoryRead eir = new ExampleInventoryRead();
+        try {
+            eir.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleInventoryRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      true);      // is this environment read-only?
+
+        // Setup our bindings.
+        inventoryBinding = new InventoryBinding();
+        vendorBinding =
+             new SerialBinding(myDbEnv.getClassCatalog(),
+                               Vendor.class);
+
+        if (locateItem != null) {
+            showItem();
+        } else {
+            showAllInventory();
+        }
+    }
+
+    private void showItem() throws DatabaseException {
+
+        SecondaryCursor secCursor = null;
+        try {
+            // searchKey is the key that we want to find in the
+            // secondary db.
+            DatabaseEntry searchKey =
+                new DatabaseEntry(locateItem.getBytes("UTF-8"));
+
+            // foundKey and foundData are populated from the primary
+            // entry that is associated with the secondary db key.
+            DatabaseEntry foundKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+
+            // open a secondary cursor
+            secCursor =
+                myDbEnv.getNameIndexDB().openSecondaryCursor(null, null);
+
+            // Search for the secondary database entry.
+            OperationStatus retVal =
+                secCursor.getSearchKey(searchKey, foundKey,
+                    foundData, LockMode.DEFAULT);
+
+            // Display the entry, if one is found. Repeat until no more
+            // secondary duplicate entries are found
+            while(retVal == OperationStatus.SUCCESS) {
+                Inventory theInventory =
+                    (Inventory)inventoryBinding.entryToObject(foundData);
+                displayInventoryRecord(foundKey, theInventory);
+                retVal = secCursor.getNextDup(searchKey, foundKey,
+                    foundData, LockMode.DEFAULT);
+            }
+        } catch (Exception e) {
+            System.err.println("Error on inventory secondary cursor:");
+            System.err.println(e.toString());
+            e.printStackTrace();
+        } finally {
+            if (secCursor != null) {
+                secCursor.close();
+            }
+        }
+    }
+
+    private void showAllInventory()
+        throws DatabaseException {
+        // Get a cursor
+        Cursor cursor = myDbEnv.getInventoryDB().openCursor(null, null);
+
+        // DatabaseEntry objects used for reading records
+        DatabaseEntry foundKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+
+        try { // always want to make sure the cursor gets closed
+            while (cursor.getNext(foundKey, foundData,
+                        LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+                Inventory theInventory =
+                    (Inventory)inventoryBinding.entryToObject(foundData);
+                displayInventoryRecord(foundKey, theInventory);
+            }
+        } catch (Exception e) {
+            System.err.println("Error on inventory cursor:");
+            System.err.println(e.toString());
+            e.printStackTrace();
+        } finally {
+            cursor.close();
+        }
+    }
+
+    private void displayInventoryRecord(DatabaseEntry theKey,
+                                        Inventory theInventory)
+        throws DatabaseException {
+
+
+        DatabaseEntry searchKey = null;
+        try {
+            String theSKU = new String(theKey.getData(), "UTF-8");
+            System.out.println(theSKU + ":");
+            System.out.println("\t " + theInventory.getItemName());
+            System.out.println("\t " + theInventory.getCategory());
+            System.out.println("\t " + theInventory.getVendor());
+            System.out.println("\t\tNumber in stock: " +
+                theInventory.getVendorInventory());
+            System.out.println("\t\tPrice per unit:  " +
+                theInventory.getVendorPrice());
+            System.out.println("\t\tContact: ");
+
+            searchKey =
+                new DatabaseEntry(theInventory.getVendor().getBytes("UTF-8"));
+        } catch (IOException willNeverOccur) {}
+        DatabaseEntry foundVendor = new DatabaseEntry();
+
+        if (myDbEnv.getVendorDB().get(null, searchKey, foundVendor,
+                LockMode.DEFAULT) != OperationStatus.SUCCESS) {
+            System.out.println("Could not find vendor: " +
+                theInventory.getVendor() + ".");
+            System.exit(-1);
+        } else {
+            Vendor theVendor =
+                (Vendor)vendorBinding.entryToObject(foundVendor);
+            System.out.println("\t\t " + theVendor.getAddress());
+            System.out.println("\t\t " + theVendor.getCity() + ", " +
+                theVendor.getState() + " " + theVendor.getZipcode());
+            System.out.println("\t\t Business Phone: " +
+                theVendor.getBusinessPhoneNumber());
+            System.out.println("\t\t Sales Rep: " +
+                                theVendor.getRepName());
+            System.out.println("\t\t            " +
+                theVendor.getRepPhoneNumber());
+        }
+    }
+
+    protected ExampleInventoryRead() {}
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myDbEnvPath = new File(args[++i]);
+                        break;
+                    case 's':
+                        locateItem = args[++i];
+                        break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+}
diff --git a/examples/je/gettingStarted/Inventory.java b/examples/je/gettingStarted/Inventory.java
new file mode 100644
index 0000000000000000000000000000000000000000..61d16534388bb91a569a31e3bf87d19841bbab6c
--- /dev/null
+++ b/examples/je/gettingStarted/Inventory.java
@@ -0,0 +1,64 @@
+// file Inventory.java
+// $Id: Inventory.java,v 1.3 2005/06/09 17:20:52 mark Exp $
+
+package je.gettingStarted;
+
+
+public class Inventory {
+
+    private String sku;
+    private String itemName;
+    private String category;
+    private String vendor;
+    private int vendorInventory;
+    private float vendorPrice;
+
+    public void setSku(String data) {
+        sku = data;
+    }
+
+    public void setItemName(String data) {
+        itemName = data;
+    }
+
+    public void setCategory(String data) {
+        category = data;
+    }
+
+    public void setVendorInventory(int data) {
+        vendorInventory = data;
+    }
+
+    public void setVendor(String data) {
+        vendor = data;
+    }
+
+    public void setVendorPrice(float data) {
+        vendorPrice = data;
+    }
+
+    public String getSku() {
+        return sku;
+    }
+
+    public String getItemName() {
+        return itemName;
+    }
+
+    public String getCategory() {
+        return category;
+    }
+
+    public int getVendorInventory() {
+        return vendorInventory;
+    }
+
+    public String getVendor() {
+        return vendor;
+    }
+
+    public float getVendorPrice() {
+        return vendorPrice;
+    }
+}
+
diff --git a/examples/je/gettingStarted/InventoryBinding.java b/examples/je/gettingStarted/InventoryBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..6ba400dc58af1d7ef1e0772c662859dcc32f71c4
--- /dev/null
+++ b/examples/je/gettingStarted/InventoryBinding.java
@@ -0,0 +1,47 @@
+// file InventoryBinding.java
+// $Id: InventoryBinding.java,v 1.4 2005/06/09 17:20:54 mark Exp $
+
+package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class InventoryBinding extends TupleBinding {
+
+    // Implement this abstract method. Used to convert
+    // a DatabaseEntry to an Inventory object.
+    public Object entryToObject(TupleInput ti) {
+
+        String sku = ti.readString();
+        String itemName = ti.readString();
+        String category = ti.readString();
+        String vendor = ti.readString();
+        int vendorInventory = ti.readInt();
+        float vendorPrice = ti.readFloat();
+
+        Inventory inventory = new Inventory();
+        inventory.setSku(sku);
+        inventory.setItemName(itemName);
+        inventory.setCategory(category);
+        inventory.setVendor(vendor);
+        inventory.setVendorInventory(vendorInventory);
+        inventory.setVendorPrice(vendorPrice);
+
+        return inventory;
+    }
+
+    // Implement this abstract method. Used to convert an
+    // Inventory object to a DatabaseEntry object.
+    public void objectToEntry(Object object, TupleOutput to) {
+
+        Inventory inventory = (Inventory)object;
+
+        to.writeString(inventory.getSku());
+        to.writeString(inventory.getItemName());
+        to.writeString(inventory.getCategory());
+        to.writeString(inventory.getVendor());
+        to.writeInt(inventory.getVendorInventory());
+        to.writeFloat(inventory.getVendorPrice());
+    }
+}
diff --git a/examples/je/gettingStarted/ItemNameKeyCreator.java b/examples/je/gettingStarted/ItemNameKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..d638747d67c630f87ae6f757cc4a4599180203d3
--- /dev/null
+++ b/examples/je/gettingStarted/ItemNameKeyCreator.java
@@ -0,0 +1,42 @@
+// file ItemNameKeyCreator.java
+// $Id: ItemNameKeyCreator.java,v 1.5 2006/03/30 00:39:55 sarette Exp $
+
+package je.gettingStarted;
+
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import java.io.IOException;
+
+public class ItemNameKeyCreator implements SecondaryKeyCreator {
+
+    private TupleBinding theBinding;
+
+    // Use the constructor to set the tuple binding
+    ItemNameKeyCreator(TupleBinding binding) {
+        theBinding = binding;
+    }
+
+    // Abstract method that we must implement
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+             DatabaseEntry keyEntry,    // From the primary
+             DatabaseEntry dataEntry,   // From the primary
+             DatabaseEntry resultEntry) // set the key data on this.
+         throws DatabaseException {
+
+        if (dataEntry != null) {
+            // Convert dataEntry to an Inventory object
+            Inventory inventoryItem =
+                  (Inventory)theBinding.entryToObject(dataEntry);
+            // Get the item name and use that as the key
+            String theItem = inventoryItem.getItemName();
+            try {
+                resultEntry.setData(theItem.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+        }
+        return true;
+    }
+}
diff --git a/examples/je/gettingStarted/MyDbEnv.java b/examples/je/gettingStarted/MyDbEnv.java
new file mode 100644
index 0000000000000000000000000000000000000000..014692f840dbfe6fa6f49a6d115524780c28f043
--- /dev/null
+++ b/examples/je/gettingStarted/MyDbEnv.java
@@ -0,0 +1,158 @@
+// file MyDbEnv.java
+// $Id: MyDbEnv.java,v 1.8 2007/11/12 18:29:42 cwl Exp $
+
+package je.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+
+public class MyDbEnv {
+
+    private Environment myEnv;
+
+    // The databases that our application uses
+    private Database vendorDb;
+    private Database inventoryDb;
+    private Database classCatalogDb;
+    private SecondaryDatabase itemNameIndexDb;
+
+    // Needed for object serialization
+    private StoredClassCatalog classCatalog;
+
+    // Our constructor does nothing
+    public MyDbEnv() {}
+
+    // The setup() method opens all our databases and the environment
+    // for us.
+    public void setup(File envHome, boolean readOnly)
+        throws DatabaseException {
+
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        SecondaryConfig mySecConfig = new SecondaryConfig();
+
+        // If the environment is read-only, then
+        // make the databases read-only too.
+        myEnvConfig.setReadOnly(readOnly);
+        myDbConfig.setReadOnly(readOnly);
+        mySecConfig.setReadOnly(readOnly);
+
+        // If the environment is opened for write, then we want to be
+        // able to create the environment and databases if
+        // they do not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+        myDbConfig.setAllowCreate(!readOnly);
+        mySecConfig.setAllowCreate(!readOnly);
+
+        // Allow transactions if we are writing to the database
+        myEnvConfig.setTransactional(!readOnly);
+        myDbConfig.setTransactional(!readOnly);
+        mySecConfig.setTransactional(!readOnly);
+
+        // Open the environment
+        myEnv = new Environment(envHome, myEnvConfig);
+
+        // Now open, or create and open, our databases
+        // Open the vendors and inventory databases
+        vendorDb = myEnv.openDatabase(null,
+                                      "VendorDB",
+                                       myDbConfig);
+
+        inventoryDb = myEnv.openDatabase(null,
+                                        "InventoryDB",
+                                         myDbConfig);
+
+        // Open the class catalog db. This is used to
+        // optimize class serialization.
+        classCatalogDb =
+            myEnv.openDatabase(null,
+                               "ClassCatalogDB",
+                               myDbConfig);
+
+        // Create our class catalog
+        classCatalog = new StoredClassCatalog(classCatalogDb);
+
+        // Need a tuple binding for the Inventory class.
+        // We use the InventoryBinding class
+        // that we implemented for this purpose.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        // Open the secondary database. We use this to create a
+        // secondary index for the inventory database
+
+        // We want to maintain an index for the inventory entries based
+        // on the item name. So, instantiate the appropriate key creator
+        // and open a secondary database.
+        ItemNameKeyCreator keyCreator =
+            new ItemNameKeyCreator(inventoryBinding);
+
+
+        // Set up additional secondary properties
+        // Need to allow duplicates for our secondary database
+        mySecConfig.setSortedDuplicates(true);
+        mySecConfig.setAllowPopulate(true); // Allow autopopulate
+        mySecConfig.setKeyCreator(keyCreator);
+
+        // Now open it
+        itemNameIndexDb =
+            myEnv.openSecondaryDatabase(
+                    null,
+                    "itemNameIndex", // index name
+                    inventoryDb,     // the primary db that we're indexing
+                    mySecConfig);    // the secondary config
+    }
+
+   // getter methods
+
+    // Needed for things like beginning transactions
+    public Environment getEnv() {
+        return myEnv;
+    }
+
+    public Database getVendorDB() {
+        return vendorDb;
+    }
+
+    public Database getInventoryDB() {
+        return inventoryDb;
+    }
+
+    public SecondaryDatabase getNameIndexDB() {
+        return itemNameIndexDb;
+    }
+
+    public StoredClassCatalog getClassCatalog() {
+        return classCatalog;
+    }
+
+    //Close the environment
+    public void close() {
+        if (myEnv != null) {
+            try {
+                //Close the secondary before closing the primaries
+                itemNameIndexDb.close();
+                vendorDb.close();
+                inventoryDb.close();
+                classCatalogDb.close();
+
+                // Finally, close the environment.
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " +
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+    }
+}
+
diff --git a/examples/je/gettingStarted/Vendor.java b/examples/je/gettingStarted/Vendor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ad0f9a9e40253c46b9eae34ca3f71b713135714
--- /dev/null
+++ b/examples/je/gettingStarted/Vendor.java
@@ -0,0 +1,83 @@
+// file Vendor.java
+// $Id: Vendor.java,v 1.3 2005/06/09 17:20:54 mark Exp $
+package je.gettingStarted;
+
+import java.io.Serializable;
+
+public class Vendor implements Serializable {
+
+    private String repName;
+    private String address;
+    private String city;
+    private String state;
+    private String zipcode;
+    private String bizPhoneNumber;
+    private String repPhoneNumber;
+    private String vendor;
+
+    public void setRepName(String data) {
+        repName = data;
+    }
+
+    public void setAddress(String data) {
+        address = data;
+    }
+
+    public void setCity(String data) {
+        city = data;
+    }
+
+    public void setState(String data) {
+        state = data;
+    }
+
+    public void setZipcode(String data) {
+        zipcode = data;
+    }
+
+    public void setBusinessPhoneNumber(String data) {
+        bizPhoneNumber = data;
+    }
+
+    public void setRepPhoneNumber(String data) {
+        repPhoneNumber = data;
+    }
+
+    public void setVendorName(String data) {
+        vendor = data;
+    }
+
+    public String getRepName() {
+        return repName;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public String getCity() {
+        return city;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getZipcode() {
+        return zipcode;
+    }
+
+    public String getBusinessPhoneNumber() {
+        return bizPhoneNumber;
+    }
+
+    public String getRepPhoneNumber() {
+        return repPhoneNumber;
+    }
+
+    public String getVendorName() {
+        return vendor;
+    }
+
+}
+
diff --git a/examples/je/gettingStarted/inventory.txt b/examples/je/gettingStarted/inventory.txt
new file mode 100644
index 0000000000000000000000000000000000000000..385c980051e8b6c0f0fac35b721b06281becafcd
--- /dev/null
+++ b/examples/je/gettingStarted/inventory.txt
@@ -0,0 +1,800 @@
+Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce
+Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh
+Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine
+Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce
+Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh
+Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine
+Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce
+Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh
+Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine
+Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce
+Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh
+Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine
+Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce
+Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh
+Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine
+Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce
+Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh
+Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine
+Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce
+Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh
+Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine
+Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce
+Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh
+Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine
+Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce
+Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh
+Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine
+Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce
+Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh
+Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine
+Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce
+Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh
+Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine
+Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce
+Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh
+Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine
+Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce
+Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh
+Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine
+Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce
+Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh
+Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine
+California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce
+California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh
+California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine
+Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce
+Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh
+Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine
+Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce
+Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh
+Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine
+Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce
+Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh
+Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine
+Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce
+Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh
+Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine
+Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce
+Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh
+Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine
+Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce
+Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh
+Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine
+Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce
+Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh
+Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine
+Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce
+Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh
+Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine
+Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce
+Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh
+Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine
+East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce
+East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh
+East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine
+English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce
+English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh
+English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine
+False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce
+False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh
+False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine
+Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce
+Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh
+Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine
+Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce
+Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh
+Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine
+Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce
+Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh
+Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine
+Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce
+Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh
+Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine
+Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce
+Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh
+Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine
+Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce
+Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh
+Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine
+Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce
+Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh
+Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine
+Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce
+Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh
+Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine
+Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce
+Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh
+Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine
+Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce
+Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh
+Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine
+Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce
+Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh
+Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine
+Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce
+Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh
+Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine
+King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce
+King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh
+King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine
+Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce
+Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh
+Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine
+Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce
+Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh
+Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine
+Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce
+Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh
+Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine
+Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce
+Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh
+Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine
+Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce
+Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh
+Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine
+Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce
+Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh
+Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine
+Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce
+Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh
+Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine
+Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce
+Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh
+Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine
+Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce
+Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh
+Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine
+Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce
+Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh
+Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine
+Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce
+Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh
+Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine
+Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce
+Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh
+Longan#LongfruisdI812#0.99#993#fruits#Off the Vine
+Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce
+Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply Fresh
+Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine
+Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce
+Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh
+Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine
+Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce
+Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh
+Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine
+Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce
+Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh
+Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine
+Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce
+Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh
+Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine
+Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce
+Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh
+Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine
+Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce
+Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh
+Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine
+Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce
+Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh
+Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine
+Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce
+Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh
+Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine
+Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce
+Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh
+Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine
+Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce
+Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh
+Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine
+Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce
+Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh
+Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine
+Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce
+Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh
+Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine
+Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce
+Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh
+Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine
+Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce
+Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh
+Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine
+Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce
+Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh
+Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine
+Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce
+Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh
+Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine
+Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce
+Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh
+Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine
+Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce
+Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh
+Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine
+Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce
+Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh
+Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine
+Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce
+Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh
+Neem Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine
+New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce
+New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh
+New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine
+Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce
+Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh
+Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine
+Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce
+Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh
+Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine
+Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce
+Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh
+Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine
+Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce
+Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh
+Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine
+Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce
+Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh
+Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine
+Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce
+Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh
+Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine
+Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce
+Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh
+Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine
+Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce
+Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh
+Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine
+Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce
+Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh
+Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine
+Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce
+Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh
+Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine
+Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce
+Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh
+Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine
+Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce
+Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh
+Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine
+Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce
+Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh
+Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine
+Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce
+Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh
+Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine
+Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce
+Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh
+Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine
+Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce
+Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh
+Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine
+Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce
+Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh
+Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine
+Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce
+Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh
+Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine
+Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce
+Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh
+Vegetable Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine
+White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce
+White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh
+White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine
+Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce
+Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh
+Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine
+Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce
+Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh
+Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine
+Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce
+Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh
+Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine
+Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce
+Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh
+Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine
+Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce
+Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh
+Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine
+Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce
+Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh
+Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine
+Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry
+Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce
+Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine
+Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry
+Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce
+Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine
+Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry
+Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce
+Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine
+Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry
+Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce
+Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine
+Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry
+Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce
+Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine
+Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry
+Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce
+Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine
+Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry
+Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce
+Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine
+Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry
+Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce
+Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine
+Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry
+Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce
+Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine
+Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry
+Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce
+Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine
+Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry
+Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce
+Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine
+Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry
+Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce
+Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine
+Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry
+Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce
+Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off the Vine
+Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry
+Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce
+Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine
+Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry
+Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce
+Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine
+Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry
+Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce
+Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine
+Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry
+Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce
+Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine
+Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry
+Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce
+Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine
+Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry
+Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce
+Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine
+Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry
+Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce
+Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine
+Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry
+Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce
+Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine
+Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry
+Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce
+Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine
+Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry
+Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce
+Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine
+Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry
+Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce
+Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine
+Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry
+Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce
+Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine
+Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry
+Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce
+Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine
+Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry
+Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce
+Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine
+Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry
+Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce
+Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine
+Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry
+Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce
+Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine
+English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry
+English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce
+English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine
+Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry
+Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce
+Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine
+Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry
+Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce
+Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine
+Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry
+Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce
+Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine
+Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry
+Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce
+Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine
+Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry
+Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce
+Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine
+Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry
+Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce
+Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine
+Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry
+Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce
+Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine
+Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry
+Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce
+Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine
+Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry
+Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce
+Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine
+Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry
+Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce
+Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine
+Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry
+Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce
+Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine
+Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry
+Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce
+Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine
+Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry
+Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce
+Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine
+Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry
+Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce
+Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine
+Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry
+Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce
+Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine
+Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry
+Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce
+Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine
+Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry
+Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce
+Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine
+Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry
+Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce
+Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine
+Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry
+Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce
+Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine
+Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry
+Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce
+Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine
+Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry
+Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce
+Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine
+Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry
+Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce
+Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine
+Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry
+Irish Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce
+Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine
+Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry
+Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce
+Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine
+Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry
+Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce
+Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine
+Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry
+Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce
+Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine
+Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry
+Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce
+Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine
+Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry
+Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce
+Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine
+Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry
+Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce
+Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine
+Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry
+Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce
+Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine
+Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry
+Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce
+Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine
+Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry
+Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce
+Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine
+Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry
+Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce
+Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine
+Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry
+Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce
+Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine
+Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry
+Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce
+Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine
+Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry
+Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce
+Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine
+Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry
+Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce
+Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine
+Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry
+Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce
+Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine
+Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry
+Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce
+Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine
+Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry
+Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce
+Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine
+Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry
+Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce
+Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine
+Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry
+Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce
+Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine
+Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry
+Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce
+Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine
+Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry
+Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce
+Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine
+Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry
+Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce
+Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine
+Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry
+Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce
+Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine
+Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry
+Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce
+Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine
+Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry
+Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce
+Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine
+Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry
+Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce
+Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine
+Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry
+Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce
+Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine
+Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry
+Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce
+Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine
+Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry
+Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce
+Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine
+Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry
+Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce
+Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine
+Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry
+Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce
+Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine
+Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry
+Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce
+Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine
+Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry
+Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce
+Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine
+Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry
+Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce
+Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine
+Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry
+Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce
+Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine
+Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry
+Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce
+Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine
+Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry
+Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce
+Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine
+Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry
+Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce
+Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine
+Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry
+Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce
+Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine
+Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry
+Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce
+Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine
+Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry
+Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce
+Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine
+Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry
+Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce
+Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine
+Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry
+Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce
+Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine
+Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry
+Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce
+Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine
+Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry
+Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce
+Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine
+Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry
+Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce
+Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine
+Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry
+Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce
+Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine
+Apple Fritters#AppldessDj96hw#6.12#0#desserts#Mom's Kitchen
+Apple Fritters#AppldessrN1kvM#6.06#0#desserts#The Baking Pan
+Banana Split#Banadess7tpjkJ#10.86#0#desserts#Mom's Kitchen
+Banana Split#Banadessfif758#11.07#0#desserts#The Baking Pan
+Blueberry Boy Bait#BluedesseX2LVU#3.72#0#desserts#Mom's Kitchen
+Blueberry Boy Bait#Bluedess9zLhaH#3.93#0#desserts#The Baking Pan
+Candied Cranberries#CanddessjW92p3#1.77#0#desserts#Mom's Kitchen
+Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan
+Daiquiri Souffle#DaiqdessebnYcy#9.54#0#desserts#Mom's Kitchen
+Daiquiri Souffle#DaiqdessfM1DnX#9.72#0#desserts#The Baking Pan
+Bananas Flambe#BanadesscczumD#6.94#0#desserts#Mom's Kitchen
+Bananas Flambe#Banadess8qNfxd#7.07#0#desserts#The Baking Pan
+Pie, Apple#Pie,desshcSHhT#7.88#0#desserts#Mom's Kitchen
+Pie, Apple#Pie,dessTbiwDp#7.88#0#desserts#The Baking Pan
+Pie, Pumpkin#Pie,desswhPBPB#6.00#0#desserts#Mom's Kitchen
+Pie, Pumpkin#Pie,dessDg3NWl#6.24#0#desserts#The Baking Pan
+Pie, Blueberry#Pie,dessw9VdgD#2.14#0#desserts#Mom's Kitchen
+Pie, Blueberry#Pie,dessiSjZKD#2.12#0#desserts#The Baking Pan
+Pie, Pecan#Pie,dess2NqhNR#12.70#0#desserts#Mom's Kitchen
+Pie, Pecan#Pie,dessB1LfcE#12.33#0#desserts#The Baking Pan
+Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#0#desserts#Mom's Kitchen
+Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#0#desserts#The Baking Pan
+Pie, Banana Cream#Pie,dessH80DuG#7.35#0#desserts#Mom's Kitchen
+Pie, Banana Cream#Pie,dessf1YvFb#7.08#0#desserts#The Baking Pan
+Pie, Key Lime#Pie,desshtli5N#4.85#0#desserts#Mom's Kitchen
+Pie, Key Lime#Pie,dessMwQkKm#5.13#0#desserts#The Baking Pan
+Pie, Lemon Meringue#Pie,dess9naVkX#3.74#0#desserts#Mom's Kitchen
+Pie, Lemon Meringue#Pie,dessKYcNML#3.67#0#desserts#The Baking Pan
+Pie, Caramel#Pie,dessSUuiIU#2.27#0#desserts#Mom's Kitchen
+Pie, Caramel#Pie,dessvo8uHh#2.33#0#desserts#The Baking Pan
+Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen
+Pie, Raspberry#Pie,dessJflbf5#2.36#0#desserts#The Baking Pan
+Ice Cream, Chocolate#Ice desseXuyxx#1.44#0#desserts#Mom's Kitchen
+Ice Cream, Chocolate#Ice dessASBohf#1.41#0#desserts#The Baking Pan
+Ice Cream, Vanilla#Ice dessYnzbbt#11.92#0#desserts#Mom's Kitchen
+Ice Cream, Vanilla#Ice dessUBBKp8#11.58#0#desserts#The Baking Pan
+Ice Cream, Strawberry#Ice dessfTwKhD#1.90#0#desserts#Mom's Kitchen
+Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#0#desserts#The Baking Pan
+Ice Cream, Rocky Road#Ice dessyIri3P#13.10#0#desserts#Mom's Kitchen
+Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#0#desserts#The Baking Pan
+Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#0#desserts#Mom's Kitchen
+Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#0#desserts#The Baking Pan
+Ice Cream Sundae#Ice dessbhlAXt#5.62#0#desserts#Mom's Kitchen
+Ice Cream Sundae#Ice dessByapxl#5.72#0#desserts#The Baking Pan
+Cobbler, Peach#CobbdessYUGeOB#10.14#0#desserts#Mom's Kitchen
+Cobbler, Peach#CobbdessXfEtUK#10.43#0#desserts#The Baking Pan
+Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#0#desserts#Mom's Kitchen
+Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#0#desserts#The Baking Pan
+Cobbler, Blueberry#CobbdessbiI0oF#3.78#0#desserts#Mom's Kitchen
+Cobbler, Blueberry#CobbdessMXxbBN#3.57#0#desserts#The Baking Pan
+Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen
+Cobbler, Cherry#CobbdessA1dADa#12.10#0#desserts#The Baking Pan
+Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#0#desserts#Mom's Kitchen
+Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan
+Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen
+Cobbler, Rhubarb#CobbdessPfnCT0#9.27#0#desserts#The Baking Pan
+Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen
+Cobbler, Strawberry#CobbdessH3LdgQ#12.20#0#desserts#The Baking Pan
+Cobbler, Zucchini#Cobbdess5rK4dP#11.24#0#desserts#Mom's Kitchen
+Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#0#desserts#The Baking Pan
+Brownies#BrowdessmogdTl#7.62#0#desserts#Mom's Kitchen
+Brownies#Browdess84Qc1z#7.55#0#desserts#The Baking Pan
+Fudge Bar#Fudgdess8iXSyf#11.72#0#desserts#Mom's Kitchen
+Fudge Bar#FudgdessakU1Id#12.29#0#desserts#The Baking Pan
+Cookies, Oatmeal#Cookdessnq9Oya#2.84#0#desserts#Mom's Kitchen
+Cookies, Oatmeal#CookdessBhgp7p#2.68#0#desserts#The Baking Pan
+Cookies, Chocolate Chip#CookdessRVszsZ#12.73#0#desserts#Mom's Kitchen
+Cookies, Chocolate Chip#CookdessSOoHmT#12.26#0#desserts#The Baking Pan
+Cookies, Peanut Butter#Cookdess2UcMI2#7.82#0#desserts#Mom's Kitchen
+Cookies, Peanut Butter#Cookdess1cILme#7.46#0#desserts#The Baking Pan
+Mousse, Chocolate#MousdessDpN4sQ#6.25#0#desserts#Mom's Kitchen
+Mousse, Chocolate#Mousdess8FyFT8#5.96#0#desserts#The Baking Pan
+Mousse, Blueberry Maple#MousdessacwrkO#7.28#0#desserts#Mom's Kitchen
+Mousse, Blueberry Maple#MousdessbiCMFg#7.21#0#desserts#The Baking Pan
+Mousse, Chocolate Banana#MousdessIeW4qz#5.13#0#desserts#Mom's Kitchen
+Mousse, Chocolate Banana#Mousdess1De9oL#5.08#0#desserts#The Baking Pan
+Mousse, Cherry#Mousdesss1bF8H#13.05#0#desserts#Mom's Kitchen
+Mousse, Cherry#Mousdess0ujevx#12.43#0#desserts#The Baking Pan
+Mousse, Eggnog#MousdessZ38hXj#9.07#0#desserts#Mom's Kitchen
+Mousse, Eggnog#Mousdesshs05ST#8.81#0#desserts#The Baking Pan
+Mousse, Strawberry#MousdessHCDlBK#5.58#0#desserts#Mom's Kitchen
+Mousse, Strawberry#MousdessSZ4PyW#5.36#0#desserts#The Baking Pan
+Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#0#desserts#Mom's Kitchen
+Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#0#desserts#The Baking Pan
+Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#0#desserts#Mom's Kitchen
+Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan
+Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#0#desserts#Mom's Kitchen
+Sherbet, Orange Crush#SherdessxmVJBF#4.16#0#desserts#The Baking Pan
+Sherbet, Blueberry#SherdessFAgxqp#3.46#0#desserts#Mom's Kitchen
+Sherbet, Blueberry#SherdessMPL87u#3.60#0#desserts#The Baking Pan
+Sherbet, Raspberry#Sherdesse86ugA#6.08#0#desserts#Mom's Kitchen
+Sherbet, Raspberry#Sherdesslc1etR#5.85#0#desserts#The Baking Pan
+Sherbet, Strawberry#SherdessFwv09m#4.63#0#desserts#Mom's Kitchen
+Sherbet, Strawberry#SherdessKB0H7q#4.81#0#desserts#The Baking Pan
+Tart, Apple#TartdessrsTyXA#3.35#0#desserts#Mom's Kitchen
+Tart, Apple#Tartdessp7pyiy#3.13#0#desserts#The Baking Pan
+Tart, Almond#TartdessC7FARL#6.62#0#desserts#Mom's Kitchen
+Tart, Almond#Tartdess1V1A1c#6.68#0#desserts#The Baking Pan
+Tart, Blueberry#TartdesssQZRXX#10.28#0#desserts#Mom's Kitchen
+Tart, Blueberry#TartdessUSJSuc#10.28#0#desserts#The Baking Pan
+Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#0#desserts#Mom's Kitchen
+Tart, Chocolate-Pear#TartdessL3aEDd#5.51#0#desserts#The Baking Pan
+Tart, Lemon Fudge#Tartdess9DhZUT#3.88#0#desserts#Mom's Kitchen
+Tart, Lemon Fudge#TartdesshzLOWt#3.96#0#desserts#The Baking Pan
+Tart, Pecan#TartdessvSbXzd#11.80#0#desserts#Mom's Kitchen
+Tart, Pecan#Tartdess6YXJec#11.04#0#desserts#The Baking Pan
+Tart, Pineapple#TartdesseMfJFe#9.01#0#desserts#Mom's Kitchen
+Tart, Pineapple#TartdessA2Wftr#8.44#0#desserts#The Baking Pan
+Tart, Pear#Tartdess4a1BUc#10.09#0#desserts#Mom's Kitchen
+Tart, Pear#TartdessNw8YPG#10.68#0#desserts#The Baking Pan
+Tart, Raspberry#TartdessAVnpP6#6.18#0#desserts#Mom's Kitchen
+Tart, Raspberry#TartdessfVxZFf#5.95#0#desserts#The Baking Pan
+Tart, Strawberry#Tartdess4IUcZW#4.75#0#desserts#Mom's Kitchen
+Tart, Strawberry#Tartdess2BeEDb#4.61#0#desserts#The Baking Pan
+Tart, Raspberry#TartdesshyBd24#1.85#0#desserts#Mom's Kitchen
+Tart, Raspberry#Tartdess5fqxgy#1.94#0#desserts#The Baking Pan
+Trifle, Berry#TrifdessmEkbU2#12.48#0#desserts#Mom's Kitchen
+Trifle, Berry#TrifdessAV9Ix8#12.60#0#desserts#The Baking Pan
+Trifle, American#TrifdesscsdSCd#4.70#0#desserts#Mom's Kitchen
+Trifle, American#TrifdessTArskm#4.35#0#desserts#The Baking Pan
+Trifle, English#TrifdessX87q8T#8.20#0#desserts#Mom's Kitchen
+Trifle, English#Trifdess52l955#8.12#0#desserts#The Baking Pan
+Trifle, Orange#TrifdesslUwxwe#9.74#0#desserts#Mom's Kitchen
+Trifle, Orange#TrifdessFrfCHP#10.22#0#desserts#The Baking Pan
+Trifle, Pumpkin#TrifdessJKFN96#4.72#0#desserts#Mom's Kitchen
+Trifle, Pumpkin#TrifdessMNw4EV#4.95#0#desserts#The Baking Pan
+Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen
+Trifle, Scottish#TrifdessAAUQCN#14.03#0#desserts#The Baking Pan
+Trifle, Sherry#TrifdesscuttJg#4.42#0#desserts#Mom's Kitchen
+Trifle, Sherry#TrifdesspRGpfP#4.21#0#desserts#The Baking Pan
+Trifle, Strawberry#TrifdessAd5TpV#3.58#0#desserts#Mom's Kitchen
+Trifle, Strawberry#Trifdess1rtW0A#3.58#0#desserts#The Baking Pan
+Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#0#desserts#Mom's Kitchen
+Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#0#desserts#The Baking Pan
+Cheesecake, Amaretto#CheedessOJBqfD#11.89#0#desserts#Mom's Kitchen
+Cheesecake, Amaretto#CheedessVnDf14#11.89#0#desserts#The Baking Pan
+Cheesecake, Apple#Cheedessuks1YK#11.22#0#desserts#Mom's Kitchen
+Cheesecake, Apple#CheedessMYKaKK#11.01#0#desserts#The Baking Pan
+Cheesecake, Apricot#CheedessKUxTYY#12.34#0#desserts#Mom's Kitchen
+Cheesecake, Apricot#CheedessMvB1pr#11.88#0#desserts#The Baking Pan
+Cheesecake, Australian#CheedessQ9WAIn#2.70#0#desserts#Mom's Kitchen
+Cheesecake, Australian#CheedessE6Jyjc#2.53#0#desserts#The Baking Pan
+Cheesecake, Arkansas#CheedessTbqzmw#6.98#0#desserts#Mom's Kitchen
+Cheesecake, Arkansas#CheedesstWJZfC#6.66#0#desserts#The Baking Pan
+Cheesecake, Blueberry#Cheedessyo51KL#8.07#0#desserts#Mom's Kitchen
+Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#0#desserts#The Baking Pan
+Cheesecake, Cherry#CheedessEahRkC#4.40#0#desserts#Mom's Kitchen
+Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#0#desserts#The Baking Pan
+Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#0#desserts#Mom's Kitchen
+Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#0#desserts#The Baking Pan
+Cheesecake, German Chocolate#CheedesswayvJL#12.03#0#desserts#Mom's Kitchen
+Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan
+Cheesecake, Turtle#CheedessLqgeIA#12.19#0#desserts#Mom's Kitchen
+Cheesecake, Turtle#CheedessvyNohA#12.07#0#desserts#The Baking Pan
+Brownies, Apple#BrowdessIDW1Cc#5.44#0#desserts#Mom's Kitchen
+Brownies, Apple#BrowdessyRMrAH#5.14#0#desserts#The Baking Pan
+Brownies, Fudge#BrowdessmIHIFJ#5.19#0#desserts#Mom's Kitchen
+Brownies, Fudge#BrowdessqewJ38#5.10#0#desserts#The Baking Pan
+Brownies, Almond Macaroon#BrowdessniK7QI#10.57#0#desserts#Mom's Kitchen
+Brownies, Almond Macaroon#BrowdessgkXURH#10.36#0#desserts#The Baking Pan
+Brownies, Butterscotch#BrowdesslpA06E#7.16#0#desserts#Mom's Kitchen
+Brownies, Butterscotch#BrowdessK5hofE#7.30#0#desserts#The Baking Pan
+Brownies, Caramel#BrowdessVGfoA8#3.07#0#desserts#Mom's Kitchen
+Brownies, Caramel#Browdess5jvVMM#3.13#0#desserts#The Baking Pan
+Brownies, Cherry#Browdessyoa66A#3.39#0#desserts#Mom's Kitchen
+Brownies, Cherry#BrowdessIg2JuF#3.39#0#desserts#The Baking Pan
+Brownies, Chocolate Chip#Browdessb9dc59#6.18#0#desserts#Mom's Kitchen
+Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#0#desserts#The Baking Pan
+Brownies, Coconut#BrowdessWPHrVR#3.06#0#desserts#Mom's Kitchen
+Brownies, Coconut#BrowdessGVBlML#2.86#0#desserts#The Baking Pan
+Brownies, Cream Cheese#Browdess1OyRay#12.74#0#desserts#Mom's Kitchen
+Brownies, Cream Cheese#Browdess2fRsNv#12.61#0#desserts#The Baking Pan
+Brownies, Fudge Mint#Browdessl7DP7k#11.45#0#desserts#Mom's Kitchen
+Brownies, Fudge Mint#Browdessv70VKQ#11.34#0#desserts#The Baking Pan
+Brownies, Mint Chip#BrowdessDDMvF7#1.81#0#desserts#Mom's Kitchen
+Brownies, Mint Chip#Browdess0j9PBD#1.84#0#desserts#The Baking Pan
+Cake, Angel Food#CakedessEaqGaE#11.18#0#desserts#Mom's Kitchen
+Cake, Angel Food#CakedessJyAyFe#11.18#0#desserts#The Baking Pan
+Cake, Chocolate#CakedessKLXFbn#10.11#0#desserts#Mom's Kitchen
+Cake, Chocolate#CakedessfNP5Hg#9.91#0#desserts#The Baking Pan
+Cake, Carrot#CakedessUTgMoV#4.20#0#desserts#Mom's Kitchen
+Cake, Carrot#CakedessQdkaYg#4.00#0#desserts#The Baking Pan
+Cake, Lemon Blueberry#CakedessepkeEW#11.73#0#desserts#Mom's Kitchen
+Cake, Lemon Blueberry#CakedessHTKyQs#12.42#0#desserts#The Baking Pan
+Cake Triple Fudge#CakedessiZ75lR#7.92#0#desserts#Mom's Kitchen
+Cake Triple Fudge#CakedessWRrSXP#8.00#0#desserts#The Baking Pan
+Cake, Walnut#CakedessveYVXZ#10.83#0#desserts#Mom's Kitchen
+Cake, Walnut#Cakedesse22rT5#11.04#0#desserts#The Baking Pan
+Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen
+Cake, French Apple#CakedessNBHCk0#1.86#0#desserts#The Baking Pan
+Cake, Fig#CakedessOncX4y#6.82#0#desserts#Mom's Kitchen
+Cake, Fig#CakedessTJtffn#7.08#0#desserts#The Baking Pan
+Cake, Maple#CakedessnoGPRF#3.04#0#desserts#Mom's Kitchen
+Cake, Maple#CakedessfVattM#3.22#0#desserts#The Baking Pan
+Cake, Devil's Food#CakedessiXcDCt#4.73#0#desserts#Mom's Kitchen
+Cake, Devil's Food#CakedessnBZk45#4.82#0#desserts#The Baking Pan
+Cake, Double-Lemon#CakedesskeS0Vd#3.46#0#desserts#Mom's Kitchen
+Cake, Double-Lemon#Cakedess50vx53#3.60#0#desserts#The Baking Pan
+Sorbet, Blackberry#SorbdessQoa0CE#9.88#0#desserts#Mom's Kitchen
+Sorbet, Blackberry#SorbdessqoOYzv#9.78#0#desserts#The Baking Pan
diff --git a/examples/je/gettingStarted/vendors.txt b/examples/je/gettingStarted/vendors.txt
new file mode 100644
index 0000000000000000000000000000000000000000..528e1b110baeb4ac31bca205f8cc998ebdd9a2a6
--- /dev/null
+++ b/examples/je/gettingStarted/vendors.txt
@@ -0,0 +1,6 @@
+TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765
+Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952
+Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54
+The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391
+Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12
+The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879
diff --git a/examples/je/txn/DBWriter.java b/examples/je/txn/DBWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..fb4d184e66f9790b0b3d6d535e620191c7a0739b
--- /dev/null
+++ b/examples/je/txn/DBWriter.java
@@ -0,0 +1,170 @@
+package je.txn;
+
+import java.util.Random;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+public class DBWriter extends Thread {
+    private Database myDb = null;
+    private Environment myEnv = null;
+    private EntryBinding dataBinding = null;
+    private Random generator = new Random();
+
+    private static int MAX_RETRY = 20;
+
+    private static String[] keys = {"key 1", "key 2", "key 3",
+                                    "key 4", "key 5", "key 6",
+                                    "key 7", "key 8", "key 9",
+                                    "key 10"};
+
+
+    // Constructor. Get our DB handles from here
+    DBWriter(Environment env, Database db, StoredClassCatalog scc)
+        throws DatabaseException {
+        myDb = db;
+        myEnv = env;
+        dataBinding = new SerialBinding(scc, PayloadData.class);
+    }
+
+
+    // Thread method that writes a series of records
+    // to the database using transaction protection.
+    // Deadlock handling is demonstrated here.
+    public void run () {
+        Transaction txn = null;
+
+        // Perform 50 transactions
+        for (int i=0; i<50; i++) {
+
+           boolean retry = true;
+           int retry_count = 0;
+           // while loop is used for deadlock retries
+           while (retry) {
+                // try block used for deadlock detection and
+                // general db exception handling
+                try {
+
+                    // Get a transaction
+                    txn = myEnv.beginTransaction(null, null);
+
+                    // Write 10 records to the db
+                    // for each transaction
+                    for (int j = 0; j < 10; j++) {
+                        // Get the key
+                        DatabaseEntry key = new DatabaseEntry();
+                        StringBinding.stringToEntry(keys[j], key);
+
+                        // Get the data
+                        PayloadData pd = new PayloadData(i+j, getName(),
+                            generator.nextDouble());
+                        DatabaseEntry data = new DatabaseEntry();
+                        dataBinding.objectToEntry(pd, data);
+
+                        // Do the put
+                        myDb.put(txn, key, data);
+                    }
+
+                    // commit
+                    System.out.println(getName() + " : committing txn : " + i);
+
+                    System.out.println(getName() + " : Found " +
+                        countRecords(null) + " records in the database.");
+                    try {
+                        txn.commit();
+                        txn = null;
+                    } catch (DatabaseException e) {
+                        System.err.println("Error on txn commit: " +
+                            e.toString());
+                    }
+                    retry = false;
+
+                } catch (DeadlockException de) {
+                    System.out.println("################# " + getName() +
+                        " : caught deadlock");
+                    // retry if necessary
+                    if (retry_count < MAX_RETRY) {
+                        System.err.println(getName() +
+                            " : Retrying operation.");
+                        retry = true;
+                        retry_count++;
+                    } else {
+                        System.err.println(getName() +
+                            " : out of retries. Giving up.");
+                        retry = false;
+                    }
+                } catch (DatabaseException e) {
+                    // abort and don't retry
+                    retry = false;
+                    System.err.println(getName() +
+                        " : caught exception: " + e.toString());
+                    e.printStackTrace();
+                } finally {
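+                    // A non-null txn at this point was not successfully
+                    // committed; abort it below so that its locks are
+                    // released.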
+                    if (txn != null) {
+                        try {
+                            txn.abort();
+                        } catch (Exception e) {
+                            System.err.println("Error aborting transaction: " +
+                                e.toString());
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // This simply counts the number of records contained in the
+    // database and returns the result. You can use this method
+    // in three ways:
+    //
+    // First, call it with an active txn handle.
+    // Second, configure the cursor for dirty (read-uncommitted) reads.
+    // Third, call countRecords AFTER the writer has committed
+    //    its transaction.
+    //
+    // If you do none of these things, the writer thread will
+    // self-deadlock.
+    //
+    // Note that this method exists only for illustrative purposes.
+    // A more straightforward way to count the number of records in
+    // a database is to use the Database.getStats() method.
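+    //
+    // This example takes the second approach: the cursor below is opened
+    // with read-uncommitted semantics, which is why run() can call
+    // countRecords(null) while its own transaction is still open.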
+    private int countRecords(Transaction txn)  throws DatabaseException {
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        int count = 0;
+        Cursor cursor = null;
+
+        try {
+            // Get the cursor
+            CursorConfig cc = new CursorConfig();
+            cc.setReadUncommitted(true);
+            cursor = myDb.openCursor(txn, cc);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                    OperationStatus.SUCCESS) {
+
+                    count++;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return count;
+
+    }
+}
diff --git a/examples/je/txn/PayloadData.java b/examples/je/txn/PayloadData.java
new file mode 100644
index 0000000000000000000000000000000000000000..08a20da64b20f25e395ab3cb50b40a0edab617a6
--- /dev/null
+++ b/examples/je/txn/PayloadData.java
@@ -0,0 +1,19 @@
+package je.txn;
+
+import java.io.Serializable;
+
+public class PayloadData implements Serializable {
+    private int oID;
+    private String threadName;
+    private double doubleData;
+
+    PayloadData(int id, String name, double data) {
+        oID = id;
+        threadName = name;
+        doubleData = data;
+    }
+
+    public double getDoubleData() { return doubleData; }
+    public int getID() { return oID; }
+    public String getThreadName() { return threadName; }
+}
diff --git a/examples/je/txn/TxnGuide.java b/examples/je/txn/TxnGuide.java
new file mode 100644
index 0000000000000000000000000000000000000000..7799f8870f81f12a210263694858ef5c1cf8275e
--- /dev/null
+++ b/examples/je/txn/TxnGuide.java
@@ -0,0 +1,149 @@
+// File TxnGuide.java
+
+package je.txn;
+
+import java.io.File;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+public class TxnGuide {
+
+    private static String myEnvPath = "./";
+    private static String dbName = "mydb.db";
+    private static String cdbName = "myclassdb.db";
+
+    // DB handles
+    private static Database myDb = null;
+    private static Database myClassDb = null;
+    private static Environment myEnv = null;
+
+    private static int NUMTHREADS = 5;
+
+    private static void usage() {
+        System.out.println("TxnGuide [-h <env directory>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        try {
+            // Parse the arguments list
+            parseArgs(args);
+            // Open the environment and databases
+            openEnv();
+            // Get our class catalog (used to serialize objects)
+            StoredClassCatalog classCatalog =
+                new StoredClassCatalog(myClassDb);
+
+            // Start the threads
+            DBWriter[] threadArray;
+            threadArray = new DBWriter[NUMTHREADS];
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i] = new DBWriter(myEnv, myDb, classCatalog);
+                threadArray[i].start();
+            }
+
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i].join();
+            }
+        } catch (Exception e) {
+            System.err.println("TxnGuide: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            closeEnv();
+        }
+        System.out.println("All done.");
+    }
+
+
+    private static void openEnv() throws DatabaseException {
+        System.out.println("opening env");
+
+        // Set up the environment.
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        myEnvConfig.setAllowCreate(true);
+        myEnvConfig.setTransactional(true);
+        // Environment handles are free-threaded in JE,
+        // so we do not have to do anything to cause the
+        // environment handle to be free-threaded.
+
+        // Set up the database
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        myDbConfig.setAllowCreate(true);
+        myDbConfig.setTransactional(true);
+        myDbConfig.setSortedDuplicates(true);
+        // There is no DatabaseConfig.setThreaded() method;
+        // database handles in JE are free-threaded as long as the
+        // environment is also free-threaded.
+
+        // Open the environment
+        myEnv = new Environment(new File(myEnvPath),    // Env home
+                                myEnvConfig);
+
+        // Open the database. Do not provide a txn handle. This open
+        // is autocommitted because DatabaseConfig.setTransactional()
+        // is true.
+        myDb = myEnv.openDatabase(null,     // txn handle
+                                  dbName,   // Database file name
+                                  myDbConfig);
+
+        // Used by the bind API for serializing objects
+        // Class database must not support duplicates
+        myDbConfig.setSortedDuplicates(false);
+        myClassDb = myEnv.openDatabase(null,     // txn handle
+                                       cdbName,  // Database file name
+                                       myDbConfig);
+    }
+
+    private static void closeEnv() {
+        System.out.println("Closing env and databases");
+        if (myDb != null ) {
+            try {
+                myDb.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myDb: " +
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myClassDb != null ) {
+            try {
+                myClassDb.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myClassDb: " +
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myEnv != null ) {
+            try {
+                myEnv.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: " + e.toString());
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private TxnGuide() {}
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myEnvPath = args[++i];
+                    break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+}
diff --git a/examples/jmx/JEApplicationMBean.java b/examples/jmx/JEApplicationMBean.java
new file mode 100644
index 0000000000000000000000000000000000000000..6902de93df18f29b37250aa897925225c2ec6be3
--- /dev/null
+++ b/examples/jmx/JEApplicationMBean.java
@@ -0,0 +1,324 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEApplicationMBean.java,v 1.10.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package jmx;
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.util.List;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.AttributeNotFoundException;
+import javax.management.DynamicMBean;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanConstructorInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanNotificationInfo;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanParameterInfo;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.jmx.JEMBeanHelper;
+
+/**
+ * JEApplicationMBean is an example of how a JE application can incorporate JE
+ * monitoring into its existing MBean.  It may be installed as is, or used as a
+ * starting point for building a MBean which includes JE support.
+ * <p>
+ * JE management is divided between the JEApplicationMBean class and
+ * JEMBeanHelper class. JEApplicationMBean contains an instance of
+ * JEMBeanHelper, which knows about JE attributes, operations and
+ * notifications. JEApplicationMBean itself has the responsibility of
+ * configuring, opening and closing the JE environment along with any other
+ * resources used by the application, and maintains a
+ * com.sleepycat.je.Environment handle.
+ * <p>
+ * The approach taken for accessing the environment is an application-specific
+ * choice. Some of the salient considerations are:
+ * <ul>
+ * <li>Applications may open one or many Environment objects per process
+ * against a given environment.</li>
+ *
+ * <li>All Environment handles reference the same underlying JE environment
+ * implementation object.</li>
+ *
+ * <li> The first Environment object instantiated in the process does the real
+ * work of configuring and opening the environment. Follow-on instantiations of
+ * Environment merely increment a reference count. Likewise,
+ * Environment.close() only does real work when it's called by the last
+ * Environment object in the process. </li>
+ * </ul>
+ * <p>
+ * Another MBean approach for environment access can be seen in
+ * com.sleepycat.je.jmx.JEMonitor. That MBean does not take responsibility for
+ * opening and closing environments, and can only operate against already-open
+ * environments.
+ */
+
+public class JEApplicationMBean implements DynamicMBean {
+
+    private static final String DESCRIPTION =
+        "An MBean for an application which uses JE. Provides open and close " +
+        "operations which configure and open a JE environment as part of the " +
+        "application's resources. Also supports general JE monitoring.";
+
+    private MBeanInfo mbeanInfo;    // this MBean's visible interface.
+    private JEMBeanHelper jeHelper; // gets JE management interface
+    private Environment targetEnv;  // saved environment handle
+
+    /**
+     * This MBean provides an open operation to open the JE environment.
+     */
+    public static final String OP_OPEN = "openJE";
+
+    /**
+     * This MBean provides a close operation to release the JE environment.
+     * Note that environments must be closed to release resources.
+     */
+    public static final String OP_CLOSE = "closeJE";
+
+    /**
+     * Instantiate a JEApplicationMBean
+     *
+     * @param environmentHome home directory of the target JE environment.
+     */
+    public JEApplicationMBean(String environmentHome) {
+
+        File environmentDirectory = new File(environmentHome);
+        jeHelper = new JEMBeanHelper(environmentDirectory, true);
+        resetMBeanInfo();
+    }
+
+    /**
+     * @see DynamicMBean#getAttribute
+     */
+    public Object getAttribute(String attributeName)
+        throws AttributeNotFoundException,
+               MBeanException {
+
+        return jeHelper.getAttribute(targetEnv, attributeName);
+    }
+
+    /**
+     * @see DynamicMBean#setAttribute
+     */
+    public void setAttribute(Attribute attribute)
+        throws AttributeNotFoundException,
+               InvalidAttributeValueException {
+
+        jeHelper.setAttribute(targetEnv, attribute);
+    }
+
+    /**
+     * @see DynamicMBean#getAttributes
+     */
+    public AttributeList getAttributes(String[] attributes) {
+
+        /* Sanity checking. */
+        if (attributes == null) {
+            throw new IllegalArgumentException("Attributes cannot be null");
+        }
+
+        /* Get each requested attribute. */
+        AttributeList results = new AttributeList();
+        for (int i = 0; i < attributes.length; i++) {
+            try {
+                String name = attributes[i];
+                Object value = jeHelper.getAttribute(targetEnv, name);
+                results.add(new Attribute(name, value));
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+        return results;
+    }
+
+    /**
+     * @see DynamicMBean#setAttributes
+     */
+    public AttributeList setAttributes(AttributeList attributes) {
+
+        /* Sanity checking. */
+        if (attributes == null) {
+            throw new IllegalArgumentException("attribute list can't be null");
+        }
+
+        /* Set each attribute specified. */
+        AttributeList results = new AttributeList();
+        for (int i = 0; i < attributes.size(); i++) {
+            Attribute attr = (Attribute) attributes.get(i);
+            try {
+                /* Set new value. */
+                jeHelper.setAttribute(targetEnv, attr);
+
+                /*
+                 * Add the name and new value to the result list. Be sure
+                 * to ask the MBean for the new value, rather than simply
+                 * using attr.getValue(), because the new value may not
+                 * using attr.getValue(), because the new value may not
+                 * be the same if it is adjusted by the JE
+                 */
+                String name = attr.getName();
+                Object newValue = jeHelper.getAttribute(targetEnv, name);
+                results.add(new Attribute(name, newValue));
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+        return results;
+    }
+
+    /**
+     * @see DynamicMBean#invoke
+     */
+    public Object invoke(String actionName,
+                         Object[] params,
+                         String[] signature)
+        throws MBeanException {
+
+        Object result = null;
+
+        if (actionName == null) {
+            throw new IllegalArgumentException("actionName cannot be null");
+        }
+
+        if (actionName.equals(OP_OPEN)) {
+            openEnvironment();
+            return null;
+        } else if (actionName.equals(OP_CLOSE)) {
+            closeEnvironment();
+            return null;
+        } else {
+            result = jeHelper.invoke(targetEnv, actionName, params, signature);
+        }
+
+        return result;
+    }
+
+    /**
+     * @see DynamicMBean#getMBeanInfo
+     */
+    public MBeanInfo getMBeanInfo() {
+        return mbeanInfo;
+    }
+
+    /**
+     * Create the available management interface for this environment.
+     * The attributes and operations available vary according to
+     * environment configuration.
+     *
+     */
+    private synchronized void resetMBeanInfo() {
+
+        /*
+         * Get JE attributes, operation and notification information
+         * from JEMBeanHelper. An application may choose to add functionality
+         * of its own when constructing the MBeanInfo.
+         */
+
+        /* Attributes. */
+        List attributeList =  jeHelper.getAttributeList(targetEnv);
+        MBeanAttributeInfo[] attributeInfo =
+            new MBeanAttributeInfo[attributeList.size()];
+        attributeList.toArray(attributeInfo);
+
+        /* Constructors. */
+        Constructor[] constructors = this.getClass().getConstructors();
+        MBeanConstructorInfo[] constructorInfo =
+            new MBeanConstructorInfo[constructors.length];
+        for (int i = 0; i < constructors.length; i++) {
+            constructorInfo[i] =
+                new MBeanConstructorInfo(this.getClass().getName(),
+                                         constructors[i]);
+        }
+
+        /* Operations. */
+
+        /*
+         * Get the list of operations available from the jeHelper. Then add
+         * an open and close operation.
+         */
+        List operationList = jeHelper.getOperationList(targetEnv);
+        if (targetEnv == null) {
+            operationList.add(
+             new MBeanOperationInfo(OP_OPEN,
+                                    "Configure and open the JE environment.",
+                                    new MBeanParameterInfo[0], // no params
+                                    "java.lang.Boolean",
+                                    MBeanOperationInfo.ACTION_INFO));
+        } else {
+            operationList.add(
+             new MBeanOperationInfo(OP_CLOSE,
+                                    "Close the JE environment.",
+                                    new MBeanParameterInfo[0], // no params
+                                    "void",
+                                    MBeanOperationInfo.ACTION_INFO));
+        }
+
+        MBeanOperationInfo[] operationInfo =
+            new MBeanOperationInfo[operationList.size()];
+        operationList.toArray(operationInfo);
+
+        /* Notifications. */
+        MBeanNotificationInfo[] notificationInfo =
+            jeHelper.getNotificationInfo(targetEnv);
+
+        /* Generate the MBean description. */
+        mbeanInfo = new MBeanInfo(this.getClass().getName(),
+                                  DESCRIPTION,
+                                  attributeInfo,
+                                  constructorInfo,
+                                  operationInfo,
+                                  notificationInfo);
+    }
+
+    /**
+     * Open a JE environment using the configuration specified through
+     * MBean attributes and recorded within the JEMBeanHelper.
+     */
+    private void openEnvironment()
+        throws MBeanException {
+
+        try {
+            if (targetEnv == null) {
+                /*
+                 * The environment configuration has been set through
+                 * mbean attributes managed by the JEMBeanHelper.
+                 */
+                targetEnv =
+                    new Environment(jeHelper.getEnvironmentHome(),
+                                    jeHelper.getEnvironmentOpenConfig());
+                resetMBeanInfo();
+            }
+        } catch (DatabaseException e) {
+            throw new MBeanException(e);
+        }
+    }
+
+    /**
+     * Release the environment handle contained within the MBean to properly
+     * release resources.
+     */
+    private void closeEnvironment()
+        throws MBeanException {
+
+        try {
+            if (targetEnv != null) {
+                targetEnv.close();
+                targetEnv = null;
+                resetMBeanInfo();
+            }
+        } catch (DatabaseException e) {
+            throw new MBeanException(e);
+        }
+    }
+}
diff --git a/examples/jmx/README.txt b/examples/jmx/README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17fbdcc4cb8b17a2e5acb60f81673edd44f7d3c5
--- /dev/null
+++ b/examples/jmx/README.txt
@@ -0,0 +1,47 @@
+JE provides a fully functional JMX MBean in com.sleepycat.je.jmx.JEMonitor.
+To use this MBean, build and deploy jejmx.jar:
+
+      1. cd <jehome>
+      2. modify <jehome>/build.properties and set j2ee.jarfile to an 
+         appropriate J2EE jar.
+      3. ant jmx
+
+This builds a jejmx.jar in <jehome>/build/lib which contains the
+MBean. A sample JBoss service descriptor can be found in
+je-jboss-service.xml in this directory. The MBean can be deployed
+by modifying the service file to point to a JE environment, and
+then copying the service file, jejmx.jar, and je.jar to the JBoss
+deployment directory.
+
+JEMonitor expects another component in the JVM to configure and open
+the JE environment; it will only access a JE environment that is
+already active. It is intended for these use cases:
+
+-  The application wants to add database monitoring with minimal effort and
+   little knowledge of JMX. Configuring JEMonitor within the JMX container
+   provides monitoring without requiring application code changes. 
+
+-  An application already supports JMX and wants to add database monitoring
+   without modifying its existing MBean.  The user can configure JEMonitor in
+   the JMX container in conjunction with other application MBeans that are
+   non-overlapping with JE monitoring.  No application code changes are
+   required. 
+
+Users may want to incorporate JE management functionality into their
+own MBeans, especially if their application configures and opens the
+JE environment. This can be done by using the utility class
+com.sleepycat.je.jmx.JEMBeanHelper and an example implementation,
+jmx.JEApplicationMBean, which is provided in this
+directory. This MBean differs from JEMonitor by supporting environment
+configuration and creation from within the MBean. JEApplicationMBean
+may be deployed, or used as a starting point for an alternate
+implementation. To build the example,
+
+      1. cd <jehome>
+      2. modify <jehome>/build.properties and set j2ee.jarfile to an 
+         appropriate J2EE jar.
+      3. ant jmx-examples
+ 
+This creates a jejmx-example.jar in <jehome>/build/lib that can be
+copied to the appropriate deployment directory. See the 
+je-jboss-service.xml file for an example of how this might be done for JBoss.
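+
+As a rough sketch only (this class is not part of the example or of the
+build targets above), an application that prefers to register the example
+MBean programmatically rather than through a container could do something
+along these lines; the object name used here is purely illustrative:
+
+      import java.lang.management.ManagementFactory;
+
+      import javax.management.MBeanServer;
+      import javax.management.ObjectName;
+
+      import jmx.JEApplicationMBean;
+
+      public class RegisterJEMBean {
+          public static void main(String[] args) throws Exception {
+              // args[0] is assumed to be the JE environment home directory.
+              MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+              JEApplicationMBean mbean = new JEApplicationMBean(args[0]);
+              server.registerMBean(
+                  mbean, new ObjectName("user:service=JEApplicationMBean"));
+              // The environment itself is opened later through the MBean's
+              // openJE operation.
+          }
+      }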
diff --git a/examples/jmx/je-jboss-service.xml b/examples/jmx/je-jboss-service.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a0de827592ffa9223bc8f0313651a04ab9b96008
--- /dev/null
+++ b/examples/jmx/je-jboss-service.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- $Id: je-jboss-service.xml,v 1.7 2006/01/24 02:12:35 linda Exp $ -->
+
+<!-- ===================================================================== -->
+<!--                                                                       -->
+<!--  JE JBoss MBean Service Configuration for                             -->
+<!--  com.sleepycat.je.jmx.JEMonitor and                                   -->
+<!--  jmx.JEApplicationMBean                                               -->
+<!--                                                                       -->
+<!-- ===================================================================== -->
+
+<server>
+
+  <classpath codebase="." archives="je.jar,jejmx.jar"/>
+
+  <!-- This mbean can only access a JE environment which is already active -->
+  <mbean code="com.sleepycat.je.jmx.JEMonitor" 
+         name="user:service=JEMonitor">
+     <constructor> 
+         <!-- Environment directory -->
+     	 <arg type="java.lang.String" value="c:/tmp/je_store"/>
+     </constructor>
+  </mbean>
+
+  <!-- 
+
+       This mbean can configure and open a JE environment, as well as provide
+       monitoring support. To enable this mbean,
+           -create a jejmx-example.jar: cd <jehome>; ant jmx-examples
+           -add jejmx-example.jar to the classpath property 
+           -uncomment the section below.
+
+  <classpath codebase="." archives="je.jar,jejmx.jar,jejmx-example.jar"/>
+
+  <mbean code="jmx.JEApplicationMBean" 
+          name="user:service=JEApplicationMBean">
+     <constructor>  
+     	 <arg type="java.lang.String" value="c:/tmp/je_store2"/>
+     </constructor>
+  </mbean>
+
+  -->
+
+</server>
diff --git a/examples/persist/CustomKeyOrderExample.java b/examples/persist/CustomKeyOrderExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..11b86508781216aa2986405f24f637fe2f585ead
--- /dev/null
+++ b/examples/persist/CustomKeyOrderExample.java
@@ -0,0 +1,123 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CustomKeyOrderExample.java,v 1.6.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+
+public class CustomKeyOrderExample {
+
+    @Entity
+    static class Person {
+
+        @PrimaryKey
+        ReverseOrder name;
+
+        Person(String name) {
+            this.name = new ReverseOrder(name);
+        }
+
+        private Person() {} // For deserialization
+
+        @Override
+        public String toString() {
+            return name.value;
+        }
+    }
+
+    @Persistent
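+    /*
+     * Key class whose compareTo() inverts the natural String ordering, so
+     * the primary index returns entities in descending name order (Zola,
+     * Lisa, Andy for the data inserted in run() below).
+     */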
+    static class ReverseOrder implements Comparable<ReverseOrder> {
+
+        @KeyField(1)
+        String value;
+
+        ReverseOrder(String value) {
+            this.value = value;
+        }
+
+        private ReverseOrder() {} // For deserialization
+
+        public int compareTo(ReverseOrder o) {
+            return o.value.compareTo(value);
+        }
+    }
+
+    public static void main(String[] args)
+        throws DatabaseException {
+
+        if (args.length != 2 || !"-h".equals(args[0])) {
+            System.err.println
+                ("Usage: java " + CustomKeyOrderExample.class.getName() +
+                 " -h <envHome>");
+            System.exit(2);
+        }
+        CustomKeyOrderExample example =
+            new CustomKeyOrderExample(new File(args[1]));
+        example.run();
+        example.close();
+    }
+
+    private Environment env;
+    private EntityStore store;
+
+    private CustomKeyOrderExample(File envHome)
+        throws DatabaseException {
+
+        /* Open a transactional Berkeley DB engine environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Open a transactional entity store. */
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        storeConfig.setTransactional(true);
+        store = new EntityStore(env, "TestStore", storeConfig);
+    }
+
+    private void run()
+        throws DatabaseException {
+
+        PrimaryIndex<ReverseOrder,Person> index =
+            store.getPrimaryIndex(ReverseOrder.class, Person.class);
+
+        index.put(new Person("Andy"));
+        index.put(new Person("Lisa"));
+        index.put(new Person("Zola"));
+
+        /* Print the entities in key order. */
+        EntityCursor<Person> people = index.entities();
+        try {
+            for (Person person : people) {
+                System.out.println(person);
+            }
+        } finally {
+            people.close();
+        }
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        env.close();
+    }
+}
diff --git a/examples/persist/DplDump.java b/examples/persist/DplDump.java
new file mode 100644
index 0000000000000000000000000000000000000000..5bae72177fff293e79236ac597fbf6dab76df84b
--- /dev/null
+++ b/examples/persist/DplDump.java
@@ -0,0 +1,155 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DplDump.java,v 1.3.2.3 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * Dumps a store or all stores to standard output in raw XML format.  This
+ * sample is intended to be modified to dump in application-specific ways.
+ * @see #usage
+ */
+public class DplDump {
+
+    private File envHome;
+    private String storeName;
+    private boolean dumpMetadata;
+    private Environment env;
+
+    public static void main(String[] args) {
+        try {
+            DplDump dump = new DplDump(args);
+            dump.open();
+            dump.execute();
+            dump.close();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    private DplDump(String[] args) {
+
+        for (int i = 0; i < args.length; i += 1) {
+            String name = args[i];
+            String val = null;
+            if (i < args.length - 1 && !args[i + 1].startsWith("-")) {
+                i += 1;
+                val = args[i];
+            }
+            if (name.equals("-h")) {
+                if (val == null) {
+                    usage("No value after -h");
+                }
+                envHome = new File(val);
+            } else if (name.equals("-s")) {
+                if (val == null) {
+                    usage("No value after -s");
+                }
+                storeName = val;
+            } else if (name.equals("-meta")) {
+                dumpMetadata = true;
+            } else {
+                usage("Unknown arg: " + name);
+            }
+        }
+
+        if (envHome == null) {
+            usage("-h not specified");
+        }
+    }
+
+    private void usage(String msg) {
+
+        if (msg != null) {
+            System.out.println(msg);
+        }
+
+        System.out.println
+            ("usage:" +
+             "\njava "  + DplDump.class.getName() +
+             "\n   -h <envHome>" +
+             "\n      # Environment home directory" +
+             "\n  [-meta]" +
+             "\n      # Dump metadata; default: false" +
+             "\n  [-s <storeName>]" +
+             "\n      # Store to dump; default: dump all stores");
+
+        System.exit(2);
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setReadOnly(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        env.close();
+    }
+
+    private void execute()
+        throws DatabaseException {
+
+        if (storeName != null) {
+            dump();
+        } else {
+            for (String name : EntityStore.getStoreNames(env)) {
+                storeName = name;
+                dump();
+            }
+        }
+    }
+
+    private void dump()
+        throws DatabaseException {
+
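+        /*
+         * A RawStore (rather than a typed EntityStore) is used so the dump
+         * can read entities generically as RawObject instances, without
+         * needing the application's entity classes on the classpath.
+         */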
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setReadOnly(true);
+        RawStore store = new RawStore(env, storeName, storeConfig);
+
+        EntityModel model = store.getModel();
+        if (dumpMetadata) {
+            for (RawType type : model.getAllRawTypes()) {
+                System.out.println(type);
+            }
+        } else {
+            for (String clsName : model.getKnownClasses()) {
+                if (model.getEntityMetadata(clsName) != null) {
+                    PrimaryIndex<Object,RawObject> index =
+                        store.getPrimaryIndex(clsName);
+                    EntityCursor<RawObject> entities = index.entities();
+                    for (RawObject entity : entities) {
+                        System.out.println(entity);
+                    }
+                    entities.close();
+                }
+            }
+        }
+
+        store.close();
+    }
+}
diff --git a/examples/persist/EventExample.java b/examples/persist/EventExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..a720eb88c26e16b65b1c089910b6df0986e5f8fe
--- /dev/null
+++ b/examples/persist/EventExample.java
@@ -0,0 +1,416 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EventExample.java,v 1.8 2008/05/30 14:04:14 mark Exp $
+ */
+
+package persist;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+
+/**
+ * EventExample is a trivial example which stores Java objects that represent
+ * an event. Events are primarily indexed by a timestamp, but have other
+ * attributes, such as price, account reps, customer name and quantity.
+ * Some of those other attributes are indexed.
+ * <p>
+ * The example simply shows the creation of a JE environment and database,
+ * inserting some events, and retrieving the events.
+ * <p>
+ * This example is meant to be paired with its twin, EventExampleDPL.java.
+ * EventExample.java and EventExampleDPL.java perform the same functionality,
+ * but use the Base API and the Direct Persistence Layer API, respectively.
+ * This may be a useful way to compare the two APIs.
+ * <p>
+ * To run the example:
+ * <pre>
+ * cd jehome/examples
+ * javac persist/EventExample.java
+ * java -cp "../lib/je.jar;." persist.EventExample -h <environmentDirectory>
+ * </pre>
+ */
+public class EventExample {
+
+    /*
+     * The Event class embodies our example event and is the application
+     * data. JE data records are represented as key/data tuples. In this
+     * example, the key portion of the record is the event time, and the data
+     * portion is the Event instance.
+     */
+    @SuppressWarnings("serial")
+    static class Event implements Serializable {
+
+        /* This example adds a secondary index on the price field. */
+        private int price;
+        private Set<String> accountReps;
+
+        private String customerName;
+        private int quantity;
+
+        Event(int price,
+              String customerName) {
+
+            this.price = price;
+            this.customerName = customerName;
+            this.accountReps = new HashSet<String>();
+        }
+
+        void addRep(String rep) {
+            accountReps.add(rep);
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder();
+            sb.append(" price=").append(price);
+            sb.append(" customerName=").append(customerName);
+            sb.append(" reps=");
+            if (accountReps.size() == 0) {
+                sb.append("none");
+            } else {
+                for (String rep: accountReps) {
+                    sb.append(rep).append(" ");
+                }
+            }
+            return sb.toString();
+        }
+
+        int getPrice() {
+            return price;
+        }
+    }
+
+    /* A JE environment is roughly equivalent to a relational database. */
+    private Environment env;
+
+    /*
+     * A JE table is roughly equivalent to a relational table with a
+     * primary index.
+     */
+    private Database eventDb;
+
+    /* A secondary database indexes an additional field of the data record */
+    private SecondaryDatabase eventByPriceDb;
+
+    /*
+     * The catalogs and bindings are used to convert Java objects to the byte
+     * array format used by JE key/data in the base API. The Direct Persistence
+     * Layer API supports Java objects as arguments directly.
+     */
+    private Database catalogDb;
+    private EntryBinding eventBinding;
+
+    /* Used for generating example data. */
+    private Calendar cal;
+
+
+    /*
+     * First manually make a directory to house the JE environment.
+     * Usage: java -cp je.jar persist.EventExample -h <envHome>
+     * All JE on-disk storage is held within envHome.
+     */
+    public static void main(String[] args)
+        throws DatabaseException {
+
+        if (args.length != 2 || !"-h".equals(args[0])) {
+            System.err.println
+                ("Usage: java " + EventExample.class.getName() +
+                 " -h <envHome>");
+            System.exit(2);
+        }
+        EventExample example = new EventExample(new File(args[1]));
+        example.run();
+        example.close();
+    }
+
+    private EventExample(File envHome)
+        throws DatabaseException {
+
+        /* Open a transactional Berkeley DB engine environment. */
+        System.out.println("-> Creating a JE environment");
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        init();
+        cal = Calendar.getInstance();
+    }
+
+    /**
+     * Create all primary and secondary indices.
+     */
+    private void init()
+        throws DatabaseException {
+
+        System.out.println("-> Creating a JE database");
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        eventDb = env.openDatabase(null,      // use auto-commit txn
+                                     "eventDb", // database name
+                                     dbConfig);
+
+
+        /*
+         * In our example, the database record is composed of a key portion
+         * which represents the event timestamp, and a data portion holds an
+         * instance of the Event class.
+         *
+         * JE's base API accepts and returns key and data as byte arrays, so we
+         * need some support for marshaling between objects and byte arrays. We
+         * call this binding, and supply a package of helper classes to support
+         * this. It's entirely possible to do all binding on your own.
+         *
+         * A class catalog database is needed for storing class descriptions
+         * for the serial binding used below. This avoids storing class
+         * descriptions redundantly in each record.
+         */
+        DatabaseConfig catalogConfig = new DatabaseConfig();
+        catalogConfig.setTransactional(true);
+        catalogConfig.setAllowCreate(true);
+        catalogDb = env.openDatabase(null, "catalogDb", catalogConfig);
+        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+
+        /*
+         * Create a serial binding for Event data objects.  Serial
+         * bindings can be used to store any Serializable object.
+         * We can use some pre-defined binding classes to convert
+         * primitives like the long key value to a byte array.
+         */
+        eventBinding = new SerialBinding(catalog, Event.class);
+
+        /*
+         * Open a secondary database to allow accessing the primary
+         * database by a secondary key value. In this case, access events
+         * by price.
+         */
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+        secConfig.setKeyCreator(new PriceKeyCreator(eventBinding));
+        eventByPriceDb = env.openSecondaryDatabase(null,
+                                                   "priceDb",
+                                                   eventDb,
+                                                   secConfig);
+
+    }
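+
+    /*
+     * A minimal sketch (not part of the original flow) of how a record
+     * round-trips through the bindings created above.  The variables "when"
+     * and "ev" are hypothetical; the same calls appear in run() and
+     * printEvents() below.
+     *
+     *   DatabaseEntry k = new DatabaseEntry();
+     *   DatabaseEntry d = new DatabaseEntry();
+     *   LongBinding.longToEntry(when, k);       // long key -> byte array
+     *   eventBinding.objectToEntry(ev, d);      // Event -> serialized bytes
+     *   eventDb.put(null, k, d);                // auto-commit write
+     *   Event copy = (Event) eventBinding.entryToObject(d);
+     */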
+
+    private void run()
+        throws DatabaseException {
+
+        Random rand = new Random();
+
+        /* DatabaseEntry represents the key and data of each record */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /*
+         * Create a set of events. Each insertion is a separate, auto-commit
+         * transaction.
+         */
+        System.out.println("-> Inserting 4 events");
+        LongBinding.longToEntry(makeDate(1), key);
+        eventBinding.objectToEntry(new Event(100, "Company_A"),
+                                   data);
+        eventDb.put(null, key, data);
+
+        LongBinding.longToEntry(makeDate(2), key);
+        eventBinding.objectToEntry(new Event(2, "Company_B"),
+                                   data);
+        eventDb.put(null, key, data);
+
+        LongBinding.longToEntry(makeDate(3), key);
+        eventBinding.objectToEntry(new Event(20, "Company_C"),
+                                   data);
+        eventDb.put(null, key, data);
+
+        LongBinding.longToEntry(makeDate(4), key);
+        eventBinding.objectToEntry(new Event(40, "CompanyD"),
+                                   data);
+        eventDb.put(null, key, data);
+
+        /* Load a whole set of events transactionally. */
+        Transaction txn = env.beginTransaction(null, null);
+        int maxPrice = 50;
+        System.out.println("-> Inserting some randomly generated events");
+        for (int i = 0; i < 25; i++) {
+            long time = makeDate(rand.nextInt(365));
+            Event e = new Event(rand.nextInt(maxPrice),"Company_X");
+            if ((i % 2) == 0) {
+                e.addRep("Jane");
+                e.addRep("Nikunj");
+            } else {
+                e.addRep("Yongmin");
+            }
+            LongBinding.longToEntry(time, key);
+            eventBinding.objectToEntry(e, data);
+            eventDb.put(txn, key, data);
+        }
+        txn.commitWriteNoSync();
+
+        /*
+         * Windows of events - display the events between June 1 and Aug 31
+         */
+        System.out.println("\n-> Display the events between June 1 and Aug 31");
+        long endDate = makeDate(Calendar.AUGUST, 31);
+
+        /* Position the cursor and print the first event. */
+        Cursor eventWindow = eventDb.openCursor(null, null);
+        LongBinding.longToEntry(makeDate(Calendar.JUNE, 1), key);
+
+        if ((eventWindow.getSearchKeyRange(key,  data, null)) !=
+            OperationStatus.SUCCESS) {
+            System.out.println("No events found!");
+            eventWindow.close();
+            return;
+        }
+        try {
+            printEvents(key, data, eventWindow, endDate);
+        } finally {
+            eventWindow.close();
+        }
+
+        /*
+         * Display all events, ordered by a secondary index on price.
+         */
+        System.out.println("\n-> Display all events, ordered by price");
+        SecondaryCursor priceCursor =
+            eventByPriceDb.openSecondaryCursor(null, null);
+        try {
+            printEvents(priceCursor);
+        } finally {
+            priceCursor.close();
+        }
+    }
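+
+    /*
+     * For illustration only: a single event can also be read without a
+     * cursor, using Database.get() with an exact key.  "someTime" is a
+     * hypothetical timestamp value.
+     *
+     *   DatabaseEntry k = new DatabaseEntry();
+     *   DatabaseEntry d = new DatabaseEntry();
+     *   LongBinding.longToEntry(someTime, k);
+     *   if (eventDb.get(null, k, d, null) == OperationStatus.SUCCESS) {
+     *       Event e = (Event) eventBinding.entryToObject(d);
+     *   }
+     */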
+
+    private void close()
+        throws DatabaseException {
+
+        eventByPriceDb.close();
+        eventDb.close();
+        catalogDb.close();
+        env.close();
+    }
+
+    /**
+     * Print all events covered by this cursor up to the end date.  We know
+     * that the cursor operates on long keys and Event data items, but there's
+     * no type-safe way of expressing that within the JE base API.
+     */
+    private void printEvents(DatabaseEntry firstKey,
+                             DatabaseEntry firstData,
+                             Cursor cursor,
+                             long endDate)
+        throws DatabaseException {
+
+        System.out.println("time=" +
+                           new Date(LongBinding.entryToLong(firstKey)) +
+                           eventBinding.entryToObject(firstData));
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        while (cursor.getNext(key, data, null) ==
+               OperationStatus.SUCCESS) {
+            if (LongBinding.entryToLong(key) > endDate) {
+                break;
+            }
+            System.out.println("time=" +
+                               new Date(LongBinding.entryToLong(key)) +
+                               eventBinding.entryToObject(data));
+        }
+    }
+
+    private void printEvents(SecondaryCursor cursor)
+        throws DatabaseException {
+        DatabaseEntry timeKey = new DatabaseEntry();
+        DatabaseEntry priceKey = new DatabaseEntry();
+        DatabaseEntry eventData = new DatabaseEntry();
+
+        while (cursor.getNext(priceKey, timeKey, eventData, null) ==
+               OperationStatus.SUCCESS) {
+            System.out.println("time=" +
+                               new Date(LongBinding.entryToLong(timeKey)) +
+                               eventBinding.entryToObject(eventData));
+        }
+    }
+
+    /**
+     * Little utility for making up java.util.Dates for different days, just
+     * to generate test data.
+     */
+    private long makeDate(int day) {
+
+        cal.set((Calendar.DAY_OF_YEAR), day);
+        return cal.getTime().getTime();
+    }
+
+    /**
+     * Little utility for making up java.util.Dates for different days, just
+     * to make the test data easier to read.
+     */
+    private long makeDate(int month, int day) {
+
+        cal.set((Calendar.MONTH), month);
+        cal.set((Calendar.DAY_OF_MONTH), day);
+        return cal.getTime().getTime();
+    }
+
+    /**
+     * A key creator that knows how to extract the secondary key from the data
+     * entry of the primary database.  To do so, it uses the dataBinding of
+     * the primary database to recover the Event, and an IntegerBinding to
+     * create the secondary key entry.
+     */
+    private static class PriceKeyCreator implements SecondaryKeyCreator {
+
+        private EntryBinding dataBinding;
+
+        PriceKeyCreator(EntryBinding eventBinding) {
+            this.dataBinding = eventBinding;
+        }
+
+        public boolean createSecondaryKey(SecondaryDatabase secondaryDb,
+                                          DatabaseEntry keyEntry,
+                                          DatabaseEntry dataEntry,
+                                          DatabaseEntry resultEntry)
+            throws DatabaseException {
+
+            /*
+             * Convert the data entry to an Event object, extract the secondary
+             * key value from it, and then convert it to the resulting
+             * secondary key entry.
+             */
+            Event e  = (Event) dataBinding.entryToObject(dataEntry);
+            int price = e.getPrice();
+            IntegerBinding.intToEntry(price, resultEntry);
+            return true;
+        }
+    }
+}
diff --git a/examples/persist/EventExampleDPL.java b/examples/persist/EventExampleDPL.java
new file mode 100644
index 0000000000000000000000000000000000000000..66936164b38167577ebe7e6c14e3e6a9166cfa23
--- /dev/null
+++ b/examples/persist/EventExampleDPL.java
@@ -0,0 +1,270 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EventExampleDPL.java,v 1.5 2008/02/05 23:28:17 mark Exp $
+ */
+
+package persist;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.io.File;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * EventExampleDPL is a trivial example which stores Java objects that
+ * represent an event. Events are primarily indexed by a timestamp, but have
+ * other attributes, such as price, account reps, customer name and
+ * quantity.  Some of those other attributes are indexed.
+ * <p>
+ * The example simply shows the creation of a BDB environment and database,
+ * inserting some events, and retrieving the events using the Direct
+ * Persistence layer.
+ * <p>
+ * This example is meant to be paired with its twin, EventExample.java.
+ * EventExample.java and EventExampleDPL.java perform the same functionality,
+ * but use the Base API and the Direct Persistence Layer API, respectively.
+ * This may be a useful way to compare the two APIs.
+ * <p>
+ * To run the example:
+ * <pre>
+ * cd jehome/examples
+ * javac persist/EventExampleDPL.java
+ * java -cp "../lib/je.jar;." persist.EventExampleDPL -h <environmentDirectory>
+ * </pre>
+ */
+public class EventExampleDPL {
+
+    /*
+     * The Event class embodies our example event and is the application
+     * data. The @Entity annotation indicates that this class defines the
+     * objects stored in a BDB database.
+     */
+    @Entity
+    static class Event {
+
+        @PrimaryKey
+        private Date time;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private int price;
+
+        private Set<String> accountReps;
+
+        private String customerName;
+        private int quantity;
+
+        Event(Date time,
+              int price,
+              String customerName) {
+
+            this.time = time;
+            this.price = price;
+            this.customerName = customerName;
+            this.accountReps = new HashSet<String>();
+        }
+
+        private Event() {} // For deserialization
+
+        void addRep(String rep) {
+            accountReps.add(rep);
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder();
+            sb.append("time=").append(time);
+            sb.append(" price=").append(price);
+            sb.append(" customerName=").append(customerName);
+            sb.append(" reps=");
+            if (accountReps.size() == 0) {
+                sb.append("none");
+            } else {
+                for (String rep: accountReps) {
+                    sb.append(rep).append(" ");
+                }
+            }
+            return sb.toString();
+        }
+    }
+
+    /* A BDB environment is roughly equivalent to a relational database. */
+    private Environment env;
+    private EntityStore store;
+
+    /*
+     * Event accessors let us access events by the primary index (time)
+     * as well as by the price field
+     */
+    PrimaryIndex<Date,Event> eventByTime;
+    SecondaryIndex<Integer,Date,Event> eventByPrice;
+
+    /* Used for generating example data. */
+    private Calendar cal;
+
+    /*
+     * First manually make a directory to house the BDB environment.
+     * Usage: java persist.EventExampleDPL -h <envHome>
+     * All BDB on-disk storage is held within envHome.
+     */
+    public static void main(String[] args)
+        throws DatabaseException {
+
+        if (args.length != 2 || !"-h".equals(args[0])) {
+            System.err.println
+                ("Usage: java " + EventExampleDPL.class.getName() +
+                 " -h <envHome>");
+            System.exit(2);
+        }
+        EventExampleDPL example = new EventExampleDPL(new File(args[1]));
+        example.run();
+        example.close();
+    }
+
+    private EventExampleDPL(File envHome)
+        throws DatabaseException {
+
+        /* Open a transactional Berkeley DB engine environment. */
+        System.out.println("-> Creating a BDB environment");
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Initialize the data access object. */
+        init();
+        cal = Calendar.getInstance();
+    }
+
+    /**
+     * Create all primary and secondary indices.
+     */
+    private void init()
+        throws DatabaseException {
+
+        /* Open a transactional entity store. */
+        System.out.println("-> Creating a BDB database");
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        storeConfig.setTransactional(true);
+        store = new EntityStore(env, "ExampleStore", storeConfig);
+
+        eventByTime = store.getPrimaryIndex(Date.class, Event.class);
+        eventByPrice = store.getSecondaryIndex(eventByTime,
+                                               Integer.class,
+                                               "price");
+    }
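+
+    /*
+     * For illustration only: once the indices above are open, events can be
+     * fetched directly by key, without a cursor.  "someDate" is a
+     * hypothetical value; subIndex() and get() are the same DPL methods used
+     * in PersonExample.java.
+     *
+     *   Event e = eventByTime.get(someDate);            // by primary key
+     *   EntityCursor<Event> c =
+     *       eventByPrice.subIndex(100).entities();      // all events at price 100
+     *   try { ... } finally { c.close(); }
+     */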
+
+    private void run()
+        throws DatabaseException {
+
+        Random rand = new Random();
+
+        /*
+         * Create a set of events. Each insertion is a separate, auto-commit
+         * transaction.
+         */
+        System.out.println("-> Inserting 4 events");
+        eventByTime.put(new Event(makeDate(1), 100, "Company_A"));
+        eventByTime.put(new Event(makeDate(2), 2, "Company_B"));
+        eventByTime.put(new Event(makeDate(3), 20, "Company_C"));
+        eventByTime.put(new Event(makeDate(4), 40, "CompanyD"));
+
+        /* Load a whole set of events transactionally. */
+        Transaction txn = env.beginTransaction(null, null);
+        int maxPrice = 50;
+        System.out.println("-> Inserting some randomly generated events");
+        for (int i = 0; i < 25; i++) {
+            Event e = new Event(makeDate(rand.nextInt(365)),
+                                rand.nextInt(maxPrice),
+                                "Company_X");
+            if ((i % 2) == 0) {
+                e.addRep("Bob");
+                e.addRep("Nikunj");
+            } else {
+                e.addRep("Yongmin");
+            }
+            eventByTime.put(e);
+        }
+        txn.commitWriteNoSync();
+
+        /*
+         * Windows of events - display the events between June 1 and Aug 31
+         */
+        System.out.println("\n-> Display the events between June 1 and Aug 31");
+        Date startDate = makeDate(Calendar.JUNE, 1);
+        Date endDate = makeDate(Calendar.AUGUST, 31);
+
+        EntityCursor<Event> eventWindow =
+            eventByTime.entities(startDate, true, endDate, true);
+        printEvents(eventWindow);
+
+        /*
+         * Display all events, ordered by a secondary index on price.
+         */
+        System.out.println("\n-> Display all events, ordered by price");
+        EntityCursor<Event> byPriceEvents = eventByPrice.entities();
+        printEvents(byPriceEvents);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        env.close();
+    }
+
+    /**
+     * Print all events covered by this cursor.
+     */
+    private void printEvents(EntityCursor<Event> eCursor)
+        throws DatabaseException {
+        try {
+            for (Event e: eCursor) {
+                System.out.println(e);
+            }
+        } finally {
+            /* Be sure to close the cursor. */
+            eCursor.close();
+        }
+    }
+
+    /**
+     * Little utility for making up java.util.Dates for different days, just
+     * to generate test data.
+     */
+    private Date makeDate(int day) {
+
+        cal.set((Calendar.DAY_OF_YEAR), day);
+        return cal.getTime();
+    }
+
+    /**
+     * Little utility for making up java.util.Dates for different days, just
+     * to make the test data easier to read.
+     */
+    private Date makeDate(int month, int day) {
+
+        cal.set((Calendar.MONTH), month);
+        cal.set((Calendar.DAY_OF_MONTH), day);
+        return cal.getTime();
+    }
+}
diff --git a/examples/persist/PersonExample.java b/examples/persist/PersonExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..49e87a4b50dbdc8e8742d29fb1ea44ba3a82e10b
--- /dev/null
+++ b/examples/persist/PersonExample.java
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersonExample.java,v 1.12.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
+
+public class PersonExample {
+
+    /* An entity class. */
+    @Entity
+    static class Person {
+
+        @PrimaryKey
+        String ssn;
+
+        String name;
+        Address address;
+
+        @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class)
+        String parentSsn;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        Set<String> emailAddresses = new HashSet<String>();
+
+        @SecondaryKey(relate=MANY_TO_MANY,
+                      relatedEntity=Employer.class,
+                      onRelatedEntityDelete=NULLIFY)
+        Set<Long> employerIds = new HashSet<Long>();
+
+        Person(String name, String ssn, String parentSsn) {
+            this.name = name;
+            this.ssn = ssn;
+            this.parentSsn = parentSsn;
+        }
+
+        private Person() {} // For deserialization
+    }
+
+    /* Another entity class. */
+    @Entity
+    static class Employer {
+
+        @PrimaryKey(sequence="ID")
+        long id;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String name;
+
+        Address address;
+
+        Employer(String name) {
+            this.name = name;
+        }
+
+        private Employer() {} // For deserialization
+    }
+
+    /* A persistent class used in other classes. */
+    @Persistent
+    static class Address {
+        String street;
+        String city;
+        String state;
+        int zipCode;
+        private Address() {} // For deserialization
+    }
+
+    /* The data accessor class for the entity model. */
+    static class PersonAccessor {
+
+        /* Person accessors */
+        PrimaryIndex<String,Person> personBySsn;
+        SecondaryIndex<String,String,Person> personByParentSsn;
+        SecondaryIndex<String,String,Person> personByEmailAddresses;
+        SecondaryIndex<Long,String,Person> personByEmployerIds;
+
+        /* Employer accessors */
+        PrimaryIndex<Long,Employer> employerById;
+        SecondaryIndex<String,Long,Employer> employerByName;
+
+        /* Opens all primary and secondary indices. */
+        public PersonAccessor(EntityStore store)
+            throws DatabaseException {
+
+            personBySsn = store.getPrimaryIndex(
+                String.class, Person.class);
+
+            personByParentSsn = store.getSecondaryIndex(
+                personBySsn, String.class, "parentSsn");
+
+            personByEmailAddresses = store.getSecondaryIndex(
+                personBySsn, String.class, "emailAddresses");
+
+            personByEmployerIds = store.getSecondaryIndex(
+                personBySsn, Long.class, "employerIds");
+
+            employerById = store.getPrimaryIndex(
+                Long.class, Employer.class);
+
+            employerByName = store.getSecondaryIndex(
+                employerById, String.class, "name");
+        }
+    }
+
+    public static void main(String[] args)
+        throws DatabaseException {
+
+        if (args.length != 2 || !"-h".equals(args[0])) {
+            System.err.println
+                ("Usage: java " + PersonExample.class.getName() +
+                 " -h <envHome>");
+            System.exit(2);
+        }
+        PersonExample example = new PersonExample(new File(args[1]));
+        example.run();
+        example.close();
+    }
+
+    private Environment env;
+    private EntityStore store;
+    private PersonAccessor dao;
+
+    private PersonExample(File envHome)
+        throws DatabaseException {
+
+        /* Open a transactional Berkeley DB engine environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Open a transactional entity store. */
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        storeConfig.setTransactional(true);
+        store = new EntityStore(env, "PersonStore", storeConfig);
+
+        /* Initialize the data access object. */
+        dao = new PersonAccessor(store);
+    }
+
+    private void run()
+        throws DatabaseException {
+
+        /*
+         * Add a parent and two children using the Person primary index.
+         * Specifying a non-null parentSsn adds the child Person to the
+         * sub-index of children for that parent key.
+         */
+        dao.personBySsn.put
+            (new Person("Bob Smith", "111-11-1111", null));
+        dao.personBySsn.put
+            (new Person("Mary Smith", "333-33-3333", "111-11-1111"));
+        dao.personBySsn.put
+            (new Person("Jack Smith", "222-22-2222", "111-11-1111"));
+
+        /* Print the children of a parent using a sub-index and a cursor. */
+        EntityCursor<Person> children =
+            dao.personByParentSsn.subIndex("111-11-1111").entities();
+        try {
+            for (Person child : children) {
+                System.out.println(child.ssn + ' ' + child.name);
+            }
+        } finally {
+            children.close();
+        }
+
+        /* Get Bob by primary key using the primary index. */
+        Person bob = dao.personBySsn.get("111-11-1111");
+        assert bob != null;
+
+        /*
+         * Create two employers if they do not already exist.  Their primary
+         * keys are assigned from a sequence.
+         */
+        Employer gizmoInc = dao.employerByName.get("Gizmo Inc");
+        if (gizmoInc == null) {
+            gizmoInc = new Employer("Gizmo Inc");
+            dao.employerById.put(gizmoInc);
+        }
+        Employer gadgetInc = dao.employerByName.get("Gadget Inc");
+        if (gadgetInc == null) {
+            gadgetInc = new Employer("Gadget Inc");
+            dao.employerById.put(gadgetInc);
+        }
+
+        /* Bob has two jobs and two email addresses. */
+        bob.employerIds.add(gizmoInc.id);
+        bob.employerIds.add(gadgetInc.id);
+        bob.emailAddresses.add("bob@bob.com");
+        bob.emailAddresses.add("bob@gmail.com");
+
+        /* Update Bob's record. */
+        dao.personBySsn.put(bob);
+
+        /* Bob can now be found by both email addresses. */
+        bob = dao.personByEmailAddresses.get("bob@bob.com");
+        assert bob != null;
+        bob = dao.personByEmailAddresses.get("bob@gmail.com");
+        assert bob != null;
+
+        /* Bob can also be found as an employee of both employers. */
+        EntityIndex<String,Person> employees;
+        employees = dao.personByEmployerIds.subIndex(gizmoInc.id);
+        assert employees.contains("111-11-1111");
+        employees = dao.personByEmployerIds.subIndex(gadgetInc.id);
+        assert employees.contains("111-11-1111");
+
+        /*
+         * When an employer is deleted, the onRelatedEntityDelete=NULLIFY for
+         * the employerIds key causes the deleted ID to be removed from Bob's
+         * employerIds.
+         */
+        dao.employerById.delete(gizmoInc.id);
+        bob = dao.personBySsn.get("111-11-1111");
+        assert bob != null;
+        assert !bob.employerIds.contains(gizmoInc.id);
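+
+        /*
+         * Sketch only (not executed here): after the delete and NULLIFY
+         * above, the sub-index query used earlier would no longer find Bob:
+         *
+         *   employees = dao.personByEmployerIds.subIndex(gizmoInc.id);
+         *   assert !employees.contains("111-11-1111");
+         */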
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        env.close();
+    }
+}
diff --git a/examples/persist/ScalaPersonExample.scala b/examples/persist/ScalaPersonExample.scala
new file mode 100644
index 0000000000000000000000000000000000000000..3a8d9d41ac23c728d0685df2b5e01b41cccb6503
--- /dev/null
+++ b/examples/persist/ScalaPersonExample.scala
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ScalaPersonExample.scala,v 1.2.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+import java.io.File
+
+import com.sleepycat.je.{Environment, EnvironmentConfig}
+import com.sleepycat.persist.{EntityCursor,EntityStore,StoreConfig}
+import com.sleepycat.persist.model.{Entity,PrimaryKey,SecondaryKey}
+import com.sleepycat.persist.model.Relationship.ONE_TO_ONE
+
+/**
+ * Simple example of using Berkeley DB Java Edition (JE) with Scala.  The JE
+ * Direct Persistence Layer (DPL) is used in this example, which requires Java
+ * 1.5, so the scalac -target:jvm-1.5 option is required when compiling.  The
+ * -Ygenerics option must also be used because DPL generics are used in this
+ * example.
+ *
+ *  scalac -Ygenerics -target:jvm-1.5 -cp je-x.y.z.jar ScalaPersonExample.scala
+ *
+ * To run the example:
+ *
+ *  mkdir ./tmp
+ *  scala -cp ".;je-x.y.z.jar" ScalaPersonExample
+ *
+ * Note that classOf[java.lang.String] and classOf[java.lang.Long] are used
+ * rather than classOf[String] and classOf[Long].  The latter use the Scala
+ * types rather than the Java types and cause run-time errors.
+ *
+ * This example was tested with Scala 2.6.1-RC1 and JE 3.2.30.
+ *
+ * See:
+ *  http://www.scala-lang.org/
+ *  http://www.oracle.com/technology/products/berkeley-db/je
+ */
+object ScalaPersonExample extends Application {
+
+    /**
+     * A persistent Entity is defined using DPL annotations.
+     */
+    @Entity
+    class Person(nameParam: String, addressParam: String) {
+
+        @PrimaryKey{val sequence="ID"}
+        var id: long = 0
+
+        @SecondaryKey{val relate=ONE_TO_ONE}
+        var name: String = nameParam
+
+        var address: String = addressParam
+
+        private def this() = this(null, null) // default ctor for unmarshalling
+
+        override def toString = "Person: " + id + ' ' + name + ' ' + address
+    }
+
+    /* Open the JE Environment. */
+    val envConfig = new EnvironmentConfig()
+    envConfig.setAllowCreate(true)
+    envConfig.setTransactional(true)
+    val env = new Environment(new File("./tmp"), envConfig)
+
+    /* Open the DPL Store. */
+    val storeConfig = new StoreConfig()
+    storeConfig.setAllowCreate(true)
+    storeConfig.setTransactional(true)
+    val store = new EntityStore(env, "ScalaPersonExample", storeConfig)
+
+    /* The PrimaryIndex maps the Long primary key to Person. */
+    val priIndex =
+        store.getPrimaryIndex(classOf[java.lang.Long], classOf[Person])
+
+    /* The SecondaryIndex maps the String secondary key to Person. */
+    val secIndex =
+        store.getSecondaryIndex(priIndex, classOf[java.lang.String], "name")
+
+    /* Insert some entities if the primary index is empty. */
+    val txn = env.beginTransaction(null, null)
+    if (priIndex.get(txn, 1L, null) == null) {
+        val person1 = new Person("Zola", "#1 Zola Street")
+        val person2 = new Person("Abby", "#1 Abby Street")
+        priIndex.put(txn, person1)
+        priIndex.put(txn, person2)
+        assert(person1.id == 1) // assigned from the ID sequence
+        assert(person2.id == 2) // assigned from the ID sequence
+        txn.commit()
+        println("--- Entities were inserted ---")
+    } else {
+        txn.abort()
+        println("--- Entities already exist ---")
+    }
+
+    /* Get entities by primary and secondary keys. */
+    println("--- Get by primary key ---")
+    println(priIndex.get(1L))
+    println(priIndex.get(2L))
+    assert(priIndex.get(3L) == null)
+    println("--- Get by secondary key ---")
+    println(secIndex.get("Zola"))
+    println(secIndex.get("Abby"))
+    assert(secIndex.get("xxx") == null)
+
+    /* Iterate entities in primary and secondary key order. */
+    def printAll[T](cursor: EntityCursor[T]) {
+        val person = cursor.next()
+        if (person == null) {
+            cursor.close()
+        } else {
+            println(person)
+            printAll(cursor) // tail recursion
+        }
+    }
+    println("--- Iterate by primary key ---")
+    printAll(priIndex.entities())
+    println("--- Iterate by secondary key ---")
+    printAll(secIndex.entities())
+
+    store.close()
+    env.close()
+}
diff --git a/examples/persist/gettingStarted/DataAccessor.java b/examples/persist/gettingStarted/DataAccessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..d9248126532cd5d529570323ed730abfa0c9d1bd
--- /dev/null
+++ b/examples/persist/gettingStarted/DataAccessor.java
@@ -0,0 +1,36 @@
+package persist.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+
+public class DataAccessor {
+    // Open the indices
+    public DataAccessor(EntityStore store)
+        throws DatabaseException {
+
+        // Primary key for Inventory classes
+        inventoryBySku = store.getPrimaryIndex(
+            String.class, Inventory.class);
+
+        // Secondary key for Inventory classes
+        // The last argument to getSecondaryIndex() must be the name of
+        // a field of the entity class; in this case, the itemName field
+        // of the Inventory class.
+        inventoryByName = store.getSecondaryIndex(
+            inventoryBySku, String.class, "itemName");
+
+        // Primary key for Vendor class
+        vendorByName = store.getPrimaryIndex(
+            String.class, Vendor.class);
+    }
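+
+    // For illustration: a lookup by secondary key goes through a sub-index,
+    // as in ExampleInventoryRead.  "Oranges" is just a sample item name from
+    // inventory.txt.
+    //
+    //   EntityCursor<Inventory> items =
+    //       inventoryByName.subIndex("Oranges").entities();
+    //   try { ... } finally { items.close(); }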
+
+    // Inventory Accessors
+    PrimaryIndex<String,Inventory> inventoryBySku;
+    SecondaryIndex<String,String,Inventory> inventoryByName;
+
+    // Vendor Accessors
+    PrimaryIndex<String,Vendor> vendorByName;
+}
+
diff --git a/examples/persist/gettingStarted/ExampleDatabasePut.java b/examples/persist/gettingStarted/ExampleDatabasePut.java
new file mode 100644
index 0000000000000000000000000000000000000000..27852a25e26a4c05ab33a34ecdff8e9f1a12ee30
--- /dev/null
+++ b/examples/persist/gettingStarted/ExampleDatabasePut.java
@@ -0,0 +1,181 @@
+package persist.gettingStarted;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException;
+
+public class ExampleDatabasePut {
+
+    private static File myDbEnvPath = new File("/tmp/JEDB");
+    private static File inventoryFile = new File("./inventory.txt");
+    private static File vendorsFile = new File("./vendors.txt");
+
+    private DataAccessor da;
+    
+    // Encapsulates the environment and data store.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+    
+    private static void usage() {
+        System.out.println("ExampleDatabasePut [-h <env directory>]");
+        System.out.println("      [-i <inventory file>] [-v <vendors file>]");
+        System.exit(-1);
+    }
+    
+
+    public static void main(String args[]) {
+        ExampleDatabasePut edp = new ExampleDatabasePut();
+        try {
+            edp.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabasePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+    
+    
+    private void run(String args[]) 
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath,  // Path to the environment home 
+                      false);       // Environment read-only?
+        
+        // Open the data accessor. This is used to store
+        // persistent objects.
+        da = new DataAccessor(myDbEnv.getEntityStore());
+       
+        System.out.println("loading vendors db....");
+        loadVendorsDb();
+        
+        System.out.println("loading inventory db....");
+        loadInventoryDb();
+    }
+    
+    private void loadVendorsDb() 
+            throws DatabaseException {
+        
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List vendors = loadFile(vendorsFile, 8);
+        
+        // Now load the data into the store.
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = (String[])vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+            
+            // Put it in the store. Because we do not explicitly set
+            // a transaction here, and because the store was opened
+            // with transactional support, auto commit is used for each
+            // write to the store.
+            da.vendorByName.put(theVendor);
+        }
+    }
+    
+    private void loadInventoryDb() 
+        throws DatabaseException {
+        
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List inventoryArray = loadFile(inventoryFile, 6);
+        
+        // Now load the data into the store. The item's sku is the
+        // key, and the data is an Inventory class object.
+        
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = (String[])inventoryArray.get(i);
+            String sku = sArray[1];
+            
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            theInventory.setVendorPrice((new Float(sArray[2])).floatValue());
+            theInventory.setVendorInventory((new Integer(sArray[3])).intValue());
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Put it in the store. Note that this causes our secondary key
+            // to be automatically updated for us.
+            da.inventoryBySku.put(theInventory);
+        }
+    }
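+
+    /*
+     * Note (sketch only): each put() above relies on auto-commit because no
+     * transaction is passed explicitly.  Grouping several writes into one
+     * transaction would look roughly like this, using the environment handle
+     * exposed by MyDbEnv:
+     *
+     *   Transaction txn = myDbEnv.getEnv().beginTransaction(null, null);
+     *   da.inventoryBySku.put(txn, theInventory);
+     *   // ... more puts ...
+     *   txn.commit();
+     */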
+        
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                  case 'h':
+                    myDbEnvPath = new File(args[++i]);
+                    break;
+                  case 'i':
+                    inventoryFile = new File(args[++i]);
+                    break;
+                  case 'v':
+                    vendorsFile = new File(args[++i]);
+                    break;
+                  default:
+                    usage();
+                }
+            }
+        }
+    }
+
+    private List loadFile(File theFile, int numFields) {
+        List<String[]> records = new ArrayList<String[]>();
+        try {
+            String theLine = null;
+            FileInputStream fis = new FileInputStream(theFile);
+            BufferedReader br = new BufferedReader(new InputStreamReader(fis));
+            while((theLine=br.readLine()) != null) {
+                String[] theLineArray = theLine.split("#");
+                if (theLineArray.length != numFields) {
+                    System.out.println("Malformed line found in " + theFile.getPath());
+                    System.out.println("Line was: '" + theLine + "'");
+                    System.out.println("length found was: " + theLineArray.length);
+                    System.exit(-1);
+                }
+                records.add(theLineArray);
+            }
+            // Close the input stream handle
+            fis.close();
+        } catch (FileNotFoundException e) {
+            System.err.println(theFile.getPath() + " does not exist.");
+            e.printStackTrace();
+            usage();
+        } catch (IOException e)  {
+            System.err.println("IO Exception: " + e.toString());
+            e.printStackTrace();
+            System.exit(-1);
+        }
+        return records;
+    }
+
+    protected ExampleDatabasePut() {}
+}
diff --git a/examples/persist/gettingStarted/ExampleInventoryRead.java b/examples/persist/gettingStarted/ExampleInventoryRead.java
new file mode 100644
index 0000000000000000000000000000000000000000..dbc64ea9b8d77d697a68da27352f645818725c81
--- /dev/null
+++ b/examples/persist/gettingStarted/ExampleInventoryRead.java
@@ -0,0 +1,143 @@
+package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityCursor;
+
+public class ExampleInventoryRead {
+
+    private static File myDbEnvPath =
+        new File("/tmp/JEDB");
+
+    private DataAccessor da;
+
+    // Encapsulates the database environment.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    // The item to locate if the -s switch is used
+    private static String locateItem;
+
+    private static void usage() {
+        System.out.println("ExampleInventoryRead [-h <env directory>]" +
+                           " [-s <item to locate>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleInventoryRead eir = new ExampleInventoryRead();
+        try {
+            eir.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleInventoryRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      true);       // is this environment read-only?
+
+        // Open the data accessor. This is used to retrieve
+        // persistent objects.
+        da = new DataAccessor(myDbEnv.getEntityStore());
+
+        // If an item to locate is provided on the command line,
+        // show just the inventory items using the provided name.
+        // Otherwise, show everything in the inventory.
+        if (locateItem != null) {
+            showItem();
+        } else {
+            showAllInventory();
+        }
+    }
+
+    // Shows all the inventory items that exist for a given
+    // inventory name.
+    private void showItem() throws DatabaseException {
+
+        // Use the inventory name secondary key to retrieve
+        // these objects.
+        EntityCursor<Inventory> items =
+            da.inventoryByName.subIndex(locateItem).entities();
+        try {
+            for (Inventory item : items) {
+                displayInventoryRecord(item);
+            }
+        } finally {
+            items.close();
+        }
+    }
+
+    // Displays all the inventory items in the store
+    private void showAllInventory()
+        throws DatabaseException {
+
+        // Get a cursor that will walk every
+        // inventory object in the store.
+        EntityCursor<Inventory> items =
+            da.inventoryBySku.entities();
+
+        try {
+            for (Inventory item : items) {
+                displayInventoryRecord(item);
+            }
+        } finally {
+            items.close();
+        }
+    }
+
+    private void displayInventoryRecord(Inventory theInventory)
+            throws DatabaseException {
+
+            System.out.println(theInventory.getSku() + ":");
+            System.out.println("\t " + theInventory.getItemName());
+            System.out.println("\t " + theInventory.getCategory());
+            System.out.println("\t " + theInventory.getVendor());
+            System.out.println("\t\tNumber in stock: " +
+                theInventory.getVendorInventory());
+            System.out.println("\t\tPrice per unit:  " +
+                theInventory.getVendorPrice());
+            System.out.println("\t\tContact: ");
+
+            Vendor theVendor =
+                    da.vendorByName.get(theInventory.getVendor());
+            assert theVendor != null;
+
+            System.out.println("\t\t " + theVendor.getAddress());
+            System.out.println("\t\t " + theVendor.getCity() + ", " +
+                theVendor.getState() + " " + theVendor.getZipcode());
+            System.out.println("\t\t Business Phone: " +
+                theVendor.getBusinessPhoneNumber());
+            System.out.println("\t\t Sales Rep: " +
+                                theVendor.getRepName());
+            System.out.println("\t\t            " +
+                theVendor.getRepPhoneNumber());
+    }
+
+    protected ExampleInventoryRead() {}
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myDbEnvPath = new File(args[++i]);
+                        break;
+                    case 's':
+                        locateItem = args[++i];
+                        break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+}
diff --git a/examples/persist/gettingStarted/Inventory.java b/examples/persist/gettingStarted/Inventory.java
new file mode 100644
index 0000000000000000000000000000000000000000..b0a4cfbf2e01b2b19461f175f1270cf0e15c2f9e
--- /dev/null
+++ b/examples/persist/gettingStarted/Inventory.java
@@ -0,0 +1,72 @@
+package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+import com.sleepycat.persist.model.SecondaryKey;
+
+@Entity
+public class Inventory {
+
+    // Primary key is sku
+    @PrimaryKey
+    private String sku;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String itemName;
+
+    private String category;
+    private String vendor;
+    private int vendorInventory;
+    private float vendorPrice;
+
+    public void setSku(String data) {
+        sku = data;
+    }
+
+    public void setItemName(String data) {
+        itemName = data;
+    }
+
+    public void setCategory(String data) {
+        category = data;
+    }
+
+    public void setVendorInventory(int data) {
+        vendorInventory = data;
+    }
+
+    public void setVendor(String data) {
+        vendor = data;
+    }
+
+    public void setVendorPrice(float data) {
+        vendorPrice = data;
+    }
+
+    public String getSku() {
+        return sku;
+    }
+
+    public String getItemName() {
+        return itemName;
+    }
+
+    public String getCategory() {
+        return category;
+    }
+
+    public int getVendorInventory() {
+        return vendorInventory;
+    }
+
+    public String getVendor() {
+        return vendor;
+    }
+
+    public float getVendorPrice() {
+        return vendorPrice;
+    }
+
+}
+
diff --git a/examples/persist/gettingStarted/MyDbEnv.java b/examples/persist/gettingStarted/MyDbEnv.java
new file mode 100644
index 0000000000000000000000000000000000000000..4d2ca719e0a53f04c852f472a9a784d024f40379
--- /dev/null
+++ b/examples/persist/gettingStarted/MyDbEnv.java
@@ -0,0 +1,78 @@
+package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+public class MyDbEnv {
+
+    private Environment myEnv;
+    private EntityStore store;
+
+    // Our constructor does nothing
+    public MyDbEnv() {}
+
+    // The setup() method opens the environment and store
+    // for us.
+    public void setup(File envHome, boolean readOnly) 
+        throws DatabaseException {
+
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        StoreConfig storeConfig = new StoreConfig();
+
+        myEnvConfig.setReadOnly(readOnly);
+        storeConfig.setReadOnly(readOnly);
+
+        // If the environment is opened for write, then we want to be 
+        // able to create the environment and entity store if 
+        // they do not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+        storeConfig.setAllowCreate(!readOnly);
+
+        // Open the environment and entity store
+        myEnv = new Environment(envHome, myEnvConfig);
+        store = new EntityStore(myEnv, "EntityStore", storeConfig);
+
+    }
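+
+    // Typical usage, as in ExampleDatabasePut and ExampleInventoryRead:
+    //
+    //   MyDbEnv myDbEnv = new MyDbEnv();
+    //   myDbEnv.setup(new File("/tmp/JEDB"), true);  // true = read-only
+    //   EntityStore store = myDbEnv.getEntityStore();
+    //   ...
+    //   myDbEnv.close();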
+
+    // Return a handle to the entity store
+    public EntityStore getEntityStore() {
+        return store;
+    }
+
+    // Return a handle to the environment
+    public Environment getEnv() {
+        return myEnv;
+    }
+
+
+    // Close the store and environment
+    public void close() {
+        if (store != null) {
+            try {
+                store.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing store: " + 
+                                    dbe.toString());
+               System.exit(-1);
+            }
+        }
+
+        if (myEnv != null) {
+            try {
+                // Finally, close the environment.
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " + 
+                                    dbe.toString());
+               System.exit(-1);
+            }
+        }
+    }
+}
+
diff --git a/examples/persist/gettingStarted/Vendor.java b/examples/persist/gettingStarted/Vendor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2dd02c45765dc1048edcc89b46bd1d9592db03b7
--- /dev/null
+++ b/examples/persist/gettingStarted/Vendor.java
@@ -0,0 +1,88 @@
+package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+@Entity
+public class Vendor {
+
+    private String repName;
+    private String address;
+    private String city;
+    private String state;
+    private String zipcode;
+    private String bizPhoneNumber;
+    private String repPhoneNumber;
+
+    // Primary key is the vendor's name
+    // This assumes that the vendor's name is
+    // unique in the database.
+    @PrimaryKey
+    private String vendor;
+
+    public void setRepName(String data) {
+        repName = data;
+    }
+
+    public void setAddress(String data) {
+        address = data;
+    }
+
+    public void setCity(String data) {
+        city = data;
+    }
+
+    public void setState(String data) {
+        state = data;
+    }
+
+    public void setZipcode(String data) {
+        zipcode = data;
+    }
+
+    public void setBusinessPhoneNumber(String data) {
+        bizPhoneNumber = data;
+    }
+
+    public void setRepPhoneNumber(String data) {
+        repPhoneNumber = data;
+    }
+
+    public void setVendorName(String data) {
+        vendor = data;
+    }
+
+    public String getRepName() {
+        return repName;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public String getCity() {
+        return city;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getZipcode() {
+        return zipcode;
+    }
+
+    public String getBusinessPhoneNumber() {
+        return bizPhoneNumber;
+    }
+
+    public String getRepPhoneNumber() {
+        return repPhoneNumber;
+    }
+
+    public String getVendorName() {
+        return vendor;
+    }
+
+}
+
diff --git a/examples/persist/gettingStarted/inventory.txt b/examples/persist/gettingStarted/inventory.txt
new file mode 100644
index 0000000000000000000000000000000000000000..385c980051e8b6c0f0fac35b721b06281becafcd
--- /dev/null
+++ b/examples/persist/gettingStarted/inventory.txt
@@ -0,0 +1,800 @@
+Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce
+Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh
+Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine
+Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce
+Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh
+Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine
+Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce
+Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh
+Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine
+Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce
+Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh
+Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine
+Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce
+Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh
+Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine
+Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce
+Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh
+Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine
+Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce
+Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh
+Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine
+Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce
+Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh
+Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine
+Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce
+Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh
+Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine
+Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce
+Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh
+Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine
+Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce
+Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh
+Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine
+Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce
+Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh
+Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine
+Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce
+Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh
+Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine
+Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce
+Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh
+Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine
+California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce
+California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh
+California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine
+Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce
+Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh
+Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine
+Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce
+Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh
+Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine
+Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce
+Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh
+Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine
+Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce
+Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh
+Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine
+Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce
+Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh
+Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine
+Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce
+Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh
+Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine
+Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce
+Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh
+Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine
+Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce
+Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh
+Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine
+Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce
+Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh
+Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine
+East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce
+East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh
+East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine
+English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce
+English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh
+English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine
+False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce
+False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh
+False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine
+Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce
+Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh
+Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine
+Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce
+Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh
+Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine
+Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce
+Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh
+Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine
+Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce
+Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh
+Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine
+Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce
+Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh
+Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine
+Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce
+Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh
+Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine
+Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce
+Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh
+Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine
+Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce
+Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh
+Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine
+Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce
+Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh
+Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine
+Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce
+Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh
+Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine
+Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce
+Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh
+Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine
+Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce
+Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh
+Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine
+King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce
+King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh
+King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine
+Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce
+Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh
+Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine
+Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce
+Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh
+Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine
+Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce
+Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh
+Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine
+Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce
+Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh
+Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine
+Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce
+Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh
+Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine
+Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce
+Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh
+Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine
+Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce
+Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh
+Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine
+Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce
+Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh
+Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine
+Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce
+Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh
+Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine
+Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce
+Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh
+Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine
+Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce
+Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh
+Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine
+Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce
+Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh
+Longan#LongfruisdI812#0.99#993#fruits#Off the Vine
+Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce
+Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply Fresh
+Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine
+Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce
+Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh
+Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine
+Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce
+Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh
+Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine
+Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce
+Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh
+Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine
+Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce
+Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh
+Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine
+Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce
+Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh
+Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine
+Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce
+Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh
+Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine
+Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce
+Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh
+Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine
+Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce
+Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh
+Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine
+Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce
+Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh
+Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine
+Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce
+Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh
+Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine
+Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce
+Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh
+Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine
+Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce
+Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh
+Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine
+Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce
+Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh
+Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine
+Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce
+Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh
+Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine
+Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce
+Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh
+Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine
+Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce
+Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh
+Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine
+Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce
+Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh
+Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine
+Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce
+Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh
+Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine
+Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce
+Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh
+Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine
+Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce
+Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh
+Neem Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine
+New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce
+New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh
+New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine
+Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce
+Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh
+Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine
+Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce
+Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh
+Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine
+Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce
+Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh
+Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine
+Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce
+Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh
+Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine
+Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce
+Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh
+Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine
+Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce
+Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh
+Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine
+Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce
+Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh
+Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine
+Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce
+Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh
+Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine
+Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce
+Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh
+Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine
+Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce
+Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh
+Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine
+Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce
+Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh
+Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine
+Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce
+Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh
+Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine
+Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce
+Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh
+Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine
+Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce
+Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh
+Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine
+Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce
+Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh
+Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine
+Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce
+Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh
+Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine
+Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce
+Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh
+Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine
+Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce
+Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh
+Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine
+Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce
+Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh
+Vegetable Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine
+White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce
+White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh
+White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine
+Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce
+Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh
+Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine
+Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce
+Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh
+Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine
+Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce
+Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh
+Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine
+Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce
+Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh
+Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine
+Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce
+Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh
+Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine
+Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce
+Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh
+Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine
+Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry
+Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce
+Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine
+Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry
+Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce
+Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine
+Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry
+Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce
+Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine
+Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry
+Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce
+Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine
+Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry
+Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce
+Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine
+Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry
+Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce
+Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine
+Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry
+Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce
+Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine
+Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry
+Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce
+Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine
+Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry
+Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce
+Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine
+Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry
+Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce
+Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine
+Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry
+Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce
+Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine
+Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry
+Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce
+Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine
+Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry
+Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce
+Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off the Vine
+Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry
+Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce
+Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine
+Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry
+Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce
+Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine
+Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry
+Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce
+Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine
+Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry
+Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce
+Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine
+Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry
+Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce
+Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine
+Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry
+Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce
+Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine
+Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry
+Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce
+Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine
+Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry
+Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce
+Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine
+Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry
+Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce
+Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine
+Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry
+Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce
+Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine
+Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry
+Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce
+Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine
+Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry
+Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce
+Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine
+Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry
+Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce
+Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine
+Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry
+Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce
+Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine
+Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry
+Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce
+Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine
+Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry
+Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce
+Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine
+English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry
+English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce
+English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine
+Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry
+Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce
+Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine
+Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry
+Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce
+Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine
+Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry
+Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce
+Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine
+Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry
+Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce
+Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine
+Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry
+Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce
+Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine
+Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry
+Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce
+Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine
+Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry
+Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce
+Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine
+Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry
+Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce
+Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine
+Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry
+Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce
+Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine
+Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry
+Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce
+Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine
+Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry
+Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce
+Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine
+Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry
+Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce
+Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine
+Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry
+Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce
+Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine
+Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry
+Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce
+Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine
+Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry
+Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce
+Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine
+Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry
+Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce
+Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine
+Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry
+Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce
+Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine
+Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry
+Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce
+Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine
+Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry
+Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce
+Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine
+Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry
+Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce
+Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine
+Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry
+Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce
+Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine
+Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry
+Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce
+Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine
+Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry
+Irish Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce
+Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine
+Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry
+Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce
+Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine
+Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry
+Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce
+Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine
+Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry
+Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce
+Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine
+Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry
+Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce
+Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine
+Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry
+Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce
+Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine
+Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry
+Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce
+Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine
+Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry
+Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce
+Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine
+Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry
+Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce
+Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine
+Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry
+Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce
+Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine
+Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry
+Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce
+Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine
+Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry
+Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce
+Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine
+Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry
+Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce
+Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine
+Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry
+Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce
+Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine
+Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry
+Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce
+Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine
+Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry
+Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce
+Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine
+Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry
+Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce
+Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine
+Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry
+Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce
+Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine
+Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry
+Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce
+Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine
+Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry
+Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce
+Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine
+Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry
+Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce
+Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine
+Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry
+Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce
+Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine
+Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry
+Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce
+Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine
+Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry
+Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce
+Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine
+Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry
+Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce
+Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine
+Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry
+Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce
+Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine
+Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry
+Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce
+Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine
+Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry
+Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce
+Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine
+Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry
+Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce
+Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine
+Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry
+Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce
+Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine
+Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry
+Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce
+Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine
+Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry
+Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce
+Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine
+Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry
+Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce
+Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine
+Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry
+Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce
+Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine
+Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry
+Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce
+Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine
+Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry
+Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce
+Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine
+Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry
+Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce
+Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine
+Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry
+Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce
+Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine
+Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry
+Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce
+Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine
+Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry
+Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce
+Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine
+Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry
+Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce
+Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine
+Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry
+Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce
+Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine
+Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry
+Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce
+Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine
+Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry
+Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce
+Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine
+Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry
+Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce
+Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine
+Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry
+Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce
+Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine
+Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry
+Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce
+Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine
+Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry
+Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce
+Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine
+Apple Fritters#AppldessDj96hw#6.12#0#desserts#Mom's Kitchen
+Apple Fritters#AppldessrN1kvM#6.06#0#desserts#The Baking Pan
+Banana Split#Banadess7tpjkJ#10.86#0#desserts#Mom's Kitchen
+Banana Split#Banadessfif758#11.07#0#desserts#The Baking Pan
+Blueberry Boy Bait#BluedesseX2LVU#3.72#0#desserts#Mom's Kitchen
+Blueberry Boy Bait#Bluedess9zLhaH#3.93#0#desserts#The Baking Pan
+Candied Cranberries#CanddessjW92p3#1.77#0#desserts#Mom's Kitchen
+Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan
+Daiquiri Souffle#DaiqdessebnYcy#9.54#0#desserts#Mom's Kitchen
+Daiquiri Souffle#DaiqdessfM1DnX#9.72#0#desserts#The Baking Pan
+Bananas Flambe#BanadesscczumD#6.94#0#desserts#Mom's Kitchen
+Bananas Flambe#Banadess8qNfxd#7.07#0#desserts#The Baking Pan
+Pie, Apple#Pie,desshcSHhT#7.88#0#desserts#Mom's Kitchen
+Pie, Apple#Pie,dessTbiwDp#7.88#0#desserts#The Baking Pan
+Pie, Pumpkin#Pie,desswhPBPB#6.00#0#desserts#Mom's Kitchen
+Pie, Pumpkin#Pie,dessDg3NWl#6.24#0#desserts#The Baking Pan
+Pie, Blueberry#Pie,dessw9VdgD#2.14#0#desserts#Mom's Kitchen
+Pie, Blueberry#Pie,dessiSjZKD#2.12#0#desserts#The Baking Pan
+Pie, Pecan#Pie,dess2NqhNR#12.70#0#desserts#Mom's Kitchen
+Pie, Pecan#Pie,dessB1LfcE#12.33#0#desserts#The Baking Pan
+Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#0#desserts#Mom's Kitchen
+Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#0#desserts#The Baking Pan
+Pie, Banana Cream#Pie,dessH80DuG#7.35#0#desserts#Mom's Kitchen
+Pie, Banana Cream#Pie,dessf1YvFb#7.08#0#desserts#The Baking Pan
+Pie, Key Lime#Pie,desshtli5N#4.85#0#desserts#Mom's Kitchen
+Pie, Key Lime#Pie,dessMwQkKm#5.13#0#desserts#The Baking Pan
+Pie, Lemon Meringue#Pie,dess9naVkX#3.74#0#desserts#Mom's Kitchen
+Pie, Lemon Meringue#Pie,dessKYcNML#3.67#0#desserts#The Baking Pan
+Pie, Caramel#Pie,dessSUuiIU#2.27#0#desserts#Mom's Kitchen
+Pie, Caramel#Pie,dessvo8uHh#2.33#0#desserts#The Baking Pan
+Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen
+Pie, Raspberry#Pie,dessJflbf5#2.36#0#desserts#The Baking Pan
+Ice Cream, Chocolate#Ice desseXuyxx#1.44#0#desserts#Mom's Kitchen
+Ice Cream, Chocolate#Ice dessASBohf#1.41#0#desserts#The Baking Pan
+Ice Cream, Vanilla#Ice dessYnzbbt#11.92#0#desserts#Mom's Kitchen
+Ice Cream, Vanilla#Ice dessUBBKp8#11.58#0#desserts#The Baking Pan
+Ice Cream, Strawberry#Ice dessfTwKhD#1.90#0#desserts#Mom's Kitchen
+Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#0#desserts#The Baking Pan
+Ice Cream, Rocky Road#Ice dessyIri3P#13.10#0#desserts#Mom's Kitchen
+Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#0#desserts#The Baking Pan
+Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#0#desserts#Mom's Kitchen
+Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#0#desserts#The Baking Pan
+Ice Cream Sundae#Ice dessbhlAXt#5.62#0#desserts#Mom's Kitchen
+Ice Cream Sundae#Ice dessByapxl#5.72#0#desserts#The Baking Pan
+Cobbler, Peach#CobbdessYUGeOB#10.14#0#desserts#Mom's Kitchen
+Cobbler, Peach#CobbdessXfEtUK#10.43#0#desserts#The Baking Pan
+Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#0#desserts#Mom's Kitchen
+Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#0#desserts#The Baking Pan
+Cobbler, Blueberry#CobbdessbiI0oF#3.78#0#desserts#Mom's Kitchen
+Cobbler, Blueberry#CobbdessMXxbBN#3.57#0#desserts#The Baking Pan
+Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen
+Cobbler, Cherry#CobbdessA1dADa#12.10#0#desserts#The Baking Pan
+Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#0#desserts#Mom's Kitchen
+Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan
+Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen
+Cobbler, Rhubarb#CobbdessPfnCT0#9.27#0#desserts#The Baking Pan
+Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen
+Cobbler, Strawberry#CobbdessH3LdgQ#12.20#0#desserts#The Baking Pan
+Cobbler, Zucchini#Cobbdess5rK4dP#11.24#0#desserts#Mom's Kitchen
+Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#0#desserts#The Baking Pan
+Brownies#BrowdessmogdTl#7.62#0#desserts#Mom's Kitchen
+Brownies#Browdess84Qc1z#7.55#0#desserts#The Baking Pan
+Fudge Bar#Fudgdess8iXSyf#11.72#0#desserts#Mom's Kitchen
+Fudge Bar#FudgdessakU1Id#12.29#0#desserts#The Baking Pan
+Cookies, Oatmeal#Cookdessnq9Oya#2.84#0#desserts#Mom's Kitchen
+Cookies, Oatmeal#CookdessBhgp7p#2.68#0#desserts#The Baking Pan
+Cookies, Chocolate Chip#CookdessRVszsZ#12.73#0#desserts#Mom's Kitchen
+Cookies, Chocolate Chip#CookdessSOoHmT#12.26#0#desserts#The Baking Pan
+Cookies, Peanut Butter#Cookdess2UcMI2#7.82#0#desserts#Mom's Kitchen
+Cookies, Peanut Butter#Cookdess1cILme#7.46#0#desserts#The Baking Pan
+Mousse, Chocolate#MousdessDpN4sQ#6.25#0#desserts#Mom's Kitchen
+Mousse, Chocolate#Mousdess8FyFT8#5.96#0#desserts#The Baking Pan
+Mousse, Blueberry Maple#MousdessacwrkO#7.28#0#desserts#Mom's Kitchen
+Mousse, Blueberry Maple#MousdessbiCMFg#7.21#0#desserts#The Baking Pan
+Mousse, Chocolate Banana#MousdessIeW4qz#5.13#0#desserts#Mom's Kitchen
+Mousse, Chocolate Banana#Mousdess1De9oL#5.08#0#desserts#The Baking Pan
+Mousse, Cherry#Mousdesss1bF8H#13.05#0#desserts#Mom's Kitchen
+Mousse, Cherry#Mousdess0ujevx#12.43#0#desserts#The Baking Pan
+Mousse, Eggnog#MousdessZ38hXj#9.07#0#desserts#Mom's Kitchen
+Mousse, Eggnog#Mousdesshs05ST#8.81#0#desserts#The Baking Pan
+Mousse, Strawberry#MousdessHCDlBK#5.58#0#desserts#Mom's Kitchen
+Mousse, Strawberry#MousdessSZ4PyW#5.36#0#desserts#The Baking Pan
+Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#0#desserts#Mom's Kitchen
+Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#0#desserts#The Baking Pan
+Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#0#desserts#Mom's Kitchen
+Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan
+Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#0#desserts#Mom's Kitchen
+Sherbet, Orange Crush#SherdessxmVJBF#4.16#0#desserts#The Baking Pan
+Sherbet, Blueberry#SherdessFAgxqp#3.46#0#desserts#Mom's Kitchen
+Sherbet, Blueberry#SherdessMPL87u#3.60#0#desserts#The Baking Pan
+Sherbet, Raspberry#Sherdesse86ugA#6.08#0#desserts#Mom's Kitchen
+Sherbet, Raspberry#Sherdesslc1etR#5.85#0#desserts#The Baking Pan
+Sherbet, Strawberry#SherdessFwv09m#4.63#0#desserts#Mom's Kitchen
+Sherbet, Strawberry#SherdessKB0H7q#4.81#0#desserts#The Baking Pan
+Tart, Apple#TartdessrsTyXA#3.35#0#desserts#Mom's Kitchen
+Tart, Apple#Tartdessp7pyiy#3.13#0#desserts#The Baking Pan
+Tart, Almond#TartdessC7FARL#6.62#0#desserts#Mom's Kitchen
+Tart, Almond#Tartdess1V1A1c#6.68#0#desserts#The Baking Pan
+Tart, Blueberry#TartdesssQZRXX#10.28#0#desserts#Mom's Kitchen
+Tart, Blueberry#TartdessUSJSuc#10.28#0#desserts#The Baking Pan
+Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#0#desserts#Mom's Kitchen
+Tart, Chocolate-Pear#TartdessL3aEDd#5.51#0#desserts#The Baking Pan
+Tart, Lemon Fudge#Tartdess9DhZUT#3.88#0#desserts#Mom's Kitchen
+Tart, Lemon Fudge#TartdesshzLOWt#3.96#0#desserts#The Baking Pan
+Tart, Pecan#TartdessvSbXzd#11.80#0#desserts#Mom's Kitchen
+Tart, Pecan#Tartdess6YXJec#11.04#0#desserts#The Baking Pan
+Tart, Pineapple#TartdesseMfJFe#9.01#0#desserts#Mom's Kitchen
+Tart, Pineapple#TartdessA2Wftr#8.44#0#desserts#The Baking Pan
+Tart, Pear#Tartdess4a1BUc#10.09#0#desserts#Mom's Kitchen
+Tart, Pear#TartdessNw8YPG#10.68#0#desserts#The Baking Pan
+Tart, Raspberry#TartdessAVnpP6#6.18#0#desserts#Mom's Kitchen
+Tart, Raspberry#TartdessfVxZFf#5.95#0#desserts#The Baking Pan
+Tart, Strawberry#Tartdess4IUcZW#4.75#0#desserts#Mom's Kitchen
+Tart, Strawberry#Tartdess2BeEDb#4.61#0#desserts#The Baking Pan
+Tart, Raspberry#TartdesshyBd24#1.85#0#desserts#Mom's Kitchen
+Tart, Raspberry#Tartdess5fqxgy#1.94#0#desserts#The Baking Pan
+Trifle, Berry#TrifdessmEkbU2#12.48#0#desserts#Mom's Kitchen
+Trifle, Berry#TrifdessAV9Ix8#12.60#0#desserts#The Baking Pan
+Trifle, American#TrifdesscsdSCd#4.70#0#desserts#Mom's Kitchen
+Trifle, American#TrifdessTArskm#4.35#0#desserts#The Baking Pan
+Trifle, English#TrifdessX87q8T#8.20#0#desserts#Mom's Kitchen
+Trifle, English#Trifdess52l955#8.12#0#desserts#The Baking Pan
+Trifle, Orange#TrifdesslUwxwe#9.74#0#desserts#Mom's Kitchen
+Trifle, Orange#TrifdessFrfCHP#10.22#0#desserts#The Baking Pan
+Trifle, Pumpkin#TrifdessJKFN96#4.72#0#desserts#Mom's Kitchen
+Trifle, Pumpkin#TrifdessMNw4EV#4.95#0#desserts#The Baking Pan
+Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen
+Trifle, Scottish#TrifdessAAUQCN#14.03#0#desserts#The Baking Pan
+Trifle, Sherry#TrifdesscuttJg#4.42#0#desserts#Mom's Kitchen
+Trifle, Sherry#TrifdesspRGpfP#4.21#0#desserts#The Baking Pan
+Trifle, Strawberry#TrifdessAd5TpV#3.58#0#desserts#Mom's Kitchen
+Trifle, Strawberry#Trifdess1rtW0A#3.58#0#desserts#The Baking Pan
+Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#0#desserts#Mom's Kitchen
+Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#0#desserts#The Baking Pan
+Cheesecake, Amaretto#CheedessOJBqfD#11.89#0#desserts#Mom's Kitchen
+Cheesecake, Amaretto#CheedessVnDf14#11.89#0#desserts#The Baking Pan
+Cheesecake, Apple#Cheedessuks1YK#11.22#0#desserts#Mom's Kitchen
+Cheesecake, Apple#CheedessMYKaKK#11.01#0#desserts#The Baking Pan
+Cheesecake, Apricot#CheedessKUxTYY#12.34#0#desserts#Mom's Kitchen
+Cheesecake, Apricot#CheedessMvB1pr#11.88#0#desserts#The Baking Pan
+Cheesecake, Australian#CheedessQ9WAIn#2.70#0#desserts#Mom's Kitchen
+Cheesecake, Australian#CheedessE6Jyjc#2.53#0#desserts#The Baking Pan
+Cheesecake, Arkansas#CheedessTbqzmw#6.98#0#desserts#Mom's Kitchen
+Cheesecake, Arkansas#CheedesstWJZfC#6.66#0#desserts#The Baking Pan
+Cheesecake, Blueberry#Cheedessyo51KL#8.07#0#desserts#Mom's Kitchen
+Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#0#desserts#The Baking Pan
+Cheesecake, Cherry#CheedessEahRkC#4.40#0#desserts#Mom's Kitchen
+Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#0#desserts#The Baking Pan
+Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#0#desserts#Mom's Kitchen
+Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#0#desserts#The Baking Pan
+Cheesecake, German Chocolate#CheedesswayvJL#12.03#0#desserts#Mom's Kitchen
+Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan
+Cheesecake, Turtle#CheedessLqgeIA#12.19#0#desserts#Mom's Kitchen
+Cheesecake, Turtle#CheedessvyNohA#12.07#0#desserts#The Baking Pan
+Brownies, Apple#BrowdessIDW1Cc#5.44#0#desserts#Mom's Kitchen
+Brownies, Apple#BrowdessyRMrAH#5.14#0#desserts#The Baking Pan
+Brownies, Fudge#BrowdessmIHIFJ#5.19#0#desserts#Mom's Kitchen
+Brownies, Fudge#BrowdessqewJ38#5.10#0#desserts#The Baking Pan
+Brownies, Almond Macaroon#BrowdessniK7QI#10.57#0#desserts#Mom's Kitchen
+Brownies, Almond Macaroon#BrowdessgkXURH#10.36#0#desserts#The Baking Pan
+Brownies, Butterscotch#BrowdesslpA06E#7.16#0#desserts#Mom's Kitchen
+Brownies, Butterscotch#BrowdessK5hofE#7.30#0#desserts#The Baking Pan
+Brownies, Caramel#BrowdessVGfoA8#3.07#0#desserts#Mom's Kitchen
+Brownies, Caramel#Browdess5jvVMM#3.13#0#desserts#The Baking Pan
+Brownies, Cherry#Browdessyoa66A#3.39#0#desserts#Mom's Kitchen
+Brownies, Cherry#BrowdessIg2JuF#3.39#0#desserts#The Baking Pan
+Brownies, Chocolate Chip#Browdessb9dc59#6.18#0#desserts#Mom's Kitchen
+Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#0#desserts#The Baking Pan
+Brownies, Coconut#BrowdessWPHrVR#3.06#0#desserts#Mom's Kitchen
+Brownies, Coconut#BrowdessGVBlML#2.86#0#desserts#The Baking Pan
+Brownies, Cream Cheese#Browdess1OyRay#12.74#0#desserts#Mom's Kitchen
+Brownies, Cream Cheese#Browdess2fRsNv#12.61#0#desserts#The Baking Pan
+Brownies, Fudge Mint#Browdessl7DP7k#11.45#0#desserts#Mom's Kitchen
+Brownies, Fudge Mint#Browdessv70VKQ#11.34#0#desserts#The Baking Pan
+Brownies, Mint Chip#BrowdessDDMvF7#1.81#0#desserts#Mom's Kitchen
+Brownies, Mint Chip#Browdess0j9PBD#1.84#0#desserts#The Baking Pan
+Cake, Angel Food#CakedessEaqGaE#11.18#0#desserts#Mom's Kitchen
+Cake, Angel Food#CakedessJyAyFe#11.18#0#desserts#The Baking Pan
+Cake, Chocolate#CakedessKLXFbn#10.11#0#desserts#Mom's Kitchen
+Cake, Chocolate#CakedessfNP5Hg#9.91#0#desserts#The Baking Pan
+Cake, Carrot#CakedessUTgMoV#4.20#0#desserts#Mom's Kitchen
+Cake, Carrot#CakedessQdkaYg#4.00#0#desserts#The Baking Pan
+Cake, Lemon Blueberry#CakedessepkeEW#11.73#0#desserts#Mom's Kitchen
+Cake, Lemon Blueberry#CakedessHTKyQs#12.42#0#desserts#The Baking Pan
+Cake Triple Fudge#CakedessiZ75lR#7.92#0#desserts#Mom's Kitchen
+Cake Triple Fudge#CakedessWRrSXP#8.00#0#desserts#The Baking Pan
+Cake, Walnut#CakedessveYVXZ#10.83#0#desserts#Mom's Kitchen
+Cake, Walnut#Cakedesse22rT5#11.04#0#desserts#The Baking Pan
+Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen
+Cake, French Apple#CakedessNBHCk0#1.86#0#desserts#The Baking Pan
+Cake, Fig#CakedessOncX4y#6.82#0#desserts#Mom's Kitchen
+Cake, Fig#CakedessTJtffn#7.08#0#desserts#The Baking Pan
+Cake, Maple#CakedessnoGPRF#3.04#0#desserts#Mom's Kitchen
+Cake, Maple#CakedessfVattM#3.22#0#desserts#The Baking Pan
+Cake, Devil's Food#CakedessiXcDCt#4.73#0#desserts#Mom's Kitchen
+Cake, Devil's Food#CakedessnBZk45#4.82#0#desserts#The Baking Pan
+Cake, Double-Lemon#CakedesskeS0Vd#3.46#0#desserts#Mom's Kitchen
+Cake, Double-Lemon#Cakedess50vx53#3.60#0#desserts#The Baking Pan
+Sorbet, Blackberry#SorbdessQoa0CE#9.88#0#desserts#Mom's Kitchen
+Sorbet, Blackberry#SorbdessqoOYzv#9.78#0#desserts#The Baking Pan
diff --git a/examples/persist/gettingStarted/vendors.txt b/examples/persist/gettingStarted/vendors.txt
new file mode 100644
index 0000000000000000000000000000000000000000..528e1b110baeb4ac31bca205f8cc998ebdd9a2a6
--- /dev/null
+++ b/examples/persist/gettingStarted/vendors.txt
@@ -0,0 +1,6 @@
+TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765
+Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952
+Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54
+The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391
+Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12
+The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879
diff --git a/examples/persist/sqlapp/DataAccessor.java b/examples/persist/sqlapp/DataAccessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..e6b063b35de2bb92ae8aac967d98e29096e18406
--- /dev/null
+++ b/examples/persist/sqlapp/DataAccessor.java
@@ -0,0 +1,230 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DataAccessor.java,v 1.1.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist.sqlapp;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityJoin;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.ForwardCursor;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+
+/**
+ * The data accessor class for the entity model.
+ * 
+ * @author chao
+ */
+class DataAccessor {
+    /* Employee Accessors */
+    PrimaryIndex<Integer, Employee> employeeById;
+    SecondaryIndex<String, Integer, Employee> employeeByName;
+    SecondaryIndex<Float, Integer, Employee> employeeBySalary;
+    SecondaryIndex<Integer, Integer, Employee> employeeByManagerId;
+    SecondaryIndex<Integer, Integer, Employee> employeeByDepartmentId;
+
+
+    /* Department Accessors */
+    PrimaryIndex<Integer, Department> departmentById;
+    SecondaryIndex<String, Integer, Department> departmentByName;
+
+    /** Opens all primary and secondary indices. */
+    public DataAccessor(EntityStore store)
+            throws DatabaseException {
+
+
+        /* Primary key for Employee classes. */
+        employeeById =
+                store.getPrimaryIndex(Integer.class, Employee.class);
+
+        /* Secondary key for Employee classes. */
+        employeeByName = store.getSecondaryIndex(employeeById,
+                                                 String.class,
+                                                 "employeeName");
+        employeeBySalary = store.getSecondaryIndex(employeeById,
+                                                   Float.class,
+                                                   "salary");
+        employeeByManagerId = store.getSecondaryIndex(employeeById,
+                                                      Integer.class,
+                                                      "managerId");
+        employeeByDepartmentId = store.getSecondaryIndex(employeeById,
+                                                         Integer.class,
+                                                         "departmentId");
+
+        /* Primary key for Department classes. */
+        departmentById =
+                store.getPrimaryIndex(Integer.class, Department.class);
+        /* Secondary key for Department classes. */
+        departmentByName = store.getSecondaryIndex(departmentById,
+                                                   String.class,
+                                                   "departmentName");
+    }
+
+
+    /**
+     * Does a prefix query, similar to the SQL statement:
+     * <blockquote><pre>
+     * SELECT * FROM table WHERE col LIKE 'prefix%';
+     * </pre></blockquote>
+     *
+     * @param index the entity index to query; its keys must be Strings
+     * @param prefix the non-empty key prefix to match
+     * @return an open cursor over the entities whose keys start with the
+     *         prefix; the caller must close it
+     * @throws DatabaseException if a database failure occurs
+     */
+    public <V> EntityCursor<V> doPrefixQuery(EntityIndex<String, V> index,
+                                             String prefix)
+            throws DatabaseException {
+
+        assert (index != null);
+        assert (prefix.length() > 0);
+
+        /*
+         * Form the exclusive end of the key range by incrementing the last
+         * character of the prefix, then query the range [prefix, end).
+         */
+        char[] ca = prefix.toCharArray();
+        final int lastCharIndex = ca.length - 1;
+        ca[lastCharIndex]++;
+        return doRangeQuery(index, prefix, true, String.valueOf(ca), false);
+    }
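+
+    /*
+     * Hypothetical usage sketch (not part of the original example); it
+     * assumes a DataAccessor named "dao" opened on a populated store, and
+     * lists every employee whose name starts with "Jo":
+     *
+     *   EntityCursor<Employee> cursor =
+     *       dao.doPrefixQuery(dao.employeeByName, "Jo");
+     *   try {
+     *       for (Employee e : cursor) {
+     *           System.out.println(e);
+     *       }
+     *   } finally {
+     *       cursor.close();
+     *   }
+     */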
+
+    /**
+     * Does a range query, similar to the SQL statement:
+     * <blockquote><pre>
+     * SELECT * FROM table WHERE col &gt;= fromKey AND col &lt;= toKey;
+     * </pre></blockquote>
+     *
+     * @param index the entity index to query
+     * @param fromKey the lower bound of the key range, or null for no bound
+     * @param fromInclusive whether entities whose key equals fromKey are
+     *                      included
+     * @param toKey the upper bound of the key range, or null for no bound
+     * @param toInclusive whether entities whose key equals toKey are included
+     * @return an open cursor over the entities in the range; the caller must
+     *         close it
+     * @throws DatabaseException if a database failure occurs
+     */
+    public <K, V> EntityCursor<V> doRangeQuery(EntityIndex<K, V> index,
+                                               K fromKey,
+                                               boolean fromInclusive,
+                                               K toKey,
+                                               boolean toInclusive)
+            throws DatabaseException {
+
+        assert (index != null);
+
+        /* Opens a cursor for traversing entities in a key range. */
+        return index.entities(fromKey,
+                              fromInclusive,
+                              toKey,
+                              toInclusive);
+    }
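+
+    /*
+     * Hypothetical usage sketch (not part of the original example); it
+     * assumes a DataAccessor named "dao" opened on a populated store, and
+     * lists every employee earning between 30000 and 50000 inclusive:
+     *
+     *   EntityCursor<Employee> cursor =
+     *       dao.doRangeQuery(dao.employeeBySalary, 30000f, true, 50000f, true);
+     *   try {
+     *       for (Employee e : cursor) {
+     *           System.out.println(e);
+     *       }
+     *   } finally {
+     *       cursor.close();
+     *   }
+     */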
+
+    /**
+     * Do a "AND" join on a single primary database, similar to the SQL:
+     * <blockquote><pre>
+     * SELECT * FROM table WHERE col1 = key1 AND col2 = key2;
+     * </pre></blockquote>
+     *
+     * @param pk
+     * @param sk1
+     * @param key1
+     * @param sk2
+     * @param key2
+     * @return
+     * @throws DatabaseException
+     */
+    public <PK, SK1, SK2, E> ForwardCursor<E>
+            doTwoConditionsJoin(PrimaryIndex<PK, E> pk,
+                                SecondaryIndex<SK1, PK, E> sk1,
+                                SK1 key1,
+                                SecondaryIndex<SK2, PK, E> sk2,
+                                SK2 key2)
+            throws DatabaseException {
+
+        assert (pk != null);
+        assert (sk1 != null);
+        assert (sk2 != null);
+
+        EntityJoin<PK, E> join = new EntityJoin<PK, E>(pk);
+        join.addCondition(sk1, key1);
+        join.addCondition(sk2, key2);
+
+        return join.entities();
+    }
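+
+    /*
+     * Hypothetical usage sketch (not part of the original example); it
+     * assumes a DataAccessor named "dao" opened on a populated store, and
+     * lists the employees in department 1 who report to manager 3:
+     *
+     *   ForwardCursor<Employee> cursor =
+     *       dao.doTwoConditionsJoin(dao.employeeById,
+     *                               dao.employeeByManagerId, 3,
+     *                               dao.employeeByDepartmentId, 1);
+     *   try {
+     *       for (Employee e : cursor) {
+     *           System.out.println(e);
+     *       }
+     *   } finally {
+     *       cursor.close();
+     *   }
+     */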
+    
+    /**
+     * Does a natural join on Department and Employee by DepartmentId, similar
+     * to the SQL:
+     * <blockquote><pre>
+     * SELECT * FROM employee e, department d
+     *  WHERE e.departmentId = d.departmentId;
+     * </pre></blockquote>
+     *
+     * @param iterable the Departments whose Employees are printed
+     * @throws DatabaseException if a database failure occurs
+     */
+    public void doDepartmentEmployeeJoin(Iterable<Department> iterable)
+            throws DatabaseException {
+
+        /* Walk the given Departments. */
+        Iterator<Department> deptIter = iterable.iterator();
+        while (deptIter.hasNext()) {
+            Department dept = deptIter.next();
+            /* Do a natural join on Department and Employee by DepartmentId. */
+            EntityCursor<Employee> empCursor = this.employeeByDepartmentId.
+                subIndex(dept.getDepartmentId()).entities();
+            Iterator<Employee> empIter = empCursor.iterator();
+            while (empIter.hasNext()) {
+                System.out.println(empIter.next());
+            }
+            empCursor.close();
+        }
+        System.out.println();
+    }
+
+    /**
+     * Queries the Employee database by Department's secondary key:
+     * departmentName.
+     *
+     * @param deptName the department name to match
+     * @throws DatabaseException if a database failure occurs
+     */
+    public void getEmployeeByDeptName(String deptName)
+            throws DatabaseException {
+
+        EntityCursor<Department> deptCursor =
+            doRangeQuery(this.departmentByName, deptName, true, deptName, true);
+        doDepartmentEmployeeJoin(deptCursor);
+        deptCursor.close();
+    }
+    
+    /**
+     * Queries the Employee database by filtering on Department's
+     * non-secondary-key field: location.
+     *
+     * @param deptLocation the department location to match
+     * @throws DatabaseException if a database failure occurs
+     */
+    public void getEmployeeByDeptLocation(String deptLocation)
+            throws DatabaseException {
+
+        /* Filter Departments by their (non-indexed) location. */
+        ArrayList<Department> list = new ArrayList<Department>();
+        
+        Iterator<Department> it =
+            this.departmentById.sortedMap().values().iterator();
+        while (it.hasNext()) {
+            Department dept = it.next();
+            if (dept.getLocation().equals(deptLocation)) {
+                list.add(dept);
+            }
+        }
+        doDepartmentEmployeeJoin(list);
+    }
+}
diff --git a/examples/persist/sqlapp/Department.java b/examples/persist/sqlapp/Department.java
new file mode 100644
index 0000000000000000000000000000000000000000..67e2fc2afefd273c617007ec5088ef8a54e0e973
--- /dev/null
+++ b/examples/persist/sqlapp/Department.java
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Department.java,v 1.2.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist.sqlapp;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+/**
+ * The Department entity class.
+ * 
+ * @author chao
+ */
+@Entity
+class Department {
+
+    @PrimaryKey
+    int departmentId;
+
+    @SecondaryKey(relate = ONE_TO_ONE)
+    String departmentName;
+
+    String location;
+
+    public Department(int departmentId,
+                      String departmentName,
+                      String location) {
+        this.departmentId = departmentId;
+        this.departmentName = departmentName;
+        this.location = location;
+    }
+
+    private Department() {} // For bindings.
+
+    public int getDepartmentId() {
+        return departmentId;
+    }
+
+    public String getDepartmentName() {
+        return departmentName;
+    }
+
+    public String getLocation() {
+        return location;
+    }
+
+    @Override
+    public String toString() {
+        return this.departmentId + ", " +
+               this.departmentName + ", " +
+               this.location;
+    }
+}
diff --git a/examples/persist/sqlapp/Employee.java b/examples/persist/sqlapp/Employee.java
new file mode 100644
index 0000000000000000000000000000000000000000..69c396ab58a9e73d721afb7c521c309a1218692c
--- /dev/null
+++ b/examples/persist/sqlapp/Employee.java
@@ -0,0 +1,94 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Employee.java,v 1.1.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist.sqlapp;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+/**
+ * The Employee entity class.
+ * 
+ * @author chao
+ */
+@Entity
+class Employee {
+
+    @PrimaryKey
+    int employeeId;
+
+    @SecondaryKey(relate = MANY_TO_ONE)
+    String employeeName;
+
+    @SecondaryKey(relate = MANY_TO_ONE)
+    float salary;
+
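+    // NULLIFY: if the referenced manager Employee is deleted, managerId is
+    // set to null instead of cascading the delete.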
+    @SecondaryKey(relate = MANY_TO_ONE, relatedEntity=Employee.class,
+                                        onRelatedEntityDelete=NULLIFY)
+    Integer managerId; // Use "Integer" to allow null values.
+
+    @SecondaryKey(relate = MANY_TO_ONE, relatedEntity=Department.class,
+                                        onRelatedEntityDelete=NULLIFY)
+    int departmentId;
+
+    String address;
+
+    public Employee(int employeeId,
+                    String employeeName,
+                    float salary,
+                    Integer managerId,
+                    int departmentId,
+                    String address) {
+        
+        this.employeeId = employeeId;
+        this.employeeName = employeeName;
+        this.salary = salary;
+        this.managerId = managerId;
+        this.departmentId = departmentId;
+        this.address = address;
+    }
+
+    private Employee() {} // For bindings
+
+    public String getAddress() {
+        return address;
+    }
+
+    public int getDepartmentId() {
+        return departmentId;
+    }
+
+    public int getEmployeeId() {
+        return employeeId;
+    }
+
+    public String getEmployeeName() {
+        return employeeName;
+    }
+
+    public Integer getManagerId() {
+        return managerId;
+    }
+
+    public float getSalary() {
+        return salary;
+    }
+
+    @Override
+    public String toString() {
+        return this.employeeId + ", " +
+               this.employeeName + ", " +
+               this.salary + ", " +
+               this.managerId + ", " +
+               this.departmentId + ", " +
+               this.address;
+    }
+}
diff --git a/examples/persist/sqlapp/SQLApp.java b/examples/persist/sqlapp/SQLApp.java
new file mode 100644
index 0000000000000000000000000000000000000000..2d16c6365ead2491bd5b8ec98a263a2b5275f8ea
--- /dev/null
+++ b/examples/persist/sqlapp/SQLApp.java
@@ -0,0 +1,324 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SQLApp.java,v 1.2.2.2 2010/01/04 15:30:26 cwl Exp $
+ */
+
+package persist.sqlapp;
+
+import java.io.File;
+import java.util.Iterator;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.ForwardCursor;
+import com.sleepycat.persist.StoreConfig;
+
+/**
+ * An example that shows how some common SQL queries can be implemented using the DPL.
+ *
+ * @see #usage
+ *
+ * @author chao
+ */
+public class SQLApp {
+
+    private static String envDir = "./tmp";
+    private static boolean cleanEnvOnExit = false;
+    private static Environment env = null;
+    private static EntityStore store = null;
+    private static DataAccessor dao = null;
+
+    /**
+     * Set up a Berkeley DB engine environment and preload some example records.
+     *
+     * @throws com.sleepycat.je.DatabaseException
+     */
+    public void setup()
+            throws DatabaseException {
+
+        /* Open a transactional Berkeley DB engine environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(new File(envDir), envConfig);
+
+        /* Open a transactional entity store. */
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        storeConfig.setTransactional(true);
+        store = new EntityStore(env, "SQLAppStore", storeConfig);
+
+        /* Initialize the data access object. */
+        dao = new DataAccessor(store);
+
+        /* Preload some example records. */
+        loadDepartmentDb();
+        loadEmployeeDb();
+    }
+
+    /* Preload the department database. */
+    private void loadDepartmentDb()
+            throws DatabaseException {
+
+        dao.departmentById.put
+                (new Department(1, "CEO Office", "North America"));
+        dao.departmentById.put
+                (new Department(2, "Sales", "EURO"));
+        dao.departmentById.put
+                (new Department(3, "HR", "MEA"));
+        dao.departmentById.put
+                (new Department(4, "Engineering", "APAC"));
+        dao.departmentById.put
+                (new Department(5, "Support", "LATAM"));
+    }
+
+    /* Preload the employee database. */
+    private void loadEmployeeDb()
+            throws DatabaseException {
+
+        /* Add the corporation's CEO using the Employee primary index. */
+        dao.employeeById.put(new Employee(1, // employeeId
+                                          "Abraham Lincoln", //employeeName
+                                          10000.0f, //salary
+                                          null, //managerId
+                                          1, //departmentId
+                                          "Washington D.C., USA")); //address
+
+        /* Add 4 managers responsible for 4 departments. */
+        dao.employeeById.put(new Employee(2,
+                                          "Augustus",
+                                          9000.0f,
+                                          1,
+                                          2,
+                                          "Rome, Italy"));
+        dao.employeeById.put(new Employee(3,
+                                          "Cleopatra",
+                                          7000.0f,
+                                          1,
+                                          3,
+                                          "Cairo, Egypt"));
+        dao.employeeById.put(new Employee(4,
+                                          "Confucius",
+                                          7500.0f,
+                                          1,
+                                          4,
+                                          "Beijing, China"));
+        dao.employeeById.put(new Employee(5,
+                                          "Toussaint Louverture",
+                                          6800.0f,
+                                          1,
+                                          5,
+                                          "Port-au-Prince, Haiti"));
+
+        /* Add 2 employees per department. */
+        dao.employeeById.put(new Employee(6,
+                                          "William Shakespeare",
+                                          7300.0f,
+                                          2,
+                                          2,
+                                          "London, England"));
+        dao.employeeById.put(new Employee(7,
+                                          "Victor Hugo",
+                                          7000.0f,
+                                          2,
+                                          2,
+                                          "Paris, France"));
+        dao.employeeById.put(new Employee(8,
+                                          "Yitzhak Rabin",
+                                          6500.0f,
+                                          3,
+                                          3,
+                                          "Jerusalem, Israel"));
+        dao.employeeById.put(new Employee(9,
+                                          "Nelson Rolihlahla Mandela",
+                                          6400.0f,
+                                          3,
+                                          3,
+                                          "Cape Town, South Africa"));
+        dao.employeeById.put(new Employee(10,
+                                          "Meiji Emperor",
+                                          6600.0f,
+                                          4,
+                                          4,
+                                          "Tokyo, Japan"));
+        dao.employeeById.put(new Employee(11,
+                                          "Mohandas Karamchand Gandhi",
+                                          7600.0f,
+                                          4,
+                                          4,
+                                          "New Delhi, India"));
+        dao.employeeById.put(new Employee(12,
+                                          "Ayrton Senna da Silva",
+                                          5600.0f,
+                                          5,
+                                          5,
+                                          "Brasilia, Brasil"));
+        dao.employeeById.put(new Employee(13,
+                                          "Ronahlinho De Assis Moreira",
+                                          6100.0f,
+                                          5,
+                                          5,
+                                          "Brasilia, Brasil"));
+    }
+
+    /** Run the SQL examples. */
+    public void runApp()
+            throws DatabaseException {
+
+        /* Print department database contents ordered by departmentId. */
+        System.out.println("SELECT * FROM department ORDER BY departmentId;");
+        printQueryResults(dao.departmentById.entities());
+
+        /* Print department database contents ordered by departmentName. */
+        System.out.println("SELECT * FROM department " +
+                           "ORDER BY departmentName;");
+        printQueryResults(dao.departmentByName.entities());
+
+        /* Print employee database contents ordered by employeeId. */
+        System.out.println("SELECT * FROM employee ORDER BY employeeId;");
+        printQueryResults(dao.employeeById.entities());
+
+        /* Do a prefix query. */
+        System.out.println("SELECT * FROM employee WHERE employeeName " +
+                           "LIKE 'M%';");
+        printQueryResults(dao.doPrefixQuery(dao.employeeByName, "M"));
+
+        /* Do a range query. */
+        System.out.println("SELECT * FROM employee WHERE salary >= 6000 AND " +
+                           "salary <= 8000;");
+        printQueryResults(dao.doRangeQuery(dao.employeeBySalary,
+                                           new Float(6000), //fromKey
+                                           true, //fromInclusive
+                                           new Float(8000), //toKey
+                                           true)); //toInclusive
+
+        /* Two conditions join on a single primary database. */
+        System.out.println("SELECT * FROM employee " +
+                           "WHERE employeeName = 'Victor Hugo' " +
+                           "AND departmentId = 2");
+        printQueryResults(dao.doTwoConditionsJoin(dao.employeeById,
+                                                  dao.employeeByName,
+                                                  "Victor Hugo",
+                                                  dao.employeeByDepartmentId,
+                                                  new Integer(2)));
+
+        /*
+         * Two conditions join on two primary databases combined with a
+         * secondary key search.
+         */
+        System.out.println("SELECT * FROM employee e, department d " +
+                           "WHERE e.departmentId = d.departmentId " +
+                           "AND d.departmentName = 'Engineering'");
+        dao.getEmployeeByDeptName("Engineering");
+        
+        /*
+         * Two conditions join on two primary databases combined with a
+         * filtering on the non-secondary-key.
+         */
+        System.out.println("SELECT * FROM employee e, department d " +
+                           "WHERE e.departmentId = d.departmentId " +
+                           "AND d.location = 'North America'");
+        dao.getEmployeeByDeptLocation("North America");
+    }
+
+    /** Print query results. */
+    public <V> void printQueryResults(ForwardCursor<V> c)
+            throws DatabaseException {
+
+        Iterator<V> it = c.iterator();
+        while (it.hasNext()) {
+            System.out.println(it.next());
+        }
+        System.out.println();
+
+        c.close();
+    }
+
+    /**
+     * Close the store and environment.
+     */
+    public void close() {
+
+        if (store != null) {
+            try {
+                store.close();
+            } catch (DatabaseException dbe) {
+                System.err.println("Error closing store: " +
+                        dbe.toString());
+                System.exit(-1);
+            }
+        }
+
+        if (env != null) {
+            try {
+                // Finally, close environment.
+                env.close();
+            } catch (DatabaseException dbe) {
+                System.err.println("Error closing env: " +
+                        dbe.toString());
+                System.exit(-1);
+            }
+        }
+
+        if (cleanEnvOnExit) {
+            removeDbFiles();
+        }
+    }
+
+    private void removeDbFiles() {
+        File file = new File(envDir);
+        for (File f : file.listFiles()) {
+            f.delete();
+        }
+    }
+
+    /**
+     * @param args the command line arguments
+     */
+    public static void main(String[] args) {
+        
+        /* Parse the arguments list. */
+        parseArgs(args);
+
+        try {
+            SQLApp s = new SQLApp();
+            s.setup();
+            s.runApp();
+            s.close();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(-1);
+        }
+    }
+
+    /* Parse input arguments. */
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                  case 'h':
+                    envDir = args[++i];
+                    break;
+                  case 'd':
+                    cleanEnvOnExit = true;
+                    break;
+                  default:
+                    usage();
+                }
+            }
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: java SQLApp" +
+                           "\n [-h <env directory>] " +
+                           "# environment home directory" +
+                           "\n [-d] # clean environment after program exits");
+        System.exit(-1);
+    }
+}
diff --git a/examples/persist/txn/PayloadDataEntity.java b/examples/persist/txn/PayloadDataEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2fa38326b58e658a44f5253bcd67b823586e09f
--- /dev/null
+++ b/examples/persist/txn/PayloadDataEntity.java
@@ -0,0 +1,27 @@
+package persist.txn;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+
+@Entity
+public class PayloadDataEntity {
+    @PrimaryKey
+    private int oID;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String threadName;
+
+    private double doubleData;
+
+    PayloadDataEntity() {}
+
+    public double getDoubleData() { return doubleData; }
+    public int getID() { return oID; }
+    public String getThreadName() { return threadName; }
+
+    public void setDoubleData(double dd) { doubleData = dd; }
+    public void setID(int id) { oID = id; }
+    public void setThreadName(String tn) { threadName = tn; }
+}
diff --git a/examples/persist/txn/StoreWriter.java b/examples/persist/txn/StoreWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..29ccf0a355ce1be43657cbb2f5e9ee946b1cdc22
--- /dev/null
+++ b/examples/persist/txn/StoreWriter.java
@@ -0,0 +1,165 @@
+package persist.txn;
+
+import java.util.Random;
+
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+
+public class StoreWriter extends Thread {
+    private EntityStore myStore = null;
+    private Environment myEnv = null;
+    private PrimaryIndex<Integer,PayloadDataEntity> pdIndex;
+    private Random generator = new Random();
+    private boolean passTxn = false;
+
+
+    private static final int MAX_RETRY = 20;
+
+    // Constructor. Get our handles from here.
+    StoreWriter(Environment env, EntityStore store)
+        throws DatabaseException {
+        myStore = store;
+        myEnv = env;
+        
+        // Open the data accessor. This is used to store persistent
+        // objects.
+        pdIndex = myStore.getPrimaryIndex(Integer.class, 
+                        PayloadDataEntity.class);
+    }
+
+    // Thread method that writes a series of objects
+    // to the store using transaction protection.
+    // Deadlock handling is demonstrated here.
+    public void run () {
+        Transaction txn = null;
+
+        // Perform 50 transactions
+        for (int i=0; i<50; i++) {
+
+           boolean retry = true;
+           int retry_count = 0;
+           // while loop is used for deadlock retries
+           while (retry) {
+                // try block used for deadlock detection and
+                // general exception handling
+                try {
+
+                    // Get a transaction
+                    txn = myEnv.beginTransaction(null, null);
+
+                    // Write 10 PayloadDataEntity objects to the 
+                    // store for each transaction
+                    for (int j = 0; j < 10; j++) {
+                        // Instantiate an object
+                        PayloadDataEntity pd = new PayloadDataEntity();
+
+                        // Set the Object ID. This is used as the primary key.
+                        pd.setID(i + j);
+
+                        // The thread name is used as a secondary key, and
+                        // it is retrieved by this class's getName() method.
+                        pd.setThreadName(getName());
+                        
+                        // The last bit of data that we use is a double
+                        // that we generate randomly. This data is not
+                        // indexed.
+                        pd.setDoubleData(generator.nextDouble());
+
+                        // Do the put
+                        pdIndex.put(txn, pd);
+                    }
+
+                    // commit
+                    System.out.println(getName() + " : committing txn : " + i);
+                    System.out.println(getName() + " : Found " +
+                        countObjects(null) + " objects in the store.");
+                    try {
+                        txn.commit();
+                        txn = null;
+                    } catch (DatabaseException e) {
+                        System.err.println("Error on txn commit: " + 
+                            e.toString());
+                    } 
+                    retry = false;
+
+                } catch (DeadlockException de) {
+                    System.out.println("################# " + getName() + 
+                        " : caught deadlock");
+                    // retry if necessary
+                    if (retry_count < MAX_RETRY) {
+                        System.err.println(getName() + 
+                            " : Retrying operation.");
+                        retry = true;
+                        retry_count++;
+                    } else {
+                        System.err.println(getName() + 
+                            " : out of retries. Giving up.");
+                        retry = false;
+                    }
+                } catch (DatabaseException e) {
+                    // abort and don't retry
+                    retry = false;
+                    System.err.println(getName() +
+                        " : caught exception: " + e.toString());
+                    System.err.println(getName() +
+                        " : errno: " + e.toString());
+                    e.printStackTrace();
+                } finally {
+                    if (txn != null) {
+                        try {
+                            txn.abort();
+                        } catch (Exception e) {
+                            System.err.println("Error aborting transaction: " + 
+                                e.toString());
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // This simply counts the number of objects contained in the
+    // store and returns the result. You can use this method
+    // in three ways:
+    //
+    // First, call it with an active txn handle.
+    //
+    // Second, configure the cursor for dirty reads.
+    //
+    // Third, call countObjects AFTER the writer has committed
+    //    its transaction.
+    //
+    // If you do none of these things, the writer thread will 
+    // self-deadlock.
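+    //
+    // For example, option one from run() above would simply be:
+    //
+    //     countObjects(txn);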
+    private int countObjects(Transaction txn)  throws DatabaseException {
+        int count = 0;
+
+        CursorConfig cc = new CursorConfig();
+        // This is ignored if the store is not opened with uncommitted read
+        // support.
+        cc.setReadUncommitted(true);
+        EntityCursor<PayloadDataEntity> cursor = pdIndex.entities(txn, cc);
+
+        try {
+            for (PayloadDataEntity pdi : cursor) {
+                    count++;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return count;
+        
+    }
+}
diff --git a/examples/persist/txn/TxnGuideDPL.java b/examples/persist/txn/TxnGuideDPL.java
new file mode 100644
index 0000000000000000000000000000000000000000..df168493d0e7290b39a02473163a5a21ac647ab4
--- /dev/null
+++ b/examples/persist/txn/TxnGuideDPL.java
@@ -0,0 +1,122 @@
+// File TxnGuideDPL.java
+
+package persist.txn;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+public class TxnGuideDPL {
+
+    private static String myEnvPath = "./";
+    private static String storeName = "exampleStore";
+
+    // Handles
+    private static EntityStore myStore = null;
+    private static Environment myEnv = null;
+
+    private static final int NUMTHREADS = 5;
+
+    private static void usage() {
+        System.out.println("TxnGuideDPL [-h <env directory>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        try {
+            // Parse the arguments list
+            parseArgs(args);
+            // Open the environment and store
+            openEnv();
+
+            // Start the threads
+            StoreWriter[] threadArray;
+            threadArray = new StoreWriter[NUMTHREADS];
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i] = new StoreWriter(myEnv, myStore);
+                threadArray[i].start();
+            }
+
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i].join();
+            }
+        } catch (Exception e) {
+            System.err.println("TxnGuideDPL: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            closeEnv();
+        }
+        System.out.println("All done.");
+    }
+
+
+    private static void openEnv() throws DatabaseException {
+        System.out.println("opening env and store");
+
+        // Set up the environment.
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        myEnvConfig.setAllowCreate(true);
+        myEnvConfig.setTransactional(true);
+        //  Environment handles are free-threaded by default in JE,
+        // so we do not have to do anything to cause the
+        // environment handle to be free-threaded.
+
+        // Set up the entity store
+        StoreConfig myStoreConfig = new StoreConfig();
+        myStoreConfig.setAllowCreate(true);
+        myStoreConfig.setTransactional(true);
+
+        // Open the environment
+        myEnv = new Environment(new File(myEnvPath),    // Env home
+                                    myEnvConfig);
+
+        // Open the store
+        myStore = new EntityStore(myEnv, storeName, myStoreConfig);
+
+    }
+
+    private static void closeEnv() {
+        System.out.println("Closing env and store");
+        if (myStore != null ) {
+            try {
+                myStore.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myStore: " + 
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myEnv != null ) {
+            try {
+                myEnv.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: " + e.toString());
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private TxnGuideDPL() {}
+
+    private static void parseArgs(String args[]) {
+        int nArgs = args.length;
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        if (i < nArgs - 1) {
+                            myEnvPath = args[++i];
+                        }
+                    break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+}
diff --git a/examples/resources/jboss/jndi.properties b/examples/resources/jboss/jndi.properties
new file mode 100644
index 0000000000000000000000000000000000000000..cbbd8718b33cbf9f8e00a60b94416cec96877163
--- /dev/null
+++ b/examples/resources/jboss/jndi.properties
@@ -0,0 +1,4 @@
+### JBossNS properties
+java.naming.factory.initial=org.jnp.interfaces.NamingContextFactory
+java.naming.provider.url=jnp://localhost:1099
+java.naming.factory.url.pkgs=org.jboss.naming:org.jnp.interfaces
diff --git a/src/com/sleepycat/asm/AnnotationVisitor.java b/src/com/sleepycat/asm/AnnotationVisitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..43a273bf610f74082b8d62f732e0ef15a9c51963
--- /dev/null
+++ b/src/com/sleepycat/asm/AnnotationVisitor.java
@@ -0,0 +1,88 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A visitor to visit a Java annotation. The methods of this interface must be
+ * called in the following order: (<tt>visit</tt> | <tt>visitEnum</tt> |
+ * <tt>visitAnnotation</tt> | <tt>visitArray</tt>)* <tt>visitEnd</tt>.
+ *
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+public interface AnnotationVisitor {
+
+    /**
+     * Visits a primitive value of the annotation.
+     *
+     * @param name the value name.
+     * @param value the actual value, whose type must be {@link Byte},
+     *        {@link Boolean}, {@link Character}, {@link Short},
+     *        {@link Integer}, {@link Long}, {@link Float}, {@link Double},
+     *        {@link String} or {@link Type}.
+     */
+    void visit(String name, Object value);
+
+    /**
+     * Visits an enumeration value of the annotation.
+     *
+     * @param name the value name.
+     * @param desc the class descriptor of the enumeration class.
+     * @param value the actual enumeration value.
+     */
+    void visitEnum(String name, String desc, String value);
+
+    /**
+     * Visits a nested annotation value of the annotation.
+     *
+     * @param name the value name.
+     * @param desc the class descriptor of the nested annotation class.
+     * @return a non null visitor to visit the actual nested annotation value.
+     *         <i>The nested annotation value must be fully visited before
+     *         calling other methods on this annotation visitor</i>.
+     */
+    AnnotationVisitor visitAnnotation(String name, String desc);
+
+    /**
+     * Visits an array value of the annotation.
+     *
+     * @param name the value name.
+     * @return a non null visitor to visit the actual array value elements. The
+     *         'name' parameters passed to the methods of this visitor are
+     *         ignored. <i>All the array values must be visited before calling
+     *         other methods on this annotation visitor</i>.
+     */
+    AnnotationVisitor visitArray(String name);
+
+    /**
+     * Visits the end of the annotation.
+     */
+    void visitEnd();
+}
diff --git a/src/com/sleepycat/asm/AnnotationWriter.java b/src/com/sleepycat/asm/AnnotationWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..ba73a80fefd671d9fa18a04f793760bf88fc8c2d
--- /dev/null
+++ b/src/com/sleepycat/asm/AnnotationWriter.java
@@ -0,0 +1,311 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * An {@link AnnotationVisitor} that generates annotations in bytecode form.
+ *
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+final class AnnotationWriter implements AnnotationVisitor {
+
+    /**
+     * The class writer to which this annotation must be added.
+     */
+    private final ClassWriter cw;
+
+    /**
+     * The number of values in this annotation.
+     */
+    private int size;
+
+    /**
+     * <tt>true</tt> if values are named, <tt>false</tt> otherwise. Annotation
+     * writers used for annotation default and annotation arrays use unnamed
+     * values.
+     */
+    private final boolean named;
+
+    /**
+     * The annotation values in bytecode form. This byte vector only contains
+     * the values themselves, i.e. the number of values must be stored as an
+     * unsigned short just before these bytes.
+     */
+    private final ByteVector bv;
+
+    /**
+     * The byte vector to be used to store the number of values of this
+     * annotation. See {@link #bv}.
+     */
+    private final ByteVector parent;
+
+    /**
+     * Where the number of values of this annotation must be stored in
+     * {@link #parent}.
+     */
+    private final int offset;
+
+    /**
+     * Next annotation writer. This field is used to store annotation lists.
+     */
+    AnnotationWriter next;
+
+    /**
+     * Previous annotation writer. This field is used to store annotation lists.
+     */
+    AnnotationWriter prev;
+
+    // ------------------------------------------------------------------------
+    // Constructor
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new {@link AnnotationWriter}.
+     *
+     * @param cw the class writer to which this annotation must be added.
+     * @param named <tt>true</tt> if values are named, <tt>false</tt> otherwise.
+     * @param bv where the annotation values must be stored.
+     * @param parent where the number of annotation values must be stored.
+     * @param offset where in <tt>parent</tt> the number of annotation values must
+     *      be stored.
+     */
+    AnnotationWriter(
+        final ClassWriter cw,
+        final boolean named,
+        final ByteVector bv,
+        final ByteVector parent,
+        final int offset)
+    {
+        this.cw = cw;
+        this.named = named;
+        this.bv = bv;
+        this.parent = parent;
+        this.offset = offset;
+    }
+
+    // ------------------------------------------------------------------------
+    // Implementation of the AnnotationVisitor interface
+    // ------------------------------------------------------------------------
+
+    public void visit(final String name, final Object value) {
+        ++size;
+        if (named) {
+            bv.putShort(cw.newUTF8(name));
+        }
+        if (value instanceof String) {
+            bv.put12('s', cw.newUTF8((String) value));
+        } else if (value instanceof Byte) {
+            bv.put12('B', cw.newInteger(((Byte) value).byteValue()).index);
+        } else if (value instanceof Boolean) {
+            int v = ((Boolean) value).booleanValue() ? 1 : 0;
+            bv.put12('Z', cw.newInteger(v).index);
+        } else if (value instanceof Character) {
+            bv.put12('C', cw.newInteger(((Character) value).charValue()).index);
+        } else if (value instanceof Short) {
+            bv.put12('S', cw.newInteger(((Short) value).shortValue()).index);
+        } else if (value instanceof Type) {
+            bv.put12('c', cw.newUTF8(((Type) value).getDescriptor()));
+        } else if (value instanceof byte[]) {
+            byte[] v = (byte[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('B', cw.newInteger(v[i]).index);
+            }
+        } else if (value instanceof boolean[]) {
+            boolean[] v = (boolean[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('Z', cw.newInteger(v[i] ? 1 : 0).index);
+            }
+        } else if (value instanceof short[]) {
+            short[] v = (short[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('S', cw.newInteger(v[i]).index);
+            }
+        } else if (value instanceof char[]) {
+            char[] v = (char[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('C', cw.newInteger(v[i]).index);
+            }
+        } else if (value instanceof int[]) {
+            int[] v = (int[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('I', cw.newInteger(v[i]).index);
+            }
+        } else if (value instanceof long[]) {
+            long[] v = (long[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('J', cw.newLong(v[i]).index);
+            }
+        } else if (value instanceof float[]) {
+            float[] v = (float[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('F', cw.newFloat(v[i]).index);
+            }
+        } else if (value instanceof double[]) {
+            double[] v = (double[]) value;
+            bv.put12('[', v.length);
+            for (int i = 0; i < v.length; i++) {
+                bv.put12('D', cw.newDouble(v[i]).index);
+            }
+        } else {
+            Item i = cw.newConstItem(value);
+            bv.put12(".s.IFJDCS".charAt(i.type), i.index);
+        }
+    }
+
+    public void visitEnum(
+        final String name,
+        final String desc,
+        final String value)
+    {
+        ++size;
+        if (named) {
+            bv.putShort(cw.newUTF8(name));
+        }
+        bv.put12('e', cw.newUTF8(desc)).putShort(cw.newUTF8(value));
+    }
+
+    public AnnotationVisitor visitAnnotation(
+        final String name,
+        final String desc)
+    {
+        ++size;
+        if (named) {
+            bv.putShort(cw.newUTF8(name));
+        }
+        // write tag and type, and reserve space for values count
+        bv.put12('@', cw.newUTF8(desc)).putShort(0);
+        return new AnnotationWriter(cw, true, bv, bv, bv.length - 2);
+    }
+
+    public AnnotationVisitor visitArray(final String name) {
+        ++size;
+        if (named) {
+            bv.putShort(cw.newUTF8(name));
+        }
+        // write tag, and reserve space for array size
+        bv.put12('[', 0);
+        return new AnnotationWriter(cw, false, bv, bv, bv.length - 2);
+    }
+
+    public void visitEnd() {
+        if (parent != null) {
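+            // Back-patch the two-byte value count that was reserved at
+            // 'offset' in the parent vector (e.g. by visitAnnotation or
+            // visitArray).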
+            byte[] data = parent.data;
+            data[offset] = (byte) (size >>> 8);
+            data[offset + 1] = (byte) size;
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the size of this annotation writer list.
+     *
+     * @return the size of this annotation writer list.
+     */
+    int getSize() {
+        int size = 0;
+        AnnotationWriter aw = this;
+        while (aw != null) {
+            size += aw.bv.length;
+            aw = aw.next;
+        }
+        return size;
+    }
+
+    /**
+     * Puts the annotations of this annotation writer list into the given byte
+     * vector.
+     *
+     * @param out where the annotations must be put.
+     */
+    void put(final ByteVector out) {
+        int n = 0;
+        int size = 2;
+        AnnotationWriter aw = this;
+        AnnotationWriter last = null;
+        while (aw != null) {
+            ++n;
+            size += aw.bv.length;
+            aw.visitEnd(); // in case user forgot to call visitEnd
+            aw.prev = last;
+            last = aw;
+            aw = aw.next;
+        }
+        out.putInt(size);
+        out.putShort(n);
+        aw = last;
+        while (aw != null) {
+            out.putByteArray(aw.bv.data, 0, aw.bv.length);
+            aw = aw.prev;
+        }
+    }
+
+    /**
+     * Puts the given annotation lists into the given byte vector.
+     *
+     * @param panns an array of annotation writer lists.
+     * @param out where the annotations must be put.
+     */
+    static void put(final AnnotationWriter[] panns, final ByteVector out) {
+        int size = 1 + 2 * panns.length;
+        for (int i = 0; i < panns.length; ++i) {
+            size += panns[i] == null ? 0 : panns[i].getSize();
+        }
+        out.putInt(size).putByte(panns.length);
+        for (int i = 0; i < panns.length; ++i) {
+            AnnotationWriter aw = panns[i];
+            AnnotationWriter last = null;
+            int n = 0;
+            while (aw != null) {
+                ++n;
+                aw.visitEnd(); // in case user forgot to call visitEnd
+                aw.prev = last;
+                last = aw;
+                aw = aw.next;
+            }
+            out.putShort(n);
+            aw = last;
+            while (aw != null) {
+                out.putByteArray(aw.bv.data, 0, aw.bv.length);
+                aw = aw.prev;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/Attribute.java b/src/com/sleepycat/asm/Attribute.java
new file mode 100644
index 0000000000000000000000000000000000000000..210deb9763e0947e3e783a51374256a4def58cbf
--- /dev/null
+++ b/src/com/sleepycat/asm/Attribute.java
@@ -0,0 +1,254 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A non-standard class, field, method or code attribute.
+ *
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+public class Attribute {
+
+    /**
+     * The type of this attribute.
+     */
+    public final String type;
+
+    /**
+     * The raw value of this attribute, used only for unknown attributes.
+     */
+    byte[] value;
+
+    /**
+     * The next attribute in this attribute list. May be <tt>null</tt>.
+     */
+    Attribute next;
+
+    /**
+     * Constructs a new empty attribute.
+     *
+     * @param type the type of the attribute.
+     */
+    protected Attribute(final String type) {
+        this.type = type;
+    }
+
+    /**
+     * Returns <tt>true</tt> if this type of attribute is unknown. The default
+     * implementation of this method always returns <tt>true</tt>.
+     *
+     * @return <tt>true</tt> if this type of attribute is unknown.
+     */
+    public boolean isUnknown() {
+        return true;
+    }
+
+    /**
+     * Returns <tt>true</tt> if this type of attribute is a code attribute.
+     *
+     * @return <tt>true</tt> if this type of attribute is a code attribute.
+     */
+    public boolean isCodeAttribute() {
+        return false;
+    }
+
+    /**
+     * Returns the labels corresponding to this attribute.
+     *
+     * @return the labels corresponding to this attribute, or <tt>null</tt> if
+     *         this attribute is not a code attribute that contains labels.
+     */
+    protected Label[] getLabels() {
+        return null;
+    }
+
+    /**
+     * Reads a {@link #type type} attribute. This method must return a <i>new</i>
+     * {@link Attribute} object, of type {@link #type type}, corresponding to
+     * the <tt>len</tt> bytes starting at the given offset, in the given class
+     * reader.
+     *
+     * @param cr the class that contains the attribute to be read.
+     * @param off index of the first byte of the attribute's content in {@link
+     *        ClassReader#b cr.b}. The 6 attribute header bytes, containing the
+     *        type and the length of the attribute, are not taken into account
+     *        here.
+     * @param len the length of the attribute's content.
+     * @param buf buffer to be used to call
+     *        {@link ClassReader#readUTF8 readUTF8},
+     *        {@link ClassReader#readClass(int,char[]) readClass} or
+     *        {@link ClassReader#readConst readConst}.
+     * @param codeOff index of the first byte of code's attribute content in
+     *        {@link ClassReader#b cr.b}, or -1 if the attribute to be read is
+     *        not a code attribute. The 6 attribute header bytes, containing the
+     *        type and the length of the attribute, are not taken into account
+     *        here.
+     * @param labels the labels of the method's code, or <tt>null</tt> if the
+     *        attribute to be read is not a code attribute.
+     * @return a <i>new</i> {@link Attribute} object corresponding to the given
+     *         bytes.
+     */
+    protected Attribute read(
+        ClassReader cr,
+        int off,
+        int len,
+        char[] buf,
+        int codeOff,
+        Label[] labels)
+    {
+        Attribute attr = new Attribute(type);
+        attr.value = new byte[len];
+        System.arraycopy(cr.b, off, attr.value, 0, len);
+        return attr;
+    }
+
+    /**
+     * Returns the byte array form of this attribute.
+     *
+     * @param cw the class to which this attribute must be added. This parameter
+     *        can be used to add to the constant pool of this class the items
+     *        that correspond to this attribute.
+     * @param code the bytecode of the method corresponding to this code
+     *        attribute, or <tt>null</tt> if this attribute is not a code
+     *        attribute.
+     * @param len the length of the bytecode of the method corresponding to this
+     *        code attribute, or <tt>null</tt> if this attribute is not a code
+     *        attribute.
+     * @param maxStack the maximum stack size of the method corresponding to
+     *        this code attribute, or -1 if this attribute is not a code
+     *        attribute.
+     * @param maxLocals the maximum number of local variables of the method
+     *        corresponding to this code attribute, or -1 if this attribute is
+     *        not a code attribute.
+     * @return the byte array form of this attribute.
+     */
+    protected ByteVector write(
+        ClassWriter cw,
+        byte[] code,
+        int len,
+        int maxStack,
+        int maxLocals)
+    {
+        ByteVector v = new ByteVector();
+        v.data = value;
+        v.length = value.length;
+        return v;
+    }
+
+    /**
+     * Returns the length of the attribute list that begins with this attribute.
+     *
+     * @return the length of the attribute list that begins with this attribute.
+     */
+    final int getCount() {
+        int count = 0;
+        Attribute attr = this;
+        while (attr != null) {
+            count += 1;
+            attr = attr.next;
+        }
+        return count;
+    }
+
+    /**
+     * Returns the size of all the attributes in this attribute list.
+     *
+     * @param cw the class writer to be used to convert the attributes into byte
+     *        arrays, with the {@link #write write} method.
+     * @param code the bytecode of the method corresponding to these code
+     *        attributes, or <tt>null</tt> if these attributes are not code
+     *        attributes.
+     * @param len the length of the bytecode of the method corresponding to
+     *        these code attributes, or <tt>null</tt> if these attributes are
+     *        not code attributes.
+     * @param maxStack the maximum stack size of the method corresponding to
+     *        these code attributes, or -1 if these attributes are not code
+     *        attributes.
+     * @param maxLocals the maximum number of local variables of the method
+     *        corresponding to these code attributes, or -1 if these attributes
+     *        are not code attributes.
+     * @return the size of all the attributes in this attribute list. This size
+     *         includes the size of the attribute headers.
+     */
+    final int getSize(
+        final ClassWriter cw,
+        final byte[] code,
+        final int len,
+        final int maxStack,
+        final int maxLocals)
+    {
+        Attribute attr = this;
+        int size = 0;
+        while (attr != null) {
+            cw.newUTF8(attr.type);
+            size += attr.write(cw, code, len, maxStack, maxLocals).length + 6;
+            attr = attr.next;
+        }
+        return size;
+    }
+
+    /**
+     * Writes all the attributes of this attribute list in the given byte
+     * vector.
+     *
+     * @param cw the class writer to be used to convert the attributes into byte
+     *        arrays, with the {@link #write write} method.
+     * @param code the bytecode of the method corresponding to these code
+     *        attributes, or <tt>null</tt> if these attributes are not code
+     *        attributes.
+     * @param len the length of the bytecode of the method corresponding to
+     *        these code attributes, or <tt>null</tt> if these attributes are
+     *        not code attributes.
+     * @param maxStack the maximum stack size of the method corresponding to
+     *        these code attributes, or -1 if these attributes are not code
+     *        attributes.
+     * @param maxLocals the maximum number of local variables of the method
+     *        corresponding to these code attributes, or -1 if these attributes
+     *        are not code attributes.
+     * @param out where the attributes must be written.
+     */
+    final void put(
+        final ClassWriter cw,
+        final byte[] code,
+        final int len,
+        final int maxStack,
+        final int maxLocals,
+        final ByteVector out)
+    {
+        Attribute attr = this;
+        while (attr != null) {
+            ByteVector b = attr.write(cw, code, len, maxStack, maxLocals);
+            out.putShort(cw.newUTF8(attr.type)).putInt(b.length);
+            out.putByteArray(b.data, 0, b.length);
+            attr = attr.next;
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/ByteVector.java b/src/com/sleepycat/asm/ByteVector.java
new file mode 100644
index 0000000000000000000000000000000000000000..b0cf125ae930aa12d11718b0a4df6d0c87e62abc
--- /dev/null
+++ b/src/com/sleepycat/asm/ByteVector.java
@@ -0,0 +1,293 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A dynamically extensible vector of bytes. This class is roughly equivalent to
+ * a DataOutputStream on top of a ByteArrayOutputStream, but is more efficient.
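+ *
+ * For instance, a caller can chain the various <tt>put</tt> methods to build
+ * big-endian content, as in the following illustrative sketch (the values
+ * shown are arbitrary):
+ *
+ * <pre>
+ * ByteVector v = new ByteVector();
+ * v.putByte(0xCA).putShort(0xFEBA).putInt(42); // 1 + 2 + 4 bytes
+ * v.putUTF8("hello");                          // 2 byte length, then 5 bytes
+ * // the vector now holds 14 bytes
+ * </pre>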
+ *
+ * @author Eric Bruneton
+ */
+public class ByteVector {
+
+    /**
+     * The content of this vector.
+     */
+    byte[] data;
+
+    /**
+     * Actual number of bytes in this vector.
+     */
+    int length;
+
+    /**
+     * Constructs a new {@link ByteVector ByteVector} with a default initial
+     * size.
+     */
+    public ByteVector() {
+        data = new byte[64];
+    }
+
+    /**
+     * Constructs a new {@link ByteVector ByteVector} with the given initial
+     * size.
+     *
+     * @param initialSize the initial size of the byte vector to be constructed.
+     */
+    public ByteVector(final int initialSize) {
+        data = new byte[initialSize];
+    }
+
+    /**
+     * Puts a byte into this byte vector. The byte vector is automatically
+     * enlarged if necessary.
+     *
+     * @param b a byte.
+     * @return this byte vector.
+     */
+    public ByteVector putByte(final int b) {
+        int length = this.length;
+        if (length + 1 > data.length) {
+            enlarge(1);
+        }
+        data[length++] = (byte) b;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts two bytes into this byte vector. The byte vector is automatically
+     * enlarged if necessary.
+     *
+     * @param b1 a byte.
+     * @param b2 another byte.
+     * @return this byte vector.
+     */
+    ByteVector put11(final int b1, final int b2) {
+        int length = this.length;
+        if (length + 2 > data.length) {
+            enlarge(2);
+        }
+        byte[] data = this.data;
+        data[length++] = (byte) b1;
+        data[length++] = (byte) b2;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts a short into this byte vector. The byte vector is automatically
+     * enlarged if necessary.
+     *
+     * @param s a short.
+     * @return this byte vector.
+     */
+    public ByteVector putShort(final int s) {
+        int length = this.length;
+        if (length + 2 > data.length) {
+            enlarge(2);
+        }
+        byte[] data = this.data;
+        data[length++] = (byte) (s >>> 8);
+        data[length++] = (byte) s;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts a byte and a short into this byte vector. The byte vector is
+     * automatically enlarged if necessary.
+     *
+     * @param b a byte.
+     * @param s a short.
+     * @return this byte vector.
+     */
+    ByteVector put12(final int b, final int s) {
+        int length = this.length;
+        if (length + 3 > data.length) {
+            enlarge(3);
+        }
+        byte[] data = this.data;
+        data[length++] = (byte) b;
+        data[length++] = (byte) (s >>> 8);
+        data[length++] = (byte) s;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts an int into this byte vector. The byte vector is automatically
+     * enlarged if necessary.
+     *
+     * @param i an int.
+     * @return this byte vector.
+     */
+    public ByteVector putInt(final int i) {
+        int length = this.length;
+        if (length + 4 > data.length) {
+            enlarge(4);
+        }
+        byte[] data = this.data;
+        data[length++] = (byte) (i >>> 24);
+        data[length++] = (byte) (i >>> 16);
+        data[length++] = (byte) (i >>> 8);
+        data[length++] = (byte) i;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts a long into this byte vector. The byte vector is automatically
+     * enlarged if necessary.
+     *
+     * @param l a long.
+     * @return this byte vector.
+     */
+    public ByteVector putLong(final long l) {
+        int length = this.length;
+        if (length + 8 > data.length) {
+            enlarge(8);
+        }
+        byte[] data = this.data;
+        int i = (int) (l >>> 32);
+        data[length++] = (byte) (i >>> 24);
+        data[length++] = (byte) (i >>> 16);
+        data[length++] = (byte) (i >>> 8);
+        data[length++] = (byte) i;
+        i = (int) l;
+        data[length++] = (byte) (i >>> 24);
+        data[length++] = (byte) (i >>> 16);
+        data[length++] = (byte) (i >>> 8);
+        data[length++] = (byte) i;
+        this.length = length;
+        return this;
+    }
+
+    /**
+     * Puts a UTF8 string into this byte vector. The byte vector is
+     * automatically enlarged if necessary.
+     *
+     * @param s a String.
+     * @return this byte vector.
+     */
+    public ByteVector putUTF8(final String s) {
+        int charLength = s.length();
+        if (length + 2 + charLength > data.length) {
+            enlarge(2 + charLength);
+        }
+        int len = length;
+        byte[] data = this.data;
+        // optimistic algorithm: instead of computing the byte length and then
+        // serializing the string (which requires two loops), we assume the byte
+        // length is equal to char length (which is the most frequent case), and
+        // we start serializing the string right away. During the serialization,
+        // if we find that this assumption is wrong, we continue with the
+        // general method.
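+        // For example (illustrative): for "abc" the byte length equals the
+        // char length (3), so the loop below never leaves the fast path; for
+        // a two character string whose second character is U+00E9 the real
+        // byte length (3) is computed, the two byte length prefix is patched,
+        // and encoding continues from that character using the general method.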
+        data[len++] = (byte) (charLength >>> 8);
+        data[len++] = (byte) (charLength);
+        for (int i = 0; i < charLength; ++i) {
+            char c = s.charAt(i);
+            if (c >= '\001' && c <= '\177') {
+                data[len++] = (byte) c;
+            } else {
+                int byteLength = i;
+                for (int j = i; j < charLength; ++j) {
+                    c = s.charAt(j);
+                    if (c >= '\001' && c <= '\177') {
+                        byteLength++;
+                    } else if (c > '\u07FF') {
+                        byteLength += 3;
+                    } else {
+                        byteLength += 2;
+                    }
+                }
+                data[length] = (byte) (byteLength >>> 8);
+                data[length + 1] = (byte) (byteLength);
+                if (length + 2 + byteLength > data.length) {
+                    length = len;
+                    enlarge(2 + byteLength);
+                    data = this.data;
+                }
+                for (int j = i; j < charLength; ++j) {
+                    c = s.charAt(j);
+                    if (c >= '\001' && c <= '\177') {
+                        data[len++] = (byte) c;
+                    } else if (c > '\u07FF') {
+                        data[len++] = (byte) (0xE0 | c >> 12 & 0xF);
+                        data[len++] = (byte) (0x80 | c >> 6 & 0x3F);
+                        data[len++] = (byte) (0x80 | c & 0x3F);
+                    } else {
+                        data[len++] = (byte) (0xC0 | c >> 6 & 0x1F);
+                        data[len++] = (byte) (0x80 | c & 0x3F);
+                    }
+                }
+                break;
+            }
+        }
+        length = len;
+        return this;
+    }
+
+    /**
+     * Puts an array of bytes into this byte vector. The byte vector is
+     * automatically enlarged if necessary.
+     *
+     * @param b an array of bytes. May be <tt>null</tt> to put <tt>len</tt>
+     *        null bytes into this byte vector.
+     * @param off index of the first byte of b that must be copied.
+     * @param len number of bytes of b that must be copied.
+     * @return this byte vector.
+     */
+    public ByteVector putByteArray(final byte[] b, final int off, final int len)
+    {
+        if (length + len > data.length) {
+            enlarge(len);
+        }
+        if (b != null) {
+            System.arraycopy(b, off, data, length, len);
+        }
+        length += len;
+        return this;
+    }
+
+    /**
+     * Enlarges this byte vector so that it can receive <tt>size</tt> more bytes.
+     *
+     * @param size number of additional bytes that this byte vector should be
+     *        able to receive.
+     */
+    private void enlarge(final int size) {
+        int length1 = 2 * data.length;
+        int length2 = length + size;
+        byte[] newData = new byte[length1 > length2 ? length1 : length2];
+        System.arraycopy(data, 0, newData, 0, length);
+        data = newData;
+    }
+}
diff --git a/src/com/sleepycat/asm/ClassAdapter.java b/src/com/sleepycat/asm/ClassAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..b60c9acf8eb583fc4ac5ba61243da8dcc033971d
--- /dev/null
+++ b/src/com/sleepycat/asm/ClassAdapter.java
@@ -0,0 +1,121 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * An empty {@link ClassVisitor} that delegates to another {@link ClassVisitor}.
+ * This class can be used as a superclass to quickly implement useful class
+ * adapter classes, just by overriding the necessary methods.
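+ *
+ * For example, a subclass that simply traces the methods of the visited class
+ * and forwards everything else unchanged could be sketched as follows
+ * (illustrative only):
+ *
+ * <pre>
+ * class TracingAdapter extends ClassAdapter {
+ *     TracingAdapter(ClassVisitor cv) {
+ *         super(cv);
+ *     }
+ *     public MethodVisitor visitMethod(int access, String name, String desc,
+ *             String signature, String[] exceptions) {
+ *         System.err.println("visiting method " + name + desc);
+ *         return cv.visitMethod(access, name, desc, signature, exceptions);
+ *     }
+ * }
+ * </pre>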
+ *
+ * @author Eric Bruneton
+ */
+public class ClassAdapter implements ClassVisitor {
+
+    /**
+     * The {@link ClassVisitor} to which this adapter delegates calls.
+     */
+    protected ClassVisitor cv;
+
+    /**
+     * Constructs a new {@link ClassAdapter} object.
+     *
+     * @param cv the class visitor to which this adapter must delegate calls.
+     */
+    public ClassAdapter(final ClassVisitor cv) {
+        this.cv = cv;
+    }
+
+    public void visit(
+        final int version,
+        final int access,
+        final String name,
+        final String signature,
+        final String superName,
+        final String[] interfaces)
+    {
+        cv.visit(version, access, name, signature, superName, interfaces);
+    }
+
+    public void visitSource(final String source, final String debug) {
+        cv.visitSource(source, debug);
+    }
+
+    public void visitOuterClass(
+        final String owner,
+        final String name,
+        final String desc)
+    {
+        cv.visitOuterClass(owner, name, desc);
+    }
+
+    public AnnotationVisitor visitAnnotation(
+        final String desc,
+        final boolean visible)
+    {
+        return cv.visitAnnotation(desc, visible);
+    }
+
+    public void visitAttribute(final Attribute attr) {
+        cv.visitAttribute(attr);
+    }
+
+    public void visitInnerClass(
+        final String name,
+        final String outerName,
+        final String innerName,
+        final int access)
+    {
+        cv.visitInnerClass(name, outerName, innerName, access);
+    }
+
+    public FieldVisitor visitField(
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final Object value)
+    {
+        return cv.visitField(access, name, desc, signature, value);
+    }
+
+    public MethodVisitor visitMethod(
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final String[] exceptions)
+    {
+        return cv.visitMethod(access, name, desc, signature, exceptions);
+    }
+
+    public void visitEnd() {
+        cv.visitEnd();
+    }
+}
diff --git a/src/com/sleepycat/asm/ClassReader.java b/src/com/sleepycat/asm/ClassReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..7b489893701d6e5872002decf3366de2de651d6a
--- /dev/null
+++ b/src/com/sleepycat/asm/ClassReader.java
@@ -0,0 +1,1606 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+import java.io.InputStream;
+import java.io.IOException;
+
+/**
+ * A Java class parser to make a {@link ClassVisitor} visit an existing class.
+ * This class parses a byte array conforming to the Java class file format and
+ * calls the appropriate visit methods of a given class visitor for each field,
+ * method and bytecode instruction encountered.
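+ *
+ * A minimal use reads a class from the class path and forwards its events to
+ * a visitor, for example (an illustrative sketch; <tt>MyClassVisitor</tt> is
+ * a hypothetical {@link ClassVisitor} implementation):
+ *
+ * <pre>
+ * ClassReader cr = new ClassReader("java.lang.Runnable");
+ * cr.accept(new MyClassVisitor(), false); // false: debug info is also visited
+ * </pre>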
+ *
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+public class ClassReader {
+
+    /**
+     * The class to be parsed. <i>The content of this array must not be
+     * modified. This field is intended for {@link Attribute} subclasses, and
+     * is normally not needed by class generators or adapters.</i>
+     */
+    public final byte[] b;
+
+    /**
+     * The start index of each constant pool item in {@link #b b}, plus one.
+     * The one byte offset skips the constant pool item tag that indicates its
+     * type.
+     */
+    private int[] items;
+
+    /**
+     * The String objects corresponding to the CONSTANT_Utf8 items. This cache
+     * avoids multiple parsing of a given CONSTANT_Utf8 constant pool item,
+     * which GREATLY improves performance (by a factor of 2 to 3). This caching
+     * strategy could be extended to all constant pool items, but its benefit
+     * would not be so great for these items (because they are much less
+     * expensive to parse than CONSTANT_Utf8 items).
+     */
+    private String[] strings;
+
+    /**
+     * Maximum length of the strings contained in the constant pool of the
+     * class.
+     */
+    private int maxStringLength;
+
+    /**
+     * Start index of the class header information (access, name...) in
+     * {@link #b b}.
+     */
+    public final int header;
+
+    // ------------------------------------------------------------------------
+    // Constructors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new {@link ClassReader} object.
+     *
+     * @param b the bytecode of the class to be read.
+     */
+    public ClassReader(final byte[] b) {
+        this(b, 0, b.length);
+    }
+
+    /**
+     * Constructs a new {@link ClassReader} object.
+     *
+     * @param b the bytecode of the class to be read.
+     * @param off the start offset of the class data.
+     * @param len the length of the class data.
+     */
+    public ClassReader(final byte[] b, final int off, final int len) {
+        this.b = b;
+        // parses the constant pool
+        items = new int[readUnsignedShort(off + 8)];
+        int ll = items.length;
+        strings = new String[ll];
+        int max = 0;
+        int index = off + 10;
+        for (int i = 1; i < ll; ++i) {
+            items[i] = index + 1;
+            int tag = b[index];
+            int size;
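+            // the size of each item below includes its one byte tag; LONG and
+            // DOUBLE items occupy two constant pool slots, hence the extra ++i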
+            switch (tag) {
+                case ClassWriter.FIELD:
+                case ClassWriter.METH:
+                case ClassWriter.IMETH:
+                case ClassWriter.INT:
+                case ClassWriter.FLOAT:
+                case ClassWriter.NAME_TYPE:
+                    size = 5;
+                    break;
+                case ClassWriter.LONG:
+                case ClassWriter.DOUBLE:
+                    size = 9;
+                    ++i;
+                    break;
+                case ClassWriter.UTF8:
+                    size = 3 + readUnsignedShort(index + 1);
+                    if (size > max) {
+                        max = size;
+                    }
+                    break;
+                // case ClassWriter.CLASS:
+                // case ClassWriter.STR:
+                default:
+                    size = 3;
+                    break;
+            }
+            index += size;
+        }
+        maxStringLength = max;
+        // the class header information starts just after the constant pool
+        header = index;
+    }
+
+    /**
+     * Copies the constant pool data into the given {@link ClassWriter}. Should
+     * be called before the {@link #accept(ClassVisitor,boolean)} method.
+     *
+     * @param classWriter the {@link ClassWriter} to copy constant pool into.
+     */
+    void copyPool(final ClassWriter classWriter) {
+        char[] buf = new char[maxStringLength];
+        int ll = items.length;
+        Item[] items2 = new Item[ll];
+        for (int i = 1; i < ll; i++) {
+            int index = items[i];
+            int tag = b[index - 1];
+            Item item = new Item(i);
+            int nameType;
+            switch (tag) {
+                case ClassWriter.FIELD:
+                case ClassWriter.METH:
+                case ClassWriter.IMETH:
+                    nameType = items[readUnsignedShort(index + 2)];
+                    item.set(tag,
+                            readClass(index, buf),
+                            readUTF8(nameType, buf),
+                            readUTF8(nameType + 2, buf));
+                    break;
+
+                case ClassWriter.INT:
+                    item.set(readInt(index));
+                    break;
+
+                case ClassWriter.FLOAT:
+                    item.set(Float.intBitsToFloat(readInt(index)));
+                    break;
+
+                case ClassWriter.NAME_TYPE:
+                    item.set(tag,
+                            readUTF8(index, buf),
+                            readUTF8(index + 2, buf),
+                            null);
+                    break;
+
+                case ClassWriter.LONG:
+                    item.set(readLong(index));
+                    ++i;
+                    break;
+
+                case ClassWriter.DOUBLE:
+                    item.set(Double.longBitsToDouble(readLong(index)));
+                    ++i;
+                    break;
+
+                case ClassWriter.UTF8: {
+                    String s = strings[i];
+                    if (s == null) {
+                        index = items[i];
+                        s = strings[i] = readUTF(index + 2,
+                                readUnsignedShort(index),
+                                buf);
+                    }
+                    item.set(tag, s, null, null);
+                }
+                    break;
+
+                // case ClassWriter.STR:
+                // case ClassWriter.CLASS:
+                default:
+                    item.set(tag, readUTF8(index, buf), null, null);
+                    break;
+            }
+
+            int index2 = item.hashCode % items2.length;
+            item.next = items2[index2];
+            items2[index2] = item;
+        }
+
+        int off = items[1] - 1;
+        classWriter.pool.putByteArray(b, off, header - off);
+        classWriter.items = items2;
+        classWriter.threshold = (int) (0.75d * ll);
+        classWriter.index = ll;
+    }
+
+    /**
+     * Constructs a new {@link ClassReader} object.
+     *
+     * @param is an input stream from which to read the class.
+     * @throws IOException if a problem occurs during reading.
+     */
+    public ClassReader(final InputStream is) throws IOException {
+        this(readClass(is));
+    }
+
+    /**
+     * Constructs a new {@link ClassReader} object.
+     *
+     * @param name the fully qualified name of the class to be read.
+     * @throws IOException if an exception occurs during reading.
+     */
+    public ClassReader(final String name) throws IOException {
+        this(ClassLoader.getSystemResourceAsStream(name.replace('.', '/')
+                + ".class"));
+    }
+
+    /**
+     * Reads the bytecode of a class.
+     *
+     * @param is an input stream from which to read the class.
+     * @return the bytecode read from the given input stream.
+     * @throws IOException if a problem occurs during reading.
+     */
+    private static byte[] readClass(final InputStream is) throws IOException {
+        if (is == null) {
+            throw new IOException("Class not found");
+        }
+        byte[] b = new byte[is.available()];
+        int len = 0;
+        while (true) {
+            int n = is.read(b, len, b.length - len);
+            if (n == -1) {
+                if (len < b.length) {
+                    byte[] c = new byte[len];
+                    System.arraycopy(b, 0, c, 0, len);
+                    b = c;
+                }
+                return b;
+            }
+            len += n;
+            if (len == b.length) {
+                byte[] c = new byte[b.length + 1000];
+                System.arraycopy(b, 0, c, 0, len);
+                b = c;
+            }
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Public methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Makes the given visitor visit the Java class of this {@link ClassReader}.
+     * This class is the one specified in the constructor (see
+     * {@link #ClassReader(byte[]) ClassReader}).
+     *
+     * @param classVisitor the visitor that must visit this class.
+     * @param skipDebug <tt>true</tt> if the debug information of the class
+     *        must not be visited. In this case the
+     *        {@link MethodVisitor#visitLocalVariable visitLocalVariable} and
+     *        {@link MethodVisitor#visitLineNumber visitLineNumber} methods will
+     *        not be called.
+     */
+    public void accept(final ClassVisitor classVisitor, final boolean skipDebug)
+    {
+        accept(classVisitor, new Attribute[0], skipDebug);
+    }
+
+    /**
+     * Makes the given visitor visit the Java class of this {@link ClassReader}.
+     * This class is the one specified in the constructor (see
+     * {@link #ClassReader(byte[]) ClassReader}).
+     *
+     * @param classVisitor the visitor that must visit this class.
+     * @param attrs prototypes of the attributes that must be parsed during the
+     *        visit of the class. Any attribute whose type is not equal to the
+     *        type of one of the prototypes will be ignored.
+     * @param skipDebug <tt>true</tt> if the debug information of the class
+     *        must not be visited. In this case the
+     *        {@link MethodVisitor#visitLocalVariable visitLocalVariable} and
+     *        {@link MethodVisitor#visitLineNumber visitLineNumber} methods will
+     *        not be called.
+     */
+    public void accept(
+        final ClassVisitor classVisitor,
+        final Attribute[] attrs,
+        final boolean skipDebug)
+    {
+        byte[] b = this.b; // the bytecode array
+        char[] c = new char[maxStringLength]; // buffer used to read strings
+        int i, j, k; // loop variables
+        int u, v, w; // indexes in b
+        Attribute attr;
+
+        int access;
+        String name;
+        String desc;
+        String attrName;
+        String signature;
+        int anns = 0;
+        int ianns = 0;
+        Attribute cattrs = null;
+
+        // visits the header
+        u = header;
+        access = readUnsignedShort(u);
+        name = readClass(u + 2, c);
+        v = items[readUnsignedShort(u + 4)];
+        String superClassName = v == 0 ? null : readUTF8(v, c);
+        String[] implementedItfs = new String[readUnsignedShort(u + 6)];
+        w = 0;
+        u += 8;
+        for (i = 0; i < implementedItfs.length; ++i) {
+            implementedItfs[i] = readClass(u, c);
+            u += 2;
+        }
+
+        // skips fields and methods
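+        // (the class's own attributes are stored after its fields and methods
+        // in the class file, but some of them are needed for the initial
+        // visit call below, so the fields and methods are skipped here and
+        // visited later, starting again from u)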
+        v = u;
+        i = readUnsignedShort(v);
+        v += 2;
+        for (; i > 0; --i) {
+            j = readUnsignedShort(v + 6);
+            v += 8;
+            for (; j > 0; --j) {
+                v += 6 + readInt(v + 2);
+            }
+        }
+        i = readUnsignedShort(v);
+        v += 2;
+        for (; i > 0; --i) {
+            j = readUnsignedShort(v + 6);
+            v += 8;
+            for (; j > 0; --j) {
+                v += 6 + readInt(v + 2);
+            }
+        }
+        // reads the class's attributes
+        signature = null;
+        String sourceFile = null;
+        String sourceDebug = null;
+        String enclosingOwner = null;
+        String enclosingName = null;
+        String enclosingDesc = null;
+
+        i = readUnsignedShort(v);
+        v += 2;
+        for (; i > 0; --i) {
+            attrName = readUTF8(v, c);
+            if (attrName.equals("SourceFile")) {
+                sourceFile = readUTF8(v + 6, c);
+            } else if (attrName.equals("Deprecated")) {
+                access |= Opcodes.ACC_DEPRECATED;
+            } else if (attrName.equals("Synthetic")) {
+                access |= Opcodes.ACC_SYNTHETIC;
+            } else if (attrName.equals("Annotation")) {
+                access |= Opcodes.ACC_ANNOTATION;
+            } else if (attrName.equals("Enum")) {
+                access |= Opcodes.ACC_ENUM;
+            } else if (attrName.equals("InnerClasses")) {
+                w = v + 6;
+            } else if (attrName.equals("Signature")) {
+                signature = readUTF8(v + 6, c);
+            } else if (attrName.equals("SourceDebugExtension")) {
+                int len = readInt(v + 2);
+                sourceDebug = readUTF(v + 6, len, new char[len]);
+            } else if (attrName.equals("EnclosingMethod")) {
+                enclosingOwner = readClass(v + 6, c);
+                int item = readUnsignedShort(v + 8);
+                if (item != 0) {
+                    enclosingName = readUTF8(items[item], c);
+                    enclosingDesc = readUTF8(items[item] + 2, c);
+                }
+            } else if (attrName.equals("RuntimeVisibleAnnotations")) {
+                anns = v + 6;
+            } else if (attrName.equals("RuntimeInvisibleAnnotations")) {
+                ianns = v + 6;
+            } else {
+                attr = readAttribute(attrs,
+                        attrName,
+                        v + 6,
+                        readInt(v + 2),
+                        c,
+                        -1,
+                        null);
+                if (attr != null) {
+                    attr.next = cattrs;
+                    cattrs = attr;
+                }
+            }
+            v += 6 + readInt(v + 2);
+        }
+        // calls the visit method
+        classVisitor.visit(readInt(4),
+                access,
+                name,
+                signature,
+                superClassName,
+                implementedItfs);
+
+        // calls the visitSource method
+        if (sourceFile != null || sourceDebug != null) {
+            classVisitor.visitSource(sourceFile, sourceDebug);
+        }
+
+        // calls the visitOuterClass method
+        if (enclosingOwner != null) {
+            classVisitor.visitOuterClass(enclosingOwner,
+                    enclosingName,
+                    enclosingDesc);
+        }
+
+        // visits the class annotations
+        for (i = 1; i >= 0; --i) {
+            v = i == 0 ? ianns : anns;
+            if (v != 0) {
+                j = readUnsignedShort(v);
+                v += 2;
+                for (; j > 0; --j) {
+                    desc = readUTF8(v, c);
+                    v += 2;
+                    v = readAnnotationValues(v,
+                            c,
+                            classVisitor.visitAnnotation(desc, i != 0));
+                }
+            }
+        }
+
+        // visits the class attributes
+        while (cattrs != null) {
+            attr = cattrs.next;
+            cattrs.next = null;
+            classVisitor.visitAttribute(cattrs);
+            cattrs = attr;
+        }
+
+        // calls the visitInnerClass method
+        if (w != 0) {
+            i = readUnsignedShort(w);
+            w += 2;
+            for (; i > 0; --i) {
+                classVisitor.visitInnerClass(readUnsignedShort(w) == 0
+                        ? null
+                        : readClass(w, c), readUnsignedShort(w + 2) == 0
+                        ? null
+                        : readClass(w + 2, c), readUnsignedShort(w + 4) == 0
+                        ? null
+                        : readUTF8(w + 4, c), readUnsignedShort(w + 6));
+                w += 8;
+            }
+        }
+
+        // visits the fields
+        i = readUnsignedShort(u);
+        u += 2;
+        for (; i > 0; --i) {
+            access = readUnsignedShort(u);
+            name = readUTF8(u + 2, c);
+            desc = readUTF8(u + 4, c);
+            // visits the field's attributes and looks for a ConstantValue
+            // attribute
+            int fieldValueItem = 0;
+            signature = null;
+            anns = 0;
+            ianns = 0;
+            cattrs = null;
+
+            j = readUnsignedShort(u + 6);
+            u += 8;
+            for (; j > 0; --j) {
+                attrName = readUTF8(u, c);
+                if (attrName.equals("ConstantValue")) {
+                    fieldValueItem = readUnsignedShort(u + 6);
+                } else if (attrName.equals("Synthetic")) {
+                    access |= Opcodes.ACC_SYNTHETIC;
+                } else if (attrName.equals("Deprecated")) {
+                    access |= Opcodes.ACC_DEPRECATED;
+                } else if (attrName.equals("Enum")) {
+                    access |= Opcodes.ACC_ENUM;
+                } else if (attrName.equals("Signature")) {
+                    signature = readUTF8(u + 6, c);
+                } else if (attrName.equals("RuntimeVisibleAnnotations")) {
+                    anns = u + 6;
+                } else if (attrName.equals("RuntimeInvisibleAnnotations")) {
+                    ianns = u + 6;
+                } else {
+                    attr = readAttribute(attrs,
+                            attrName,
+                            u + 6,
+                            readInt(u + 2),
+                            c,
+                            -1,
+                            null);
+                    if (attr != null) {
+                        attr.next = cattrs;
+                        cattrs = attr;
+                    }
+                }
+                u += 6 + readInt(u + 2);
+            }
+            // reads the field's value, if any
+            Object value = (fieldValueItem == 0
+                    ? null
+                    : readConst(fieldValueItem, c));
+            // visits the field
+            FieldVisitor fv = classVisitor.visitField(access,
+                    name,
+                    desc,
+                    signature,
+                    value);
+            // visits the field annotations and attributes
+            if (fv != null) {
+                for (j = 1; j >= 0; --j) {
+                    v = j == 0 ? ianns : anns;
+                    if (v != 0) {
+                        k = readUnsignedShort(v);
+                        v += 2;
+                        for (; k > 0; --k) {
+                            desc = readUTF8(v, c);
+                            v += 2;
+                            v = readAnnotationValues(v,
+                                    c,
+                                    fv.visitAnnotation(desc, j != 0));
+                        }
+                    }
+                }
+                while (cattrs != null) {
+                    attr = cattrs.next;
+                    cattrs.next = null;
+                    fv.visitAttribute(cattrs);
+                    cattrs = attr;
+                }
+                fv.visitEnd();
+            }
+        }
+
+        // visits the methods
+        i = readUnsignedShort(u);
+        u += 2;
+        for (; i > 0; --i) {
+            int u0 = u + 6;
+            access = readUnsignedShort(u);
+            name = readUTF8(u + 2, c);
+            desc = readUTF8(u + 4, c);
+            signature = null;
+            anns = 0;
+            ianns = 0;
+            int dann = 0;
+            int mpanns = 0;
+            int impanns = 0;
+            cattrs = null;
+            v = 0;
+            w = 0;
+
+            // looks for Code and Exceptions attributes
+            j = readUnsignedShort(u + 6);
+            u += 8;
+            for (; j > 0; --j) {
+                attrName = readUTF8(u, c);
+                u += 2;
+                int attrSize = readInt(u);
+                u += 4;
+                if (attrName.equals("Code")) {
+                    v = u;
+                } else if (attrName.equals("Exceptions")) {
+                    w = u;
+                } else if (attrName.equals("Synthetic")) {
+                    access |= Opcodes.ACC_SYNTHETIC;
+                } else if (attrName.equals("Varargs")) {
+                    access |= Opcodes.ACC_VARARGS;
+                } else if (attrName.equals("Bridge")) {
+                    access |= Opcodes.ACC_BRIDGE;
+                } else if (attrName.equals("Deprecated")) {
+                    access |= Opcodes.ACC_DEPRECATED;
+                } else if (attrName.equals("Signature")) {
+                    signature = readUTF8(u, c);
+                } else if (attrName.equals("AnnotationDefault")) {
+                    dann = u;
+                } else if (attrName.equals("RuntimeVisibleAnnotations")) {
+                    anns = u;
+                } else if (attrName.equals("RuntimeInvisibleAnnotations")) {
+                    ianns = u;
+                } else if (attrName.equals("RuntimeVisibleParameterAnnotations"))
+                {
+                    mpanns = u;
+                } else if (attrName.equals("RuntimeInvisibleParameterAnnotations"))
+                {
+                    impanns = u;
+                } else {
+                    attr = readAttribute(attrs,
+                            attrName,
+                            u,
+                            attrSize,
+                            c,
+                            -1,
+                            null);
+                    if (attr != null) {
+                        attr.next = cattrs;
+                        cattrs = attr;
+                    }
+                }
+                u += attrSize;
+            }
+            // reads declared exceptions
+            String[] exceptions;
+            if (w == 0) {
+                exceptions = null;
+            } else {
+                exceptions = new String[readUnsignedShort(w)];
+                w += 2;
+                for (j = 0; j < exceptions.length; ++j) {
+                    exceptions[j] = readClass(w, c);
+                    w += 2;
+                }
+            }
+
+            // visits the method's code, if any
+            MethodVisitor mv = classVisitor.visitMethod(access,
+                    name,
+                    desc,
+                    signature,
+                    exceptions);
+
+            if (mv != null) {
+                /*
+                 * if the returned MethodVisitor is in fact a MethodWriter, it
+                 * means there is no method adapter between the reader and the
+                 * writer. If, in addition, the writer's constant pool was
+                 * copied from this reader (mw.cw.cr == this), and the signature
+                 * and exceptions of the method have not been changed, then it
+                 * is possible to skip all visit events and just copy the
+                 * original code of the method to the writer (the access, name
+                 * and descriptor can have been changed, this is not important
+                 * since they are not copied as is from the reader).
+                 */
+                if (mv instanceof MethodWriter) {
+                    MethodWriter mw = (MethodWriter) mv;
+                    if (mw.cw.cr == this) {
+                        if (signature == mw.signature) {
+                            boolean sameExceptions = false;
+                            if (exceptions == null) {
+                                sameExceptions = mw.exceptionCount == 0;
+                            } else {
+                                if (exceptions.length == mw.exceptionCount) {
+                                    sameExceptions = true;
+                                    for (j = exceptions.length - 1; j >= 0; --j)
+                                    {
+                                        w -= 2;
+                                        if (mw.exceptions[j] != readUnsignedShort(w))
+                                        {
+                                            sameExceptions = false;
+                                            break;
+                                        }
+                                    }
+                                }
+                            }
+                            if (sameExceptions) {
+                                /*
+                                 * we do not copy directly the code into
+                                 * MethodWriter to save a byte array copy
+                                 * operation. The real copy will be done in
+                                 * ClassWriter.toByteArray().
+                                 */
+                                mw.classReaderOffset = u0;
+                                mw.classReaderLength = u - u0;
+                                continue;
+                            }
+                        }
+                    }
+                }
+                if (dann != 0) {
+                    AnnotationVisitor dv = mv.visitAnnotationDefault();
+                    readAnnotationValue(dann, c, null, dv);
+                    dv.visitEnd();
+                }
+                for (j = 1; j >= 0; --j) {
+                    w = j == 0 ? ianns : anns;
+                    if (w != 0) {
+                        k = readUnsignedShort(w);
+                        w += 2;
+                        for (; k > 0; --k) {
+                            desc = readUTF8(w, c);
+                            w += 2;
+                            w = readAnnotationValues(w,
+                                    c,
+                                    mv.visitAnnotation(desc, j != 0));
+                        }
+                    }
+                }
+                if (mpanns != 0) {
+                    readParameterAnnotations(mpanns, c, true, mv);
+                }
+                if (impanns != 0) {
+                    readParameterAnnotations(impanns, c, false, mv);
+                }
+                while (cattrs != null) {
+                    attr = cattrs.next;
+                    cattrs.next = null;
+                    mv.visitAttribute(cattrs);
+                    cattrs = attr;
+                }
+            }
+
+            if (mv != null && v != 0) {
+                int maxStack = readUnsignedShort(v);
+                int maxLocals = readUnsignedShort(v + 2);
+                int codeLength = readInt(v + 4);
+                v += 8;
+
+                int codeStart = v;
+                int codeEnd = v + codeLength;
+
+                mv.visitCode();
+
+                // 1st phase: finds the labels
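+                // (a first pass over the bytecode is needed so that every
+                // jump or switch target, exception handler boundary and
+                // debug table entry is represented by a single Label object,
+                // created before the instructions that reference it are
+                // visited in the second pass)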
+                int label;
+                Label[] labels = new Label[codeLength + 1];
+                while (v < codeEnd) {
+                    int opcode = b[v] & 0xFF;
+                    switch (ClassWriter.TYPE[opcode]) {
+                        case ClassWriter.NOARG_INSN:
+                        case ClassWriter.IMPLVAR_INSN:
+                            v += 1;
+                            break;
+                        case ClassWriter.LABEL_INSN:
+                            label = v - codeStart + readShort(v + 1);
+                            if (labels[label] == null) {
+                                labels[label] = new Label();
+                            }
+                            v += 3;
+                            break;
+                        case ClassWriter.LABELW_INSN:
+                            label = v - codeStart + readInt(v + 1);
+                            if (labels[label] == null) {
+                                labels[label] = new Label();
+                            }
+                            v += 5;
+                            break;
+                        case ClassWriter.WIDE_INSN:
+                            opcode = b[v + 1] & 0xFF;
+                            if (opcode == Opcodes.IINC) {
+                                v += 6;
+                            } else {
+                                v += 4;
+                            }
+                            break;
+                        case ClassWriter.TABL_INSN:
+                            // skips 0 to 3 padding bytes
+                            w = v - codeStart;
+                            v = v + 4 - (w & 3);
+                            // reads instruction
+                            label = w + readInt(v);
+                            v += 4;
+                            if (labels[label] == null) {
+                                labels[label] = new Label();
+                            }
+                            j = readInt(v);
+                            v += 4;
+                            j = readInt(v) - j + 1;
+                            v += 4;
+                            for (; j > 0; --j) {
+                                label = w + readInt(v);
+                                v += 4;
+                                if (labels[label] == null) {
+                                    labels[label] = new Label();
+                                }
+                            }
+                            break;
+                        case ClassWriter.LOOK_INSN:
+                            // skips 0 to 3 padding bytes
+                            w = v - codeStart;
+                            v = v + 4 - (w & 3);
+                            // reads instruction
+                            label = w + readInt(v);
+                            v += 4;
+                            if (labels[label] == null) {
+                                labels[label] = new Label();
+                            }
+                            j = readInt(v);
+                            v += 4;
+                            for (; j > 0; --j) {
+                                v += 4; // skips key
+                                label = w + readInt(v);
+                                v += 4;
+                                if (labels[label] == null) {
+                                    labels[label] = new Label();
+                                }
+                            }
+                            break;
+                        case ClassWriter.VAR_INSN:
+                        case ClassWriter.SBYTE_INSN:
+                        case ClassWriter.LDC_INSN:
+                            v += 2;
+                            break;
+                        case ClassWriter.SHORT_INSN:
+                        case ClassWriter.LDCW_INSN:
+                        case ClassWriter.FIELDORMETH_INSN:
+                        case ClassWriter.TYPE_INSN:
+                        case ClassWriter.IINC_INSN:
+                            v += 3;
+                            break;
+                        case ClassWriter.ITFMETH_INSN:
+                            v += 5;
+                            break;
+                        // case MANA_INSN:
+                        default:
+                            v += 4;
+                            break;
+                    }
+                }
+                // parses the try catch entries
+                j = readUnsignedShort(v);
+                v += 2;
+                for (; j > 0; --j) {
+                    label = readUnsignedShort(v);
+                    Label start = labels[label];
+                    if (start == null) {
+                        labels[label] = start = new Label();
+                    }
+                    label = readUnsignedShort(v + 2);
+                    Label end = labels[label];
+                    if (end == null) {
+                        labels[label] = end = new Label();
+                    }
+                    label = readUnsignedShort(v + 4);
+                    Label handler = labels[label];
+                    if (handler == null) {
+                        labels[label] = handler = new Label();
+                    }
+
+                    int type = readUnsignedShort(v + 6);
+                    if (type == 0) {
+                        mv.visitTryCatchBlock(start, end, handler, null);
+                    } else {
+                        mv.visitTryCatchBlock(start,
+                                end,
+                                handler,
+                                readUTF8(items[type], c));
+                    }
+                    v += 8;
+                }
+                // parses the local variable, line number tables, and code
+                // attributes
+                int varTable = 0;
+                int varTypeTable = 0;
+                cattrs = null;
+                j = readUnsignedShort(v);
+                v += 2;
+                for (; j > 0; --j) {
+                    attrName = readUTF8(v, c);
+                    if (attrName.equals("LocalVariableTable")) {
+                        if (!skipDebug) {
+                            varTable = v + 6;
+                            k = readUnsignedShort(v + 6);
+                            w = v + 8;
+                            for (; k > 0; --k) {
+                                label = readUnsignedShort(w);
+                                if (labels[label] == null) {
+                                    labels[label] = new Label();
+                                }
+                                label += readUnsignedShort(w + 2);
+                                if (labels[label] == null) {
+                                    labels[label] = new Label();
+                                }
+                                w += 10;
+                            }
+                        }
+                    } else if (attrName.equals("LocalVariableTypeTable")) {
+                        varTypeTable = v + 6;
+                    } else if (attrName.equals("LineNumberTable")) {
+                        if (!skipDebug) {
+                            k = readUnsignedShort(v + 6);
+                            w = v + 8;
+                            for (; k > 0; --k) {
+                                label = readUnsignedShort(w);
+                                if (labels[label] == null) {
+                                    labels[label] = new Label();
+                                }
+                                labels[label].line = readUnsignedShort(w + 2);
+                                w += 4;
+                            }
+                        }
+                    } else {
+                        for (k = 0; k < attrs.length; ++k) {
+                            if (attrs[k].type.equals(attrName)) {
+                                attr = attrs[k].read(this,
+                                        v + 6,
+                                        readInt(v + 2),
+                                        c,
+                                        codeStart - 8,
+                                        labels);
+                                if (attr != null) {
+                                    attr.next = cattrs;
+                                    cattrs = attr;
+                                }
+                            }
+                        }
+                    }
+                    v += 6 + readInt(v + 2);
+                }
+
+                // 2nd phase: visits each instruction
+                v = codeStart;
+                Label l;
+                while (v < codeEnd) {
+                    w = v - codeStart;
+                    l = labels[w];
+                    if (l != null) {
+                        mv.visitLabel(l);
+                        if (!skipDebug && l.line > 0) {
+                            mv.visitLineNumber(l.line, l);
+                        }
+                    }
+                    int opcode = b[v] & 0xFF;
+                    switch (ClassWriter.TYPE[opcode]) {
+                        case ClassWriter.NOARG_INSN:
+                            mv.visitInsn(opcode);
+                            v += 1;
+                            break;
+                        case ClassWriter.IMPLVAR_INSN:
+                            if (opcode > Opcodes.ISTORE) {
+                                opcode -= 59; // ISTORE_0
+                                mv.visitVarInsn(Opcodes.ISTORE + (opcode >> 2),
+                                        opcode & 0x3);
+                            } else {
+                                opcode -= 26; // ILOAD_0
+                                mv.visitVarInsn(Opcodes.ILOAD + (opcode >> 2),
+                                        opcode & 0x3);
+                            }
+                            v += 1;
+                            break;
+                        case ClassWriter.LABEL_INSN:
+                            mv.visitJumpInsn(opcode, labels[w
+                                    + readShort(v + 1)]);
+                            v += 3;
+                            break;
+                        case ClassWriter.LABELW_INSN:
+                            mv.visitJumpInsn(opcode - 33, labels[w
+                                    + readInt(v + 1)]);
+                            v += 5;
+                            break;
+                        case ClassWriter.WIDE_INSN:
+                            opcode = b[v + 1] & 0xFF;
+                            if (opcode == Opcodes.IINC) {
+                                mv.visitIincInsn(readUnsignedShort(v + 2),
+                                        readShort(v + 4));
+                                v += 6;
+                            } else {
+                                mv.visitVarInsn(opcode,
+                                        readUnsignedShort(v + 2));
+                                v += 4;
+                            }
+                            break;
+                        case ClassWriter.TABL_INSN:
+                            // skips 0 to 3 padding bytes
+                            v = v + 4 - (w & 3);
+                            // reads instruction
+                            label = w + readInt(v);
+                            v += 4;
+                            int min = readInt(v);
+                            v += 4;
+                            int max = readInt(v);
+                            v += 4;
+                            Label[] table = new Label[max - min + 1];
+                            for (j = 0; j < table.length; ++j) {
+                                table[j] = labels[w + readInt(v)];
+                                v += 4;
+                            }
+                            mv.visitTableSwitchInsn(min,
+                                    max,
+                                    labels[label],
+                                    table);
+                            break;
+                        case ClassWriter.LOOK_INSN:
+                            // skips 0 to 3 padding bytes
+                            v = v + 4 - (w & 3);
+                            // reads instruction
+                            label = w + readInt(v);
+                            v += 4;
+                            j = readInt(v);
+                            v += 4;
+                            int[] keys = new int[j];
+                            Label[] values = new Label[j];
+                            for (j = 0; j < keys.length; ++j) {
+                                keys[j] = readInt(v);
+                                v += 4;
+                                values[j] = labels[w + readInt(v)];
+                                v += 4;
+                            }
+                            mv.visitLookupSwitchInsn(labels[label],
+                                    keys,
+                                    values);
+                            break;
+                        case ClassWriter.VAR_INSN:
+                            mv.visitVarInsn(opcode, b[v + 1] & 0xFF);
+                            v += 2;
+                            break;
+                        case ClassWriter.SBYTE_INSN:
+                            mv.visitIntInsn(opcode, b[v + 1]);
+                            v += 2;
+                            break;
+                        case ClassWriter.SHORT_INSN:
+                            mv.visitIntInsn(opcode, readShort(v + 1));
+                            v += 3;
+                            break;
+                        case ClassWriter.LDC_INSN:
+                            mv.visitLdcInsn(readConst(b[v + 1] & 0xFF, c));
+                            v += 2;
+                            break;
+                        case ClassWriter.LDCW_INSN:
+                            mv.visitLdcInsn(readConst(readUnsignedShort(v + 1),
+                                    c));
+                            v += 3;
+                            break;
+                        case ClassWriter.FIELDORMETH_INSN:
+                        case ClassWriter.ITFMETH_INSN:
+                            int cpIndex = items[readUnsignedShort(v + 1)];
+                            String iowner = readClass(cpIndex, c);
+                            cpIndex = items[readUnsignedShort(cpIndex + 2)];
+                            String iname = readUTF8(cpIndex, c);
+                            String idesc = readUTF8(cpIndex + 2, c);
+                            if (opcode < Opcodes.INVOKEVIRTUAL) {
+                                mv.visitFieldInsn(opcode, iowner, iname, idesc);
+                            } else {
+                                mv.visitMethodInsn(opcode, iowner, iname, idesc);
+                            }
+                            if (opcode == Opcodes.INVOKEINTERFACE) {
+                                v += 5;
+                            } else {
+                                v += 3;
+                            }
+                            break;
+                        case ClassWriter.TYPE_INSN:
+                            mv.visitTypeInsn(opcode, readClass(v + 1, c));
+                            v += 3;
+                            break;
+                        case ClassWriter.IINC_INSN:
+                            mv.visitIincInsn(b[v + 1] & 0xFF, b[v + 2]);
+                            v += 3;
+                            break;
+                        // case MANA_INSN:
+                        default:
+                            mv.visitMultiANewArrayInsn(readClass(v + 1, c),
+                                    b[v + 3] & 0xFF);
+                            v += 4;
+                            break;
+                    }
+                }
+                l = labels[codeEnd - codeStart];
+                if (l != null) {
+                    mv.visitLabel(l);
+                }
+
+                // visits the local variable tables
+                if (!skipDebug && varTable != 0) {
+                    int[] typeTable = null;
+                    if (varTypeTable != 0) {
+                        w = varTypeTable;
+                        k = readUnsignedShort(w) * 3;
+                        w += 2;
+                        typeTable = new int[k];
+                        while (k > 0) {
+                            typeTable[--k] = w + 6; // signature
+                            typeTable[--k] = readUnsignedShort(w + 8); // index
+                            typeTable[--k] = readUnsignedShort(w); // start
+                            w += 10;
+                        }
+                    }
+                    w = varTable;
+                    k = readUnsignedShort(w);
+                    w += 2;
+                    for (; k > 0; --k) {
+                        int start = readUnsignedShort(w);
+                        int length = readUnsignedShort(w + 2);
+                        int index = readUnsignedShort(w + 8);
+                        String vsignature = null;
+                        if (typeTable != null) {
+                            for (int a = 0; a < typeTable.length; a += 3) {
+                                if (typeTable[a] == start
+                                        && typeTable[a + 1] == index)
+                                {
+                                    vsignature = readUTF8(typeTable[a + 2], c);
+                                    break;
+                                }
+                            }
+                        }
+                        mv.visitLocalVariable(readUTF8(w + 4, c),
+                                readUTF8(w + 6, c),
+                                vsignature,
+                                labels[start],
+                                labels[start + length],
+                                index);
+                        w += 10;
+                    }
+                }
+                // visits the other attributes
+                while (cattrs != null) {
+                    attr = cattrs.next;
+                    cattrs.next = null;
+                    mv.visitAttribute(cattrs);
+                    cattrs = attr;
+                }
+                // visits the max stack and max locals values
+                mv.visitMaxs(maxStack, maxLocals);
+            }
+
+            if (mv != null) {
+                mv.visitEnd();
+            }
+        }
+
+        // visits the end of the class
+        classVisitor.visitEnd();
+    }
+
+    /**
+     * Reads parameter annotations and makes the given visitor visit them.
+     *
+     * @param v start offset in {@link #b b} of the annotations to be read.
+     * @param buf buffer to be used to call {@link #readUTF8 readUTF8},
+     *        {@link #readClass(int,char[]) readClass} or
+     *        {@link #readConst readConst}.
+     * @param visible <tt>true</tt> if the annotations to be read are visible
+     *        at runtime.
+     * @param mv the visitor that must visit the annotations.
+     */
+    private void readParameterAnnotations(
+        int v,
+        final char[] buf,
+        final boolean visible,
+        final MethodVisitor mv)
+    {
+        int n = b[v++] & 0xFF;
+        for (int i = 0; i < n; ++i) {
+            int j = readUnsignedShort(v);
+            v += 2;
+            for (; j > 0; --j) {
+                String desc = readUTF8(v, buf);
+                v += 2;
+                AnnotationVisitor av = mv.visitParameterAnnotation(i,
+                        desc,
+                        visible);
+                v = readAnnotationValues(v, buf, av);
+            }
+        }
+    }
+
+    /**
+     * Reads the values of an annotation and makes the given visitor visit them.
+     *
+     * @param v the start offset in {@link #b b} of the values to be read
+     *        (including the unsigned short that gives the number of values).
+     * @param buf buffer to be used to call {@link #readUTF8 readUTF8},
+     *        {@link #readClass(int,char[]) readClass} or
+     *        {@link #readConst readConst}.
+     * @param av the visitor that must visit the values.
+     * @return the end offset of the annotations values.
+     */
+    private int readAnnotationValues(
+        int v,
+        final char[] buf,
+        final AnnotationVisitor av)
+    {
+        int i = readUnsignedShort(v);
+        v += 2;
+        for (; i > 0; --i) {
+            String name = readUTF8(v, buf);
+            v += 2;
+            v = readAnnotationValue(v, buf, name, av);
+        }
+        av.visitEnd();
+        return v;
+    }
+
+    /**
+     * Reads a value of an annotation and makes the given visitor visit it.
+     *
+     * @param v the start offset in {@link #b b} of the value to be read (<i>not
+     *        including the value name constant pool index</i>).
+     * @param buf buffer to be used to call {@link #readUTF8 readUTF8},
+     *        {@link #readClass(int,char[]) readClass} or
+     *        {@link #readConst readConst}.
+     * @param name the name of the value to be read.
+     * @param av the visitor that must visit the value.
+     * @return the end offset of the annotation value.
+     */
+    private int readAnnotationValue(
+        int v,
+        final char[] buf,
+        final String name,
+        final AnnotationVisitor av)
+    {
+        int i;
+        switch (readByte(v++)) {
+            case 'I': // pointer to CONSTANT_Integer
+            case 'J': // pointer to CONSTANT_Long
+            case 'F': // pointer to CONSTANT_Float
+            case 'D': // pointer to CONSTANT_Double
+                av.visit(name, readConst(readUnsignedShort(v), buf));
+                v += 2;
+                break;
+            case 'B': // byte value, stored as a CONSTANT_Integer item
+                av.visit(name,
+                        new Byte((byte) readInt(items[readUnsignedShort(v)])));
+                v += 2;
+                break;
+            case 'Z': // boolean value, stored as a CONSTANT_Integer item
+                boolean b = readInt(items[readUnsignedShort(v)]) == 0;
+                av.visit(name, b ? Boolean.FALSE : Boolean.TRUE);
+                v += 2;
+                break;
+            case 'S': // short value, stored as a CONSTANT_Integer item
+                av.visit(name,
+                        new Short((short) readInt(items[readUnsignedShort(v)])));
+                v += 2;
+                break;
+            case 'C': // char value, stored as a CONSTANT_Integer item
+                av.visit(name,
+                        new Character((char) readInt(items[readUnsignedShort(v)])));
+                v += 2;
+                break;
+            case 's': // pointer to CONSTANT_Utf8
+                av.visit(name, readUTF8(v, buf));
+                v += 2;
+                break;
+            case 'e': // enum_const_value
+                av.visitEnum(name, readUTF8(v, buf), readUTF8(v + 2, buf));
+                v += 4;
+                break;
+            case 'c': // class_info
+                av.visit(name, Type.getType(readUTF8(v, buf)));
+                v += 2;
+                break;
+            case '@': // annotation_value
+                String desc = readUTF8(v, buf);
+                v += 2;
+                v = readAnnotationValues(v, buf, av.visitAnnotation(name, desc));
+                break;
+            case '[': // array_value
+                int size = readUnsignedShort(v);
+                v += 2;
+                if (size == 0) {
+                    av.visitArray(name).visitEnd();
+                    return v;
+                }
+                switch (readByte(v++)) {
+                    case 'B':
+                        byte[] bv = new byte[size];
+                        for (i = 0; i < size; i++) {
+                            bv[i] = (byte) readInt(items[readUnsignedShort(v)]);
+                            v += 3;
+                        }
+                        av.visit(name, bv);
+                        --v;
+                        break;
+                    case 'Z':
+                        boolean[] zv = new boolean[size];
+                        for (i = 0; i < size; i++) {
+                            zv[i] = readInt(items[readUnsignedShort(v)]) != 0;
+                            v += 3;
+                        }
+                        av.visit(name, zv);
+                        --v;
+                        break;
+                    case 'S':
+                        short[] sv = new short[size];
+                        for (i = 0; i < size; i++) {
+                            sv[i] = (short) readInt(items[readUnsignedShort(v)]);
+                            v += 3;
+                        }
+                        av.visit(name, sv);
+                        --v;
+                        break;
+                    case 'C':
+                        char[] cv = new char[size];
+                        for (i = 0; i < size; i++) {
+                            cv[i] = (char) readInt(items[readUnsignedShort(v)]);
+                            v += 3;
+                        }
+                        av.visit(name, cv);
+                        --v;
+                        break;
+                    case 'I':
+                        int[] iv = new int[size];
+                        for (i = 0; i < size; i++) {
+                            iv[i] = readInt(items[readUnsignedShort(v)]);
+                            v += 3;
+                        }
+                        av.visit(name, iv);
+                        --v;
+                        break;
+                    case 'J':
+                        long[] lv = new long[size];
+                        for (i = 0; i < size; i++) {
+                            lv[i] = readLong(items[readUnsignedShort(v)]);
+                            v += 3;
+                        }
+                        av.visit(name, lv);
+                        --v;
+                        break;
+                    case 'F':
+                        float[] fv = new float[size];
+                        for (i = 0; i < size; i++) {
+                            fv[i] = Float.intBitsToFloat(readInt(items[readUnsignedShort(v)]));
+                            v += 3;
+                        }
+                        av.visit(name, fv);
+                        --v;
+                        break;
+                    case 'D':
+                        double[] dv = new double[size];
+                        for (i = 0; i < size; i++) {
+                            dv[i] = Double.longBitsToDouble(readLong(items[readUnsignedShort(v)]));
+                            v += 3;
+                        }
+                        av.visit(name, dv);
+                        --v;
+                        break;
+                    default:
+                        v--;
+                        AnnotationVisitor aav = av.visitArray(name);
+                        for (i = size; i > 0; --i) {
+                            v = readAnnotationValue(v, buf, null, aav);
+                        }
+                        aav.visitEnd();
+                }
+        }
+        return v;
+    }
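+
+    // Illustrative note (not part of the original ASM source): a minimal
+    // sketch of the visit events produced above for a hypothetical annotation
+    //
+    //     @Sample(count = 3, names = {"a", "b"})
+    //
+    // The 'I' tag for "count" yields av.visit("count", new Integer(3)), and
+    // the '[' tag for "names" yields aav = av.visitArray("names") followed by
+    // aav.visit(null, "a"), aav.visit(null, "b") and aav.visitEnd(), since
+    // String elements ('s') are handled by the default branch of the inner
+    // switch.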
+
+    /**
+     * Reads an attribute in {@link #b b}.
+     *
+     * @param attrs prototypes of the attributes that must be parsed during the
+     *        visit of the class. Any attribute whose type is not equal to the
+     *        type of one of the prototypes is ignored (i.e. an empty
+     *        {@link Attribute} instance is returned).
+     * @param type the type of the attribute.
+     * @param off index of the first byte of the attribute's content in
+     *        {@link #b b}. The 6 attribute header bytes, containing the type
+     *        and the length of the attribute, are not taken into account here
+     *        (they have already been read).
+     * @param len the length of the attribute's content.
+     * @param buf buffer to be used to call {@link #readUTF8 readUTF8},
+     *        {@link #readClass(int,char[]) readClass} or
+     *        {@link #readConst readConst}.
+     * @param codeOff index of the first byte of the code attribute's content in
+     *        {@link #b b}, or -1 if the attribute to be read is not a code
+     *        attribute. The 6 attribute header bytes, containing the type and
+     *        the length of the attribute, are not taken into account here.
+     * @param labels the labels of the method's code, or <tt>null</tt> if the
+     *        attribute to be read is not a code attribute.
+     * @return the attribute that has been read, or <tt>null</tt> to skip this
+     *         attribute.
+     */
+    private Attribute readAttribute(
+        final Attribute[] attrs,
+        final String type,
+        final int off,
+        final int len,
+        final char[] buf,
+        final int codeOff,
+        final Label[] labels)
+    {
+        for (int i = 0; i < attrs.length; ++i) {
+            if (attrs[i].type.equals(type)) {
+                return attrs[i].read(this, off, len, buf, codeOff, labels);
+            }
+        }
+        return new Attribute(type).read(this, off, len, null, -1, null);
+    }
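+
+    // Illustrative note (not part of the original ASM source): prototypes in
+    // the attrs array are matched purely by their type string.  A hypothetical
+    // Attribute subclass whose type is "com.example.MyAttribute" will only be
+    // asked to read() attributes with exactly that name; every other
+    // non-standard attribute is wrapped in an opaque Attribute instance, as
+    // shown above.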
+
+    // ------------------------------------------------------------------------
+    // Utility methods: low level parsing
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the start index of the constant pool item in {@link #b b}, plus
+     * one. <i>This method is intended for {@link Attribute} sub classes, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param item the index of a constant pool item.
+     * @return the start index of the constant pool item in {@link #b b}, plus
+     *         one.
+     */
+    public int getItem(final int item) {
+        return items[item];
+    }
+
+    /**
+     * Reads a byte value in {@link #b b}. <i>This method is intended for
+     * {@link Attribute} sub classes, and is normally not needed by class
+     * generators or adapters.</i>
+     *
+     * @param index the start index of the value to be read in {@link #b b}.
+     * @return the read value.
+     */
+    public int readByte(final int index) {
+        return b[index] & 0xFF;
+    }
+
+    /**
+     * Reads an unsigned short value in {@link #b b}. <i>This method is
+     * intended for {@link Attribute} sub classes, and is normally not needed by
+     * class generators or adapters.</i>
+     *
+     * @param index the start index of the value to be read in {@link #b b}.
+     * @return the read value.
+     */
+    public int readUnsignedShort(final int index) {
+        byte[] b = this.b;
+        return ((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF);
+    }
+
+    /**
+     * Reads a signed short value in {@link #b b}. <i>This method is intended
+     * for {@link Attribute} sub classes, and is normally not needed by class
+     * generators or adapters.</i>
+     *
+     * @param index the start index of the value to be read in {@link #b b}.
+     * @return the read value.
+     */
+    public short readShort(final int index) {
+        byte[] b = this.b;
+        return (short) (((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF));
+    }
+
+    /**
+     * Reads a signed int value in {@link #b b}. <i>This method is intended for
+     * {@link Attribute} sub classes, and is normally not needed by class
+     * generators or adapters.</i>
+     *
+     * @param index the start index of the value to be read in {@link #b b}.
+     * @return the read value.
+     */
+    public int readInt(final int index) {
+        byte[] b = this.b;
+        return ((b[index] & 0xFF) << 24) | ((b[index + 1] & 0xFF) << 16)
+                | ((b[index + 2] & 0xFF) << 8) | (b[index + 3] & 0xFF);
+    }
+
+    /**
+     * Reads a signed long value in {@link #b b}. <i>This method is intended
+     * for {@link Attribute} sub classes, and is normally not needed by class
+     * generators or adapters.</i>
+     *
+     * @param index the start index of the value to be read in {@link #b b}.
+     * @return the read value.
+     */
+    public long readLong(final int index) {
+        long l1 = readInt(index);
+        long l0 = readInt(index + 4) & 0xFFFFFFFFL;
+        return (l1 << 32) | l0;
+    }
+
+    /**
+     * Reads a UTF8 string constant pool item in {@link #b b}. <i>This method
+     * is intended for {@link Attribute} sub classes, and is normally not needed
+     * by class generators or adapters.</i>
+     *
+     * @param index the start index of an unsigned short value in {@link #b b},
+     *        whose value is the index of a UTF8 constant pool item.
+     * @param buf buffer to be used to read the item. This buffer must be
+     *        sufficiently large. It is not automatically resized.
+     * @return the String corresponding to the specified UTF8 item.
+     */
+    public String readUTF8(int index, final char[] buf) {
+        int item = readUnsignedShort(index);
+        String s = strings[item];
+        if (s != null) {
+            return s;
+        }
+        index = items[item];
+        return strings[item] = readUTF(index + 2, readUnsignedShort(index), buf);
+    }
+
+    /**
+     * Reads a UTF8 string in {@link #b b}.
+     *
+     * @param index start offset of the UTF8 string to be read.
+     * @param utfLen length of the UTF8 string to be read.
+     * @param buf buffer to be used to read the string. This buffer must be
+     *        sufficiently large. It is not automatically resized.
+     * @return the String corresponding to the specified UTF8 string.
+     */
+    private String readUTF(int index, int utfLen, char[] buf) {
+        int endIndex = index + utfLen;
+        byte[] b = this.b;
+        int strLen = 0;
+        int c, d, e;
+        while (index < endIndex) {
+            c = b[index++] & 0xFF;
+            switch (c >> 4) {
+                case 0:
+                case 1:
+                case 2:
+                case 3:
+                case 4:
+                case 5:
+                case 6:
+                case 7:
+                    // 0xxxxxxx
+                    buf[strLen++] = (char) c;
+                    break;
+                case 12:
+                case 13:
+                    // 110x xxxx 10xx xxxx
+                    d = b[index++];
+                    buf[strLen++] = (char) (((c & 0x1F) << 6) | (d & 0x3F));
+                    break;
+                default:
+                    // 1110 xxxx 10xx xxxx 10xx xxxx
+                    d = b[index++];
+                    e = b[index++];
+                    buf[strLen++] = (char) (((c & 0x0F) << 12)
+                            | ((d & 0x3F) << 6) | (e & 0x3F));
+                    break;
+            }
+        }
+        return new String(buf, 0, strLen);
+    }
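+
+    // Illustrative note (not part of the original ASM source): the loop above
+    // decodes the JVM's (modified) UTF-8 encoding one sequence at a time.  For
+    // example, the two-byte sequence 0xC3 0xA9 takes the (c >> 4) == 12 branch
+    // and produces (char) (((0xC3 & 0x1F) << 6) | (0xA9 & 0x3F)) == (char) 0xE9,
+    // i.e. the character 'é'.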
+
+    /**
+     * Reads a class constant pool item in {@link #b b}. <i>This method is
+     * intended for {@link Attribute} sub classes, and is normally not needed by
+     * class generators or adapters.</i>
+     *
+     * @param index the start index of an unsigned short value in {@link #b b},
+     *        whose value is the index of a class constant pool item.
+     * @param buf buffer to be used to read the item. This buffer must be
+     *        sufficiently large. It is not automatically resized.
+     * @return the String corresponding to the specified class item.
+     */
+    public String readClass(final int index, final char[] buf) {
+        // computes the start index of the CONSTANT_Class item in b
+        // and reads the CONSTANT_Utf8 item designated by
+        // the first two bytes of this CONSTANT_Class item
+        return readUTF8(items[readUnsignedShort(index)], buf);
+    }
+
+    /**
+     * Reads a numeric or string constant pool item in {@link #b b}. <i>This
+     * method is intended for {@link Attribute} sub classes, and is normally not
+     * needed by class generators or adapters.</i>
+     *
+     * @param item the index of a constant pool item.
+     * @param buf buffer to be used to read the item. This buffer must be
+     *        sufficiently large. It is not automatically resized.
+     * @return the {@link Integer}, {@link Float}, {@link Long},
+     *         {@link Double}, {@link String} or {@link Type} corresponding to
+     *         the given constant pool item.
+     */
+    public Object readConst(final int item, final char[] buf) {
+        int index = items[item];
+        switch (b[index - 1]) {
+            case ClassWriter.INT:
+                return new Integer(readInt(index));
+            case ClassWriter.FLOAT:
+                return new Float(Float.intBitsToFloat(readInt(index)));
+            case ClassWriter.LONG:
+                return new Long(readLong(index));
+            case ClassWriter.DOUBLE:
+                return new Double(Double.longBitsToDouble(readLong(index)));
+            case ClassWriter.CLASS:
+                String s = readUTF8(index, buf);
+                return Type.getType(s.charAt(0) == '[' ? s : "L" + s + ";");
+            // case ClassWriter.STR:
+            default:
+                return readUTF8(index, buf);
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/ClassVisitor.java b/src/com/sleepycat/asm/ClassVisitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..b58c53b45b5f0a69c9a07cc97336ed582ae8481a
--- /dev/null
+++ b/src/com/sleepycat/asm/ClassVisitor.java
@@ -0,0 +1,195 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A visitor to visit a Java class. The methods of this interface must be called
+ * in the following order: <tt>visit</tt> [ <tt>visitSource</tt> ] [
+ * <tt>visitOuterClass</tt> ] ( <tt>visitAnnotation</tt> |
+ * <tt>visitAttribute</tt> )* (<tt>visitInnerClass</tt> |
+ * <tt>visitField</tt> | <tt>visitMethod</tt> )* <tt>visitEnd</tt>.
+ *
+ * @author Eric Bruneton
+ */
+public interface ClassVisitor {
+
+    /**
+     * Visits the header of the class.
+     *
+     * @param version the class version.
+     * @param access the class's access flags (see {@link Opcodes}). This
+     *        parameter also indicates if the class is deprecated.
+     * @param name the internal name of the class (see
+     *        {@link Type#getInternalName() getInternalName}).
+     * @param signature the signature of this class. May be <tt>null</tt> if
+     *        the class is not a generic one, and does not extend or implement
+     *        generic classes or interfaces.
+     * @param superName the internal name of the super class (see
+     *        {@link Type#getInternalName() getInternalName}). For interfaces,
+     *        the super class is {@link Object}. May be <tt>null</tt>, but
+     *        only for the {@link Object} class.
+     * @param interfaces the internal names of the class's interfaces (see
+     *        {@link Type#getInternalName() getInternalName}). May be
+     *        <tt>null</tt>.
+     */
+    void visit(
+        int version,
+        int access,
+        String name,
+        String signature,
+        String superName,
+        String[] interfaces);
+
+    /**
+     * Visits the source of the class.
+     *
+     * @param source the name of the source file from which the class was
+     *        compiled. May be <tt>null</tt>.
+     * @param debug additional debug information to compute the correspondence
+     *        between source and compiled elements of the class. May be
+     *        <tt>null</tt>.
+     */
+    void visitSource(String source, String debug);
+
+    /**
+     * Visits the enclosing class of the class. This method must be called only
+     * if the class has an enclosing class.
+     *
+     * @param owner internal name of the enclosing class of the class.
+     * @param name the name of the method that contains the class, or
+     *        <tt>null</tt> if the class is not enclosed in a method of its
+     *        enclosing class.
+     * @param desc the descriptor of the method that contains the class, or
+     *        <tt>null</tt> if the class is not enclosed in a method of its
+     *        enclosing class.
+     */
+    void visitOuterClass(String owner, String name, String desc);
+
+    /**
+     * Visits an annotation of the class.
+     *
+     * @param desc the class descriptor of the annotation class.
+     * @param visible <tt>true</tt> if the annotation is visible at runtime.
+     * @return a non null visitor to visit the annotation values.
+     */
+    AnnotationVisitor visitAnnotation(String desc, boolean visible);
+
+    /**
+     * Visits a non standard attribute of the class.
+     *
+     * @param attr an attribute.
+     */
+    void visitAttribute(Attribute attr);
+
+    /**
+     * Visits information about an inner class. This inner class is not
+     * necessarily a member of the class being visited.
+     *
+     * @param name the internal name of an inner class (see
+     *        {@link Type#getInternalName() getInternalName}).
+     * @param outerName the internal name of the class to which the inner class
+     *        belongs (see {@link Type#getInternalName() getInternalName}). May
+     *        be <tt>null</tt>.
+     * @param innerName the (simple) name of the inner class inside its
+     *        enclosing class. May be <tt>null</tt> for anonymous inner
+     *        classes.
+     * @param access the access flags of the inner class as originally declared
+     *        in the enclosing class.
+     */
+    void visitInnerClass(
+        String name,
+        String outerName,
+        String innerName,
+        int access);
+
+    /**
+     * Visits a field of the class.
+     *
+     * @param access the field's access flags (see {@link Opcodes}). This
+     *        parameter also indicates if the field is synthetic and/or
+     *        deprecated.
+     * @param name the field's name.
+     * @param desc the field's descriptor (see {@link Type Type}).
+     * @param signature the field's signature. May be <tt>null</tt> if the
+     *        field's type does not use generic types.
+     * @param value the field's initial value. This parameter, which may be
+     *        <tt>null</tt> if the field does not have an initial value, must
+     *        be an {@link Integer}, a {@link Float}, a {@link Long}, a
+     *        {@link Double} or a {@link String} (for <tt>int</tt>,
+     *        <tt>float</tt>, <tt>long</tt>, <tt>double</tt> or <tt>String</tt> fields
+     *        respectively). <i>This parameter is only used for static fields</i>.
+     *        Its value is ignored for non static fields, which must be
+     *        initialized through bytecode instructions in constructors or
+     *        methods.
+     * @return a visitor to visit field annotations and attributes, or
+     *         <tt>null</tt> if this class visitor is not interested in
+     *         visiting these annotations and attributes.
+     */
+    FieldVisitor visitField(
+        int access,
+        String name,
+        String desc,
+        String signature,
+        Object value);
+
+    /**
+     * Visits a method of the class. This method <i>must</i> return a new
+     * {@link MethodVisitor} instance (or <tt>null</tt>) each time it is
+     * called, i.e., it should not return a previously returned visitor.
+     *
+     * @param access the method's access flags (see {@link Opcodes}). This
+     *        parameter also indicates if the method is synthetic and/or
+     *        deprecated.
+     * @param name the method's name.
+     * @param desc the method's descriptor (see {@link Type Type}).
+     * @param signature the method's signature. May be <tt>null</tt> if the
+     *        method parameters, return type and exceptions do not use generic
+     *        types.
+     * @param exceptions the internal names of the method's exception classes
+     *        (see {@link Type#getInternalName() getInternalName}). May be
+     *        <tt>null</tt>.
+     * @return an object to visit the byte code of the method, or <tt>null</tt>
+     *         if this class visitor is not interested in visiting the code of
+     *         this method.
+     */
+    MethodVisitor visitMethod(
+        int access,
+        String name,
+        String desc,
+        String signature,
+        String[] exceptions);
+
+    /**
+     * Visits the end of the class. This method, which is the last one to be
+     * called, is used to inform the visitor that all the fields and methods of
+     * the class have been visited.
+     */
+    void visitEnd();
+}
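+
+// Illustrative note (not part of the original ASM source): a minimal sketch of
+// the call order documented above, assuming a ClassWriter as the visitor and a
+// hypothetical internal class name "sample/Empty":
+//
+//     ClassVisitor cv = new ClassWriter(true);
+//     cv.visit(Opcodes.V1_4, Opcodes.ACC_PUBLIC, "sample/Empty",
+//              null, "java/lang/Object", null);
+//     cv.visitSource("Empty.java", null);
+//     cv.visitEnd();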
diff --git a/src/com/sleepycat/asm/ClassWriter.java b/src/com/sleepycat/asm/ClassWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2656a83f1136d0f201a4433f5202bfcc609e7dd
--- /dev/null
+++ b/src/com/sleepycat/asm/ClassWriter.java
@@ -0,0 +1,1162 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A {@link ClassVisitor} that generates classes in bytecode form. More
+ * precisely this visitor generates a byte array conforming to the Java class
+ * file format. It can be used alone, to generate a Java class "from scratch",
+ * or with one or more {@link ClassReader ClassReader} and adapter class visitors
+ * to generate a modified class from one or more existing Java classes.
+ *
+ * @author Eric Bruneton
+ */
+public class ClassWriter implements ClassVisitor {
+
+    /**
+     * The type of instructions without any argument.
+     */
+    final static int NOARG_INSN = 0;
+
+    /**
+     * The type of instructions with a signed byte argument.
+     */
+    final static int SBYTE_INSN = 1;
+
+    /**
+     * The type of instructions with a signed short argument.
+     */
+    final static int SHORT_INSN = 2;
+
+    /**
+     * The type of instructions with a local variable index argument.
+     */
+    final static int VAR_INSN = 3;
+
+    /**
+     * The type of instructions with an implicit local variable index argument.
+     */
+    final static int IMPLVAR_INSN = 4;
+
+    /**
+     * The type of instructions with a type descriptor argument.
+     */
+    final static int TYPE_INSN = 5;
+
+    /**
+     * The type of field and method invocation instructions.
+     */
+    final static int FIELDORMETH_INSN = 6;
+
+    /**
+     * The type of the INVOKEINTERFACE instruction.
+     */
+    final static int ITFMETH_INSN = 7;
+
+    /**
+     * The type of instructions with a 2-byte bytecode offset label.
+     */
+    final static int LABEL_INSN = 8;
+
+    /**
+     * The type of instructions with a 4-byte bytecode offset label.
+     */
+    final static int LABELW_INSN = 9;
+
+    /**
+     * The type of the LDC instruction.
+     */
+    final static int LDC_INSN = 10;
+
+    /**
+     * The type of the LDC_W and LDC2_W instructions.
+     */
+    final static int LDCW_INSN = 11;
+
+    /**
+     * The type of the IINC instruction.
+     */
+    final static int IINC_INSN = 12;
+
+    /**
+     * The type of the TABLESWITCH instruction.
+     */
+    final static int TABL_INSN = 13;
+
+    /**
+     * The type of the LOOKUPSWITCH instruction.
+     */
+    final static int LOOK_INSN = 14;
+
+    /**
+     * The type of the MULTIANEWARRAY instruction.
+     */
+    final static int MANA_INSN = 15;
+
+    /**
+     * The type of the WIDE instruction.
+     */
+    final static int WIDE_INSN = 16;
+
+    /**
+     * The instruction types of all JVM opcodes.
+     */
+    static byte[] TYPE;
+
+    /**
+     * The type of CONSTANT_Class constant pool items.
+     */
+    final static int CLASS = 7;
+
+    /**
+     * The type of CONSTANT_Fieldref constant pool items.
+     */
+    final static int FIELD = 9;
+
+    /**
+     * The type of CONSTANT_Methodref constant pool items.
+     */
+    final static int METH = 10;
+
+    /**
+     * The type of CONSTANT_InterfaceMethodref constant pool items.
+     */
+    final static int IMETH = 11;
+
+    /**
+     * The type of CONSTANT_String constant pool items.
+     */
+    final static int STR = 8;
+
+    /**
+     * The type of CONSTANT_Integer constant pool items.
+     */
+    final static int INT = 3;
+
+    /**
+     * The type of CONSTANT_Float constant pool items.
+     */
+    final static int FLOAT = 4;
+
+    /**
+     * The type of CONSTANT_Long constant pool items.
+     */
+    final static int LONG = 5;
+
+    /**
+     * The type of CONSTANT_Double constant pool items.
+     */
+    final static int DOUBLE = 6;
+
+    /**
+     * The type of CONSTANT_NameAndType constant pool items.
+     */
+    final static int NAME_TYPE = 12;
+
+    /**
+     * The type of CONSTANT_Utf8 constant pool items.
+     */
+    final static int UTF8 = 1;
+
+    /**
+     * The class reader from which this class writer was constructed, if any.
+     */
+    ClassReader cr;
+
+    /**
+     * Minor and major version numbers of the class to be generated.
+     */
+    int version;
+
+    /**
+     * Index of the next item to be added in the constant pool.
+     */
+    int index;
+
+    /**
+     * The constant pool of this class.
+     */
+    ByteVector pool;
+
+    /**
+     * The constant pool's hash table data.
+     */
+    Item[] items;
+
+    /**
+     * The threshold of the constant pool's hash table.
+     */
+    int threshold;
+
+    /**
+     * A reusable key used to look for items in the hash {@link #items items}.
+     */
+    Item key;
+
+    /**
+     * A reusable key used to look for items in the hash {@link #items items}.
+     */
+    Item key2;
+
+    /**
+     * A reusable key used to look for items in the hash {@link #items items}.
+     */
+    Item key3;
+
+    /**
+     * The access flags of this class.
+     */
+    private int access;
+
+    /**
+     * The constant pool item that contains the internal name of this class.
+     */
+    private int name;
+
+    /**
+     * The constant pool item that contains the signature of this class.
+     */
+    private int signature;
+
+    /**
+     * The constant pool item that contains the internal name of the super class
+     * of this class.
+     */
+    private int superName;
+
+    /**
+     * Number of interfaces implemented or extended by this class or interface.
+     */
+    private int interfaceCount;
+
+    /**
+     * The interfaces implemented or extended by this class or interface. More
+     * precisely, this array contains the indexes of the constant pool items
+     * that contain the internal names of these interfaces.
+     */
+    private int[] interfaces;
+
+    /**
+     * The index of the constant pool item that contains the name of the source
+     * file from which this class was compiled.
+     */
+    private int sourceFile;
+
+    /**
+     * The SourceDebug attribute of this class.
+     */
+    private ByteVector sourceDebug;
+
+    /**
+     * The constant pool item that contains the name of the enclosing class of
+     * this class.
+     */
+    private int enclosingMethodOwner;
+
+    /**
+     * The constant pool item that contains the name and descriptor of the
+     * enclosing method of this class.
+     */
+    private int enclosingMethod;
+
+    /**
+     * The runtime visible annotations of this class.
+     */
+    private AnnotationWriter anns;
+
+    /**
+     * The runtime invisible annotations of this class.
+     */
+    private AnnotationWriter ianns;
+
+    /**
+     * The non standard attributes of this class.
+     */
+    private Attribute attrs;
+
+    /**
+     * The number of entries in the InnerClasses attribute.
+     */
+    private int innerClassesCount;
+
+    /**
+     * The InnerClasses attribute.
+     */
+    private ByteVector innerClasses;
+
+    /**
+     * The fields of this class. These fields are stored in a linked list of
+     * {@link FieldWriter} objects, linked to each other by their
+     * {@link FieldWriter#next} field. This field stores the first element of
+     * this list.
+     */
+    FieldWriter firstField;
+
+    /**
+     * The fields of this class. These fields are stored in a linked list of
+     * {@link FieldWriter} objects, linked to each other by their
+     * {@link FieldWriter#next} field. This field stores the last element of
+     * this list.
+     */
+    FieldWriter lastField;
+
+    /**
+     * The methods of this class. These methods are stored in a linked list of
+     * {@link MethodWriter} objects, linked to each other by their
+     * {@link MethodWriter#next} field. This field stores the first element of
+     * this list.
+     */
+    MethodWriter firstMethod;
+
+    /**
+     * The methods of this class. These methods are stored in a linked list of
+     * {@link MethodWriter} objects, linked to each other by their
+     * {@link MethodWriter#next} field. This field stores the last element of
+     * this list.
+     */
+    MethodWriter lastMethod;
+
+    /**
+     * <tt>true</tt> if the maximum stack size and number of local variables
+     * must be automatically computed.
+     */
+    private boolean computeMaxs;
+
+    // ------------------------------------------------------------------------
+    // Static initializer
+    // ------------------------------------------------------------------------
+
+    /**
+     * Computes the instruction types of JVM opcodes.
+     */
+    static {
+        int i;
+        byte[] b = new byte[220];
+        String s = "AAAAAAAAAAAAAAAABCKLLDDDDDEEEEEEEEEEEEEEEEEEEEAAAAAAAADD"
+                + "DDDEEEEEEEEEEEEEEEEEEEEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+                + "AAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAIIIIIIIIIIIIIIIIDNOAA"
+                + "AAAAGGGGGGGHAFBFAAFFAAQPIIJJIIIIIIIIIIIIIIIIII";
+        for (i = 0; i < b.length; ++i) {
+            b[i] = (byte) (s.charAt(i) - 'A');
+        }
+        TYPE = b;
+
+        // code to generate the above string
+        //
+        // // SBYTE_INSN instructions
+        // b[Constants.NEWARRAY] = SBYTE_INSN;
+        // b[Constants.BIPUSH] = SBYTE_INSN;
+        //
+        // // SHORT_INSN instructions
+        // b[Constants.SIPUSH] = SHORT_INSN;
+        //
+        // // (IMPL)VAR_INSN instructions
+        // b[Constants.RET] = VAR_INSN;
+        // for (i = Constants.ILOAD; i <= Constants.ALOAD; ++i) {
+        // b[i] = VAR_INSN;
+        // }
+        // for (i = Constants.ISTORE; i <= Constants.ASTORE; ++i) {
+        // b[i] = VAR_INSN;
+        // }
+        // for (i = 26; i <= 45; ++i) { // ILOAD_0 to ALOAD_3
+        // b[i] = IMPLVAR_INSN;
+        // }
+        // for (i = 59; i <= 78; ++i) { // ISTORE_0 to ASTORE_3
+        // b[i] = IMPLVAR_INSN;
+        // }
+        //
+        // // TYPE_INSN instructions
+        // b[Constants.NEW] = TYPE_INSN;
+        // b[Constants.ANEWARRAY] = TYPE_INSN;
+        // b[Constants.CHECKCAST] = TYPE_INSN;
+        // b[Constants.INSTANCEOF] = TYPE_INSN;
+        //
+        // // (Set)FIELDORMETH_INSN instructions
+        // for (i = Constants.GETSTATIC; i <= Constants.INVOKESTATIC; ++i) {
+        // b[i] = FIELDORMETH_INSN;
+        // }
+        // b[Constants.INVOKEINTERFACE] = ITFMETH_INSN;
+        //
+        // // LABEL(W)_INSN instructions
+        // for (i = Constants.IFEQ; i <= Constants.JSR; ++i) {
+        // b[i] = LABEL_INSN;
+        // }
+        // b[Constants.IFNULL] = LABEL_INSN;
+        // b[Constants.IFNONNULL] = LABEL_INSN;
+        // b[200] = LABELW_INSN; // GOTO_W
+        // b[201] = LABELW_INSN; // JSR_W
+        // // temporary opcodes used internally by ASM - see Label and
+        // MethodWriter
+        // for (i = 202; i < 220; ++i) {
+        // b[i] = LABEL_INSN;
+        // }
+        //
+        // // LDC(_W) instructions
+        // b[Constants.LDC] = LDC_INSN;
+        // b[19] = LDCW_INSN; // LDC_W
+        // b[20] = LDCW_INSN; // LDC2_W
+        //
+        // // special instructions
+        // b[Constants.IINC] = IINC_INSN;
+        // b[Constants.TABLESWITCH] = TABL_INSN;
+        // b[Constants.LOOKUPSWITCH] = LOOK_INSN;
+        // b[Constants.MULTIANEWARRAY] = MANA_INSN;
+        // b[196] = WIDE_INSN; // WIDE
+        //
+        // for (i = 0; i < b.length; ++i) {
+        // System.err.print((char)('A' + b[i]));
+        // }
+        // System.err.println();
+    }
+
+    // ------------------------------------------------------------------------
+    // Constructor
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new {@link ClassWriter ClassWriter} object.
+     *
+     * @param computeMaxs <tt>true</tt> if the maximum stack size and the
+     *        maximum number of local variables must be automatically computed.
+     *        If this flag is <tt>true</tt>, then the arguments of the
+     *        {@link MethodVisitor#visitMaxs visitMaxs} method of the
+     *        {@link MethodVisitor} returned by the
+     *        {@link #visitMethod visitMethod} method will be ignored, and
+     *        computed automatically from the signature and the bytecode of each
+     *        method.
+     */
+    public ClassWriter(final boolean computeMaxs) {
+        this(computeMaxs, false);
+    }
+
+    /**
+     * Constructs a new {@link ClassWriter} object.
+     *
+     * @param computeMaxs <tt>true</tt> if the maximum stack size and the
+     *        maximum number of local variables must be automatically computed.
+     *        If this flag is <tt>true</tt>, then the arguments of the
+     *        {@link MethodVisitor#visitMaxs visitMaxs} method of the
+     *        {@link MethodVisitor} returned by the
+     *        {@link #visitMethod visitMethod} method will be ignored, and
+     *        computed automatically from the signature and the bytecode of each
+     *        method.
+     * @param skipUnknownAttributes <b>Deprecated</b>. The value of this
+     *        parameter is ignored.
+     */
+    public ClassWriter(
+        final boolean computeMaxs,
+        final boolean skipUnknownAttributes)
+    {
+        index = 1;
+        pool = new ByteVector();
+        items = new Item[256];
+        threshold = (int) (0.75d * items.length);
+        key = new Item();
+        key2 = new Item();
+        key3 = new Item();
+        this.computeMaxs = computeMaxs;
+    }
+
+    /**
+     * Constructs a new {@link ClassWriter} object and enables optimizations for
+     * "mostly add" bytecode transformations. These optimizations are the
+     * following:
+     *
+     * <ul> <li>The constant pool from the original class is copied as is in
+     * the new class, which saves time. New constant pool entries will be added
+     * at the end if necessary, but unused constant pool entries <i>won't be
+     * removed</i>.</li> <li>Methods that are not transformed are copied as
+     * is in the new class, directly from the original class bytecode (i.e.
+     * without emitting visit events for all the method instructions), which
+     * saves a <i>lot</i> of time. Untransformed methods are detected by the
+     * fact that the {@link ClassReader} receives {@link MethodVisitor} objects
+     * that come from a {@link ClassWriter} (and not from a custom
+     * {@link ClassAdapter} or any other {@link ClassVisitor} instance).</li>
+     * </ul>
+     *
+     * @param classReader the {@link ClassReader} used to read the original
+     *        class. It will be used to copy the entire constant pool from the
+     *        original class and also to copy other fragments of original
+     *        bytecode where applicable.
+     * @param computeMaxs <tt>true</tt> if the maximum stack size and the
+     *        maximum number of local variables must be automatically computed.
+     *        If this flag is <tt>true</tt>, then the arguments of the
+     *        {@link MethodVisitor#visitMaxs visitMaxs} method of the
+     *        {@link MethodVisitor} returned by the
+     *        {@link #visitMethod visitMethod} method will be ignored, and
+     *        computed automatically from the signature and the bytecode of each
+     *        method.
+     */
+    public ClassWriter(
+        final ClassReader classReader,
+        final boolean computeMaxs)
+    {
+        this(computeMaxs, false);
+        classReader.copyPool(this);
+        this.cr = classReader;
+    }
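+
+    // Illustrative note (not part of the original ASM source): a minimal
+    // sketch of the "mostly add" pattern described above, assuming classBytes
+    // holds the bytecode of an existing class:
+    //
+    //     ClassReader cr = new ClassReader(classBytes);
+    //     ClassWriter cw = new ClassWriter(cr, true);
+    //     cr.accept(cw, false);
+    //     byte[] result = cw.toByteArray();
+    //
+    // Because cw was constructed from cr, methods that are not transformed can
+    // be copied directly from the original bytecode.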
+
+    // ------------------------------------------------------------------------
+    // Implementation of the ClassVisitor interface
+    // ------------------------------------------------------------------------
+
+    public void visit(
+        final int version,
+        final int access,
+        final String name,
+        final String signature,
+        final String superName,
+        final String[] interfaces)
+    {
+        this.version = version;
+        this.access = access;
+        this.name = newClass(name);
+        if (signature != null) {
+            this.signature = newUTF8(signature);
+        }
+        this.superName = superName == null ? 0 : newClass(superName);
+        if (interfaces != null && interfaces.length > 0) {
+            interfaceCount = interfaces.length;
+            this.interfaces = new int[interfaceCount];
+            for (int i = 0; i < interfaceCount; ++i) {
+                this.interfaces[i] = newClass(interfaces[i]);
+            }
+        }
+    }
+
+    public void visitSource(final String file, final String debug) {
+        if (file != null) {
+            sourceFile = newUTF8(file);
+        }
+        if (debug != null) {
+            sourceDebug = new ByteVector().putUTF8(debug);
+        }
+    }
+
+    public void visitOuterClass(
+        final String owner,
+        final String name,
+        final String desc)
+    {
+        enclosingMethodOwner = newClass(owner);
+        if (name != null && desc != null) {
+            enclosingMethod = newNameType(name, desc);
+        }
+    }
+
+    public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+        ByteVector bv = new ByteVector();
+        // write type, and reserve space for values count
+        bv.putShort(newUTF8(desc)).putShort(0);
+        AnnotationWriter aw = new AnnotationWriter(this, true, bv, bv, 2);
+        if (visible) {
+            aw.next = anns;
+            anns = aw;
+        } else {
+            aw.next = ianns;
+            ianns = aw;
+        }
+        return aw;
+    }
+
+    public void visitAttribute(final Attribute attr) {
+        attr.next = attrs;
+        attrs = attr;
+    }
+
+    public void visitInnerClass(
+        final String name,
+        final String outerName,
+        final String innerName,
+        final int access)
+    {
+        if (innerClasses == null) {
+            innerClasses = new ByteVector();
+        }
+        ++innerClassesCount;
+        innerClasses.putShort(name == null ? 0 : newClass(name));
+        innerClasses.putShort(outerName == null ? 0 : newClass(outerName));
+        innerClasses.putShort(innerName == null ? 0 : newUTF8(innerName));
+        innerClasses.putShort(access);
+    }
+
+    public FieldVisitor visitField(
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final Object value)
+    {
+        return new FieldWriter(this, access, name, desc, signature, value);
+    }
+
+    public MethodVisitor visitMethod(
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final String[] exceptions)
+    {
+        return new MethodWriter(this,
+                access,
+                name,
+                desc,
+                signature,
+                exceptions,
+                computeMaxs);
+    }
+
+    public void visitEnd() {
+    }
+
+    // ------------------------------------------------------------------------
+    // Other public methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the bytecode of the class that was built with this class writer.
+     *
+     * @return the bytecode of the class that was built with this class writer.
+     */
+    public byte[] toByteArray() {
+        // computes the real size of the bytecode of this class
+        int size = 24 + 2 * interfaceCount;
+        int nbFields = 0;
+        FieldWriter fb = firstField;
+        while (fb != null) {
+            ++nbFields;
+            size += fb.getSize();
+            fb = fb.next;
+        }
+        int nbMethods = 0;
+        MethodWriter mb = firstMethod;
+        while (mb != null) {
+            ++nbMethods;
+            size += mb.getSize();
+            mb = mb.next;
+        }
+        int attributeCount = 0;
+        if (signature != 0) {
+            ++attributeCount;
+            size += 8;
+            newUTF8("Signature");
+        }
+        if (sourceFile != 0) {
+            ++attributeCount;
+            size += 8;
+            newUTF8("SourceFile");
+        }
+        if (sourceDebug != null) {
+            ++attributeCount;
+            size += sourceDebug.length + 4;
+            newUTF8("SourceDebugExtension");
+        }
+        if (enclosingMethodOwner != 0) {
+            ++attributeCount;
+            size += 10;
+            newUTF8("EnclosingMethod");
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            ++attributeCount;
+            size += 6;
+            newUTF8("Deprecated");
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (version & 0xffff) < Opcodes.V1_5)
+        {
+            ++attributeCount;
+            size += 6;
+            newUTF8("Synthetic");
+        }
+        if (version == Opcodes.V1_4) {
+            if ((access & Opcodes.ACC_ANNOTATION) != 0) {
+                ++attributeCount;
+                size += 6;
+                newUTF8("Annotation");
+            }
+            if ((access & Opcodes.ACC_ENUM) != 0) {
+                ++attributeCount;
+                size += 6;
+                newUTF8("Enum");
+            }
+        }
+        if (innerClasses != null) {
+            ++attributeCount;
+            size += 8 + innerClasses.length;
+            newUTF8("InnerClasses");
+        }
+        if (anns != null) {
+            ++attributeCount;
+            size += 8 + anns.getSize();
+            newUTF8("RuntimeVisibleAnnotations");
+        }
+        if (ianns != null) {
+            ++attributeCount;
+            size += 8 + ianns.getSize();
+            newUTF8("RuntimeInvisibleAnnotations");
+        }
+        if (attrs != null) {
+            attributeCount += attrs.getCount();
+            size += attrs.getSize(this, null, 0, -1, -1);
+        }
+        size += pool.length;
+        // allocates a byte vector of this size, in order to avoid unnecessary
+        // arraycopy operations in the ByteVector.enlarge() method
+        ByteVector out = new ByteVector(size);
+        out.putInt(0xCAFEBABE).putInt(version);
+        out.putShort(index).putByteArray(pool.data, 0, pool.length);
+        out.putShort(access).putShort(name).putShort(superName);
+        out.putShort(interfaceCount);
+        for (int i = 0; i < interfaceCount; ++i) {
+            out.putShort(interfaces[i]);
+        }
+        out.putShort(nbFields);
+        fb = firstField;
+        while (fb != null) {
+            fb.put(out);
+            fb = fb.next;
+        }
+        out.putShort(nbMethods);
+        mb = firstMethod;
+        while (mb != null) {
+            mb.put(out);
+            mb = mb.next;
+        }
+        out.putShort(attributeCount);
+        if (signature != 0) {
+            out.putShort(newUTF8("Signature")).putInt(2).putShort(signature);
+        }
+        if (sourceFile != 0) {
+            out.putShort(newUTF8("SourceFile")).putInt(2).putShort(sourceFile);
+        }
+        if (sourceDebug != null) {
+            int len = sourceDebug.length - 2;
+            out.putShort(newUTF8("SourceDebugExtension")).putInt(len);
+            out.putByteArray(sourceDebug.data, 2, len);
+        }
+        if (enclosingMethodOwner != 0) {
+            out.putShort(newUTF8("EnclosingMethod")).putInt(4);
+            out.putShort(enclosingMethodOwner).putShort(enclosingMethod);
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            out.putShort(newUTF8("Deprecated")).putInt(0);
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (version & 0xffff) < Opcodes.V1_5)
+        {
+            out.putShort(newUTF8("Synthetic")).putInt(0);
+        }
+        if (version == Opcodes.V1_4) {
+            if ((access & Opcodes.ACC_ANNOTATION) != 0) {
+                out.putShort(newUTF8("Annotation")).putInt(0);
+            }
+            if ((access & Opcodes.ACC_ENUM) != 0) {
+                out.putShort(newUTF8("Enum")).putInt(0);
+            }
+        }
+        if (innerClasses != null) {
+            out.putShort(newUTF8("InnerClasses"));
+            out.putInt(innerClasses.length + 2).putShort(innerClassesCount);
+            out.putByteArray(innerClasses.data, 0, innerClasses.length);
+        }
+        if (anns != null) {
+            out.putShort(newUTF8("RuntimeVisibleAnnotations"));
+            anns.put(out);
+        }
+        if (ianns != null) {
+            out.putShort(newUTF8("RuntimeInvisibleAnnotations"));
+            ianns.put(out);
+        }
+        if (attrs != null) {
+            attrs.put(this, null, 0, -1, -1, out);
+        }
+        return out.data;
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods: constant pool management
+    // ------------------------------------------------------------------------
+
+    /**
+     * Adds a number or string constant to the constant pool of the class being
+     * built. Does nothing if the constant pool already contains a similar item.
+     *
+     * @param cst the value of the constant to be added to the constant pool.
+     *        This parameter must be an {@link Integer}, a {@link Byte}, a
+     *        {@link Character}, a {@link Short}, a {@link Boolean}, a
+     *        {@link Float}, a {@link Long}, a {@link Double}, a {@link String}
+     *        or a {@link Type}.
+     * @return a new or already existing constant item with the given value.
+     */
+    Item newConstItem(final Object cst) {
+        if (cst instanceof Integer) {
+            int val = ((Integer) cst).intValue();
+            return newInteger(val);
+        } else if (cst instanceof Byte) {
+            int val = ((Byte) cst).intValue();
+            return newInteger(val);
+        } else if (cst instanceof Character) {
+            int val = ((Character) cst).charValue();
+            return newInteger(val);
+        } else if (cst instanceof Short) {
+            int val = ((Short) cst).intValue();
+            return newInteger(val);
+        } else if (cst instanceof Boolean) {
+            int val = ((Boolean) cst).booleanValue() ? 1 : 0;
+            return newInteger(val);
+        } else if (cst instanceof Float) {
+            float val = ((Float) cst).floatValue();
+            return newFloat(val);
+        } else if (cst instanceof Long) {
+            long val = ((Long) cst).longValue();
+            return newLong(val);
+        } else if (cst instanceof Double) {
+            double val = ((Double) cst).doubleValue();
+            return newDouble(val);
+        } else if (cst instanceof String) {
+            return newString((String) cst);
+        } else if (cst instanceof Type) {
+            Type t = (Type) cst;
+            return newClassItem(t.getSort() == Type.OBJECT
+                    ? t.getInternalName()
+                    : t.getDescriptor());
+        } else {
+            throw new IllegalArgumentException("value " + cst);
+        }
+    }
+
+    /**
+     * Adds a number or string constant to the constant pool of the class being
+     * built. Does nothing if the constant pool already contains a similar item.
+     * <i>This method is intended for {@link Attribute} subclasses, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param cst the value of the constant to be added to the constant pool.
+     *        This parameter must be an {@link Integer}, a {@link Float}, a
+     *        {@link Long}, a {@link Double} or a {@link String}.
+     * @return the index of a new or already existing constant item with the
+     *         given value.
+     */
+    public int newConst(final Object cst) {
+        return newConstItem(cst).index;
+    }
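+
+    /*
+     * Illustrative sketch (not part of the original ASM source; the string
+     * and variable names are hypothetical): given a ClassWriter cw, an
+     * Attribute implementation can reserve the constant pool entries it will
+     * refer to before writing its payload, e.g.
+     *
+     *   int utf8Index = cw.newUTF8("CustomPayload");      // CONSTANT_Utf8 index
+     *   int intIndex  = cw.newConst(Integer.valueOf(42)); // CONSTANT_Integer index
+     *
+     * Both calls are idempotent: repeating them simply returns the index of
+     * the item that was interned on the first call.
+     */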
+
+    /**
+     * Adds a UTF8 string to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item. <i>This
+     * method is intended for {@link Attribute} subclasses, and is normally not
+     * needed by class generators or adapters.</i>
+     *
+     * @param value the String value.
+     * @return the index of a new or already existing UTF8 item.
+     */
+    public int newUTF8(final String value) {
+        key.set(UTF8, value, null, null);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(UTF8).putUTF8(value);
+            result = new Item(index++, key);
+            put(result);
+        }
+        return result.index;
+    }
+
+    /**
+     * Adds a class reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     * <i>This method is intended for {@link Attribute} subclasses, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param value the internal name of the class.
+     * @return the index of a new or already existing class reference item.
+     */
+    public int newClass(final String value) {
+        return newClassItem(value).index;
+    }
+
+    /**
+     * Adds a class reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     * <i>This method is intended for {@link Attribute} subclasses, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param value the internal name of the class.
+     * @return a new or already existing class reference item.
+     */
+    private Item newClassItem(final String value) {
+        key2.set(CLASS, value, null, null);
+        Item result = get(key2);
+        if (result == null) {
+            pool.put12(CLASS, newUTF8(value));
+            result = new Item(index++, key2);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a field reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     * <i>This method is intended for {@link Attribute} subclasses, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param owner the internal name of the field's owner class.
+     * @param name the field's name.
+     * @param desc the field's descriptor.
+     * @return the index of a new or already existing field reference item.
+     */
+    public int newField(final String owner, final String name, final String desc)
+    {
+        key3.set(FIELD, owner, name, desc);
+        Item result = get(key3);
+        if (result == null) {
+            put122(FIELD, newClass(owner), newNameType(name, desc));
+            result = new Item(index++, key3);
+            put(result);
+        }
+        return result.index;
+    }
+
+    /**
+     * Adds a method reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     *
+     * @param owner the internal name of the method's owner class.
+     * @param name the method's name.
+     * @param desc the method's descriptor.
+     * @param itf <tt>true</tt> if <tt>owner</tt> is an interface.
+     * @return a new or already existing method reference item.
+     */
+    Item newMethodItem(
+        final String owner,
+        final String name,
+        final String desc,
+        final boolean itf)
+    {
+        int type = itf ? IMETH : METH;
+        key3.set(type, owner, name, desc);
+        Item result = get(key3);
+        if (result == null) {
+            put122(type, newClass(owner), newNameType(name, desc));
+            result = new Item(index++, key3);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a method reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     * <i>This method is intended for {@link Attribute} subclasses, and is
+     * normally not needed by class generators or adapters.</i>
+     *
+     * @param owner the internal name of the method's owner class.
+     * @param name the method's name.
+     * @param desc the method's descriptor.
+     * @param itf <tt>true</tt> if <tt>owner</tt> is an interface.
+     * @return the index of a new or already existing method reference item.
+     */
+    public int newMethod(
+        final String owner,
+        final String name,
+        final String desc,
+        final boolean itf)
+    {
+        return newMethodItem(owner, name, desc, itf).index;
+    }
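+
+    /*
+     * Illustrative sketch (not part of the original ASM source): field and
+     * method references are interned from an owner internal name, a member
+     * name and a descriptor, e.g.
+     *
+     *   int outField   = cw.newField("java/lang/System", "out",
+     *                                "Ljava/io/PrintStream;");
+     *   int printlnRef = cw.newMethod("java/io/PrintStream", "println",
+     *                                 "(Ljava/lang/String;)V", false);
+     *
+     * Each call also interns the owner class, the name-and-type pair and the
+     * underlying UTF8 entries, so repeated calls return stable indices.
+     */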
+
+    /**
+     * Adds an integer to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item.
+     *
+     * @param value the int value.
+     * @return a new or already existing int item.
+     */
+    Item newInteger(final int value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(INT).putInt(value);
+            result = new Item(index++, key);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a float to the constant pool of the class being built. Does nothing
+     * if the constant pool already contains a similar item.
+     *
+     * @param value the float value.
+     * @return a new or already existing float item.
+     */
+    Item newFloat(final float value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(FLOAT).putInt(Float.floatToIntBits(value));
+            result = new Item(index++, key);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a long to the constant pool of the class being built. Does nothing
+     * if the constant pool already contains a similar item.
+     *
+     * @param value the long value.
+     * @return a new or already existing long item.
+     */
+    Item newLong(final long value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(LONG).putLong(value);
+            result = new Item(index, key);
+            put(result);
+            // a long constant occupies two slots in the constant pool
+            index += 2;
+        }
+        return result;
+    }
+
+    /**
+     * Adds a double to the constant pool of the class being built. Does nothing
+     * if the constant pool already contains a similar item.
+     *
+     * @param value the double value.
+     * @return a new or already existing double item.
+     */
+    Item newDouble(final double value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(DOUBLE).putLong(Double.doubleToLongBits(value));
+            result = new Item(index, key);
+            put(result);
+            // a double constant occupies two slots in the constant pool
+            index += 2;
+        }
+        return result;
+    }
+
+    /**
+     * Adds a string to the constant pool of the class being built. Does nothing
+     * if the constant pool already contains a similar item.
+     *
+     * @param value the String value.
+     * @return a new or already existing string item.
+     */
+    private Item newString(final String value) {
+        key2.set(STR, value, null, null);
+        Item result = get(key2);
+        if (result == null) {
+            pool.put12(STR, newUTF8(value));
+            result = new Item(index++, key2);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a name and type to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item. <i>This
+     * method is intended for {@link Attribute} subclasses, and is normally not
+     * needed by class generators or adapters.</i>
+     *
+     * @param name a name.
+     * @param desc a type descriptor.
+     * @return the index of a new or already existing name and type item.
+     */
+    public int newNameType(final String name, final String desc) {
+        key2.set(NAME_TYPE, name, desc, null);
+        Item result = get(key2);
+        if (result == null) {
+            put122(NAME_TYPE, newUTF8(name), newUTF8(desc));
+            result = new Item(index++, key2);
+            put(result);
+        }
+        return result.index;
+    }
+
+    /**
+     * Returns the constant pool's hash table item which is equal to the given
+     * item.
+     *
+     * @param key a constant pool item.
+     * @return the constant pool's hash table item which is equal to the given
+     *         item, or <tt>null</tt> if there is no such item.
+     */
+    private Item get(final Item key) {
+        Item i = items[key.hashCode % items.length];
+        while (i != null && !key.isEqualTo(i)) {
+            i = i.next;
+        }
+        return i;
+    }
+
+    /**
+     * Puts the given item in the constant pool's hash table. The hash table
+     * <i>must</i> not already contain this item.
+     *
+     * @param i the item to be added to the constant pool's hash table.
+     */
+    private void put(final Item i) {
+        // grow the table (roughly doubling it) and rehash all chains once the
+        // constant pool index exceeds the current load threshold
+        if (index > threshold) {
+            int ll = items.length;
+            int nl = ll * 2 + 1;
+            Item[] newItems = new Item[nl];
+            for (int l = ll - 1; l >= 0; --l) {
+                Item j = items[l];
+                while (j != null) {
+                    int index = j.hashCode % newItems.length;
+                    Item k = j.next;
+                    j.next = newItems[index];
+                    newItems[index] = j;
+                    j = k;
+                }
+            }
+            items = newItems;
+            threshold = (int) (nl * 0.75);
+        }
+        int index = i.hashCode % items.length;
+        i.next = items[index];
+        items[index] = i;
+    }
+
+    /**
+     * Puts one byte and two shorts into the constant pool.
+     *
+     * @param b a byte.
+     * @param s1 a short.
+     * @param s2 another short.
+     */
+    private void put122(final int b, final int s1, final int s2) {
+        pool.put12(b, s1).putShort(s2);
+    }
+}
diff --git a/src/com/sleepycat/asm/Edge.java b/src/com/sleepycat/asm/Edge.java
new file mode 100644
index 0000000000000000000000000000000000000000..c73f5210503e3727383ecd8cbbbae433f57e3b59
--- /dev/null
+++ b/src/com/sleepycat/asm/Edge.java
@@ -0,0 +1,57 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * An edge in the control flow graph of a method body. See {@link Label Label}.
+ *
+ * @author Eric Bruneton
+ */
+class Edge {
+
+    /**
+     * The (relative) stack size in the basic block from which this edge
+     * originates. This size is equal to the stack size at the "jump"
+     * instruction to which this edge corresponds, relatively to the stack size
+     * at the beginning of the originating basic block.
+     */
+    int stackSize;
+
+    /**
+     * The successor block of the basic block from which this edge originates.
+     */
+    Label successor;
+
+    /**
+     * The next edge in the list of successors of the originating basic block.
+     * See {@link Label#successors successors}.
+     */
+    Edge next;
+}
diff --git a/src/com/sleepycat/asm/FieldVisitor.java b/src/com/sleepycat/asm/FieldVisitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..f4836493587b962b58cf6618444ea4b2f9435f40
--- /dev/null
+++ b/src/com/sleepycat/asm/FieldVisitor.java
@@ -0,0 +1,63 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A visitor to visit a Java field. The methods of this interface must be called
+ * in the following order: ( <tt>visitAnnotation</tt> |
+ * <tt>visitAttribute</tt> )* <tt>visitEnd</tt>.
+ *
+ * @author Eric Bruneton
+ */
+public interface FieldVisitor {
+
+    /**
+     * Visits an annotation of the field.
+     *
+     * @param desc the class descriptor of the annotation class.
+     * @param visible <tt>true</tt> if the annotation is visible at runtime.
+     * @return a non null visitor to visit the annotation values.
+     */
+    AnnotationVisitor visitAnnotation(String desc, boolean visible);
+
+    /**
+     * Visits a non standard attribute of the field.
+     *
+     * @param attr an attribute.
+     */
+    void visitAttribute(Attribute attr);
+
+    /**
+     * Visits the end of the field. This method, which is the last one to be
+     * called, is used to inform the visitor that all the annotations and
+     * attributes of the field have been visited.
+     */
+    void visitEnd();
+}
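+
+// Illustrative sketch (not part of the original ASM source; the class name is
+// hypothetical): a minimal pass-through FieldVisitor that forwards every event
+// to a delegate, in the call order documented above
+// ((visitAnnotation | visitAttribute)* visitEnd).
+//
+//     class ForwardingFieldVisitor implements FieldVisitor {
+//         private final FieldVisitor fv;
+//         ForwardingFieldVisitor(FieldVisitor fv) { this.fv = fv; }
+//         public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+//             return fv.visitAnnotation(desc, visible);
+//         }
+//         public void visitAttribute(Attribute attr) { fv.visitAttribute(attr); }
+//         public void visitEnd() { fv.visitEnd(); }
+//     }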
diff --git a/src/com/sleepycat/asm/FieldWriter.java b/src/com/sleepycat/asm/FieldWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..ea985ac6c1c294ad222d2ae321d29a8db9ead4c8
--- /dev/null
+++ b/src/com/sleepycat/asm/FieldWriter.java
@@ -0,0 +1,276 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A {@link FieldVisitor} that generates Java fields in bytecode form.
+ *
+ * @author Eric Bruneton
+ */
+final class FieldWriter implements FieldVisitor {
+
+    /**
+     * Next field writer (see {@link ClassWriter#firstField firstField}).
+     */
+    FieldWriter next;
+
+    /**
+     * The class writer to which this field must be added.
+     */
+    private ClassWriter cw;
+
+    /**
+     * Access flags of this field.
+     */
+    private int access;
+
+    /**
+     * The index of the constant pool item that contains the name of this
+     * field.
+     */
+    private int name;
+
+    /**
+     * The index of the constant pool item that contains the descriptor of this
+     * field.
+     */
+    private int desc;
+
+    /**
+     * The index of the constant pool item that contains the signature of this
+     * field.
+     */
+    private int signature;
+
+    /**
+     * The index of the constant pool item that contains the constant value of
+     * this field.
+     */
+    private int value;
+
+    /**
+     * The runtime visible annotations of this field. May be <tt>null</tt>.
+     */
+    private AnnotationWriter anns;
+
+    /**
+     * The runtime invisible annotations of this field. May be <tt>null</tt>.
+     */
+    private AnnotationWriter ianns;
+
+    /**
+     * The non standard attributes of this field. May be <tt>null</tt>.
+     */
+    private Attribute attrs;
+
+    // ------------------------------------------------------------------------
+    // Constructor
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new {@link FieldWriter}.
+     *
+     * @param cw the class writer to which this field must be added.
+     * @param access the field's access flags (see {@link Opcodes}).
+     * @param name the field's name.
+     * @param desc the field's descriptor (see {@link Type}).
+     * @param signature the field's signature. May be <tt>null</tt>.
+     * @param value the field's constant value. May be <tt>null</tt>.
+     */
+    protected FieldWriter(
+        final ClassWriter cw,
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final Object value)
+    {
+        if (cw.firstField == null) {
+            cw.firstField = this;
+        } else {
+            cw.lastField.next = this;
+        }
+        cw.lastField = this;
+        this.cw = cw;
+        this.access = access;
+        this.name = cw.newUTF8(name);
+        this.desc = cw.newUTF8(desc);
+        if (signature != null) {
+            this.signature = cw.newUTF8(signature);
+        }
+        if (value != null) {
+            this.value = cw.newConstItem(value).index;
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Implementation of the FieldVisitor interface
+    // ------------------------------------------------------------------------
+
+    public AnnotationVisitor visitAnnotation(
+        final String desc,
+        final boolean visible)
+    {
+        ByteVector bv = new ByteVector();
+        // write type, and reserve space for values count
+        bv.putShort(cw.newUTF8(desc)).putShort(0);
+        AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2);
+        if (visible) {
+            aw.next = anns;
+            anns = aw;
+        } else {
+            aw.next = ianns;
+            ianns = aw;
+        }
+        return aw;
+    }
+
+    public void visitAttribute(final Attribute attr) {
+        attr.next = attrs;
+        attrs = attr;
+    }
+
+    public void visitEnd() {
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the size of this field.
+     *
+     * @return the size of this field.
+     */
+    int getSize() {
+        int size = 8;
+        if (value != 0) {
+            cw.newUTF8("ConstantValue");
+            size += 8;
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            cw.newUTF8("Synthetic");
+            size += 6;
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            cw.newUTF8("Deprecated");
+            size += 6;
+        }
+        if (cw.version == Opcodes.V1_4 && (access & Opcodes.ACC_ENUM) != 0) {
+            cw.newUTF8("Enum");
+            size += 6;
+        }
+        if (signature != 0) {
+            cw.newUTF8("Signature");
+            size += 8;
+        }
+        if (anns != null) {
+            cw.newUTF8("RuntimeVisibleAnnotations");
+            size += 8 + anns.getSize();
+        }
+        if (ianns != null) {
+            cw.newUTF8("RuntimeInvisibleAnnotations");
+            size += 8 + ianns.getSize();
+        }
+        if (attrs != null) {
+            size += attrs.getSize(cw, null, 0, -1, -1);
+        }
+        return size;
+    }
+
+    /**
+     * Puts the content of this field into the given byte vector.
+     *
+     * @param out where the content of this field must be put.
+     */
+    void put(final ByteVector out) {
+        // field_info layout: access_flags, name_index, descriptor_index,
+        // attributes_count, attributes[]
+        out.putShort(access).putShort(name).putShort(desc);
+        int attributeCount = 0;
+        if (value != 0) {
+            ++attributeCount;
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            ++attributeCount;
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            ++attributeCount;
+        }
+        if (cw.version == Opcodes.V1_4 && (access & Opcodes.ACC_ENUM) != 0) {
+            ++attributeCount;
+        }
+        if (signature != 0) {
+            ++attributeCount;
+        }
+        if (anns != null) {
+            ++attributeCount;
+        }
+        if (ianns != null) {
+            ++attributeCount;
+        }
+        if (attrs != null) {
+            attributeCount += attrs.getCount();
+        }
+        out.putShort(attributeCount);
+        if (value != 0) {
+            out.putShort(cw.newUTF8("ConstantValue"));
+            out.putInt(2).putShort(value);
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            out.putShort(cw.newUTF8("Synthetic")).putInt(0);
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            out.putShort(cw.newUTF8("Deprecated")).putInt(0);
+        }
+        if (cw.version == Opcodes.V1_4 && (access & Opcodes.ACC_ENUM) != 0) {
+            out.putShort(cw.newUTF8("Enum")).putInt(0);
+        }
+        if (signature != 0) {
+            out.putShort(cw.newUTF8("Signature"));
+            out.putInt(2).putShort(signature);
+        }
+        if (anns != null) {
+            out.putShort(cw.newUTF8("RuntimeVisibleAnnotations"));
+            anns.put(out);
+        }
+        if (ianns != null) {
+            out.putShort(cw.newUTF8("RuntimeInvisibleAnnotations"));
+            ianns.put(out);
+        }
+        if (attrs != null) {
+            attrs.put(cw, null, 0, -1, -1, out);
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/Handler.java b/src/com/sleepycat/asm/Handler.java
new file mode 100644
index 0000000000000000000000000000000000000000..de71da8c8c683743e22a8409daf972973ffea9c8
--- /dev/null
+++ b/src/com/sleepycat/asm/Handler.java
@@ -0,0 +1,70 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * Information about an exception handler block.
+ *
+ * @author Eric Bruneton
+ */
+class Handler {
+
+    /**
+     * Beginning of the exception handler's scope (inclusive).
+     */
+    Label start;
+
+    /**
+     * End of the exception handler's scope (exclusive).
+     */
+    Label end;
+
+    /**
+     * Beginning of the exception handler's code.
+     */
+    Label handler;
+
+    /**
+     * Internal name of the type of exceptions handled by this handler, or
+     * <tt>null</tt> to catch any exceptions.
+     */
+    String desc;
+
+    /**
+     * Constant pool index of the internal name of the type of exceptions
+     * handled by this handler, or 0 to catch any exceptions.
+     */
+    int type;
+
+    /**
+     * Next exception handler block info.
+     */
+    Handler next;
+}
diff --git a/src/com/sleepycat/asm/Item.java b/src/com/sleepycat/asm/Item.java
new file mode 100644
index 0000000000000000000000000000000000000000..e61f9c11c12e258c62b31dff394547d31ac56663
--- /dev/null
+++ b/src/com/sleepycat/asm/Item.java
@@ -0,0 +1,252 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A constant pool item. Constant pool items can be created with the 'newXXX'
+ * methods in the {@link ClassWriter} class.
+ *
+ * @author Eric Bruneton
+ */
+final class Item {
+
+    /**
+     * Index of this item in the constant pool.
+     */
+    int index;
+
+    /**
+     * Type of this constant pool item. A single class is used to represent all
+     * constant pool item types, in order to minimize the bytecode size of this
+     * package. The value of this field is one of {@link ClassWriter#INT},
+     * {@link ClassWriter#LONG}, {@link ClassWriter#FLOAT},
+     * {@link ClassWriter#DOUBLE}, {@link ClassWriter#UTF8},
+     * {@link ClassWriter#STR}, {@link ClassWriter#CLASS},
+     * {@link ClassWriter#NAME_TYPE}, {@link ClassWriter#FIELD},
+     * {@link ClassWriter#METH}, {@link ClassWriter#IMETH}.
+     */
+    int type;
+
+    /**
+     * Value of this item, for an integer item.
+     */
+    int intVal;
+
+    /**
+     * Value of this item, for a long item.
+     */
+    long longVal;
+
+    /**
+     * Value of this item, for a float item.
+     */
+    float floatVal;
+
+    /**
+     * Value of this item, for a double item.
+     */
+    double doubleVal;
+
+    /**
+     * First part of the value of this item, for items that do not hold a
+     * primitive value.
+     */
+    String strVal1;
+
+    /**
+     * Second part of the value of this item, for items that do not hold a
+     * primitive value.
+     */
+    String strVal2;
+
+    /**
+     * Third part of the value of this item, for items that do not hold a
+     * primitive value.
+     */
+    String strVal3;
+
+    /**
+     * The hash code value of this constant pool item.
+     */
+    int hashCode;
+
+    /**
+     * Link to another constant pool item, used for collision lists in the
+     * constant pool's hash table.
+     */
+    Item next;
+
+    /**
+     * Constructs an uninitialized {@link Item}.
+     */
+    Item() {
+    }
+
+    /**
+     * Constructs an uninitialized {@link Item} for the constant pool element
+     * at the given position.
+     *
+     * @param index index of the item to be constructed.
+     */
+    Item(int index) {
+        this.index = index;
+    }
+
+    /**
+     * Constructs a copy of the given item.
+     *
+     * @param index index of the item to be constructed.
+     * @param i the item that must be copied into the item to be constructed.
+     */
+    Item(final int index, final Item i) {
+        this.index = index;
+        type = i.type;
+        intVal = i.intVal;
+        longVal = i.longVal;
+        floatVal = i.floatVal;
+        doubleVal = i.doubleVal;
+        strVal1 = i.strVal1;
+        strVal2 = i.strVal2;
+        strVal3 = i.strVal3;
+        hashCode = i.hashCode;
+    }
+
+    /**
+     * Sets this item to an integer item.
+     *
+     * @param intVal the value of this item.
+     */
+    void set(final int intVal) {
+        this.type = ClassWriter.INT;
+        this.intVal = intVal;
+        this.hashCode = 0x7FFFFFFF & (type + intVal);
+    }
+
+    /**
+     * Sets this item to a long item.
+     *
+     * @param longVal the value of this item.
+     */
+    void set(final long longVal) {
+        this.type = ClassWriter.LONG;
+        this.longVal = longVal;
+        this.hashCode = 0x7FFFFFFF & (type + (int) longVal);
+    }
+
+    /**
+     * Sets this item to a float item.
+     *
+     * @param floatVal the value of this item.
+     */
+    void set(final float floatVal) {
+        this.type = ClassWriter.FLOAT;
+        this.floatVal = floatVal;
+        this.hashCode = 0x7FFFFFFF & (type + (int) floatVal);
+    }
+
+    /**
+     * Sets this item to a double item.
+     *
+     * @param doubleVal the value of this item.
+     */
+    void set(final double doubleVal) {
+        this.type = ClassWriter.DOUBLE;
+        this.doubleVal = doubleVal;
+        this.hashCode = 0x7FFFFFFF & (type + (int) doubleVal);
+    }
+
+    /**
+     * Sets this item to an item that does not hold a primitive value.
+     *
+     * @param type the type of this item.
+     * @param strVal1 first part of the value of this item.
+     * @param strVal2 second part of the value of this item.
+     * @param strVal3 third part of the value of this item.
+     */
+    void set(
+        final int type,
+        final String strVal1,
+        final String strVal2,
+        final String strVal3)
+    {
+        this.type = type;
+        this.strVal1 = strVal1;
+        this.strVal2 = strVal2;
+        this.strVal3 = strVal3;
+        switch (type) {
+            case ClassWriter.UTF8:
+            case ClassWriter.STR:
+            case ClassWriter.CLASS:
+                hashCode = 0x7FFFFFFF & (type + strVal1.hashCode());
+                return;
+            case ClassWriter.NAME_TYPE:
+                hashCode = 0x7FFFFFFF & (type + strVal1.hashCode()
+                        * strVal2.hashCode());
+                return;
+            // ClassWriter.FIELD:
+            // ClassWriter.METH:
+            // ClassWriter.IMETH:
+            default:
+                hashCode = 0x7FFFFFFF & (type + strVal1.hashCode()
+                        * strVal2.hashCode() * strVal3.hashCode());
+        }
+    }
+
+    /**
+     * Indicates if the given item is equal to this one.
+     *
+     * @param i the item to be compared to this one.
+     * @return <tt>true</tt> if the given item is equal to this one,
+     *         <tt>false</tt> otherwise.
+     */
+    boolean isEqualTo(final Item i) {
+        if (i.type == type) {
+            switch (type) {
+                case ClassWriter.INT:
+                    return i.intVal == intVal;
+                case ClassWriter.LONG:
+                    return i.longVal == longVal;
+                case ClassWriter.FLOAT:
+                    return i.floatVal == floatVal;
+                case ClassWriter.DOUBLE:
+                    return i.doubleVal == doubleVal;
+                case ClassWriter.UTF8:
+                case ClassWriter.STR:
+                case ClassWriter.CLASS:
+                    return i.strVal1.equals(strVal1);
+                case ClassWriter.NAME_TYPE:
+                    return i.strVal1.equals(strVal1)
+                            && i.strVal2.equals(strVal2);
+                // ClassWriter.FIELD:
+                // ClassWriter.METH:
+                // ClassWriter.IMETH:
+                default:
+                    return i.strVal1.equals(strVal1)
+                            && i.strVal2.equals(strVal2)
+                            && i.strVal3.equals(strVal3);
+            }
+        }
+        return false;
+    }
+}
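+
+// Illustrative note (not part of the original ASM source): ClassWriter keeps a
+// few reusable scratch Items (key, key2, key3) that are re-initialized with
+// set(...) before each lookup, so probing the hash table allocates nothing; a
+// fresh Item is copied from the key only when a constant is actually interned,
+// as in newUTF8:
+//
+//     key.set(UTF8, value, null, null);        // reuse the scratch key
+//     Item result = get(key);                  // zero-allocation probe
+//     if (result == null) {
+//         result = new Item(index++, key);     // copy the key when interning
+//         put(result);
+//     }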
diff --git a/src/com/sleepycat/asm/Label.java b/src/com/sleepycat/asm/Label.java
new file mode 100644
index 0000000000000000000000000000000000000000..3599443ed776eaba11e6728d32571eb79a148df7
--- /dev/null
+++ b/src/com/sleepycat/asm/Label.java
@@ -0,0 +1,299 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A label represents a position in the bytecode of a method. Labels are used
+ * for jump, goto, and switch instructions, and for try catch blocks.
+ *
+ * @author Eric Bruneton
+ */
+public class Label {
+
+    /**
+     * The line number corresponding to this label, if known.
+     */
+    int line;
+
+    /**
+     * Indicates if the position of this label is known.
+     */
+    boolean resolved;
+
+    /**
+     * The position of this label in the code, if known.
+     */
+    int position;
+
+    /**
+     * Indicates if the label position has been updated, after instruction resizing.
+     */
+    boolean resized;
+
+    /**
+     * Number of forward references to this label, times two.
+     */
+    private int referenceCount;
+
+    /**
+     * Information about forward references. Each forward reference is
+     * described by two consecutive integers in this array: the first one is the
+     * position of the first byte of the bytecode instruction that contains the
+     * forward reference, while the second is the position of the first byte of
+     * the forward reference itself. In fact the sign of the first integer
+     * indicates if this reference uses 2 or 4 bytes, and its absolute value
+     * gives the position of the bytecode instruction.
+     */
+    private int[] srcAndRefPositions;
+
+    /*
+     * Fields for the control flow graph analysis algorithm (used to compute the
+     * maximum stack size). A control flow graph contains one node per "basic
+     * block", and one edge per "jump" from one basic block to another. Each
+     * node (i.e., each basic block) is represented by the Label object that
+     * corresponds to the first instruction of this basic block. Each node also
+     * stores the list of its successors in the graph, as a linked list of Edge
+     * objects.
+     */
+
+    /**
+     * The stack size at the beginning of this basic block. This size is
+     * initially unknown. It is computed by the control flow analysis algorithm
+     * (see {@link MethodWriter#visitMaxs visitMaxs}).
+     */
+    int beginStackSize;
+
+    /**
+     * The (relative) maximum stack size corresponding to this basic block. This
+     * size is relative to the stack size at the beginning of the basic block,
+     * i.e., the true maximum stack size is equal to {@link #beginStackSize
+     * beginStackSize} + {@link #maxStackSize maxStackSize}.
+     */
+    int maxStackSize;
+
+    /**
+     * The successors of this node in the control flow graph. These successors
+     * are stored in a linked list of {@link Edge Edge} objects, linked to each
+     * other by their {@link Edge#next} field.
+     */
+    Edge successors;
+
+    /**
+     * The next basic block in the basic block stack. See
+     * {@link MethodWriter#visitMaxs visitMaxs}.
+     */
+    Label next;
+
+    /**
+     * <tt>true</tt> if this basic block has been pushed in the basic block
+     * stack. See {@link MethodWriter#visitMaxs visitMaxs}.
+     */
+    boolean pushed;
+
+    // ------------------------------------------------------------------------
+    // Constructor
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new label.
+     */
+    public Label() {
+    }
+
+    // ------------------------------------------------------------------------
+    // Methods to compute offsets and to manage forward references
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the offset corresponding to this label. This offset is computed
+     * from the start of the method's bytecode. <i>This method is intended for
+     * {@link Attribute} subclasses, and is normally not needed by class
+     * generators or adapters.</i>
+     *
+     * @return the offset corresponding to this label.
+     * @throws IllegalStateException if this label is not resolved yet.
+     */
+    public int getOffset() {
+        if (!resolved) {
+            throw new IllegalStateException("Label offset position has not been resolved yet");
+        }
+        return position;
+    }
+
+    /**
+     * Puts a reference to this label in the bytecode of a method. If the
+     * position of the label is known, the offset is computed and written
+     * directly. Otherwise, a null offset is written and a new forward reference
+     * is declared for this label.
+     *
+     * @param owner the code writer that calls this method.
+     * @param out the bytecode of the method.
+     * @param source the position of first byte of the bytecode instruction that
+     *        contains this label.
+     * @param wideOffset <tt>true</tt> if the reference must be stored in 4
+     *        bytes, or <tt>false</tt> if it must be stored with 2 bytes.
+     * @throws IllegalArgumentException if this label has not been created by
+     *         the given code writer.
+     */
+    void put(
+        final MethodWriter owner,
+        final ByteVector out,
+        final int source,
+        final boolean wideOffset)
+    {
+        if (resolved) {
+            if (wideOffset) {
+                out.putInt(position - source);
+            } else {
+                out.putShort(position - source);
+            }
+        } else {
+            if (wideOffset) {
+                addReference(-1 - source, out.length);
+                out.putInt(-1);
+            } else {
+                addReference(source, out.length);
+                out.putShort(-1);
+            }
+        }
+    }
+
+    /**
+     * Adds a forward reference to this label. This method must be called only
+     * for a true forward reference, i.e. only if this label is not resolved
+     * yet. For backward references, the offset of the reference can be, and
+     * must be, computed and stored directly.
+     *
+     * @param sourcePosition the position of the referencing instruction. This
+     *        position will be used to compute the offset of this forward
+     *        reference.
+     * @param referencePosition the position where the offset for this forward
+     *        reference must be stored.
+     */
+    private void addReference(
+        final int sourcePosition,
+        final int referencePosition)
+    {
+        if (srcAndRefPositions == null) {
+            srcAndRefPositions = new int[6];
+        }
+        if (referenceCount >= srcAndRefPositions.length) {
+            int[] a = new int[srcAndRefPositions.length + 6];
+            System.arraycopy(srcAndRefPositions,
+                    0,
+                    a,
+                    0,
+                    srcAndRefPositions.length);
+            srcAndRefPositions = a;
+        }
+        srcAndRefPositions[referenceCount++] = sourcePosition;
+        srcAndRefPositions[referenceCount++] = referencePosition;
+    }
+
+    /**
+     * Resolves all forward references to this label. This method must be called
+     * when this label is added to the bytecode of the method, i.e. when its
+     * position becomes known. This method fills in the blanks that were left
+     * in the bytecode by each forward reference previously added to this label.
+     *
+     * @param owner the code writer that calls this method.
+     * @param position the position of this label in the bytecode.
+     * @param data the bytecode of the method.
+     * @return <tt>true</tt> if a blank that was left for this label was too
+     *         small to store the offset. In such a case the corresponding jump
+     *         instruction is replaced with a pseudo instruction (using unused
+     *         opcodes) using an unsigned two bytes offset. These pseudo
+     *         instructions will need to be replaced with true instructions with
+     *         wider offsets (4 bytes instead of 2). This is done in
+     *         {@link MethodWriter#resizeInstructions}.
+     * @throws IllegalArgumentException if this label has already been resolved,
+     *         or if it has not been created by the given code writer.
+     */
+    boolean resolve(
+        final MethodWriter owner,
+        final int position,
+        final byte[] data)
+    {
+        boolean needUpdate = false;
+        this.resolved = true;
+        this.position = position;
+        int i = 0;
+        while (i < referenceCount) {
+            int source = srcAndRefPositions[i++];
+            int reference = srcAndRefPositions[i++];
+            int offset;
+            if (source >= 0) {
+                offset = position - source;
+                if (offset < Short.MIN_VALUE || offset > Short.MAX_VALUE) {
+                    /*
+                     * changes the opcode of the jump instruction, in order to
+                     * be able to find it later (see resizeInstructions in
+                     * MethodWriter). These temporary opcodes are similar to
+                     * jump instruction opcodes, except that the 2 bytes offset
+                     * is unsigned (and can therefore represent values from 0 to
+                     * 65535, which is sufficient since the size of a method is
+                     * limited to 65535 bytes).
+                     */
+                    int opcode = data[reference - 1] & 0xFF;
+                    if (opcode <= Opcodes.JSR) {
+                        // changes IFEQ ... JSR to opcodes 202 to 217
+                        data[reference - 1] = (byte) (opcode + 49);
+                    } else {
+                        // changes IFNULL and IFNONNULL to opcodes 218 and 219
+                        data[reference - 1] = (byte) (opcode + 20);
+                    }
+                    needUpdate = true;
+                }
+                data[reference++] = (byte) (offset >>> 8);
+                data[reference] = (byte) offset;
+            } else {
+                offset = position + source + 1;
+                data[reference++] = (byte) (offset >>> 24);
+                data[reference++] = (byte) (offset >>> 16);
+                data[reference++] = (byte) (offset >>> 8);
+                data[reference] = (byte) offset;
+            }
+        }
+        return needUpdate;
+    }
+
+    // ------------------------------------------------------------------------
+    // Overridden Object methods
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns a string representation of this label.
+     *
+     * @return a string representation of this label.
+     */
+    public String toString() {
+        return "L" + System.identityHashCode(this);
+    }
+}
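+
+// Illustrative sketch (not part of the original ASM source): the same Label
+// object marks a branch target for a jump instruction and is later visited at
+// the target position, which resolves any forward references; "mv" stands for
+// an arbitrary MethodVisitor and the local variable slots are hypothetical.
+//
+//     Label skip = new Label();
+//     mv.visitVarInsn(Opcodes.ILOAD, 1);
+//     mv.visitJumpInsn(Opcodes.IFEQ, skip);  // forward reference, offset patched later
+//     mv.visitInsn(Opcodes.ICONST_1);
+//     mv.visitVarInsn(Opcodes.ISTORE, 2);
+//     mv.visitLabel(skip);                   // position known here, references resolved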
diff --git a/src/com/sleepycat/asm/MethodVisitor.java b/src/com/sleepycat/asm/MethodVisitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2a6749602a4be835b433636b6b1714b8cfffb613
--- /dev/null
+++ b/src/com/sleepycat/asm/MethodVisitor.java
@@ -0,0 +1,334 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A visitor to visit a Java method. The methods of this interface must be
+ * called in the following order: [ <tt>visitAnnotationDefault</tt> ] (
+ * <tt>visitAnnotation</tt> | <tt>visitParameterAnnotation</tt> |
+ * <tt>visitAttribute</tt> )* [ <tt>visitCode</tt> ( <tt>visit<i>X</i>Insn</tt> |
+ * <tt>visitLabel</tt> | <tt>visitTryCatchBlock</tt> | <tt>visitLocalVariable</tt> |
+ * <tt>visitLineNumber</tt>)* <tt>visitMaxs</tt> ] <tt>visitEnd</tt>. In
+ * addition, the <tt>visit<i>X</i>Insn</tt> and <tt>visitLabel</tt>
+ * methods must be called in the sequential order of the bytecode instructions
+ * of the visited code, and the <tt>visitLocalVariable</tt> and <tt>visitLineNumber</tt>
+ * methods must be called <i>after</i> the labels passed as arguments have been
+ * visited.
+ *
+ * @author Eric Bruneton
+ */
+public interface MethodVisitor {
+
+    // -------------------------------------------------------------------------
+    // Annotations and non standard attributes
+    // -------------------------------------------------------------------------
+
+    /**
+     * Visits the default value of this annotation interface method.
+     *
+     * @return a non null visitor to visit the actual default value of this
+     *         annotation interface method. The 'name' parameters passed to the
+     *         methods of this annotation visitor are ignored. Moreover, exacly
+     *         one visit method must be called on this annotation visitor,
+     *         followed by visitEnd.
+     */
+    AnnotationVisitor visitAnnotationDefault();
+
+    /**
+     * Visits an annotation of this method.
+     *
+     * @param desc the class descriptor of the annotation class.
+     * @param visible <tt>true</tt> if the annotation is visible at runtime.
+     * @return a non null visitor to visit the annotation values.
+     */
+    AnnotationVisitor visitAnnotation(String desc, boolean visible);
+
+    /**
+     * Visits an annotation of a parameter of this method.
+     *
+     * @param parameter the parameter index.
+     * @param desc the class descriptor of the annotation class.
+     * @param visible <tt>true</tt> if the annotation is visible at runtime.
+     * @return a non null visitor to visit the annotation values.
+     */
+    AnnotationVisitor visitParameterAnnotation(
+        int parameter,
+        String desc,
+        boolean visible);
+
+    /**
+     * Visits a non standard attribute of this method.
+     *
+     * @param attr an attribute.
+     */
+    void visitAttribute(Attribute attr);
+
+    /**
+     * Starts the visit of the method's code, if any (i.e., for a non abstract method).
+     */
+    void visitCode();
+
+    // -------------------------------------------------------------------------
+    // Normal instructions
+    // -------------------------------------------------------------------------
+
+    /**
+     * Visits a zero operand instruction.
+     *
+     * @param opcode the opcode of the instruction to be visited. This opcode is
+     *        either NOP, ACONST_NULL, ICONST_M1, ICONST_0, ICONST_1, ICONST_2,
+     *        ICONST_3, ICONST_4, ICONST_5, LCONST_0, LCONST_1, FCONST_0,
+     *        FCONST_1, FCONST_2, DCONST_0, DCONST_1, IALOAD, LALOAD, FALOAD,
+     *        DALOAD, AALOAD, BALOAD, CALOAD, SALOAD, IASTORE, LASTORE, FASTORE,
+     *        DASTORE, AASTORE, BASTORE, CASTORE, SASTORE, POP, POP2, DUP,
+     *        DUP_X1, DUP_X2, DUP2, DUP2_X1, DUP2_X2, SWAP, IADD, LADD, FADD,
+     *        DADD, ISUB, LSUB, FSUB, DSUB, IMUL, LMUL, FMUL, DMUL, IDIV, LDIV,
+     *        FDIV, DDIV, IREM, LREM, FREM, DREM, INEG, LNEG, FNEG, DNEG, ISHL,
+     *        LSHL, ISHR, LSHR, IUSHR, LUSHR, IAND, LAND, IOR, LOR, IXOR, LXOR,
+     *        I2L, I2F, I2D, L2I, L2F, L2D, F2I, F2L, F2D, D2I, D2L, D2F, I2B,
+     *        I2C, I2S, LCMP, FCMPL, FCMPG, DCMPL, DCMPG, IRETURN, LRETURN,
+     *        FRETURN, DRETURN, ARETURN, RETURN, ARRAYLENGTH, ATHROW,
+     *        MONITORENTER, or MONITOREXIT.
+     */
+    void visitInsn(int opcode);
+
+    /**
+     * Visits an instruction with a single int operand.
+     *
+     * @param opcode the opcode of the instruction to be visited. This opcode is
+     *        either BIPUSH, SIPUSH or NEWARRAY.
+     * @param operand the operand of the instruction to be visited.<br>
+     *        When opcode is BIPUSH, operand value should be between
+     *        Byte.MIN_VALUE and Byte.MAX_VALUE.<br>
+     *        When opcode is SIPUSH, operand value should be between
+     *        Short.MIN_VALUE and Short.MAX_VALUE.<br>
+     *        When opcode is NEWARRAY, operand value should be one of
+     *        {@link Opcodes#T_BOOLEAN}, {@link Opcodes#T_CHAR},
+     *        {@link Opcodes#T_FLOAT}, {@link Opcodes#T_DOUBLE},
+     *        {@link Opcodes#T_BYTE}, {@link Opcodes#T_SHORT},
+     *        {@link Opcodes#T_INT} or {@link Opcodes#T_LONG}.
+     */
+    void visitIntInsn(int opcode, int operand);
+
+    /**
+     * Visits a local variable instruction. A local variable instruction is an
+     * instruction that loads or stores the value of a local variable.
+     *
+     * @param opcode the opcode of the local variable instruction to be visited.
+     *        This opcode is either ILOAD, LLOAD, FLOAD, DLOAD, ALOAD, ISTORE,
+     *        LSTORE, FSTORE, DSTORE, ASTORE or RET.
+     * @param var the operand of the instruction to be visited. This operand is
+     *        the index of a local variable.
+     */
+    void visitVarInsn(int opcode, int var);
+
+    /**
+     * Visits a type instruction. A type instruction is an instruction that
+     * takes a type descriptor as parameter.
+     *
+     * @param opcode the opcode of the type instruction to be visited. This
+     *        opcode is either NEW, ANEWARRAY, CHECKCAST or INSTANCEOF.
+     * @param desc the operand of the instruction to be visited. This operand
+     *        must be a fully qualified class name in internal form, or the type
+     *        descriptor of an array type (see {@link Type Type}).
+     */
+    void visitTypeInsn(int opcode, String desc);
+
+    /**
+     * Visits a field instruction. A field instruction is an instruction that
+     * loads or stores the value of a field of an object.
+     *
+     * @param opcode the opcode of the field instruction to be visited. This
+     *        opcode is either GETSTATIC, PUTSTATIC, GETFIELD or PUTFIELD.
+     * @param owner the internal name of the field's owner class (see {@link
+     *        Type#getInternalName() getInternalName}).
+     * @param name the field's name.
+     * @param desc the field's descriptor (see {@link Type Type}).
+     */
+    void visitFieldInsn(int opcode, String owner, String name, String desc);
+
+    /**
+     * Visits a method instruction. A method instruction is an instruction that
+     * invokes a method.
+     *
+     * @param opcode the opcode of the method instruction to be visited. This
+     *        opcode is either INVOKEVIRTUAL, INVOKESPECIAL, INVOKESTATIC or
+     *        INVOKEINTERFACE.
+     * @param owner the internal name of the method's owner class (see {@link
+     *        Type#getInternalName() getInternalName}).
+     * @param name the method's name.
+     * @param desc the method's descriptor (see {@link Type Type}).
+     */
+    void visitMethodInsn(int opcode, String owner, String name, String desc);
+
+    /**
+     * Visits a jump instruction. A jump instruction is an instruction that may
+     * jump to another instruction.
+     *
+     * @param opcode the opcode of the jump instruction to be visited. This
+     *        opcode is either IFEQ, IFNE, IFLT, IFGE, IFGT, IFLE, IF_ICMPEQ,
+     *        IF_ICMPNE, IF_ICMPLT, IF_ICMPGE, IF_ICMPGT, IF_ICMPLE, IF_ACMPEQ,
+     *        IF_ACMPNE, GOTO, JSR, IFNULL or IFNONNULL.
+     * @param label the operand of the instruction to be visited. This operand
+     *        is a label that designates the instruction to which the jump
+     *        instruction may jump.
+     */
+    void visitJumpInsn(int opcode, Label label);
+
+    /**
+     * Visits a label. A label designates the instruction that will be visited
+     * just after it.
+     *
+     * @param label a {@link Label Label} object.
+     */
+    void visitLabel(Label label);
+
+    // -------------------------------------------------------------------------
+    // Special instructions
+    // -------------------------------------------------------------------------
+
+    /**
+     * Visits a LDC instruction.
+     *
+     * @param cst the constant to be loaded on the stack. This parameter must be
+     *        a non null {@link Integer}, a {@link Float}, a {@link Long}, a
+     *        {@link Double} or a {@link String} (or a {@link Type} for
+     *        <tt>.class</tt> constants, for classes whose version is 49.0 or
+     *        greater).
+     */
+    void visitLdcInsn(Object cst);
+
+    /**
+     * Visits an IINC instruction.
+     *
+     * @param var index of the local variable to be incremented.
+     * @param increment amount to increment the local variable by.
+     */
+    void visitIincInsn(int var, int increment);
+
+    /**
+     * Visits a TABLESWITCH instruction.
+     *
+     * @param min the minimum key value.
+     * @param max the maximum key value.
+     * @param dflt beginning of the default handler block.
+     * @param labels beginnings of the handler blocks. <tt>labels[i]</tt> is
+     *        the beginning of the handler block for the <tt>min + i</tt> key.
+     */
+    void visitTableSwitchInsn(int min, int max, Label dflt, Label[] labels);
+
+    /**
+     * Visits a LOOKUPSWITCH instruction.
+     *
+     * @param dflt beginning of the default handler block.
+     * @param keys the values of the keys.
+     * @param labels beginnings of the handler blocks. <tt>labels[i]</tt> is
+     *        the beginning of the handler block for the <tt>keys[i]</tt> key.
+     */
+    void visitLookupSwitchInsn(Label dflt, int[] keys, Label[] labels);
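+
+    // Note: for a Java switch statement, a compiler typically emits
+    // TABLESWITCH when the keys are dense (e.g., cases 0, 1 and 2 give
+    // min=0, max=2 and three labels) and LOOKUPSWITCH when they are sparse
+    // (e.g., cases 1 and 1000 give keys = {1, 1000} and a matching labels
+    // array).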
+
+    /**
+     * Visits a MULTIANEWARRAY instruction.
+     *
+     * @param desc an array type descriptor (see {@link Type Type}).
+     * @param dims number of dimensions of the array to allocate.
+     */
+    void visitMultiANewArrayInsn(String desc, int dims);
+
+    // -------------------------------------------------------------------------
+    // Exceptions table entries, debug information,
+    // max stack size and max locals
+    // -------------------------------------------------------------------------
+
+    /**
+     * Visits a try catch block.
+     *
+     * @param start beginning of the exception handler's scope (inclusive).
+     * @param end end of the exception handler's scope (exclusive).
+     * @param handler beginning of the exception handler's code.
+     * @param type internal name of the type of exceptions handled by the
+     *        handler, or <tt>null</tt> to catch any exceptions (for "finally"
+     *        blocks).
+     */
+    void visitTryCatchBlock(Label start, Label end, Label handler, String type);
+
+    /**
+     * Visits a local variable declaration.
+     *
+     * @param name the name of a local variable.
+     * @param desc the type descriptor of this local variable.
+     * @param signature the type signature of this local variable. May be
+     *        <tt>null</tt> if the local variable type does not use generic
+     *        types.
+     * @param start the first instruction corresponding to the scope of this
+     *        local variable (inclusive).
+     * @param end the last instruction corresponding to the scope of this local
+     *        variable (exclusive).
+     * @param index the local variable's index.
+     * @throws IllegalArgumentException if one of the labels has not already
+     *         been visited by this visitor (by the
+     *         {@link #visitLabel visitLabel} method).
+     */
+    void visitLocalVariable(
+        String name,
+        String desc,
+        String signature,
+        Label start,
+        Label end,
+        int index);
+
+    /**
+     * Visits a line number declaration.
+     *
+     * @param line a line number. This number refers to the source file from
+     *        which the class was compiled.
+     * @param start the first instruction corresponding to this line number.
+     * @throws IllegalArgumentException if <tt>start</tt> has not already been
+     *         visited by this visitor (by the {@link #visitLabel visitLabel}
+     *         method).
+     */
+    void visitLineNumber(int line, Label start);
+
+    /**
+     * Visits the maximum stack size and the maximum number of local variables
+     * of the method.
+     *
+     * @param maxStack maximum stack size of the method.
+     * @param maxLocals maximum number of local variables for the method.
+     */
+    void visitMaxs(int maxStack, int maxLocals);
+
+    /**
+     * Visits the end of the method. This method, which is the last one to be
+     * called, is used to inform the visitor that all the annotations and
+     * attributes of the method have been visited.
+     */
+    void visitEnd();
+}
diff --git a/src/com/sleepycat/asm/MethodWriter.java b/src/com/sleepycat/asm/MethodWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..ed5a29abfa6383291a18660803db31edc604ac4e
--- /dev/null
+++ b/src/com/sleepycat/asm/MethodWriter.java
@@ -0,0 +1,2012 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A {@link MethodVisitor} that generates methods in bytecode form. Each visit
+ * method of this class appends the bytecode corresponding to the visited
+ * instruction to a byte vector, in the order these methods are called.
+ *
+ * @author Eric Bruneton
+ */
+class MethodWriter implements MethodVisitor {
+
+    /**
+     * Next method writer (see {@link ClassWriter#firstMethod firstMethod}).
+     */
+    MethodWriter next;
+
+    /**
+     * The class writer to which this method must be added.
+     */
+    ClassWriter cw;
+
+    /**
+     * Access flags of this method.
+     */
+    private int access;
+
+    /**
+     * The index of the constant pool item that contains the name of this
+     * method.
+     */
+    private int name;
+
+    /**
+     * The index of the constant pool item that contains the descriptor of this
+     * method.
+     */
+    private int desc;
+
+    /**
+     * The descriptor of this method.
+     */
+    private String descriptor;
+
+    /**
+     * If not zero, indicates that the code of this method must be copied from
+     * the ClassReader associated with this writer in <code>cw.cr</code>. More
+     * precisely, this field gives the index of the first byte to be copied from
+     * <code>cw.cr.b</code>.
+     */
+    int classReaderOffset;
+
+    /**
+     * If not zero, indicates that the code of this method must be copied from
+     * the ClassReader associated with this writer in <code>cw.cr</code>. More
+     * precisely, this field gives the number of bytes to be copied from
+     * <code>cw.cr.b</code>.
+     */
+    int classReaderLength;
+
+    /**
+     * The signature of this method.
+     */
+    String signature;
+
+    /**
+     * Number of exceptions that can be thrown by this method.
+     */
+    int exceptionCount;
+
+    /**
+     * The exceptions that can be thrown by this method. More precisely, this
+     * array contains the indexes of the constant pool items that contain the
+     * internal names of these exception classes.
+     */
+    int[] exceptions;
+
+    /**
+     * The annotation default attribute of this method. May be <tt>null</tt>.
+     */
+    private ByteVector annd;
+
+    /**
+     * The runtime visible annotations of this method. May be <tt>null</tt>.
+     */
+    private AnnotationWriter anns;
+
+    /**
+     * The runtime invisible annotations of this method. May be <tt>null</tt>.
+     */
+    private AnnotationWriter ianns;
+
+    /**
+     * The runtime visible parameter annotations of this method. May be
+     * <tt>null</tt>.
+     */
+    private AnnotationWriter[] panns;
+
+    /**
+     * The runtime invisible parameter annotations of this method. May be
+     * <tt>null</tt>.
+     */
+    private AnnotationWriter[] ipanns;
+
+    /**
+     * The non standard attributes of the method.
+     */
+    private Attribute attrs;
+
+    /**
+     * The bytecode of this method.
+     */
+    private ByteVector code = new ByteVector();
+
+    /**
+     * Maximum stack size of this method.
+     */
+    private int maxStack;
+
+    /**
+     * Maximum number of local variables for this method.
+     */
+    private int maxLocals;
+
+    /**
+     * Number of entries in the catch table of this method.
+     */
+    private int catchCount;
+
+    /**
+     * The catch table of this method.
+     */
+    private Handler catchTable;
+
+    /**
+     * The last element in the catchTable handler list.
+     */
+    private Handler lastHandler;
+
+    /**
+     * Number of entries in the LocalVariableTable attribute.
+     */
+    private int localVarCount;
+
+    /**
+     * The LocalVariableTable attribute.
+     */
+    private ByteVector localVar;
+
+    /**
+     * Number of entries in the LocalVariableTypeTable attribute.
+     */
+    private int localVarTypeCount;
+
+    /**
+     * The LocalVariableTypeTable attribute.
+     */
+    private ByteVector localVarType;
+
+    /**
+     * Number of entries in the LineNumberTable attribute.
+     */
+    private int lineNumberCount;
+
+    /**
+     * The LineNumberTable attribute.
+     */
+    private ByteVector lineNumber;
+
+    /**
+     * The non standard attributes of the method's code.
+     */
+    private Attribute cattrs;
+
+    /**
+     * Indicates if some jump instructions are too small and need to be resized.
+     */
+    private boolean resize;
+
+    /*
+     * Fields for the control flow graph analysis algorithm (used to compute the
+     * maximum stack size). A control flow graph contains one node per "basic
+     * block", and one edge per "jump" from one basic block to another. Each
+     * node (i.e., each basic block) is represented by the Label object that
+     * corresponds to the first instruction of this basic block. Each node also
+     * stores the list of its successors in the graph, as a linked list of Edge
+     * objects.
+     */
+
+    /**
+     * <tt>true</tt> if the maximum stack size and number of local variables
+     * must be automatically computed.
+     */
+    private final boolean computeMaxs;
+
+    /**
+     * The (relative) stack size after the last visited instruction. This size
+     * is relative to the beginning of the current basic block, i.e., the true
+     * stack size after the last visited instruction is equal to the {@link
+     * Label#beginStackSize beginStackSize} of the current basic block plus
+     * <tt>stackSize</tt>.
+     */
+    private int stackSize;
+
+    /**
+     * The (relative) maximum stack size after the last visited instruction.
+     * This size is relative to the beginning of the current basic block, i.e.,
+     * the true maximum stack size after the last visited instruction is equal
+     * to the {@link Label#beginStackSize beginStackSize} of the current basic
+     * block plus <tt>maxStackSize</tt>.
+     */
+    private int maxStackSize;
+
+    /**
+     * The current basic block. This block is the basic block to which the next
+     * instruction to be visited must be added.
+     */
+    private Label currentBlock;
+
+    /**
+     * The basic block stack used by the control flow analysis algorithm. This
+     * stack is represented by a linked list of {@link Label Label} objects,
+     * linked to each other by their {@link Label#next} field. This stack must
+     * not be confused with the JVM stack used to execute the JVM instructions!
+     */
+    private Label blockStack;
+
+    /**
+     * The stack size variation corresponding to each JVM instruction. This
+     * stack variation is equal to the size of the values produced by an
+     * instruction, minus the size of the values consumed by this instruction.
+     */
+    private final static int[] SIZE;
+
+    // ------------------------------------------------------------------------
+    // Static initializer
+    // ------------------------------------------------------------------------
+
+    /**
+     * Computes the stack size variation corresponding to each JVM instruction.
+     */
+    static {
+        int i;
+        int[] b = new int[202];
+        String s = "EFFFFFFFFGGFFFGGFFFEEFGFGFEEEEEEEEEEEEEEEEEEEEDEDEDDDDD"
+                + "CDCDEEEEEEEEEEEEEEEEEEEEBABABBBBDCFFFGGGEDCDCDCDCDCDCDCDCD"
+                + "CDCEEEEDDDDDDDCDCDCEFEFDDEEFFDEDEEEBDDBBDDDDDDCCCCCCCCEFED"
+                + "DDCDCDEEEEEEEEEEFEEEEEEDDEEDDEE";
+        for (i = 0; i < b.length; ++i) {
+            b[i] = s.charAt(i) - 'E';
+        }
+        SIZE = b;
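+        // each character of the string above encodes one stack size delta
+        // relative to 'E': 'E' means 0, 'D' means -1, 'F' means +1, and so on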
+
+        // code to generate the above string
+        //
+        // int NA = 0; // not applicable (unused opcode or variable size opcode)
+        //
+        // b = new int[] {
+        // 0, //NOP, // visitInsn
+        // 1, //ACONST_NULL, // -
+        // 1, //ICONST_M1, // -
+        // 1, //ICONST_0, // -
+        // 1, //ICONST_1, // -
+        // 1, //ICONST_2, // -
+        // 1, //ICONST_3, // -
+        // 1, //ICONST_4, // -
+        // 1, //ICONST_5, // -
+        // 2, //LCONST_0, // -
+        // 2, //LCONST_1, // -
+        // 1, //FCONST_0, // -
+        // 1, //FCONST_1, // -
+        // 1, //FCONST_2, // -
+        // 2, //DCONST_0, // -
+        // 2, //DCONST_1, // -
+        // 1, //BIPUSH, // visitIntInsn
+        // 1, //SIPUSH, // -
+        // 1, //LDC, // visitLdcInsn
+        // NA, //LDC_W, // -
+        // NA, //LDC2_W, // -
+        // 1, //ILOAD, // visitVarInsn
+        // 2, //LLOAD, // -
+        // 1, //FLOAD, // -
+        // 2, //DLOAD, // -
+        // 1, //ALOAD, // -
+        // NA, //ILOAD_0, // -
+        // NA, //ILOAD_1, // -
+        // NA, //ILOAD_2, // -
+        // NA, //ILOAD_3, // -
+        // NA, //LLOAD_0, // -
+        // NA, //LLOAD_1, // -
+        // NA, //LLOAD_2, // -
+        // NA, //LLOAD_3, // -
+        // NA, //FLOAD_0, // -
+        // NA, //FLOAD_1, // -
+        // NA, //FLOAD_2, // -
+        // NA, //FLOAD_3, // -
+        // NA, //DLOAD_0, // -
+        // NA, //DLOAD_1, // -
+        // NA, //DLOAD_2, // -
+        // NA, //DLOAD_3, // -
+        // NA, //ALOAD_0, // -
+        // NA, //ALOAD_1, // -
+        // NA, //ALOAD_2, // -
+        // NA, //ALOAD_3, // -
+        // -1, //IALOAD, // visitInsn
+        // 0, //LALOAD, // -
+        // -1, //FALOAD, // -
+        // 0, //DALOAD, // -
+        // -1, //AALOAD, // -
+        // -1, //BALOAD, // -
+        // -1, //CALOAD, // -
+        // -1, //SALOAD, // -
+        // -1, //ISTORE, // visitVarInsn
+        // -2, //LSTORE, // -
+        // -1, //FSTORE, // -
+        // -2, //DSTORE, // -
+        // -1, //ASTORE, // -
+        // NA, //ISTORE_0, // -
+        // NA, //ISTORE_1, // -
+        // NA, //ISTORE_2, // -
+        // NA, //ISTORE_3, // -
+        // NA, //LSTORE_0, // -
+        // NA, //LSTORE_1, // -
+        // NA, //LSTORE_2, // -
+        // NA, //LSTORE_3, // -
+        // NA, //FSTORE_0, // -
+        // NA, //FSTORE_1, // -
+        // NA, //FSTORE_2, // -
+        // NA, //FSTORE_3, // -
+        // NA, //DSTORE_0, // -
+        // NA, //DSTORE_1, // -
+        // NA, //DSTORE_2, // -
+        // NA, //DSTORE_3, // -
+        // NA, //ASTORE_0, // -
+        // NA, //ASTORE_1, // -
+        // NA, //ASTORE_2, // -
+        // NA, //ASTORE_3, // -
+        // -3, //IASTORE, // visitInsn
+        // -4, //LASTORE, // -
+        // -3, //FASTORE, // -
+        // -4, //DASTORE, // -
+        // -3, //AASTORE, // -
+        // -3, //BASTORE, // -
+        // -3, //CASTORE, // -
+        // -3, //SASTORE, // -
+        // -1, //POP, // -
+        // -2, //POP2, // -
+        // 1, //DUP, // -
+        // 1, //DUP_X1, // -
+        // 1, //DUP_X2, // -
+        // 2, //DUP2, // -
+        // 2, //DUP2_X1, // -
+        // 2, //DUP2_X2, // -
+        // 0, //SWAP, // -
+        // -1, //IADD, // -
+        // -2, //LADD, // -
+        // -1, //FADD, // -
+        // -2, //DADD, // -
+        // -1, //ISUB, // -
+        // -2, //LSUB, // -
+        // -1, //FSUB, // -
+        // -2, //DSUB, // -
+        // -1, //IMUL, // -
+        // -2, //LMUL, // -
+        // -1, //FMUL, // -
+        // -2, //DMUL, // -
+        // -1, //IDIV, // -
+        // -2, //LDIV, // -
+        // -1, //FDIV, // -
+        // -2, //DDIV, // -
+        // -1, //IREM, // -
+        // -2, //LREM, // -
+        // -1, //FREM, // -
+        // -2, //DREM, // -
+        // 0, //INEG, // -
+        // 0, //LNEG, // -
+        // 0, //FNEG, // -
+        // 0, //DNEG, // -
+        // -1, //ISHL, // -
+        // -1, //LSHL, // -
+        // -1, //ISHR, // -
+        // -1, //LSHR, // -
+        // -1, //IUSHR, // -
+        // -1, //LUSHR, // -
+        // -1, //IAND, // -
+        // -2, //LAND, // -
+        // -1, //IOR, // -
+        // -2, //LOR, // -
+        // -1, //IXOR, // -
+        // -2, //LXOR, // -
+        // 0, //IINC, // visitIincInsn
+        // 1, //I2L, // visitInsn
+        // 0, //I2F, // -
+        // 1, //I2D, // -
+        // -1, //L2I, // -
+        // -1, //L2F, // -
+        // 0, //L2D, // -
+        // 0, //F2I, // -
+        // 1, //F2L, // -
+        // 1, //F2D, // -
+        // -1, //D2I, // -
+        // 0, //D2L, // -
+        // -1, //D2F, // -
+        // 0, //I2B, // -
+        // 0, //I2C, // -
+        // 0, //I2S, // -
+        // -3, //LCMP, // -
+        // -1, //FCMPL, // -
+        // -1, //FCMPG, // -
+        // -3, //DCMPL, // -
+        // -3, //DCMPG, // -
+        // -1, //IFEQ, // visitJumpInsn
+        // -1, //IFNE, // -
+        // -1, //IFLT, // -
+        // -1, //IFGE, // -
+        // -1, //IFGT, // -
+        // -1, //IFLE, // -
+        // -2, //IF_ICMPEQ, // -
+        // -2, //IF_ICMPNE, // -
+        // -2, //IF_ICMPLT, // -
+        // -2, //IF_ICMPGE, // -
+        // -2, //IF_ICMPGT, // -
+        // -2, //IF_ICMPLE, // -
+        // -2, //IF_ACMPEQ, // -
+        // -2, //IF_ACMPNE, // -
+        // 0, //GOTO, // -
+        // 1, //JSR, // -
+        // 0, //RET, // visitVarInsn
+        // -1, //TABLESWITCH, // visitTableSwitchInsn
+        // -1, //LOOKUPSWITCH, // visitLookupSwitchInsn
+        // -1, //IRETURN, // visitInsn
+        // -2, //LRETURN, // -
+        // -1, //FRETURN, // -
+        // -2, //DRETURN, // -
+        // -1, //ARETURN, // -
+        // 0, //RETURN, // -
+        // NA, //GETSTATIC, // visitFieldInsn
+        // NA, //PUTSTATIC, // -
+        // NA, //GETFIELD, // -
+        // NA, //PUTFIELD, // -
+        // NA, //INVOKEVIRTUAL, // visitMethodInsn
+        // NA, //INVOKESPECIAL, // -
+        // NA, //INVOKESTATIC, // -
+        // NA, //INVOKEINTERFACE, // -
+        // NA, //UNUSED, // NOT VISITED
+        // 1, //NEW, // visitTypeInsn
+        // 0, //NEWARRAY, // visitIntInsn
+        // 0, //ANEWARRAY, // visitTypeInsn
+        // 0, //ARRAYLENGTH, // visitInsn
+        // NA, //ATHROW, // -
+        // 0, //CHECKCAST, // visitTypeInsn
+        // 0, //INSTANCEOF, // -
+        // -1, //MONITORENTER, // visitInsn
+        // -1, //MONITOREXIT, // -
+        // NA, //WIDE, // NOT VISITED
+        // NA, //MULTIANEWARRAY, // visitMultiANewArrayInsn
+        // -1, //IFNULL, // visitJumpInsn
+        // -1, //IFNONNULL, // -
+        // NA, //GOTO_W, // -
+        // NA, //JSR_W, // -
+        // };
+        // for (i = 0; i < b.length; ++i) {
+        // System.err.print((char)('E' + b[i]));
+        // }
+        // System.err.println();
+    }
+
+    // ------------------------------------------------------------------------
+    // Constructor
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a new {@link MethodWriter}.
+     *
+     * @param cw the class writer to which the method must be added.
+     * @param access the method's access flags (see {@link Opcodes}).
+     * @param name the method's name.
+     * @param desc the method's descriptor (see {@link Type}).
+     * @param signature the method's signature. May be <tt>null</tt>.
+     * @param exceptions the internal names of the method's exceptions. May be
+     *        <tt>null</tt>.
+     * @param computeMaxs <tt>true</tt> if the maximum stack size and number
+     *        of local variables must be automatically computed.
+     */
+    MethodWriter(
+        final ClassWriter cw,
+        final int access,
+        final String name,
+        final String desc,
+        final String signature,
+        final String[] exceptions,
+        final boolean computeMaxs)
+    {
+        if (cw.firstMethod == null) {
+            cw.firstMethod = this;
+        } else {
+            cw.lastMethod.next = this;
+        }
+        cw.lastMethod = this;
+        this.cw = cw;
+        this.access = access;
+        this.name = cw.newUTF8(name);
+        this.desc = cw.newUTF8(desc);
+        this.descriptor = desc;
+        this.signature = signature;
+        if (exceptions != null && exceptions.length > 0) {
+            exceptionCount = exceptions.length;
+            this.exceptions = new int[exceptionCount];
+            for (int i = 0; i < exceptionCount; ++i) {
+                this.exceptions[i] = cw.newClass(exceptions[i]);
+            }
+        }
+        this.computeMaxs = computeMaxs;
+        if (computeMaxs) {
+            // updates maxLocals
+            int size = getArgumentsAndReturnSizes(desc) >> 2;
+            if ((access & Opcodes.ACC_STATIC) != 0) {
+                --size;
+            }
+            maxLocals = size;
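+            // e.g., an instance method with descriptor (IJ)D starts with
+            // maxLocals = 4: one slot for this, one for the int and two for
+            // the long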
+            // pushes the first block onto the stack of blocks to be visited
+            currentBlock = new Label();
+            currentBlock.pushed = true;
+            blockStack = currentBlock;
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Implementation of the MethodVisitor interface
+    // ------------------------------------------------------------------------
+
+    public AnnotationVisitor visitAnnotationDefault() {
+        annd = new ByteVector();
+        return new AnnotationWriter(cw, false, annd, null, 0);
+    }
+
+    public AnnotationVisitor visitAnnotation(
+        final String desc,
+        final boolean visible)
+    {
+        ByteVector bv = new ByteVector();
+        // write type, and reserve space for values count
+        bv.putShort(cw.newUTF8(desc)).putShort(0);
+        AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2);
+        if (visible) {
+            aw.next = anns;
+            anns = aw;
+        } else {
+            aw.next = ianns;
+            ianns = aw;
+        }
+        return aw;
+    }
+
+    public AnnotationVisitor visitParameterAnnotation(
+        final int parameter,
+        final String desc,
+        final boolean visible)
+    {
+        ByteVector bv = new ByteVector();
+        // write type, and reserve space for values count
+        bv.putShort(cw.newUTF8(desc)).putShort(0);
+        AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2);
+        if (visible) {
+            if (panns == null) {
+                panns = new AnnotationWriter[Type.getArgumentTypes(descriptor).length];
+            }
+            aw.next = panns[parameter];
+            panns[parameter] = aw;
+        } else {
+            if (ipanns == null) {
+                ipanns = new AnnotationWriter[Type.getArgumentTypes(descriptor).length];
+            }
+            aw.next = ipanns[parameter];
+            ipanns[parameter] = aw;
+        }
+        return aw;
+    }
+
+    public void visitAttribute(final Attribute attr) {
+        if (attr.isCodeAttribute()) {
+            attr.next = cattrs;
+            cattrs = attr;
+        } else {
+            attr.next = attrs;
+            attrs = attr;
+        }
+    }
+
+    public void visitCode() {
+    }
+
+    public void visitInsn(final int opcode) {
+        if (computeMaxs) {
+            // updates current and max stack sizes
+            int size = stackSize + SIZE[opcode];
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+            // if opcode == ATHROW or xRETURN, ends current block (no successor)
+            if ((opcode >= Opcodes.IRETURN && opcode <= Opcodes.RETURN)
+                    || opcode == Opcodes.ATHROW)
+            {
+                if (currentBlock != null) {
+                    currentBlock.maxStackSize = maxStackSize;
+                    currentBlock = null;
+                }
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        code.putByte(opcode);
+    }
+
+    public void visitIntInsn(final int opcode, final int operand) {
+        if (computeMaxs && opcode != Opcodes.NEWARRAY) {
+            // updates current and max stack sizes only if opcode != NEWARRAY
+            // (stack size variation = +1 for BIPUSH or SIPUSH, 0 for NEWARRAY)
+            int size = stackSize + 1;
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+        }
+        // adds the instruction to the bytecode of the method
+        if (opcode == Opcodes.SIPUSH) {
+            code.put12(opcode, operand);
+        } else { // BIPUSH or NEWARRAY
+            code.put11(opcode, operand);
+        }
+    }
+
+    public void visitVarInsn(final int opcode, final int var) {
+        if (computeMaxs) {
+            // updates current and max stack sizes
+            if (opcode == Opcodes.RET) {
+                // no stack change, but end of current block (no successor)
+                if (currentBlock != null) {
+                    currentBlock.maxStackSize = maxStackSize;
+                    currentBlock = null;
+                }
+            } else { // xLOAD or xSTORE
+                int size = stackSize + SIZE[opcode];
+                if (size > maxStackSize) {
+                    maxStackSize = size;
+                }
+                stackSize = size;
+            }
+            // updates max locals
+            int n;
+            if (opcode == Opcodes.LLOAD || opcode == Opcodes.DLOAD
+                    || opcode == Opcodes.LSTORE || opcode == Opcodes.DSTORE)
+            {
+                n = var + 2;
+            } else {
+                n = var + 1;
+            }
+            if (n > maxLocals) {
+                maxLocals = n;
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        if (var < 4 && opcode != Opcodes.RET) {
+            int opt;
+            if (opcode < Opcodes.ISTORE) {
+                /* ILOAD_0 */
+                opt = 26 + ((opcode - Opcodes.ILOAD) << 2) + var;
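+                // e.g., ALOAD (opcode 25) of var 0 gives
+                // 26 + ((25 - 21) << 2) + 0 = 42, i.e., ALOAD_0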
+            } else {
+                /* ISTORE_0 */
+                opt = 59 + ((opcode - Opcodes.ISTORE) << 2) + var;
+            }
+            code.putByte(opt);
+        } else if (var >= 256) {
+            code.putByte(196 /* WIDE */).put12(opcode, var);
+        } else {
+            code.put11(opcode, var);
+        }
+    }
+
+    public void visitTypeInsn(final int opcode, final String desc) {
+        if (computeMaxs && opcode == Opcodes.NEW) {
+            // updates current and max stack sizes only if opcode == NEW
+            // (stack size variation = 0 for ANEWARRAY, CHECKCAST, INSTANCEOF)
+            int size = stackSize + 1;
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+        }
+        // adds the instruction to the bytecode of the method
+        code.put12(opcode, cw.newClass(desc));
+    }
+
+    public void visitFieldInsn(
+        final int opcode,
+        final String owner,
+        final String name,
+        final String desc)
+    {
+        if (computeMaxs) {
+            int size;
+            // computes the stack size variation
+            char c = desc.charAt(0);
+            switch (opcode) {
+                case Opcodes.GETSTATIC:
+                    size = stackSize + (c == 'D' || c == 'J' ? 2 : 1);
+                    break;
+                case Opcodes.PUTSTATIC:
+                    size = stackSize + (c == 'D' || c == 'J' ? -2 : -1);
+                    break;
+                case Opcodes.GETFIELD:
+                    size = stackSize + (c == 'D' || c == 'J' ? 1 : 0);
+                    break;
+                // case Constants.PUTFIELD:
+                default:
+                    size = stackSize + (c == 'D' || c == 'J' ? -3 : -2);
+                    break;
+            }
+            // updates current and max stack sizes
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+        }
+        // adds the instruction to the bytecode of the method
+        code.put12(opcode, cw.newField(owner, name, desc));
+    }
+
+    public void visitMethodInsn(
+        final int opcode,
+        final String owner,
+        final String name,
+        final String desc)
+    {
+        boolean itf = opcode == Opcodes.INVOKEINTERFACE;
+        Item i = cw.newMethodItem(owner, name, desc, itf);
+        int argSize = i.intVal;
+        if (computeMaxs) {
+            /*
+         * computes the stack size variation. In order not to recompute this
+         * variation several times for the same Item, we use the intVal field
+         * of this item to store it once it has been computed. More precisely,
+         * this intVal field stores the packed sizes of the arguments and of
+         * the return value corresponding to desc.
+             */
+            if (argSize == 0) {
+                // the above sizes have not been computed yet, so we compute
+                // them...
+                argSize = getArgumentsAndReturnSizes(desc);
+                // ... and we save them in order not to recompute them in the
+                // future
+                i.intVal = argSize;
+            }
+            int size;
+            if (opcode == Opcodes.INVOKESTATIC) {
+                size = stackSize - (argSize >> 2) + (argSize & 0x03) + 1;
+            } else {
+                size = stackSize - (argSize >> 2) + (argSize & 0x03);
+            }
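+            // e.g., a non static call with descriptor (IJ)D has
+            // argSize = (4 << 2) | 2 = 18, hence a stack size variation of
+            // -(18 >> 2) + (18 & 0x03) = -4 + 2 = -2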
+            // updates current and max stack sizes
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+        }
+        // adds the instruction to the bytecode of the method
+        if (itf) {
+            if (!computeMaxs) {
+                if (argSize == 0) {
+                    argSize = getArgumentsAndReturnSizes(desc);
+                    i.intVal = argSize;
+                }
+            }
+            code.put12(Opcodes.INVOKEINTERFACE, i.index).put11(argSize >> 2, 0);
+        } else {
+            code.put12(opcode, i.index);
+        }
+    }
+
+    public void visitJumpInsn(final int opcode, final Label label) {
+        if (computeMaxs) {
+            if (opcode == Opcodes.GOTO) {
+                // no stack change, but end of current block (with one new
+                // successor)
+                if (currentBlock != null) {
+                    currentBlock.maxStackSize = maxStackSize;
+                    addSuccessor(stackSize, label);
+                    currentBlock = null;
+                }
+            } else if (opcode == Opcodes.JSR) {
+                if (currentBlock != null) {
+                    addSuccessor(stackSize + 1, label);
+                }
+            } else {
+                // updates current stack size (max stack size unchanged because
+                // stack size variation always negative in this case)
+                stackSize += SIZE[opcode];
+                if (currentBlock != null) {
+                    addSuccessor(stackSize, label);
+                }
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        if (label.resolved && label.position - code.length < Short.MIN_VALUE) {
+            /*
+             * case of a backward jump with an offset < -32768. In this case we
+             * automatically replace GOTO with GOTO_W, JSR with JSR_W and IFxxx
+             * <l> with IFNOTxxx <l'> GOTO_W <l>, where IFNOTxxx is the
+             * "opposite" opcode of IFxxx (i.e., IFNE for IFEQ) and where <l'>
+             * designates the instruction just after the GOTO_W.
+             */
+            if (opcode == Opcodes.GOTO) {
+                code.putByte(200); // GOTO_W
+            } else if (opcode == Opcodes.JSR) {
+                code.putByte(201); // JSR_W
+            } else {
+                code.putByte(opcode <= 166
+                        ? ((opcode + 1) ^ 1) - 1
+                        : opcode ^ 1);
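+                // e.g., IFEQ (153) is replaced by its opposite IFNE (154),
+                // since ((153 + 1) ^ 1) - 1 = 154; likewise IFNULL (198) ^ 1
+                // gives IFNONNULL (199)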
+                code.putShort(8); // jump offset
+                code.putByte(200); // GOTO_W
+            }
+            label.put(this, code, code.length - 1, true);
+        } else {
+            /*
+             * case of a backward jump with an offset >= -32768, or of a forward
+             * jump with, of course, an unknown offset. In these cases we store
+             * the offset in 2 bytes (which will be increased in
+             * resizeInstructions, if needed).
+             */
+            code.putByte(opcode);
+            label.put(this, code, code.length - 1, false);
+        }
+    }
+
+    public void visitLabel(final Label label) {
+        if (computeMaxs) {
+            if (currentBlock != null) {
+                // ends current block (with one new successor)
+                currentBlock.maxStackSize = maxStackSize;
+                addSuccessor(stackSize, label);
+            }
+            // begins a new current block,
+            // resets the relative current and max stack sizes
+            currentBlock = label;
+            stackSize = 0;
+            maxStackSize = 0;
+        }
+        // resolves previous forward references to label, if any
+        resize |= label.resolve(this, code.length, code.data);
+    }
+
+    public void visitLdcInsn(final Object cst) {
+        Item i = cw.newConstItem(cst);
+        if (computeMaxs) {
+            int size;
+            // computes the stack size variation
+            if (i.type == ClassWriter.LONG || i.type == ClassWriter.DOUBLE) {
+                size = stackSize + 2;
+            } else {
+                size = stackSize + 1;
+            }
+            // updates current and max stack sizes
+            if (size > maxStackSize) {
+                maxStackSize = size;
+            }
+            stackSize = size;
+        }
+        // adds the instruction to the bytecode of the method
+        int index = i.index;
+        if (i.type == ClassWriter.LONG || i.type == ClassWriter.DOUBLE) {
+            code.put12(20 /* LDC2_W */, index);
+        } else if (index >= 256) {
+            code.put12(19 /* LDC_W */, index);
+        } else {
+            code.put11(Opcodes.LDC, index);
+        }
+    }
+
+    public void visitIincInsn(final int var, final int increment) {
+        if (computeMaxs) {
+            // updates max locals only (no stack change)
+            int n = var + 1;
+            if (n > maxLocals) {
+                maxLocals = n;
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        if ((var > 255) || (increment > 127) || (increment < -128)) {
+            code.putByte(196 /* WIDE */)
+                    .put12(Opcodes.IINC, var)
+                    .putShort(increment);
+        } else {
+            code.putByte(Opcodes.IINC).put11(var, increment);
+        }
+    }
+
+    public void visitTableSwitchInsn(
+        final int min,
+        final int max,
+        final Label dflt,
+        final Label[] labels)
+    {
+        if (computeMaxs) {
+            // updates current stack size (max stack size unchanged)
+            --stackSize;
+            // ends current block (with many new successors)
+            if (currentBlock != null) {
+                currentBlock.maxStackSize = maxStackSize;
+                addSuccessor(stackSize, dflt);
+                for (int i = 0; i < labels.length; ++i) {
+                    addSuccessor(stackSize, labels[i]);
+                }
+                currentBlock = null;
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        int source = code.length;
+        code.putByte(Opcodes.TABLESWITCH);
+        while (code.length % 4 != 0) {
+            code.putByte(0);
+        }
+        dflt.put(this, code, source, true);
+        code.putInt(min).putInt(max);
+        for (int i = 0; i < labels.length; ++i) {
+            labels[i].put(this, code, source, true);
+        }
+    }
+
+    public void visitLookupSwitchInsn(
+        final Label dflt,
+        final int[] keys,
+        final Label[] labels)
+    {
+        if (computeMaxs) {
+            // updates current stack size (max stack size unchanged)
+            --stackSize;
+            // ends current block (with many new successors)
+            if (currentBlock != null) {
+                currentBlock.maxStackSize = maxStackSize;
+                addSuccessor(stackSize, dflt);
+                for (int i = 0; i < labels.length; ++i) {
+                    addSuccessor(stackSize, labels[i]);
+                }
+                currentBlock = null;
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        int source = code.length;
+        code.putByte(Opcodes.LOOKUPSWITCH);
+        while (code.length % 4 != 0) {
+            code.putByte(0);
+        }
+        dflt.put(this, code, source, true);
+        code.putInt(labels.length);
+        for (int i = 0; i < labels.length; ++i) {
+            code.putInt(keys[i]);
+            labels[i].put(this, code, source, true);
+        }
+    }
+
+    public void visitMultiANewArrayInsn(final String desc, final int dims) {
+        if (computeMaxs) {
+            // updates current stack size (max stack size unchanged because
+            // stack size variation always negative or zero)
+            stackSize += 1 - dims;
+        }
+        // adds the instruction to the bytecode of the method
+        code.put12(Opcodes.MULTIANEWARRAY, cw.newClass(desc)).putByte(dims);
+    }
+
+    public void visitTryCatchBlock(
+        final Label start,
+        final Label end,
+        final Label handler,
+        final String type)
+    {
+        if (computeMaxs) {
+            // pushes handler block onto the stack of blocks to be visited
+            if (!handler.pushed) {
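+                // an exception handler begins with only the thrown exception
+                // on the stack, hence a begin stack size of 1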
+                handler.beginStackSize = 1;
+                handler.pushed = true;
+                handler.next = blockStack;
+                blockStack = handler;
+            }
+        }
+        ++catchCount;
+        Handler h = new Handler();
+        h.start = start;
+        h.end = end;
+        h.handler = handler;
+        h.desc = type;
+        h.type = type != null ? cw.newClass(type) : 0;
+        if (lastHandler == null) {
+            catchTable = h;
+        } else {
+            lastHandler.next = h;
+        }
+        lastHandler = h;
+    }
+
+    public void visitLocalVariable(
+        final String name,
+        final String desc,
+        final String signature,
+        final Label start,
+        final Label end,
+        final int index)
+    {
+        if (signature != null) {
+            if (localVarType == null) {
+                localVarType = new ByteVector();
+            }
+            ++localVarTypeCount;
+            localVarType.putShort(start.position)
+                    .putShort(end.position - start.position)
+                    .putShort(cw.newUTF8(name))
+                    .putShort(cw.newUTF8(signature))
+                    .putShort(index);
+        }
+        if (localVar == null) {
+            localVar = new ByteVector();
+        }
+        ++localVarCount;
+        localVar.putShort(start.position)
+                .putShort(end.position - start.position)
+                .putShort(cw.newUTF8(name))
+                .putShort(cw.newUTF8(desc))
+                .putShort(index);
+    }
+
+    public void visitLineNumber(final int line, final Label start) {
+        if (lineNumber == null) {
+            lineNumber = new ByteVector();
+        }
+        ++lineNumberCount;
+        lineNumber.putShort(start.position);
+        lineNumber.putShort(line);
+    }
+
+    public void visitMaxs(final int maxStack, final int maxLocals) {
+        if (computeMaxs) {
+            // true (non relative) max stack size
+            int max = 0;
+            /*
+             * control flow analysis algorithm: while the block stack is not
+             * empty, pop a block from this stack, update the max stack size,
+             * compute the true (non relative) begin stack size of the
+             * successors of this block, and push these successors onto the
+             * stack (unless they have already been pushed onto the stack).
+             * Note: by hypothesis, the {@link Label#beginStackSize} of the
+             * blocks in the block stack are the true (non relative) beginning
+             * stack sizes of these blocks.
+             */
+            Label stack = blockStack;
+            while (stack != null) {
+                // pops a block from the stack
+                Label l = stack;
+                stack = stack.next;
+                // computes the true (non relative) max stack size of this block
+                int start = l.beginStackSize;
+                int blockMax = start + l.maxStackSize;
+                // updates the global max stack size
+                if (blockMax > max) {
+                    max = blockMax;
+                }
+                // analyses the successors of the block
+                Edge b = l.successors;
+                while (b != null) {
+                    l = b.successor;
+                    // if this successor has not already been pushed onto the
+                    // stack...
+                    if (!l.pushed) {
+                        // computes the true beginning stack size of this
+                        // successor block
+                        l.beginStackSize = start + b.stackSize;
+                        // pushes this successor onto the stack
+                        l.pushed = true;
+                        l.next = stack;
+                        stack = l;
+                    }
+                    b = b.next;
+                }
+            }
+            this.maxStack = max;
+        } else {
+            this.maxStack = maxStack;
+            this.maxLocals = maxLocals;
+        }
+    }
+
+    public void visitEnd() {
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods: control flow analysis algorithm
+    // ------------------------------------------------------------------------
+
+    /**
+     * Computes the size of the arguments and of the return value of a method.
+     *
+     * @param desc the descriptor of a method.
+     * @return the size of the arguments of the method (plus one for the
+     *         implicit this argument), argSize, and the size of its return
+     *         value, retSize, packed into a single int i =
+     *         <tt>(argSize << 2) | retSize</tt> (argSize is therefore equal
+     *         to <tt>i >> 2</tt>, and retSize to <tt>i & 0x03</tt>).
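+     *         For example, for the descriptor <tt>(IJ)D</tt> this method
+     *         returns <tt>(4 << 2) | 2 = 18</tt>: one slot for the implicit
+     *         this argument, one for the int and two for the long, with a
+     *         return size of 2 for the double.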
+     */
+    private static int getArgumentsAndReturnSizes(final String desc) {
+        int n = 1;
+        int c = 1;
+        while (true) {
+            char car = desc.charAt(c++);
+            if (car == ')') {
+                car = desc.charAt(c);
+                return n << 2
+                        | (car == 'V' ? 0 : (car == 'D' || car == 'J' ? 2 : 1));
+            } else if (car == 'L') {
+                while (desc.charAt(c++) != ';') {
+                }
+                n += 1;
+            } else if (car == '[') {
+                while ((car = desc.charAt(c)) == '[') {
+                    ++c;
+                }
+                if (car == 'D' || car == 'J') {
+                    n -= 1;
+                }
+            } else if (car == 'D' || car == 'J') {
+                n += 2;
+            } else {
+                n += 1;
+            }
+        }
+    }
+
+    /**
+     * Adds a successor to the {@link #currentBlock currentBlock} block.
+     *
+     * @param stackSize the current (relative) stack size in the current block.
+     * @param successor the successor block to be added to the current block.
+     */
+    private void addSuccessor(final int stackSize, final Label successor) {
+        Edge b = new Edge();
+        // initializes the new Edge object...
+        b.stackSize = stackSize;
+        b.successor = successor;
+        // ...and adds it to the successor list of the currentBlock block
+        b.next = currentBlock.successors;
+        currentBlock.successors = b;
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods: dump bytecode array
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the size of the bytecode of this method.
+     *
+     * @return the size of the bytecode of this method.
+     */
+    final int getSize() {
+        if (classReaderOffset != 0) {
+            return 6 + classReaderLength;
+        }
+        if (resize) {
+            // replaces the temporary jump opcodes introduced by Label.resolve.
+            resizeInstructions(new int[0], new int[0], 0);
+        }
+        int size = 8;
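+        // 8 = access_flags (2) + name_index (2) + descriptor_index (2)
+        //   + attributes_count (2)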
+        if (code.length > 0) {
+            cw.newUTF8("Code");
+            size += 18 + code.length + 8 * catchCount;
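+            // 18 = attribute_name_index (2) + attribute_length (4)
+            //    + max_stack (2) + max_locals (2) + code_length (4)
+            //    + exception_table_length (2) + attributes_count (2);
+            //    each exception handler entry takes 8 bytes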
+            if (localVar != null) {
+                cw.newUTF8("LocalVariableTable");
+                size += 8 + localVar.length;
+            }
+            if (localVarType != null) {
+                cw.newUTF8("LocalVariableTypeTable");
+                size += 8 + localVarType.length;
+            }
+            if (lineNumber != null) {
+                cw.newUTF8("LineNumberTable");
+                size += 8 + lineNumber.length;
+            }
+            if (cattrs != null) {
+                size += cattrs.getSize(cw,
+                        code.data,
+                        code.length,
+                        maxStack,
+                        maxLocals);
+            }
+        }
+        if (exceptionCount > 0) {
+            cw.newUTF8("Exceptions");
+            size += 8 + 2 * exceptionCount;
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            cw.newUTF8("Synthetic");
+            size += 6;
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            cw.newUTF8("Deprecated");
+            size += 6;
+        }
+        if (cw.version == Opcodes.V1_4) {
+            if ((access & Opcodes.ACC_VARARGS) != 0) {
+                cw.newUTF8("Varargs");
+                size += 6;
+            }
+            if ((access & Opcodes.ACC_BRIDGE) != 0) {
+                cw.newUTF8("Bridge");
+                size += 6;
+            }
+        }
+        if (signature != null) {
+            cw.newUTF8("Signature");
+            cw.newUTF8(signature);
+            size += 8;
+        }
+        if (annd != null) {
+            cw.newUTF8("AnnotationDefault");
+            size += 6 + annd.length;
+        }
+        if (anns != null) {
+            cw.newUTF8("RuntimeVisibleAnnotations");
+            size += 8 + anns.getSize();
+        }
+        if (ianns != null) {
+            cw.newUTF8("RuntimeInvisibleAnnotations");
+            size += 8 + ianns.getSize();
+        }
+        if (panns != null) {
+            cw.newUTF8("RuntimeVisibleParameterAnnotations");
+            size += 7 + 2 * panns.length;
+            for (int i = panns.length - 1; i >= 0; --i) {
+                size += panns[i] == null ? 0 : panns[i].getSize();
+            }
+        }
+        if (ipanns != null) {
+            cw.newUTF8("RuntimeInvisibleParameterAnnotations");
+            size += 7 + 2 * ipanns.length;
+            for (int i = ipanns.length - 1; i >= 0; --i) {
+                size += ipanns[i] == null ? 0 : ipanns[i].getSize();
+            }
+        }
+        if (attrs != null) {
+            size += attrs.getSize(cw, null, 0, -1, -1);
+        }
+        return size;
+    }
+
+    /**
+     * Puts the bytecode of this method in the given byte vector.
+     *
+     * @param out the byte vector into which the bytecode of this method must be
+     *        copied.
+     */
+    final void put(final ByteVector out) {
+        out.putShort(access).putShort(name).putShort(desc);
+        if (classReaderOffset != 0) {
+            out.putByteArray(cw.cr.b, classReaderOffset, classReaderLength);
+            return;
+        }
+        int attributeCount = 0;
+        if (code.length > 0) {
+            ++attributeCount;
+        }
+        if (exceptionCount > 0) {
+            ++attributeCount;
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            ++attributeCount;
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            ++attributeCount;
+        }
+        if (cw.version == Opcodes.V1_4) {
+            if ((access & Opcodes.ACC_VARARGS) != 0) {
+                ++attributeCount;
+            }
+            if ((access & Opcodes.ACC_BRIDGE) != 0) {
+                ++attributeCount;
+            }
+        }
+        if (signature != null) {
+            ++attributeCount;
+        }
+        if (annd != null) {
+            ++attributeCount;
+        }
+        if (anns != null) {
+            ++attributeCount;
+        }
+        if (ianns != null) {
+            ++attributeCount;
+        }
+        if (panns != null) {
+            ++attributeCount;
+        }
+        if (ipanns != null) {
+            ++attributeCount;
+        }
+        if (attrs != null) {
+            attributeCount += attrs.getCount();
+        }
+        out.putShort(attributeCount);
+        if (code.length > 0) {
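+            // max_stack (2) + max_locals (2) + code_length (4) +
+            // exception_table_length (2) + attributes_count (2) = 12 bytes,
+            // plus the code itself and 8 bytes per exception handler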
+            int size = 12 + code.length + 8 * catchCount;
+            if (localVar != null) {
+                size += 8 + localVar.length;
+            }
+            if (localVarType != null) {
+                size += 8 + localVarType.length;
+            }
+            if (lineNumber != null) {
+                size += 8 + lineNumber.length;
+            }
+            if (cattrs != null) {
+                size += cattrs.getSize(cw,
+                        code.data,
+                        code.length,
+                        maxStack,
+                        maxLocals);
+            }
+            out.putShort(cw.newUTF8("Code")).putInt(size);
+            out.putShort(maxStack).putShort(maxLocals);
+            out.putInt(code.length).putByteArray(code.data, 0, code.length);
+            out.putShort(catchCount);
+            if (catchCount > 0) {
+                Handler h = catchTable;
+                while (h != null) {
+                    out.putShort(h.start.position)
+                            .putShort(h.end.position)
+                            .putShort(h.handler.position)
+                            .putShort(h.type);
+                    h = h.next;
+                }
+            }
+            attributeCount = 0;
+            if (localVar != null) {
+                ++attributeCount;
+            }
+            if (localVarType != null) {
+                ++attributeCount;
+            }
+            if (lineNumber != null) {
+                ++attributeCount;
+            }
+            if (cattrs != null) {
+                attributeCount += cattrs.getCount();
+            }
+            out.putShort(attributeCount);
+            if (localVar != null) {
+                out.putShort(cw.newUTF8("LocalVariableTable"));
+                out.putInt(localVar.length + 2).putShort(localVarCount);
+                out.putByteArray(localVar.data, 0, localVar.length);
+            }
+            if (localVarType != null) {
+                out.putShort(cw.newUTF8("LocalVariableTypeTable"));
+                out.putInt(localVarType.length + 2).putShort(localVarTypeCount);
+                out.putByteArray(localVarType.data, 0, localVarType.length);
+            }
+            if (lineNumber != null) {
+                out.putShort(cw.newUTF8("LineNumberTable"));
+                out.putInt(lineNumber.length + 2).putShort(lineNumberCount);
+                out.putByteArray(lineNumber.data, 0, lineNumber.length);
+            }
+            if (cattrs != null) {
+                cattrs.put(cw, code.data, code.length, maxLocals, maxStack, out);
+            }
+        }
+        if (exceptionCount > 0) {
+            out.putShort(cw.newUTF8("Exceptions"))
+                    .putInt(2 * exceptionCount + 2);
+            out.putShort(exceptionCount);
+            for (int i = 0; i < exceptionCount; ++i) {
+                out.putShort(exceptions[i]);
+            }
+        }
+        if ((access & Opcodes.ACC_SYNTHETIC) != 0
+                && (cw.version & 0xffff) < Opcodes.V1_5)
+        {
+            out.putShort(cw.newUTF8("Synthetic")).putInt(0);
+        }
+        if ((access & Opcodes.ACC_DEPRECATED) != 0) {
+            out.putShort(cw.newUTF8("Deprecated")).putInt(0);
+        }
+        if (cw.version == Opcodes.V1_4) {
+            if ((access & Opcodes.ACC_VARARGS) != 0) {
+                out.putShort(cw.newUTF8("Varargs")).putInt(0);
+            }
+            if ((access & Opcodes.ACC_BRIDGE) != 0) {
+                out.putShort(cw.newUTF8("Bridge")).putInt(0);
+            }
+        }
+        if (signature != null) {
+            out.putShort(cw.newUTF8("Signature"))
+                    .putInt(2)
+                    .putShort(cw.newUTF8(signature));
+        }
+        if (annd != null) {
+            out.putShort(cw.newUTF8("AnnotationDefault"));
+            out.putInt(annd.length);
+            out.putByteArray(annd.data, 0, annd.length);
+        }
+        if (anns != null) {
+            out.putShort(cw.newUTF8("RuntimeVisibleAnnotations"));
+            anns.put(out);
+        }
+        if (ianns != null) {
+            out.putShort(cw.newUTF8("RuntimeInvisibleAnnotations"));
+            ianns.put(out);
+        }
+        if (panns != null) {
+            out.putShort(cw.newUTF8("RuntimeVisibleParameterAnnotations"));
+            AnnotationWriter.put(panns, out);
+        }
+        if (ipanns != null) {
+            out.putShort(cw.newUTF8("RuntimeInvisibleParameterAnnotations"));
+            AnnotationWriter.put(ipanns, out);
+        }
+        if (attrs != null) {
+            attrs.put(cw, null, 0, -1, -1, out);
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Utility methods: instruction resizing (used to handle GOTO_W and JSR_W)
+    // ------------------------------------------------------------------------
+
+    /**
+     * Resizes the designated instructions, while keeping jump offsets and
+     * instruction addresses consistent. This may require resizing other
+     * existing instructions, or even introducing new instructions: for
+     * example, increasing the size of an instruction by 2 in the middle of a
+     * method can increase the offset of an IFEQ instruction from 32766 to
+     * 32768, in which case IFEQ 32766 must be replaced with IFNE 8 GOTO_W
+     * 32765. This, in turn, may require increasing the size of another jump
+     * instruction, and so on... All these operations are handled automatically
+     * by this method. <p> <i>This method must be called after the entire
+     * method being built has been visited</i>. In particular, the
+     * {@link Label Label} objects used to construct the method are no longer
+     * valid after this method has been called.
+     *
+     * @param indexes current positions of the instructions to be resized. Each
+     *        instruction must be designated by the index of its <i>last</i>
+     *        byte, plus one (or, in other words, by the index of the <i>first</i>
+     *        byte of the <i>next</i> instruction).
+     * @param sizes the number of bytes to be <i>added</i> to the above
+     *        instructions. More precisely, for each i &lt; <tt>len</tt>,
+     *        <tt>sizes</tt>[i] bytes will be added at the end of the
+     *        instruction designated by <tt>indexes</tt>[i] or, if
+     *        <tt>sizes</tt>[i] is negative, the <i>last</i> |<tt>sizes[i]</tt>|
+     *        bytes of the instruction will be removed (the instruction size
+     *        <i>must not</i> become negative or zero). The gaps introduced by
+     *        this method must be filled in "manually" in the
+     *        {@link #code code} byte vector.
+     * @param len the number of instructions to be resized. Must be smaller
+     *        than or equal to <tt>indexes</tt>.length and
+     *        <tt>sizes</tt>.length.
+     * @return the <tt>indexes</tt> array, which now contains the new
+     *         positions of the resized instructions (designated as above).
+     */
+    private int[] resizeInstructions(
+        final int[] indexes,
+        final int[] sizes,
+        final int len)
+    {
+        byte[] b = code.data; // bytecode of the method
+        int u, v, label; // indexes in b
+        int i, j; // loop indexes
+
+        /*
+         * 1st step: As explained above, resizing an instruction may require
+         * resizing another one, which may require resizing yet another one,
+         * and so on. The first step of the algorithm consists of finding all
+         * the instructions that need to be resized, without modifying the
+         * code. This is done with the following "fixed point" algorithm:
+         *
+         * Parse the code to find the jump instructions whose offset will need
+         * more than 2 bytes to be stored (the future offset is computed from
+         * the current offset and from the number of bytes that will be
+         * inserted or removed between the source and target instructions).
+         * For each such instruction, add an entry in (a copy of) the indexes
+         * and sizes arrays (if this has not already been done in a previous
+         * iteration!).
+         *
+         * If at least one entry has been added during the previous step, go
+         * back to the beginning, otherwise stop.
+         *
+         * In fact the real algorithm is complicated by the fact that the size
+         * of TABLESWITCH and LOOKUPSWITCH instructions depends on their
+         * position in the bytecode (because of padding). In order to ensure
+         * the convergence of the algorithm, the number of bytes to be added
+         * to or removed from these instructions is overestimated during the
+         * previous loop, and computed exactly only after the loop is finished
+         * (this requires another pass to parse the bytecode of the method).
+         */
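+        /*
+         * Illustrative example (not part of the original comment): if 30000
+         * bytes are inserted between a GOTO and its target, a GOTO whose
+         * current offset is 5000 gets a future offset of 35000, which no
+         * longer fits in a signed short. The GOTO is therefore marked in
+         * resize[] and a (u, 2) entry is added to reserve the 2 extra bytes
+         * of a GOTO_W; this insertion may in turn push other offsets out of
+         * range, hence the repeated passes.
+         */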
+        int[] allIndexes = new int[len]; // copy of indexes
+        int[] allSizes = new int[len]; // copy of sizes
+        boolean[] resize; // instructions to be resized
+        int newOffset; // future offset of a jump instruction
+
+        System.arraycopy(indexes, 0, allIndexes, 0, len);
+        System.arraycopy(sizes, 0, allSizes, 0, len);
+        resize = new boolean[code.length];
+
+        // 3 = loop again, 2 = loop ended, 1 = last pass, 0 = done
+        int state = 3;
+        do {
+            if (state == 3) {
+                state = 2;
+            }
+            u = 0;
+            while (u < b.length) {
+                int opcode = b[u] & 0xFF; // opcode of current instruction
+                int insert = 0; // bytes to be added after this instruction
+
+                switch (ClassWriter.TYPE[opcode]) {
+                    case ClassWriter.NOARG_INSN:
+                    case ClassWriter.IMPLVAR_INSN:
+                        u += 1;
+                        break;
+                    case ClassWriter.LABEL_INSN:
+                        if (opcode > 201) {
+                            // converts temporary opcodes 202 to 217, 218 and
+                            // 219 to IFEQ ... JSR (inclusive), IFNULL and
+                            // IFNONNULL
+                            opcode = opcode < 218 ? opcode - 49 : opcode - 20;
+                            label = u + readUnsignedShort(b, u + 1);
+                        } else {
+                            label = u + readShort(b, u + 1);
+                        }
+                        newOffset = getNewOffset(allIndexes, allSizes, u, label);
+                        if (newOffset < Short.MIN_VALUE
+                                || newOffset > Short.MAX_VALUE)
+                        {
+                            if (!resize[u]) {
+                                if (opcode == Opcodes.GOTO
+                                        || opcode == Opcodes.JSR)
+                                {
+                                    // two additional bytes will be required to
+                                    // replace this GOTO or JSR instruction with
+                                    // a GOTO_W or a JSR_W
+                                    insert = 2;
+                                } else {
+                                    // five additional bytes will be required to
+                                    // replace this IFxxx <l> instruction with
+                                    // IFNOTxxx <l'> GOTO_W <l>, where IFNOTxxx
+                                    // is the "opposite" opcode of IFxxx (i.e.,
+                                    // IFNE for IFEQ) and where <l'> designates
+                                    // the instruction just after the GOTO_W.
+                                    insert = 5;
+                                }
+                                resize[u] = true;
+                            }
+                        }
+                        u += 3;
+                        break;
+                    case ClassWriter.LABELW_INSN:
+                        u += 5;
+                        break;
+                    case ClassWriter.TABL_INSN:
+                        if (state == 1) {
+                            // true number of bytes to be added (or removed)
+                            // from this instruction = (future number of
+                            // padding bytes - current number of padding
+                            // bytes) - previously overestimated variation =
+                            // = ((3 - newOffset%4) - (3 - u%4)) - u%4
+                            // = (-newOffset%4 + u%4) - u%4
+                            // = -(newOffset & 3)
+                            newOffset = getNewOffset(allIndexes, allSizes, 0, u);
+                            insert = -(newOffset & 3);
+                        } else if (!resize[u]) {
+                            // overestimation of the number of bytes to be
+                            // added to this instruction = 3 - current number
+                            // of padding bytes = 3 - (3 - u%4) = u%4 = u & 3
+                            insert = u & 3;
+                            resize[u] = true;
+                        }
+                        // skips instruction
+                        u = u + 4 - (u & 3);
+                        u += 4 * (readInt(b, u + 8) - readInt(b, u + 4) + 1) + 12;
+                        break;
+                    case ClassWriter.LOOK_INSN:
+                        if (state == 1) {
+                            // like TABL_INSN
+                            newOffset = getNewOffset(allIndexes, allSizes, 0, u);
+                            insert = -(newOffset & 3);
+                        } else if (!resize[u]) {
+                            // like TABL_INSN
+                            insert = u & 3;
+                            resize[u] = true;
+                        }
+                        // skips instruction
+                        u = u + 4 - (u & 3);
+                        u += 8 * readInt(b, u + 4) + 8;
+                        break;
+                    case ClassWriter.WIDE_INSN:
+                        opcode = b[u + 1] & 0xFF;
+                        if (opcode == Opcodes.IINC) {
+                            u += 6;
+                        } else {
+                            u += 4;
+                        }
+                        break;
+                    case ClassWriter.VAR_INSN:
+                    case ClassWriter.SBYTE_INSN:
+                    case ClassWriter.LDC_INSN:
+                        u += 2;
+                        break;
+                    case ClassWriter.SHORT_INSN:
+                    case ClassWriter.LDCW_INSN:
+                    case ClassWriter.FIELDORMETH_INSN:
+                    case ClassWriter.TYPE_INSN:
+                    case ClassWriter.IINC_INSN:
+                        u += 3;
+                        break;
+                    case ClassWriter.ITFMETH_INSN:
+                        u += 5;
+                        break;
+                    // case ClassWriter.MANA_INSN:
+                    default:
+                        u += 4;
+                        break;
+                }
+                if (insert != 0) {
+                    // adds a new (u, insert) entry in the allIndexes and
+                    // allSizes arrays
+                    int[] newIndexes = new int[allIndexes.length + 1];
+                    int[] newSizes = new int[allSizes.length + 1];
+                    System.arraycopy(allIndexes,
+                            0,
+                            newIndexes,
+                            0,
+                            allIndexes.length);
+                    System.arraycopy(allSizes, 0, newSizes, 0, allSizes.length);
+                    newIndexes[allIndexes.length] = u;
+                    newSizes[allSizes.length] = insert;
+                    allIndexes = newIndexes;
+                    allSizes = newSizes;
+                    if (insert > 0) {
+                        state = 3;
+                    }
+                }
+            }
+            if (state < 3) {
+                --state;
+            }
+        } while (state != 0);
+
+        // 2nd step:
+        // copies the bytecode of the method into a new bytevector, updates the
+        // offsets, and inserts (or removes) bytes as requested.
+
+        ByteVector newCode = new ByteVector(code.length);
+
+        u = 0;
+        while (u < code.length) {
+            for (i = allIndexes.length - 1; i >= 0; --i) {
+                if (allIndexes[i] == u) {
+                    if (i < len) {
+                        if (sizes[i] > 0) {
+                            newCode.putByteArray(null, 0, sizes[i]);
+                        } else {
+                            newCode.length += sizes[i];
+                        }
+                        indexes[i] = newCode.length;
+                    }
+                }
+            }
+            int opcode = b[u] & 0xFF;
+            switch (ClassWriter.TYPE[opcode]) {
+                case ClassWriter.NOARG_INSN:
+                case ClassWriter.IMPLVAR_INSN:
+                    newCode.putByte(opcode);
+                    u += 1;
+                    break;
+                case ClassWriter.LABEL_INSN:
+                    if (opcode > 201) {
+                        // changes temporary opcodes 202 to 217 (inclusive), 218
+                        // and 219 to IFEQ ... JSR (inclusive), IFNULL and
+                        // IFNONNULL
+                        opcode = opcode < 218 ? opcode - 49 : opcode - 20;
+                        label = u + readUnsignedShort(b, u + 1);
+                    } else {
+                        label = u + readShort(b, u + 1);
+                    }
+                    newOffset = getNewOffset(allIndexes, allSizes, u, label);
+                    if (resize[u]) {
+                        // replaces GOTO with GOTO_W, JSR with JSR_W and IFxxx
+                        // <l> with IFNOTxxx <l'> GOTO_W <l>, where IFNOTxxx is
+                        // the "opposite" opcode of IFxxx (i.e., IFNE for IFEQ)
+                        // and where <l'> designates the instruction just after
+                        // the GOTO_W.
+                        if (opcode == Opcodes.GOTO) {
+                            newCode.putByte(200); // GOTO_W
+                        } else if (opcode == Opcodes.JSR) {
+                            newCode.putByte(201); // JSR_W
+                        } else {
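+                            // ((opcode + 1) ^ 1) - 1 flips the lowest bit of
+                            // opcode + 1, mapping e.g. IFEQ (153) to IFNE
+                            // (154) and IF_ICMPLT (161) to IF_ICMPGE (162);
+                            // for opcodes above 166, opcode ^ 1 maps IFNULL
+                            // (198) to IFNONNULL (199)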
+                            newCode.putByte(opcode <= 166
+                                    ? ((opcode + 1) ^ 1) - 1
+                                    : opcode ^ 1);
+                            newCode.putShort(8); // jump offset
+                            newCode.putByte(200); // GOTO_W
+                            // newOffset now computed from start of GOTO_W
+                            newOffset -= 3;
+                        }
+                        newCode.putInt(newOffset);
+                    } else {
+                        newCode.putByte(opcode);
+                        newCode.putShort(newOffset);
+                    }
+                    u += 3;
+                    break;
+                case ClassWriter.LABELW_INSN:
+                    label = u + readInt(b, u + 1);
+                    newOffset = getNewOffset(allIndexes, allSizes, u, label);
+                    newCode.putByte(opcode);
+                    newCode.putInt(newOffset);
+                    u += 5;
+                    break;
+                case ClassWriter.TABL_INSN:
+                    // skips 0 to 3 padding bytes
+                    v = u;
+                    u = u + 4 - (v & 3);
+                    // reads and copies instruction
+                    newCode.putByte(Opcodes.TABLESWITCH);
+                    while (newCode.length % 4 != 0) {
+                        newCode.putByte(0);
+                    }
+                    label = v + readInt(b, u);
+                    u += 4;
+                    newOffset = getNewOffset(allIndexes, allSizes, v, label);
+                    newCode.putInt(newOffset);
+                    j = readInt(b, u);
+                    u += 4;
+                    newCode.putInt(j);
+                    j = readInt(b, u) - j + 1;
+                    u += 4;
+                    newCode.putInt(readInt(b, u - 4));
+                    for (; j > 0; --j) {
+                        label = v + readInt(b, u);
+                        u += 4;
+                        newOffset = getNewOffset(allIndexes, allSizes, v, label);
+                        newCode.putInt(newOffset);
+                    }
+                    break;
+                case ClassWriter.LOOK_INSN:
+                    // skips 0 to 3 padding bytes
+                    v = u;
+                    u = u + 4 - (v & 3);
+                    // reads and copies instruction
+                    newCode.putByte(Opcodes.LOOKUPSWITCH);
+                    while (newCode.length % 4 != 0) {
+                        newCode.putByte(0);
+                    }
+                    label = v + readInt(b, u);
+                    u += 4;
+                    newOffset = getNewOffset(allIndexes, allSizes, v, label);
+                    newCode.putInt(newOffset);
+                    j = readInt(b, u);
+                    u += 4;
+                    newCode.putInt(j);
+                    for (; j > 0; --j) {
+                        newCode.putInt(readInt(b, u));
+                        u += 4;
+                        label = v + readInt(b, u);
+                        u += 4;
+                        newOffset = getNewOffset(allIndexes, allSizes, v, label);
+                        newCode.putInt(newOffset);
+                    }
+                    break;
+                case ClassWriter.WIDE_INSN:
+                    opcode = b[u + 1] & 0xFF;
+                    if (opcode == Opcodes.IINC) {
+                        newCode.putByteArray(b, u, 6);
+                        u += 6;
+                    } else {
+                        newCode.putByteArray(b, u, 4);
+                        u += 4;
+                    }
+                    break;
+                case ClassWriter.VAR_INSN:
+                case ClassWriter.SBYTE_INSN:
+                case ClassWriter.LDC_INSN:
+                    newCode.putByteArray(b, u, 2);
+                    u += 2;
+                    break;
+                case ClassWriter.SHORT_INSN:
+                case ClassWriter.LDCW_INSN:
+                case ClassWriter.FIELDORMETH_INSN:
+                case ClassWriter.TYPE_INSN:
+                case ClassWriter.IINC_INSN:
+                    newCode.putByteArray(b, u, 3);
+                    u += 3;
+                    break;
+                case ClassWriter.ITFMETH_INSN:
+                    newCode.putByteArray(b, u, 5);
+                    u += 5;
+                    break;
+                // case MANA_INSN:
+                default:
+                    newCode.putByteArray(b, u, 4);
+                    u += 4;
+                    break;
+            }
+        }
+
+        // updates the exception handler block labels
+        Handler h = catchTable;
+        while (h != null) {
+            getNewOffset(allIndexes, allSizes, h.start);
+            getNewOffset(allIndexes, allSizes, h.end);
+            getNewOffset(allIndexes, allSizes, h.handler);
+            h = h.next;
+        }
+        for (i = 0; i < 2; ++i) {
+            ByteVector bv = i == 0 ? localVar : localVarType;
+            if (bv != null) {
+                b = bv.data;
+                u = 0;
+                while (u < bv.length) {
+                    label = readUnsignedShort(b, u);
+                    newOffset = getNewOffset(allIndexes, allSizes, 0, label);
+                    writeShort(b, u, newOffset);
+                    label += readUnsignedShort(b, u + 2);
+                    newOffset = getNewOffset(allIndexes, allSizes, 0, label)
+                            - newOffset;
+                    writeShort(b, u + 2, newOffset);
+                    u += 10;
+                }
+            }
+        }
+        if (lineNumber != null) {
+            b = lineNumber.data;
+            u = 0;
+            while (u < lineNumber.length) {
+                writeShort(b, u, getNewOffset(allIndexes,
+                        allSizes,
+                        0,
+                        readUnsignedShort(b, u)));
+                u += 4;
+            }
+        }
+        // updates the labels of the other attributes; a local cursor is used
+        // so that the cattrs list itself is preserved for put()
+        Attribute attr = cattrs;
+        while (attr != null) {
+            Label[] labels = attr.getLabels();
+            if (labels != null) {
+                for (i = labels.length - 1; i >= 0; --i) {
+                    getNewOffset(allIndexes, allSizes, labels[i]);
+                }
+            }
+            attr = attr.next;
+        }
+
+        // replaces old bytecodes with new ones
+        code = newCode;
+
+        // returns the positions of the resized instructions
+        return indexes;
+    }
+
+    /**
+     * Reads an unsigned short value in the given byte array.
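+     * Values are read in big-endian order, as used throughout the class file
+     * format; for example (illustrative), for the two bytes {0x12, 0x34} this
+     * method returns 0x1234.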
+     *
+     * @param b a byte array.
+     * @param index the start index of the value to be read.
+     * @return the read value.
+     */
+    static int readUnsignedShort(final byte[] b, final int index) {
+        return ((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF);
+    }
+
+    /**
+     * Reads a signed short value in the given byte array.
+     *
+     * @param b a byte array.
+     * @param index the start index of the value to be read.
+     * @return the read value.
+     */
+    static short readShort(final byte[] b, final int index) {
+        return (short) (((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF));
+    }
+
+    /**
+     * Reads a signed int value in the given byte array.
+     *
+     * @param b a byte array.
+     * @param index the start index of the value to be read.
+     * @return the read value.
+     */
+    static int readInt(final byte[] b, final int index) {
+        return ((b[index] & 0xFF) << 24) | ((b[index + 1] & 0xFF) << 16)
+                | ((b[index + 2] & 0xFF) << 8) | (b[index + 3] & 0xFF);
+    }
+
+    /**
+     * Writes a short value in the given byte array.
+     *
+     * @param b a byte array.
+     * @param index where the first byte of the short value must be written.
+     * @param s the value to be written in the given byte array.
+     */
+    static void writeShort(final byte[] b, final int index, final int s) {
+        b[index] = (byte) (s >>> 8);
+        b[index + 1] = (byte) s;
+    }
+
+    /**
+     * Computes the future value of a bytecode offset. <p> Note: it is possible
+     * to have several entries for the same instruction in the <tt>indexes</tt>
+     * and <tt>sizes</tt> arrays: two entries (index=a,size=b) and
+     * (index=a,size=b') are equivalent to a single entry (index=a,size=b+b').
+     *
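+     * <p> A small worked example (illustrative): with <tt>indexes</tt>={10}
+     * and <tt>sizes</tt>={2}, a forward jump from <tt>begin</tt>=5 to
+     * <tt>end</tt>=20 has its offset grow from 15 to 17, because the 2
+     * inserted bytes lie between the source and target instructions.
+     *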
+     * @param indexes current positions of the instructions to be resized. Each
+     *        instruction must be designated by the index of its <i>last</i>
+     *        byte, plus one (or, in other words, by the index of the <i>first</i>
+     *        byte of the <i>next</i> instruction).
+     * @param sizes the number of bytes to be <i>added</i> to the above
+     *        instructions. More precisely, for each i &lt;
+     *        <tt>indexes</tt>.length, <tt>sizes</tt>[i] bytes will be added at
+     *        the end of the instruction designated by <tt>indexes</tt>[i] or,
+     *        if <tt>sizes</tt>[i] is negative, the <i>last</i>
+     *        |<tt>sizes[i]</tt>| bytes of the instruction will be removed (the
+     *        instruction size <i>must not</i> become negative or zero).
+     * @param begin index of the first byte of the source instruction.
+     * @param end index of the first byte of the target instruction.
+     * @return the future value of the given bytecode offset.
+     */
+    static int getNewOffset(
+        final int[] indexes,
+        final int[] sizes,
+        final int begin,
+        final int end)
+    {
+        int offset = end - begin;
+        for (int i = 0; i < indexes.length; ++i) {
+            if (begin < indexes[i] && indexes[i] <= end) {
+                // forward jump
+                offset += sizes[i];
+            } else if (end < indexes[i] && indexes[i] <= begin) {
+                // backward jump
+                offset -= sizes[i];
+            }
+        }
+        return offset;
+    }
+
+    /**
+     * Updates the offset of the given label.
+     *
+     * @param indexes current positions of the instructions to be resized. Each
+     *        instruction must be designated by the index of its <i>last</i>
+     *        byte, plus one (or, in other words, by the index of the <i>first</i>
+     *        byte of the <i>next</i> instruction).
+     * @param sizes the number of bytes to be <i>added</i> to the above
+     *        instructions. More precisely, for each i &lt;
+     *        <tt>indexes</tt>.length, <tt>sizes</tt>[i] bytes will be added at
+     *        the end of the instruction designated by <tt>indexes</tt>[i] or,
+     *        if <tt>sizes</tt>[i] is negative, the <i>last</i>
+     *        |<tt>sizes[i]</tt>| bytes of the instruction will be removed (the
+     *        instruction size <i>must not</i> become negative or zero).
+     * @param label the label whose offset must be updated.
+     */
+    static void getNewOffset(
+        final int[] indexes,
+        final int[] sizes,
+        final Label label)
+    {
+        if (!label.resized) {
+            label.position = getNewOffset(indexes, sizes, 0, label.position);
+            label.resized = true;
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/Opcodes.java b/src/com/sleepycat/asm/Opcodes.java
new file mode 100644
index 0000000000000000000000000000000000000000..fd3e3d3357ed9dcd5a51c47fb13be7d9c75d7d96
--- /dev/null
+++ b/src/com/sleepycat/asm/Opcodes.java
@@ -0,0 +1,295 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * Defines the JVM opcodes, access flags and array type codes. This interface
+ * does not define all the JVM opcodes because some opcodes are automatically
+ * handled. For example, the xLOAD and xSTORE opcodes are automatically replaced
+ * by xLOAD_n and xSTORE_n opcodes when possible. The xLOAD_n and xSTORE_n
+ * opcodes are therefore not defined in this interface. The same applies to
+ * LDC, which is automatically replaced by LDC_W or LDC2_W when necessary, and
+ * to WIDE, GOTO_W and JSR_W.
+ *
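+ * <p> Illustrative sketch (assuming the {@link MethodVisitor} API of this
+ * package): these constants are passed to the visit methods noted in the
+ * comments below, e.g. for a <tt>MethodVisitor mv</tt>:
+ *
+ * <pre>
+ * mv.visitVarInsn(Opcodes.ILOAD, 1); // push local variable 1
+ * mv.visitInsn(Opcodes.IRETURN);     // and return it
+ * </pre>
+ *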
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+public interface Opcodes {
+
+    // versions
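+    // (encoded as minor_version << 16 | major_version, as in the class file
+    // format; e.g. V1_1 has minor version 3 and major version 45)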
+
+    int V1_1 = 3 << 16 | 45;
+    int V1_2 = 0 << 16 | 46;
+    int V1_3 = 0 << 16 | 47;
+    int V1_4 = 0 << 16 | 48;
+    int V1_5 = 0 << 16 | 49;
+    int V1_6 = 0 << 16 | 50;
+
+    // access flags
+
+    int ACC_PUBLIC = 0x0001; // class, field, method
+    int ACC_PRIVATE = 0x0002; // class, field, method
+    int ACC_PROTECTED = 0x0004; // class, field, method
+    int ACC_STATIC = 0x0008; // field, method
+    int ACC_FINAL = 0x0010; // class, field, method
+    int ACC_SUPER = 0x0020; // class
+    int ACC_SYNCHRONIZED = 0x0020; // method
+    int ACC_VOLATILE = 0x0040; // field
+    int ACC_BRIDGE = 0x0040; // method
+    int ACC_VARARGS = 0x0080; // method
+    int ACC_TRANSIENT = 0x0080; // field
+    int ACC_NATIVE = 0x0100; // method
+    int ACC_INTERFACE = 0x0200; // class
+    int ACC_ABSTRACT = 0x0400; // class, method
+    int ACC_STRICT = 0x0800; // method
+    int ACC_SYNTHETIC = 0x1000; // class, field, method
+    int ACC_ANNOTATION = 0x2000; // class
+    int ACC_ENUM = 0x4000; // class(?), field, inner class
+
+    // ASM specific pseudo access flags
+
+    int ACC_DEPRECATED = 0x20000; // class, field, method
+
+    // types for NEWARRAY
+
+    int T_BOOLEAN = 4;
+    int T_CHAR = 5;
+    int T_FLOAT = 6;
+    int T_DOUBLE = 7;
+    int T_BYTE = 8;
+    int T_SHORT = 9;
+    int T_INT = 10;
+    int T_LONG = 11;
+
+    // opcodes // visit method (- = idem)
+
+    int NOP = 0; // visitInsn
+    int ACONST_NULL = 1; // -
+    int ICONST_M1 = 2; // -
+    int ICONST_0 = 3; // -
+    int ICONST_1 = 4; // -
+    int ICONST_2 = 5; // -
+    int ICONST_3 = 6; // -
+    int ICONST_4 = 7; // -
+    int ICONST_5 = 8; // -
+    int LCONST_0 = 9; // -
+    int LCONST_1 = 10; // -
+    int FCONST_0 = 11; // -
+    int FCONST_1 = 12; // -
+    int FCONST_2 = 13; // -
+    int DCONST_0 = 14; // -
+    int DCONST_1 = 15; // -
+    int BIPUSH = 16; // visitIntInsn
+    int SIPUSH = 17; // -
+    int LDC = 18; // visitLdcInsn
+    // int LDC_W = 19; // -
+    // int LDC2_W = 20; // -
+    int ILOAD = 21; // visitVarInsn
+    int LLOAD = 22; // -
+    int FLOAD = 23; // -
+    int DLOAD = 24; // -
+    int ALOAD = 25; // -
+    // int ILOAD_0 = 26; // -
+    // int ILOAD_1 = 27; // -
+    // int ILOAD_2 = 28; // -
+    // int ILOAD_3 = 29; // -
+    // int LLOAD_0 = 30; // -
+    // int LLOAD_1 = 31; // -
+    // int LLOAD_2 = 32; // -
+    // int LLOAD_3 = 33; // -
+    // int FLOAD_0 = 34; // -
+    // int FLOAD_1 = 35; // -
+    // int FLOAD_2 = 36; // -
+    // int FLOAD_3 = 37; // -
+    // int DLOAD_0 = 38; // -
+    // int DLOAD_1 = 39; // -
+    // int DLOAD_2 = 40; // -
+    // int DLOAD_3 = 41; // -
+    // int ALOAD_0 = 42; // -
+    // int ALOAD_1 = 43; // -
+    // int ALOAD_2 = 44; // -
+    // int ALOAD_3 = 45; // -
+    int IALOAD = 46; // visitInsn
+    int LALOAD = 47; // -
+    int FALOAD = 48; // -
+    int DALOAD = 49; // -
+    int AALOAD = 50; // -
+    int BALOAD = 51; // -
+    int CALOAD = 52; // -
+    int SALOAD = 53; // -
+    int ISTORE = 54; // visitVarInsn
+    int LSTORE = 55; // -
+    int FSTORE = 56; // -
+    int DSTORE = 57; // -
+    int ASTORE = 58; // -
+    // int ISTORE_0 = 59; // -
+    // int ISTORE_1 = 60; // -
+    // int ISTORE_2 = 61; // -
+    // int ISTORE_3 = 62; // -
+    // int LSTORE_0 = 63; // -
+    // int LSTORE_1 = 64; // -
+    // int LSTORE_2 = 65; // -
+    // int LSTORE_3 = 66; // -
+    // int FSTORE_0 = 67; // -
+    // int FSTORE_1 = 68; // -
+    // int FSTORE_2 = 69; // -
+    // int FSTORE_3 = 70; // -
+    // int DSTORE_0 = 71; // -
+    // int DSTORE_1 = 72; // -
+    // int DSTORE_2 = 73; // -
+    // int DSTORE_3 = 74; // -
+    // int ASTORE_0 = 75; // -
+    // int ASTORE_1 = 76; // -
+    // int ASTORE_2 = 77; // -
+    // int ASTORE_3 = 78; // -
+    int IASTORE = 79; // visitInsn
+    int LASTORE = 80; // -
+    int FASTORE = 81; // -
+    int DASTORE = 82; // -
+    int AASTORE = 83; // -
+    int BASTORE = 84; // -
+    int CASTORE = 85; // -
+    int SASTORE = 86; // -
+    int POP = 87; // -
+    int POP2 = 88; // -
+    int DUP = 89; // -
+    int DUP_X1 = 90; // -
+    int DUP_X2 = 91; // -
+    int DUP2 = 92; // -
+    int DUP2_X1 = 93; // -
+    int DUP2_X2 = 94; // -
+    int SWAP = 95; // -
+    int IADD = 96; // -
+    int LADD = 97; // -
+    int FADD = 98; // -
+    int DADD = 99; // -
+    int ISUB = 100; // -
+    int LSUB = 101; // -
+    int FSUB = 102; // -
+    int DSUB = 103; // -
+    int IMUL = 104; // -
+    int LMUL = 105; // -
+    int FMUL = 106; // -
+    int DMUL = 107; // -
+    int IDIV = 108; // -
+    int LDIV = 109; // -
+    int FDIV = 110; // -
+    int DDIV = 111; // -
+    int IREM = 112; // -
+    int LREM = 113; // -
+    int FREM = 114; // -
+    int DREM = 115; // -
+    int INEG = 116; // -
+    int LNEG = 117; // -
+    int FNEG = 118; // -
+    int DNEG = 119; // -
+    int ISHL = 120; // -
+    int LSHL = 121; // -
+    int ISHR = 122; // -
+    int LSHR = 123; // -
+    int IUSHR = 124; // -
+    int LUSHR = 125; // -
+    int IAND = 126; // -
+    int LAND = 127; // -
+    int IOR = 128; // -
+    int LOR = 129; // -
+    int IXOR = 130; // -
+    int LXOR = 131; // -
+    int IINC = 132; // visitIincInsn
+    int I2L = 133; // visitInsn
+    int I2F = 134; // -
+    int I2D = 135; // -
+    int L2I = 136; // -
+    int L2F = 137; // -
+    int L2D = 138; // -
+    int F2I = 139; // -
+    int F2L = 140; // -
+    int F2D = 141; // -
+    int D2I = 142; // -
+    int D2L = 143; // -
+    int D2F = 144; // -
+    int I2B = 145; // -
+    int I2C = 146; // -
+    int I2S = 147; // -
+    int LCMP = 148; // -
+    int FCMPL = 149; // -
+    int FCMPG = 150; // -
+    int DCMPL = 151; // -
+    int DCMPG = 152; // -
+    int IFEQ = 153; // visitJumpInsn
+    int IFNE = 154; // -
+    int IFLT = 155; // -
+    int IFGE = 156; // -
+    int IFGT = 157; // -
+    int IFLE = 158; // -
+    int IF_ICMPEQ = 159; // -
+    int IF_ICMPNE = 160; // -
+    int IF_ICMPLT = 161; // -
+    int IF_ICMPGE = 162; // -
+    int IF_ICMPGT = 163; // -
+    int IF_ICMPLE = 164; // -
+    int IF_ACMPEQ = 165; // -
+    int IF_ACMPNE = 166; // -
+    int GOTO = 167; // -
+    int JSR = 168; // -
+    int RET = 169; // visitVarInsn
+    int TABLESWITCH = 170; // visitTableSwitchInsn
+    int LOOKUPSWITCH = 171; // visitLookupSwitchInsn
+    int IRETURN = 172; // visitInsn
+    int LRETURN = 173; // -
+    int FRETURN = 174; // -
+    int DRETURN = 175; // -
+    int ARETURN = 176; // -
+    int RETURN = 177; // -
+    int GETSTATIC = 178; // visitFieldInsn
+    int PUTSTATIC = 179; // -
+    int GETFIELD = 180; // -
+    int PUTFIELD = 181; // -
+    int INVOKEVIRTUAL = 182; // visitMethodInsn
+    int INVOKESPECIAL = 183; // -
+    int INVOKESTATIC = 184; // -
+    int INVOKEINTERFACE = 185; // -
+    // int UNUSED = 186; // NOT VISITED
+    int NEW = 187; // visitTypeInsn
+    int NEWARRAY = 188; // visitIntInsn
+    int ANEWARRAY = 189; // visitTypeInsn
+    int ARRAYLENGTH = 190; // visitInsn
+    int ATHROW = 191; // -
+    int CHECKCAST = 192; // visitTypeInsn
+    int INSTANCEOF = 193; // -
+    int MONITORENTER = 194; // visitInsn
+    int MONITOREXIT = 195; // -
+    // int WIDE = 196; // NOT VISITED
+    int MULTIANEWARRAY = 197; // visitMultiANewArrayInsn
+    int IFNULL = 198; // visitJumpInsn
+    int IFNONNULL = 199; // -
+    // int GOTO_W = 200; // -
+    // int JSR_W = 201; // -
+}
diff --git a/src/com/sleepycat/asm/Type.java b/src/com/sleepycat/asm/Type.java
new file mode 100644
index 0000000000000000000000000000000000000000..09c2cfc19ffb630bbf0547b5cbeb719eb11bcea9
--- /dev/null
+++ b/src/com/sleepycat/asm/Type.java
@@ -0,0 +1,760 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2005 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+import java.lang.reflect.Method;
+
+/**
+ * A Java type. This class can be used to make it easier to manipulate type and
+ * method descriptors.
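+ *
+ * <p> Illustrative sketch: a descriptor such as <tt>"Ljava/lang/String;"</tt>
+ * can be turned into a <tt>Type</tt> with {@link #getType(String) getType}
+ * and then queried with {@link #getSort getSort} or
+ * {@link #getClassName getClassName}.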
+ *
+ * @author Eric Bruneton
+ * @author Chris Nokleberg
+ */
+public class Type {
+
+    /**
+     * The sort of the <tt>void</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int VOID = 0;
+
+    /**
+     * The sort of the <tt>boolean</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int BOOLEAN = 1;
+
+    /**
+     * The sort of the <tt>char</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int CHAR = 2;
+
+    /**
+     * The sort of the <tt>byte</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int BYTE = 3;
+
+    /**
+     * The sort of the <tt>short</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int SHORT = 4;
+
+    /**
+     * The sort of the <tt>int</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int INT = 5;
+
+    /**
+     * The sort of the <tt>float</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int FLOAT = 6;
+
+    /**
+     * The sort of the <tt>long</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int LONG = 7;
+
+    /**
+     * The sort of the <tt>double</tt> type. See {@link #getSort getSort}.
+     */
+    public final static int DOUBLE = 8;
+
+    /**
+     * The sort of array reference types. See {@link #getSort getSort}.
+     */
+    public final static int ARRAY = 9;
+
+    /**
+     * The sort of object reference types. See {@link #getSort getSort}.
+     */
+    public final static int OBJECT = 10;
+
+    /**
+     * The <tt>void</tt> type.
+     */
+    public final static Type VOID_TYPE = new Type(VOID);
+
+    /**
+     * The <tt>boolean</tt> type.
+     */
+    public final static Type BOOLEAN_TYPE = new Type(BOOLEAN);
+
+    /**
+     * The <tt>char</tt> type.
+     */
+    public final static Type CHAR_TYPE = new Type(CHAR);
+
+    /**
+     * The <tt>byte</tt> type.
+     */
+    public final static Type BYTE_TYPE = new Type(BYTE);
+
+    /**
+     * The <tt>short</tt> type.
+     */
+    public final static Type SHORT_TYPE = new Type(SHORT);
+
+    /**
+     * The <tt>int</tt> type.
+     */
+    public final static Type INT_TYPE = new Type(INT);
+
+    /**
+     * The <tt>float</tt> type.
+     */
+    public final static Type FLOAT_TYPE = new Type(FLOAT);
+
+    /**
+     * The <tt>long</tt> type.
+     */
+    public final static Type LONG_TYPE = new Type(LONG);
+
+    /**
+     * The <tt>double</tt> type.
+     */
+    public final static Type DOUBLE_TYPE = new Type(DOUBLE);
+
+    // ------------------------------------------------------------------------
+    // Fields
+    // ------------------------------------------------------------------------
+
+    /**
+     * The sort of this Java type.
+     */
+    private final int sort;
+
+    /**
+     * A buffer containing the descriptor of this Java type. This field is only
+     * used for reference types.
+     */
+    private char[] buf;
+
+    /**
+     * The offset of the descriptor of this Java type in {@link #buf buf}. This
+     * field is only used for reference types.
+     */
+    private int off;
+
+    /**
+     * The length of the descriptor of this Java type.
+     */
+    private int len;
+
+    // ------------------------------------------------------------------------
+    // Constructors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Constructs a primitive type.
+     *
+     * @param sort the sort of the primitive type to be constructed.
+     */
+    private Type(final int sort) {
+        this.sort = sort;
+        this.len = 1;
+    }
+
+    /**
+     * Constructs a reference type.
+     *
+     * @param sort the sort of the reference type to be constructed.
+     * @param buf a buffer containing the descriptor of the type to be
+     *        constructed.
+     * @param off the offset of this descriptor in <tt>buf</tt>.
+     * @param len the length of this descriptor.
+     */
+    private Type(final int sort, final char[] buf, final int off, final int len)
+    {
+        this.sort = sort;
+        this.buf = buf;
+        this.off = off;
+        this.len = len;
+    }
+
+    /**
+     * Returns the Java type corresponding to the given type descriptor.
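+     * For example (illustrative), <tt>getType("Ljava/lang/String;")</tt>
+     * returns an {@link #OBJECT OBJECT} type whose class name is
+     * <tt>java.lang.String</tt>, and <tt>getType("[I")</tt> returns an
+     * {@link #ARRAY ARRAY} type with <tt>int</tt> elements.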
+     *
+     * @param typeDescriptor a type descriptor.
+     * @return the Java type corresponding to the given type descriptor.
+     */
+    public static Type getType(final String typeDescriptor) {
+        return getType(typeDescriptor.toCharArray(), 0);
+    }
+
+    /**
+     * Returns the Java type corresponding to the given class.
+     *
+     * @param c a class.
+     * @return the Java type corresponding to the given class.
+     */
+    public static Type getType(final Class c) {
+        if (c.isPrimitive()) {
+            if (c == Integer.TYPE) {
+                return INT_TYPE;
+            } else if (c == Void.TYPE) {
+                return VOID_TYPE;
+            } else if (c == Boolean.TYPE) {
+                return BOOLEAN_TYPE;
+            } else if (c == Byte.TYPE) {
+                return BYTE_TYPE;
+            } else if (c == Character.TYPE) {
+                return CHAR_TYPE;
+            } else if (c == Short.TYPE) {
+                return SHORT_TYPE;
+            } else if (c == Double.TYPE) {
+                return DOUBLE_TYPE;
+            } else if (c == Float.TYPE) {
+                return FLOAT_TYPE;
+            } else /* if (c == Long.TYPE) */ {
+                return LONG_TYPE;
+            }
+        } else {
+            return getType(getDescriptor(c));
+        }
+    }
+
+    /**
+     * Returns the Java types corresponding to the argument types of the given
+     * method descriptor.
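+     * For example (illustrative), for the descriptor
+     * <tt>"(I[JLjava/lang/String;)V"</tt> this method returns three types:
+     * <tt>int</tt>, <tt>long[]</tt> and <tt>java.lang.String</tt>.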
+     *
+     * @param methodDescriptor a method descriptor.
+     * @return the Java types corresponding to the argument types of the given
+     *         method descriptor.
+     */
+    public static Type[] getArgumentTypes(final String methodDescriptor) {
+        char[] buf = methodDescriptor.toCharArray();
+        int off = 1;
+        int size = 0;
+        while (true) {
+            char car = buf[off++];
+            if (car == ')') {
+                break;
+            } else if (car == 'L') {
+                while (buf[off++] != ';') {
+                }
+                ++size;
+            } else if (car != '[') {
+                ++size;
+            }
+        }
+        Type[] args = new Type[size];
+        off = 1;
+        size = 0;
+        while (buf[off] != ')') {
+            args[size] = getType(buf, off);
+            off += args[size].len;
+            size += 1;
+        }
+        return args;
+    }
+
+    /**
+     * Returns the Java types corresponding to the argument types of the given
+     * method.
+     *
+     * @param method a method.
+     * @return the Java types corresponding to the argument types of the given
+     *         method.
+     */
+    public static Type[] getArgumentTypes(final Method method) {
+        Class[] classes = method.getParameterTypes();
+        Type[] types = new Type[classes.length];
+        for (int i = classes.length - 1; i >= 0; --i) {
+            types[i] = getType(classes[i]);
+        }
+        return types;
+    }
+
+    /**
+     * Returns the Java type corresponding to the return type of the given
+     * method descriptor.
+     *
+     * @param methodDescriptor a method descriptor.
+     * @return the Java type corresponding to the return type of the given
+     *         method descriptor.
+     */
+    public static Type getReturnType(final String methodDescriptor) {
+        char[] buf = methodDescriptor.toCharArray();
+        return getType(buf, methodDescriptor.indexOf(')') + 1);
+    }
+
+    /**
+     * Returns the Java type corresponding to the return type of the given
+     * method.
+     *
+     * @param method a method.
+     * @return the Java type corresponding to the return type of the given
+     *         method.
+     */
+    public static Type getReturnType(final Method method) {
+        return getType(method.getReturnType());
+    }
+
+    /**
+     * Returns the Java type corresponding to the given type descriptor.
+     *
+     * @param buf a buffer containing a type descriptor.
+     * @param off the offset of this descriptor in <tt>buf</tt>.
+     * @return the Java type corresponding to the given type descriptor.
+     */
+    private static Type getType(final char[] buf, final int off) {
+        int len;
+        switch (buf[off]) {
+            case 'V':
+                return VOID_TYPE;
+            case 'Z':
+                return BOOLEAN_TYPE;
+            case 'C':
+                return CHAR_TYPE;
+            case 'B':
+                return BYTE_TYPE;
+            case 'S':
+                return SHORT_TYPE;
+            case 'I':
+                return INT_TYPE;
+            case 'F':
+                return FLOAT_TYPE;
+            case 'J':
+                return LONG_TYPE;
+            case 'D':
+                return DOUBLE_TYPE;
+            case '[':
+                len = 1;
+                while (buf[off + len] == '[') {
+                    ++len;
+                }
+                if (buf[off + len] == 'L') {
+                    ++len;
+                    while (buf[off + len] != ';') {
+                        ++len;
+                    }
+                }
+                return new Type(ARRAY, buf, off, len + 1);
+            // case 'L':
+            default:
+                len = 1;
+                while (buf[off + len] != ';') {
+                    ++len;
+                }
+                return new Type(OBJECT, buf, off, len + 1);
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Accessors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the sort of this Java type.
+     *
+     * @return {@link #VOID VOID}, {@link #BOOLEAN BOOLEAN},
+     *         {@link #CHAR CHAR}, {@link #BYTE BYTE}, {@link #SHORT SHORT},
+     *         {@link #INT INT}, {@link #FLOAT FLOAT}, {@link #LONG LONG},
+     *         {@link #DOUBLE DOUBLE}, {@link #ARRAY ARRAY} or
+     *         {@link #OBJECT OBJECT}.
+     */
+    public int getSort() {
+        return sort;
+    }
+
+    /**
+     * Returns the number of dimensions of this array type. This method should
+     * only be used for an array type.
+     *
+     * @return the number of dimensions of this array type.
+     */
+    public int getDimensions() {
+        int i = 1;
+        while (buf[off + i] == '[') {
+            ++i;
+        }
+        return i;
+    }
+
+    /**
+     * Returns the type of the elements of this array type. This method should
+     * only be used for an array type.
+     *
+     * @return the type of the elements of this array type.
+     */
+    public Type getElementType() {
+        return getType(buf, off + getDimensions());
+    }
+
+    /**
+     * Returns the name of the class corresponding to this type.
+     *
+     * @return the fully qualified name of the class corresponding to this type.
+     */
+    public String getClassName() {
+        switch (sort) {
+            case VOID:
+                return "void";
+            case BOOLEAN:
+                return "boolean";
+            case CHAR:
+                return "char";
+            case BYTE:
+                return "byte";
+            case SHORT:
+                return "short";
+            case INT:
+                return "int";
+            case FLOAT:
+                return "float";
+            case LONG:
+                return "long";
+            case DOUBLE:
+                return "double";
+            case ARRAY:
+                StringBuffer b = new StringBuffer(getElementType().getClassName());
+                for (int i = getDimensions(); i > 0; --i) {
+                    b.append("[]");
+                }
+                return b.toString();
+            // case OBJECT:
+            default:
+                return new String(buf, off + 1, len - 2).replace('/', '.');
+        }
+    }
+
+    /**
+     * Returns the internal name of the class corresponding to this object type.
+     * The internal name of a class is its fully qualified name, where '.' are
+     * replaced by '/'. This method should only be used for an object type.
+     *
+     * @return the internal name of the class corresponding to this object type.
+     */
+    public String getInternalName() {
+        return new String(buf, off + 1, len - 2);
+    }
+
+    // ------------------------------------------------------------------------
+    // Conversion to type descriptors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the descriptor corresponding to this Java type.
+     *
+     * @return the descriptor corresponding to this Java type.
+     */
+    public String getDescriptor() {
+        StringBuffer buf = new StringBuffer();
+        getDescriptor(buf);
+        return buf.toString();
+    }
+
+    /**
+     * Returns the descriptor corresponding to the given argument and return
+     * types.
+     *
+     * @param returnType the return type of the method.
+     * @param argumentTypes the argument types of the method.
+     * @return the descriptor corresponding to the given argument and return
+     *         types.
+     */
+    public static String getMethodDescriptor(
+        final Type returnType,
+        final Type[] argumentTypes)
+    {
+        StringBuffer buf = new StringBuffer();
+        buf.append('(');
+        for (int i = 0; i < argumentTypes.length; ++i) {
+            argumentTypes[i].getDescriptor(buf);
+        }
+        buf.append(')');
+        returnType.getDescriptor(buf);
+        return buf.toString();
+    }
+
+    /**
+     * Appends the descriptor corresponding to this Java type to the given
+     * string buffer.
+     *
+     * @param buf the string buffer to which the descriptor must be appended.
+     */
+    private void getDescriptor(final StringBuffer buf) {
+        switch (sort) {
+            case VOID:
+                buf.append('V');
+                return;
+            case BOOLEAN:
+                buf.append('Z');
+                return;
+            case CHAR:
+                buf.append('C');
+                return;
+            case BYTE:
+                buf.append('B');
+                return;
+            case SHORT:
+                buf.append('S');
+                return;
+            case INT:
+                buf.append('I');
+                return;
+            case FLOAT:
+                buf.append('F');
+                return;
+            case LONG:
+                buf.append('J');
+                return;
+            case DOUBLE:
+                buf.append('D');
+                return;
+            // case ARRAY:
+            // case OBJECT:
+            default:
+                buf.append(this.buf, off, len);
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Direct conversion from classes to type descriptors,
+    // without intermediate Type objects
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the internal name of the given class. The internal name of a
+     * class is its fully qualified name, where '.' are replaced by '/'.
+     *
+     * @param c an object class.
+     * @return the internal name of the given class.
+     */
+    public static String getInternalName(final Class c) {
+        return c.getName().replace('.', '/');
+    }
+
+    /**
+     * Returns the descriptor corresponding to the given Java type.
+     *
+     * @param c an object class, a primitive class or an array class.
+     * @return the descriptor corresponding to the given class.
+     */
+    public static String getDescriptor(final Class c) {
+        StringBuffer buf = new StringBuffer();
+        getDescriptor(buf, c);
+        return buf.toString();
+    }
+
+    /**
+     * Returns the descriptor corresponding to the given method.
+     *
+     * @param m a {@link Method Method} object.
+     * @return the descriptor of the given method.
+     */
+    public static String getMethodDescriptor(final Method m) {
+        Class[] parameters = m.getParameterTypes();
+        StringBuffer buf = new StringBuffer();
+        buf.append('(');
+        for (int i = 0; i < parameters.length; ++i) {
+            getDescriptor(buf, parameters[i]);
+        }
+        buf.append(')');
+        getDescriptor(buf, m.getReturnType());
+        return buf.toString();
+    }
+
+    /**
+     * Appends the descriptor of the given class to the given string buffer.
+     *
+     * @param buf the string buffer to which the descriptor must be appended.
+     * @param c the class whose descriptor must be computed.
+     */
+    private static void getDescriptor(final StringBuffer buf, final Class c) {
+        Class d = c;
+        while (true) {
+            if (d.isPrimitive()) {
+                char car;
+                if (d == Integer.TYPE) {
+                    car = 'I';
+                } else if (d == Void.TYPE) {
+                    car = 'V';
+                } else if (d == Boolean.TYPE) {
+                    car = 'Z';
+                } else if (d == Byte.TYPE) {
+                    car = 'B';
+                } else if (d == Character.TYPE) {
+                    car = 'C';
+                } else if (d == Short.TYPE) {
+                    car = 'S';
+                } else if (d == Double.TYPE) {
+                    car = 'D';
+                } else if (d == Float.TYPE) {
+                    car = 'F';
+                } else /* if (d == Long.TYPE) */{
+                    car = 'J';
+                }
+                buf.append(car);
+                return;
+            } else if (d.isArray()) {
+                buf.append('[');
+                d = d.getComponentType();
+            } else {
+                buf.append('L');
+                String name = d.getName();
+                int len = name.length();
+                for (int i = 0; i < len; ++i) {
+                    char car = name.charAt(i);
+                    buf.append(car == '.' ? '/' : car);
+                }
+                buf.append(';');
+                return;
+            }
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Corresponding size and opcodes
+    // ------------------------------------------------------------------------
+
+    /**
+     * Returns the size of values of this type.
+     *
+     * @return the size of values of this type, i.e., 2 for <tt>long</tt> and
+     *         <tt>double</tt>, and 1 otherwise.
+     */
+    public int getSize() {
+        return (sort == LONG || sort == DOUBLE ? 2 : 1);
+    }
+
+    /**
+     * Returns a JVM instruction opcode adapted to this Java type.
+     *
+     * @param opcode a JVM instruction opcode. This opcode must be one of ILOAD,
+     *        ISTORE, IALOAD, IASTORE, IADD, ISUB, IMUL, IDIV, IREM, INEG, ISHL,
+     *        ISHR, IUSHR, IAND, IOR, IXOR and IRETURN.
+     * @return an opcode that is similar to the given opcode, but adapted to
+     *         this Java type. For example, if this type is <tt>float</tt> and
+     *         <tt>opcode</tt> is IRETURN, this method returns FRETURN.
+     */
+    public int getOpcode(final int opcode) {
+        if (opcode == Opcodes.IALOAD || opcode == Opcodes.IASTORE) {
+            switch (sort) {
+                case BOOLEAN:
+                case BYTE:
+                    return opcode + 5;
+                case CHAR:
+                    return opcode + 6;
+                case SHORT:
+                    return opcode + 7;
+                case INT:
+                    return opcode;
+                case FLOAT:
+                    return opcode + 2;
+                case LONG:
+                    return opcode + 1;
+                case DOUBLE:
+                    return opcode + 3;
+                // case ARRAY:
+                // case OBJECT:
+                default:
+                    return opcode + 4;
+            }
+        } else {
+            switch (sort) {
+                case VOID:
+                    return opcode + 5;
+                case BOOLEAN:
+                case CHAR:
+                case BYTE:
+                case SHORT:
+                case INT:
+                    return opcode;
+                case FLOAT:
+                    return opcode + 2;
+                case LONG:
+                    return opcode + 1;
+                case DOUBLE:
+                    return opcode + 3;
+                // case ARRAY:
+                // case OBJECT:
+                default:
+                    return opcode + 4;
+            }
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    // Equals, hashCode and toString
+    // ------------------------------------------------------------------------
+
+    /**
+     * Tests if the given object is equal to this type.
+     *
+     * @param o the object to be compared to this type.
+     * @return <tt>true</tt> if the given object is equal to this type.
+     */
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || !(o instanceof Type)) {
+            return false;
+        }
+        Type t = (Type) o;
+        if (sort != t.sort) {
+            return false;
+        }
+        if (sort == Type.OBJECT || sort == Type.ARRAY) {
+            if (len != t.len) {
+                return false;
+            }
+            for (int i = off, j = t.off, end = i + len; i < end; i++, j++) {
+                if (buf[i] != t.buf[j]) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns a hash code value for this type.
+     *
+     * @return a hash code value for this type.
+     */
+    public int hashCode() {
+        int hc = 13 * sort;
+        if (sort == Type.OBJECT || sort == Type.ARRAY) {
+            for (int i = off, end = i + len; i < end; i++) {
+                hc = 17 * (hc + buf[i]);
+            }
+        }
+        return hc;
+    }
+
+    /**
+     * Returns a string representation of this type.
+     *
+     * @return the descriptor of this type.
+     */
+    public String toString() {
+        return getDescriptor();
+    }
+}
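
A brief usage sketch for the static descriptor helpers defined above. The import of the repackaged ASM Type class is an assumption (JE bundles ASM under com.sleepycat.asm); the printed values follow directly from the conversion code above.

import java.lang.reflect.Method;

import com.sleepycat.asm.Type; // package assumed for this tree's bundled ASM

public class TypeDescriptorSketch {
    public static void main(String[] args) throws Exception {
        System.out.println(Type.getDescriptor(String.class));    // Ljava/lang/String;
        System.out.println(Type.getDescriptor(int[].class));     // [I
        System.out.println(Type.getInternalName(String.class));  // java/lang/String

        // Method descriptors combine the parameter and return descriptors.
        Method m = String.class.getMethod("indexOf", String.class);
        System.out.println(Type.getMethodDescriptor(m));         // (Ljava/lang/String;)I
    }
}
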
diff --git a/src/com/sleepycat/bind/ByteArrayBinding.java b/src/com/sleepycat/bind/ByteArrayBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..c666ae42cb3f8d5690700d8d62bae42940f38e14
--- /dev/null
+++ b/src/com/sleepycat/bind/ByteArrayBinding.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ByteArrayBinding.java,v 1.27 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A pass-through <code>EntryBinding</code> that uses the entry's byte array as
+ * the key or data object.
+ *
+ * @author Mark Hayes
+ */
+public class ByteArrayBinding implements EntryBinding<byte[]> {
+
+    /*
+     * We can return the same byte[] for 0 length arrays.
+     */
+    private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];
+
+    /**
+     * Creates a byte array binding.
+     */
+    public ByteArrayBinding() {
+    }
+
+    // javadoc is inherited
+    public byte[] entryToObject(DatabaseEntry entry) {
+
+        int len = entry.getSize();
+        if (len == 0) {
+            return ZERO_LENGTH_BYTE_ARRAY;
+        } else {
+            byte[] bytes = new byte[len];
+            System.arraycopy(entry.getData(), entry.getOffset(),
+                             bytes, 0, bytes.length);
+            return bytes;
+        }
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(byte[] object, DatabaseEntry entry) {
+
+        entry.setData(object, 0, object.length);
+    }
+}
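
A minimal round-trip sketch for the pass-through binding above; it needs only a DatabaseEntry, no open database.

import java.util.Arrays;

import com.sleepycat.bind.ByteArrayBinding;
import com.sleepycat.je.DatabaseEntry;

public class ByteArrayBindingSketch {
    public static void main(String[] args) {
        ByteArrayBinding binding = new ByteArrayBinding();
        DatabaseEntry entry = new DatabaseEntry();

        byte[] original = {1, 2, 3};
        binding.objectToEntry(original, entry);     // entry now references the array
        byte[] copy = binding.entryToObject(entry); // a fresh copy of the entry bytes

        System.out.println(Arrays.equals(original, copy)); // true
    }
}
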
diff --git a/src/com/sleepycat/bind/EntityBinding.java b/src/com/sleepycat/bind/EntityBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2e2331d38b67649c587872d7455771325dfe7cd
--- /dev/null
+++ b/src/com/sleepycat/bind/EntityBinding.java
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EntityBinding.java,v 1.25 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A binding between a key-value entry pair and an entity object.
+ *
+ * <p><em>WARNING:</em> Binding instances are typically shared by multiple
+ * threads and binding methods are called without any special synchronization.
+ * Therefore, bindings must be thread safe.  In general no shared state should
+ * be used and any caching of computed values must be done with proper
+ * synchronization.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface EntityBinding<E> {
+
+    /**
+     * Converts key and data entry buffers into an entity Object.
+     *
+     * @param key is the source key entry.
+     *
+     * @param data is the source data entry.
+     *
+     * @return the resulting Object.
+     */
+    E entryToObject(DatabaseEntry key, DatabaseEntry data);
+
+    /**
+     * Extracts the key entry from an entity Object.
+     *
+     * @param object is the source Object.
+     *
+     * @param key is the destination entry buffer.
+     */
+    void objectToKey(E object, DatabaseEntry key);
+
+    /**
+     * Extracts the data entry from an entity Object.
+     *
+     * @param object is the source Object.
+     *
+     * @param data is the destination entry buffer.
+     */
+    void objectToData(E object, DatabaseEntry data);
+}
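
To make the three-method contract concrete, here is a hypothetical implementation that maps a simple name/description pair onto UTF-8 key and data entries; the NamedItem class and its fields are illustrative only, not part of this package.

import java.nio.charset.StandardCharsets;

import com.sleepycat.bind.EntityBinding;
import com.sleepycat.je.DatabaseEntry;

/** Hypothetical entity: a name (stored as the key) and a description (the data). */
class NamedItem {
    final String name;
    final String description;
    NamedItem(String name, String description) {
        this.name = name;
        this.description = description;
    }
}

class NamedItemBinding implements EntityBinding<NamedItem> {

    public NamedItem entryToObject(DatabaseEntry key, DatabaseEntry data) {
        return new NamedItem(asString(key), asString(data));
    }

    public void objectToKey(NamedItem object, DatabaseEntry key) {
        key.setData(object.name.getBytes(StandardCharsets.UTF_8));
    }

    public void objectToData(NamedItem object, DatabaseEntry data) {
        data.setData(object.description.getBytes(StandardCharsets.UTF_8));
    }

    private static String asString(DatabaseEntry entry) {
        return new String(entry.getData(), entry.getOffset(), entry.getSize(),
                          StandardCharsets.UTF_8);
    }
}
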
diff --git a/src/com/sleepycat/bind/EntryBinding.java b/src/com/sleepycat/bind/EntryBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..e3790198aa935e224fbc2a9eadd848ea9353c772
--- /dev/null
+++ b/src/com/sleepycat/bind/EntryBinding.java
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EntryBinding.java,v 1.24 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A binding between a key or data entry and a key or data object.
+ *
+ * <p><em>WARNING:</em> Binding instances are typically shared by multiple
+ * threads and binding methods are called without any special synchronization.
+ * Therefore, bindings must be thread safe.  In general no shared state should
+ * be used and any caching of computed values must be done with proper
+ * synchronization.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface EntryBinding<E> {
+
+    /**
+     * Converts an entry buffer into an Object.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting Object.
+     */
+    E entryToObject(DatabaseEntry entry);
+
+    /**
+     * Converts an Object into an entry buffer.
+     *
+     * @param object is the source Object.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    void objectToEntry(E object, DatabaseEntry entry);
+}
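
As a sketch of the single-entry contract, the hypothetical binding below stores an Integer as four big-endian bytes; in practice the tuple bindings in com.sleepycat.bind.tuple would normally be used for this.

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.je.DatabaseEntry;

/** Illustrative only: an Integer stored as four big-endian bytes. */
class IntEntryBinding implements EntryBinding<Integer> {

    public Integer entryToObject(DatabaseEntry entry) {
        byte[] b = entry.getData();
        int off = entry.getOffset();
        return ((b[off] & 0xFF) << 24) |
               ((b[off + 1] & 0xFF) << 16) |
               ((b[off + 2] & 0xFF) << 8) |
               (b[off + 3] & 0xFF);
    }

    public void objectToEntry(Integer object, DatabaseEntry entry) {
        int v = object;
        entry.setData(new byte[] {
            (byte) (v >>> 24), (byte) (v >>> 16),
            (byte) (v >>> 8), (byte) v});
    }
}
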
diff --git a/src/com/sleepycat/bind/RecordNumberBinding.java b/src/com/sleepycat/bind/RecordNumberBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ea9c661c0abdaec9c0973fd8ba8afdabcd56358
--- /dev/null
+++ b/src/com/sleepycat/bind/RecordNumberBinding.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: RecordNumberBinding.java,v 1.34 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * <!-- begin JE only -->
+ * @hidden
+ * <!-- end JE only -->
+ * An <code>EntryBinding</code> that treats a record number key entry as a
+ * <code>Long</code> key object.
+ *
+ * <p>Record numbers are returned as <code>Long</code> objects, although on
+ * input any <code>Number</code> object may be used.</p>
+ *
+ * @author Mark Hayes
+ */
+public class RecordNumberBinding implements EntryBinding<Long> {
+
+    /**
+     * Creates a record number binding.
+     */
+    public RecordNumberBinding() {
+    }
+
+    // javadoc is inherited
+    public Long entryToObject(DatabaseEntry entry) {
+
+        return Long.valueOf(entryToRecordNumber(entry));
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Long object, DatabaseEntry entry) {
+
+        recordNumberToEntry(object, entry);
+    }
+
+    /**
+     * Utility method for use by bindings to translate an entry buffer to a
+     * record number integer.
+     *
+     * @param entry the entry buffer.
+     *
+     * @return the record number.
+     */
+    public static long entryToRecordNumber(DatabaseEntry entry) {
+
+        return DbCompat.getRecordNumber(entry) & 0xFFFFFFFFL;
+    }
+
+    /**
+     * Utility method for use by bindings to translate a record number integer
+     * to an entry buffer.
+     *
+     * @param recordNumber the record number.
+     *
+     * @param entry the entry buffer to hold the record number.
+     */
+    public static void recordNumberToEntry(long recordNumber,
+                                           DatabaseEntry entry) {
+        entry.setData(new byte[4], 0, 4);
+        DbCompat.setRecordNumber(entry, (int) recordNumber);
+    }
+}
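
The static helpers above can be exercised without a database, as in this sketch; record-number keys themselves are only meaningful for databases that use record numbers (queue/recno in the base API).

import com.sleepycat.bind.RecordNumberBinding;
import com.sleepycat.je.DatabaseEntry;

public class RecordNumberSketch {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();
        RecordNumberBinding.recordNumberToEntry(42L, entry);         // 4-byte key
        long recno = RecordNumberBinding.entryToRecordNumber(entry); // 42
        System.out.println(recno);
    }
}
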
diff --git a/src/com/sleepycat/bind/package.html b/src/com/sleepycat/bind/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..b750ad67973a3285ee20f1e67cdf4ced8fdf86d3
--- /dev/null
+++ b/src/com/sleepycat/bind/package.html
@@ -0,0 +1,10 @@
+<!-- $Id: package.html,v 1.19 2008/02/05 23:28:18 mark Exp $ -->
+<html>
+<body>
+Bindings between database entries and Java objects.
+<!-- begin JE only -->
+@see <a href="{@docRoot}/../GettingStartedGuide/bindAPI.html"
+        target="_top">[Getting Started Guide]</a>
+<!-- end JE only -->
+</body>
+</html>
diff --git a/src/com/sleepycat/bind/serial/ClassCatalog.java b/src/com/sleepycat/bind/serial/ClassCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..98754837d9342408a1c5b25f352bac8b80ecf195
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/ClassCatalog.java
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ClassCatalog.java,v 1.21 2008/04/07 22:28:58 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import java.io.ObjectStreamClass;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * A catalog of class description information for use during object
+ * serialization.
+ *
+ * <p>A catalog is used to store class descriptions separately from serialized
+ * objects, to avoid redundantly storing this information with each object.
+ * When serialized objects are stored in a database, a {@link
+ * StoredClassCatalog} should be used.</p>
+ *
+ * <p>This information is used for serialization of class descriptors or
+ * java.io.ObjectStreamClass objects, each of which represents a unique class
+ * format.  For each unique format, a unique class ID is assigned by the
+ * catalog.  The class ID can then be used in the serialization stream in place
+ * of the full class information.  When used with {@link SerialInput} and
+ * {@link SerialOutput} or any of the serial bindings, the use of the catalog
+ * is transparent to the application.</p>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public interface ClassCatalog {
+
+    /**
+     * Close a catalog database and release any cached resources.
+     */
+    public void close()
+        throws DatabaseException;
+
+    /**
+     * Return the class ID for the current version of the given class
+     * description.
+     * This is used for storing in serialization streams in place of a full
+     * class descriptor, since it is much more compact.  To get back the
+     * ObjectStreamClass for a class ID, call {@link #getClassFormat(byte[])}.
+     * This function causes a new class ID to be assigned if the class
+     * description has changed.
+     *
+     * @param classDesc The class description for which to return the
+     * class ID.
+     *
+     * @return The class ID for the current version of the class.
+     */
+    public byte[] getClassID(ObjectStreamClass classDesc)
+        throws DatabaseException, ClassNotFoundException;
+
+    /**
+     * Return the ObjectStreamClass for the given class ID.  This may or may
+     * not be the current class format, depending on whether the class has
+     * changed since the class ID was generated.
+     *
+     * @param classID The class ID for which to return the class format.
+     *
+     * @return The class format for the given class ID, which may or may not
+     * represent the current version of the class.
+     */
+    public ObjectStreamClass getClassFormat(byte[] classID)
+        throws DatabaseException, ClassNotFoundException;
+}
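
A minimal sketch of obtaining a ClassCatalog by opening a small JE database and wrapping it in a StoredClassCatalog (defined later in this patch); the environment home and database name are placeholders.

import java.io.File;

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

public class CatalogSketch {
    public static void main(String[] args) throws Exception {
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        Environment env = new Environment(new File("/tmp/catalog-env"), envConfig);

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        Database catalogDb = env.openDatabase(null, "classCatalog", dbConfig);

        ClassCatalog catalog = new StoredClassCatalog(catalogDb);
        // ... use the catalog with SerialBinding, SerialInput, SerialOutput ...
        catalog.close(); // closes the catalog database
        env.close();
    }
}
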
diff --git a/src/com/sleepycat/bind/serial/SerialBase.java b/src/com/sleepycat/bind/serial/SerialBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..adba17e4234f16d3b6ec145d5ddb763896abbbb4
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialBase.java
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialBase.java,v 1.8 2008/04/07 22:28:58 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.util.FastOutputStream;
+
+/**
+ * A base class for serial bindings and key creators that provides control
+ * over the allocation of the output buffer.
+ *
+ * <p>Serial bindings append data to a {@link FastOutputStream} instance.  This
+ * object has a byte array buffer that is resized when it is full.  The
+ * reallocation of this buffer can be a performance factor for some
+ * applications using large objects.  To manage this issue, the {@link
+ * #setSerialBufferSize} method may be used to control the initial size of the
+ * buffer, and the {@link #getSerialOutput} method may be overridden by
+ * subclasses to take over creation of the FastOutputStream object.</p>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public class SerialBase {
+
+    private int outputBufferSize;
+
+    /**
+     * Initializes the initial output buffer size to zero.
+     *
+     * <p>Unless {@link #setSerialBufferSize} is called, the default {@link
+     * FastOutputStream#DEFAULT_INIT_SIZE} size will be used.</p>
+     */
+    public SerialBase() {
+        outputBufferSize = 0;
+    }
+
+    /**
+     * Sets the initial byte size of the output buffer that is allocated by the
+     * default implementation of {@link #getSerialOutput}.
+     *
+     * <p>If this property is zero (the default), the default {@link
+     * FastOutputStream#DEFAULT_INIT_SIZE} size is used.</p>
+     *
+     * @param byteSize the initial byte size of the output buffer, or zero to
+     * use the default size.
+     */
+    public void setSerialBufferSize(int byteSize) {
+        outputBufferSize = byteSize;
+    }
+
+    /**
+     * Returns the initial byte size of the output buffer.
+     *
+     * @return the initial byte size of the output buffer.
+     *
+     * @see #setSerialBufferSize
+     */
+    public int getSerialBufferSize() {
+        return outputBufferSize;
+    }
+
+    /**
+     * Returns an empty SerialOutput instance that will be used by the serial
+     * binding or key creator.
+     *
+     * <p>The default implementation of this method creates a new SerialOutput
+     * with an initial buffer size that can be changed using the {@link
+     * #setSerialBufferSize} method.</p>
+     *
+     * <p>This method may be overridden to return a FastOutputStream instance.
+     * For example, an instance per thread could be created and returned by
+     * this method.  If a FastOutputStream instance is reused, be sure to call
+     * its {@link FastOutputStream#reset} method before each use.</p>
+     *
+     * @param object is the object to be written to the serial output, and may
+     * be used by subclasses to determine the size of the output buffer.
+     *
+     * @return an empty FastOutputStream instance.
+     *
+     * @see #setSerialBufferSize
+     */
+    protected FastOutputStream getSerialOutput(Object object) {
+        int byteSize = getSerialBufferSize();
+        if (byteSize != 0) {
+            return new FastOutputStream(byteSize);
+        } else {
+            return new FastOutputStream();
+        }
+    }
+}
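
As the getSerialOutput javadoc suggests, a subclass can reuse one output buffer per thread instead of allocating a new one for every call. This sketch does so for SerialBinding (defined next in this patch); note that because the binding points the resulting DatabaseEntry at the shared buffer, each entry must be consumed before the same thread serializes the next object.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.util.FastOutputStream;

class ReusingSerialBinding<E> extends SerialBinding<E> {

    /* One buffer per thread; 4096 is an arbitrary starting size. */
    private final ThreadLocal<FastOutputStream> perThread =
        new ThreadLocal<FastOutputStream>() {
            protected FastOutputStream initialValue() {
                return new FastOutputStream(4096);
            }
        };

    ReusingSerialBinding(ClassCatalog catalog, Class<E> baseClass) {
        super(catalog, baseClass);
    }

    protected FastOutputStream getSerialOutput(Object object) {
        FastOutputStream out = perThread.get();
        out.reset(); // required before each reuse, per the javadoc above
        return out;
    }
}
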
diff --git a/src/com/sleepycat/bind/serial/SerialBinding.java b/src/com/sleepycat/bind/serial/SerialBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..c97d08ec07e3d47dddeecdb7104f2a84e00df4b3
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialBinding.java
@@ -0,0 +1,180 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialBinding.java,v 1.35.2.1 2008/10/09 14:46:49 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import java.io.IOException;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.util.FastInputStream;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * A concrete <code>EntryBinding</code> that treats a key or data entry as
+ * a serialized object.
+ *
+ * <p>This binding stores objects in serialized object format.  The
+ * deserialized objects are returned by the binding, and their
+ * <code>Class</code> must implement the <code>Serializable</code>
+ * interface.</p>
+ *
+ * <p>For key bindings, a tuple binding is usually a better choice than a
+ * serial binding.  A tuple binding gives a reasonable sort order, and works
+ * with comparators in all cases -- see below.</p>
+ *
+ * <p><em>WARNING:</em> SerialBinding should not be used with Berkeley DB Java
+ * Edition for key bindings, when a custom comparator is used.  In JE,
+ * comparators are instantiated and called internally at times when databases
+ * are not accessible.  Because serial bindings depend on the class catalog
+ * database, a serial binding cannot be used during these times.  An attempt
+ * to use a serial binding with a custom comparator will result in a
+ * NullPointerException during environment open or close.</p>
+ *
+ * <p><a name="evolution"><strong>Class Evolution</strong></a></p>
+ *
+ * <p>{@code SerialBinding} and other classes in this package use standard Java
+ * serialization and all rules of Java serialization apply.  This includes the
+ * rules for class evolution.  Once an instance of a class is stored, the class
+ * must maintain its {@code serialVersionUID} and follow the rules defined in
+ * the Java specification.  To use a new incompatible version of a class, a
+ * different {@link ClassCatalog} must be used or the class catalog database
+ * must be truncated.</p>
+ *
+ * <p>If more advanced class evolution features are required, consider using
+ * the {@link com.sleepycat.persist.evolve Direct Persistence Layer}.</p>
+ *
+ * @author Mark Hayes
+ */
+public class SerialBinding<E> extends SerialBase implements EntryBinding<E> {
+
+    private ClassCatalog classCatalog;
+    private Class<E> baseClass;
+
+    /**
+     * Creates a serial binding.
+     *
+     * @param classCatalog is the catalog to hold shared class information and
+     * for a database should be a {@link StoredClassCatalog}.
+     *
+     * @param baseClass is the base class for serialized objects stored using
+     * this binding -- all objects using this binding must be an instance of
+     * this class.
+     */
+    public SerialBinding(ClassCatalog classCatalog, Class<E> baseClass) {
+
+        if (classCatalog == null) {
+            throw new NullPointerException("classCatalog must be non-null");
+        }
+        this.classCatalog = classCatalog;
+        this.baseClass = baseClass;
+    }
+
+    /**
+     * Returns the base class for this binding.
+     *
+     * @return the base class for this binding.
+     */
+    public final Class<E> getBaseClass() {
+
+        return baseClass;
+    }
+
+    /**
+     * Returns the class loader to be used during deserialization, or null if
+     * a default class loader should be used.  The default implementation of
+     * this method returns
+     * <code>Thread.currentThread().getContextClassLoader()</code> to use the
+     * context class loader for the current thread.
+     *
+     * <p>This method may be overridden to return a dynamically determined class
+     * loader.  For example, <code>getBaseClass().getClassLoader()</code> could
+     * be called to use the class loader for the base class, assuming that a
+     * base class has been specified.</p>
+     *
+     * <p>If this method returns null, a default class loader will be used as
+     * determined by the <code>java.io.ObjectInputStream.resolveClass</code>
+     * method.</p>
+     */
+    public ClassLoader getClassLoader() {
+
+        return Thread.currentThread().getContextClassLoader();
+    }
+
+    /**
+     * Deserialize an object from an entry buffer.  May only be called for data
+     * that was serialized using {@link #objectToEntry}, since the fixed
+     * serialization header is assumed to not be included in the input data.
+     * {@link SerialInput} is used to deserialize the object.
+     *
+     * @param entry is the input serialized entry.
+     *
+     * @return the output deserialized object.
+     */
+    public E entryToObject(DatabaseEntry entry) {
+
+        int length = entry.getSize();
+        byte[] hdr = SerialOutput.getStreamHeader();
+        byte[] bufWithHeader = new byte[length + hdr.length];
+
+        System.arraycopy(hdr, 0, bufWithHeader, 0, hdr.length);
+        System.arraycopy(entry.getData(), entry.getOffset(),
+                         bufWithHeader, hdr.length, length);
+
+        try {
+            SerialInput jin = new SerialInput(
+                new FastInputStream(bufWithHeader, 0, bufWithHeader.length),
+                classCatalog,
+                getClassLoader());
+            return (E) jin.readObject();
+        } catch (IOException e) {
+            throw new RuntimeExceptionWrapper(e);
+        } catch (ClassNotFoundException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+
+    /**
+     * Serialize an object into an entry buffer.  The fixed serialization
+     * header is not included in the output data to save space, and therefore
+     * to deserialize the data the complementary {@link #entryToObject} method
+     * must be used.  {@link SerialOutput} is used to serialize the object.
+     *
+     * <p>Note that this method sets the DatabaseEntry offset property to a
+     * non-zero value and the size property to a value less than the length of
+     * the byte array.</p>
+     *
+     * @param object is the input deserialized object.
+     *
+     * @param entry is the output serialized entry.
+     *
+     * @throws IllegalArgumentException if the object is not an instance of the
+     * base class for this binding.
+     */
+    public void objectToEntry(E object, DatabaseEntry entry) {
+
+        if (baseClass != null && !baseClass.isInstance(object)) {
+            throw new IllegalArgumentException(
+                        "Data object class (" + object.getClass() +
+                        ") not an instance of binding's base class (" +
+                        baseClass + ')');
+        }
+        FastOutputStream fo = getSerialOutput(object);
+        try {
+            SerialOutput jos = new SerialOutput(fo, classCatalog);
+            jos.writeObject(object);
+        } catch (IOException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+
+        byte[] hdr = SerialOutput.getStreamHeader();
+        entry.setData(fo.getBufferBytes(), hdr.length,
+                     fo.getBufferLength() - hdr.length);
+    }
+}
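
A short round-trip sketch through the binding above; it assumes an already-open ClassCatalog (for example the StoredClassCatalog sketched earlier).

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.je.DatabaseEntry;

public class SerialBindingSketch {

    static void roundTrip(ClassCatalog catalog) {
        SerialBinding<String> binding =
            new SerialBinding<String>(catalog, String.class);

        DatabaseEntry entry = new DatabaseEntry();
        binding.objectToEntry("hello", entry);      // serialized, header stripped
        String back = binding.entryToObject(entry); // header re-attached, deserialized
        System.out.println(back);                   // hello
    }
}
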
diff --git a/src/com/sleepycat/bind/serial/SerialInput.java b/src/com/sleepycat/bind/serial/SerialInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..3e2a891ef0a1cd637693a605cf51e1794b6fe54b
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialInput.java
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialInput.java,v 1.21 2008/04/07 22:28:58 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectStreamClass;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * A specialized <code>ObjectInputStream</code> that gets class description
+ * information from a <code>ClassCatalog</code>.  It is used by
+ * <code>SerialBinding</code>.
+ *
+ * <p>This class is used instead of an {@link ObjectInputStream}, which it
+ * extends, to read an object stream written by the {@link SerialOutput} class.
+ * For reading objects from a database normally one of the serial binding
+ * classes is used.  {@link SerialInput} is used when an {@link
+ * ObjectInputStream} is needed along with compact storage.  A {@link
+ * ClassCatalog} must be supplied, however, to store shared class
+ * descriptions.</p>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public class SerialInput extends ObjectInputStream {
+
+    private ClassCatalog classCatalog;
+    private ClassLoader classLoader;
+
+    /**
+     * Creates a serial input stream.
+     *
+     * @param in is the input stream from which compact serialized objects will
+     * be read.
+     *
+     * @param classCatalog is the catalog containing the class descriptions
+     * for the serialized objects.
+     */
+    public SerialInput(InputStream in, ClassCatalog classCatalog)
+        throws IOException {
+
+        this(in, classCatalog, null);
+    }
+
+    /**
+     * Creates a serial input stream.
+     *
+     * @param in is the input stream from which compact serialized objects will
+     * be read.
+     *
+     * @param classCatalog is the catalog containing the class descriptions
+     * for the serialized objects.
+     *
+     * @param classLoader is the class loader to use, or null if a default
+     * class loader should be used.
+     */
+    public SerialInput(InputStream in,
+                       ClassCatalog classCatalog,
+                       ClassLoader classLoader)
+        throws IOException {
+
+        super(in);
+
+        this.classCatalog = classCatalog;
+        this.classLoader = classLoader;
+    }
+
+    // javadoc is specified elsewhere
+    protected ObjectStreamClass readClassDescriptor()
+        throws IOException, ClassNotFoundException {
+
+        try {
+            byte len = readByte();
+            byte[] id = new byte[len];
+            readFully(id);
+
+            return classCatalog.getClassFormat(id);
+        } catch (DatabaseException e) {
+            /*
+             * Do not throw IOException from here since ObjectOutputStream
+             * will write the exception to the stream, which causes another
+             * call here, etc.
+             */
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+
+    // javadoc is specified elsewhere
+    protected Class resolveClass(ObjectStreamClass desc)
+        throws IOException, ClassNotFoundException {
+
+        if (classLoader != null) {
+            try {
+                return Class.forName(desc.getName(), false, classLoader);
+            } catch (ClassNotFoundException e) {
+                return super.resolveClass(desc);
+            }
+        } else {
+            return super.resolveClass(desc);
+        }
+    }
+}
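
SerialInput and SerialOutput (next in this patch) are intended to be used as a pair against the same catalog, as in this sketch; the catalog parameter is assumed to be an open ClassCatalog.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialInput;
import com.sleepycat.bind.serial.SerialOutput;

public class SerialStreamSketch {

    static Object roundTrip(ClassCatalog catalog, Object value) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        SerialOutput out = new SerialOutput(bytes, catalog);
        out.writeObject(value); // class descriptor replaced by a catalog class ID
        out.close();

        SerialInput in = new SerialInput(
            new ByteArrayInputStream(bytes.toByteArray()), catalog);
        return in.readObject(); // class ID resolved through the same catalog
    }
}
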
diff --git a/src/com/sleepycat/bind/serial/SerialOutput.java b/src/com/sleepycat/bind/serial/SerialOutput.java
new file mode 100644
index 0000000000000000000000000000000000000000..c4e3ea89cc609ede7176205b94ee9dfb75b79e84
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialOutput.java
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialOutput.java,v 1.23 2008/04/07 22:28:58 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamClass;
+import java.io.ObjectStreamConstants;
+import java.io.OutputStream;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * A specialized <code>ObjectOutputStream</code> that stores class description
+ * information in a <code>ClassCatalog</code>.  It is used by
+ * <code>SerialBinding</code>.
+ *
+ * <p>This class is used instead of an {@link ObjectOutputStream}, which it
+ * extends, to write a compact object stream.  For writing objects to a
+ * database normally one of the serial binding classes is used.  {@link
+ * SerialOutput} is used when an {@link ObjectOutputStream} is needed along
+ * with compact storage.  A {@link ClassCatalog} must be supplied, however, to
+ * store shared class descriptions.</p>
+ *
+ * <p>The {@link ClassCatalog} is used to store class definitions rather than
+ * embedding these into the stream.  Instead, a class format identifier is
+ * embedded into the stream.  This identifier is then used by {@link
+ * SerialInput} to load the class format to deserialize the object.</p>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public class SerialOutput extends ObjectOutputStream {
+
+    /*
+     * Serialization version constants. Instead of hardcoding these we get them
+     * by creating a SerialOutput, which itself guarantees that we'll always
+     * use a PROTOCOL_VERSION_2 header.
+     */
+    private final static byte[] STREAM_HEADER;
+    static {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try {
+            new SerialOutput(baos, null);
+        } catch (IOException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+        STREAM_HEADER = baos.toByteArray();
+    }
+
+    private ClassCatalog classCatalog;
+
+    /**
+     * Creates a serial output stream.
+     *
+     * @param out is the output stream to which the compact serialized objects
+     * will be written.
+     *
+     * @param classCatalog is the catalog to which the class descriptions for
+     * the serialized objects will be written.
+     */
+    public SerialOutput(OutputStream out, ClassCatalog classCatalog)
+        throws IOException {
+
+        super(out);
+        this.classCatalog = classCatalog;
+
+        /* guarantee that we'll always use the same serialization format */
+
+        useProtocolVersion(ObjectStreamConstants.PROTOCOL_VERSION_2);
+    }
+
+    // javadoc is inherited
+    protected void writeClassDescriptor(ObjectStreamClass classdesc)
+        throws IOException {
+
+        try {
+            byte[] id = classCatalog.getClassID(classdesc);
+            writeByte(id.length);
+            write(id);
+        } catch (DatabaseException e) {
+            /*
+             * Do not throw IOException from here since ObjectOutputStream
+             * will write the exception to the stream, which causes another
+             * call here, etc.
+             */
+            throw new RuntimeExceptionWrapper(e);
+        } catch (ClassNotFoundException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+
+    /**
+     * Returns the fixed stream header used for all serialized streams in
+     * PROTOCOL_VERSION_2 format.  To save space this header can be removed from
+     * serialized streams before storage and inserted before deserializing.
+     * {@link SerialOutput} always uses PROTOCOL_VERSION_2 serialization format
+     * to guarantee that this header is fixed.  {@link SerialBinding} removes
+     * this header from serialized streams automatically.
+     *
+     * @return the fixed stream header.
+     */
+    public static byte[] getStreamHeader() {
+
+        return STREAM_HEADER;
+    }
+}
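
The header handling described in getStreamHeader can be sketched as follows; this is essentially what SerialBinding.objectToEntry does before storing an entry. The catalog parameter is assumed to be an open ClassCatalog.

import java.io.ByteArrayOutputStream;

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialOutput;

public class StreamHeaderSketch {

    /** Serializes a value and strips the fixed stream header to save space. */
    static byte[] toCompactBytes(ClassCatalog catalog, Object value) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        SerialOutput out = new SerialOutput(bytes, catalog);
        out.writeObject(value);
        out.close();

        byte[] full = bytes.toByteArray();
        int hdrLen = SerialOutput.getStreamHeader().length;
        byte[] compact = new byte[full.length - hdrLen];
        System.arraycopy(full, hdrLen, compact, 0, compact.length);
        return compact; // the header must be prepended again before deserializing
    }
}
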
diff --git a/src/com/sleepycat/bind/serial/SerialSerialBinding.java b/src/com/sleepycat/bind/serial/SerialSerialBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..f6ec906aa0a7649c72e4d3bf8e01b6020d66feb8
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialSerialBinding.java
@@ -0,0 +1,118 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialSerialBinding.java,v 1.26 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * An abstract <code>EntityBinding</code> that treats an entity's key entry and
+ * data entry as serialized objects.
+ *
+ * <p>This class takes care of serializing and deserializing the key and
+ * data entry automatically.  Its three abstract methods must be implemented by
+ * a concrete subclass to convert the deserialized objects to/from an entity
+ * object.</p>
+ * <ul>
+ * <li> {@link #entryToObject(Object,Object)} </li>
+ * <li> {@link #objectToKey(Object)} </li>
+ * <li> {@link #objectToData(Object)} </li>
+ * </ul>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public abstract class SerialSerialBinding<K,D,E> implements EntityBinding<E> {
+
+    private SerialBinding<K> keyBinding;
+    private SerialBinding<D> dataBinding;
+
+    /**
+     * Creates a serial-serial entity binding.
+     *
+     * @param classCatalog is the catalog to hold shared class information and
+     * for a database should be a {@link StoredClassCatalog}.
+     *
+     * @param keyClass is the key base class.
+     *
+     * @param dataClass is the data base class.
+     */
+    public SerialSerialBinding(ClassCatalog classCatalog,
+                               Class<K> keyClass,
+                               Class<D> dataClass) {
+
+        this(new SerialBinding<K>(classCatalog, keyClass),
+             new SerialBinding<D>(classCatalog, dataClass));
+    }
+
+    /**
+     * Creates a serial-serial entity binding.
+     *
+     * @param keyBinding is the key binding.
+     *
+     * @param dataBinding is the data binding.
+     */
+    public SerialSerialBinding(SerialBinding<K> keyBinding,
+                               SerialBinding<D> dataBinding) {
+
+        this.keyBinding = keyBinding;
+        this.dataBinding = dataBinding;
+    }
+
+    // javadoc is inherited
+    public E entryToObject(DatabaseEntry key, DatabaseEntry data) {
+
+        return entryToObject(keyBinding.entryToObject(key),
+                             dataBinding.entryToObject(data));
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, DatabaseEntry key) {
+
+        K keyObject = objectToKey(object);
+        keyBinding.objectToEntry(keyObject, key);
+    }
+
+    // javadoc is inherited
+    public void objectToData(E object, DatabaseEntry data) {
+
+        D dataObject = objectToData(object);
+        dataBinding.objectToEntry(dataObject, data);
+    }
+
+    /**
+     * Constructs an entity object from deserialized key and data objects.
+     *
+     * @param keyInput is the deserialized key object.
+     *
+     * @param dataInput is the deserialized data object.
+     *
+     * @return the entity object constructed from the key and data.
+     */
+    public abstract E entryToObject(K keyInput, D dataInput);
+
+    /**
+     * Extracts a key object from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @return the deserialized key object.
+     */
+    public abstract K objectToKey(E object);
+
+    /**
+     * Extracts a data object from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @return the deserialized data object.
+     */
+    public abstract D objectToData(E object);
+}
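
A hypothetical concrete subclass, pairing a String key with an Integer data value; the CountEntity class is illustrative only. (As the SerialBinding javadoc notes, tuple bindings are usually a better choice for keys; a serial key is used here purely to illustrate the abstract methods.)

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialBinding;

/** Illustrative entity combining the key and data objects. */
class CountEntity {
    final String name;
    final Integer count;
    CountEntity(String name, Integer count) {
        this.name = name;
        this.count = count;
    }
}

class CountBinding extends SerialSerialBinding<String, Integer, CountEntity> {

    CountBinding(ClassCatalog catalog) {
        super(catalog, String.class, Integer.class);
    }

    public CountEntity entryToObject(String key, Integer data) {
        return new CountEntity(key, data);
    }

    public String objectToKey(CountEntity object) {
        return object.name;
    }

    public Integer objectToData(CountEntity object) {
        return object.count;
    }
}
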
diff --git a/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java b/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..2fd22b4a3472e599a24fdfac03df2b40024f4391
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java
@@ -0,0 +1,152 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SerialSerialKeyCreator.java,v 1.36 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.ForeignKeyNullifier;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * An abstract key creator that uses a serial key and a serial data entry.
+ * This class takes care of serializing and deserializing the key and data
+ * entry automatically.
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
+ * <ul>
+ * <li> {@link #createSecondaryKey(Object,Object)} </li>
+ * </ul>
+ * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+ * specified when opening the secondary database, the following method must be
+ * overridden to nullify the foreign index key.  If NULLIFY was not specified,
+ * this method need not be overridden.</p>
+ * <ul>
+ * <li> {@link #nullifyForeignKey(Object)} </li>
+ * </ul>
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public abstract class SerialSerialKeyCreator<PK,D,SK>
+    implements SecondaryKeyCreator, ForeignKeyNullifier {
+
+    protected SerialBinding<PK> primaryKeyBinding;
+    protected SerialBinding<D> dataBinding;
+    protected SerialBinding<SK> indexKeyBinding;
+
+    /**
+     * Creates a serial-serial key creator.
+     *
+     * @param classCatalog is the catalog to hold shared class information and
+     * for a database should be a {@link StoredClassCatalog}.
+     *
+     * @param primaryKeyClass is the primary key base class.
+     *
+     * @param dataClass is the data base class.
+     *
+     * @param indexKeyClass is the index key base class.
+     */
+    public SerialSerialKeyCreator(ClassCatalog classCatalog,
+                                  Class<PK> primaryKeyClass,
+                                  Class<D> dataClass,
+                                  Class<SK> indexKeyClass) {
+
+        this(new SerialBinding<PK>(classCatalog, primaryKeyClass),
+             new SerialBinding<D>(classCatalog, dataClass),
+             new SerialBinding<SK>(classCatalog, indexKeyClass));
+    }
+
+    /**
+     * Creates a serial-serial entity binding.
+     *
+     * @param primaryKeyBinding is the primary key binding.
+     *
+     * @param dataBinding is the data binding.
+     *
+     * @param indexKeyBinding is the index key binding.
+     */
+    public SerialSerialKeyCreator(SerialBinding<PK> primaryKeyBinding,
+                                  SerialBinding<D> dataBinding,
+                                  SerialBinding<SK> indexKeyBinding) {
+
+        this.primaryKeyBinding = primaryKeyBinding;
+        this.dataBinding = dataBinding;
+        this.indexKeyBinding = indexKeyBinding;
+    }
+
+    // javadoc is inherited
+    public boolean createSecondaryKey(SecondaryDatabase db,
+                                      DatabaseEntry primaryKeyEntry,
+                                      DatabaseEntry dataEntry,
+                                      DatabaseEntry indexKeyEntry)
+        throws DatabaseException {
+
+        PK primaryKeyInput =
+            primaryKeyBinding.entryToObject(primaryKeyEntry);
+        D dataInput = dataBinding.entryToObject(dataEntry);
+        SK indexKey = createSecondaryKey(primaryKeyInput, dataInput);
+        if (indexKey != null) {
+            indexKeyBinding.objectToEntry(indexKey, indexKeyEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    // javadoc is inherited
+    public boolean nullifyForeignKey(SecondaryDatabase db,
+                                     DatabaseEntry dataEntry)
+        throws DatabaseException {
+
+        D data = dataBinding.entryToObject(dataEntry);
+        data = nullifyForeignKey(data);
+        if (data != null) {
+            dataBinding.objectToEntry(data, dataEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Creates the index key object from primary key and data objects.
+     *
+     * @param primaryKey is the deserialized source primary key entry, or
+     * null if no primary key entry is used to construct the index key.
+     *
+     * @param data is the deserialized source data entry, or null if no
+     * data entry is used to construct the index key.
+     *
+     * @return the destination index key object, or null to indicate that
+     * the key is not present.
+     */
+    public abstract SK createSecondaryKey(PK primaryKey, D data);
+
+    /**
+     * Clears the index key in a data object.
+     *
+     * <p>On entry the data parameter contains the index key to be cleared.  It
+     * should be changed by this method such that {@link #createSecondaryKey}
+     * will return null.  Other fields in the data object should remain
+     * unchanged.</p>
+     *
+     * @param data is the source and destination data object.
+     *
+     * @return the destination data object, or null to indicate that the
+     * key is not present and no change is necessary.  The data returned may
+     * be the same object passed as the data parameter or a newly created
+     * object.
+     */
+    public D nullifyForeignKey(D data) {
+
+        return null;
+    }
+}
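
A hypothetical concrete key creator: primary keys are Longs, the data object is a String, and the secondary key is its lowercase form. Returning null from createSecondaryKey means no secondary key is produced for that record.

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialKeyCreator;

class LowerCaseKeyCreator extends SerialSerialKeyCreator<Long, String, String> {

    LowerCaseKeyCreator(ClassCatalog catalog) {
        super(catalog, Long.class, String.class, String.class);
    }

    public String createSecondaryKey(Long primaryKey, String data) {
        return (data == null) ? null : data.toLowerCase();
    }
}
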
diff --git a/src/com/sleepycat/bind/serial/StoredClassCatalog.java b/src/com/sleepycat/bind/serial/StoredClassCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..5fdff451dbb98a0801583cd7f468840b2682a3fa
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/StoredClassCatalog.java
@@ -0,0 +1,463 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredClassCatalog.java,v 1.51 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamClass;
+import java.io.Serializable;
+import java.math.BigInteger;
+import java.util.HashMap;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.UtfOps;
+
+/**
+ * A <code>ClassCatalog</code> that is stored in a <code>Database</code>.
+ *
+ * <p>A single <code>StoredClassCatalog</code> object is normally used along
+ * with a set of databases that store serialized objects.</p>
+ *
+ * @author Mark Hayes
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ */
+public class StoredClassCatalog implements ClassCatalog {
+
+    /*
+     * Record types ([key] [data]):
+     *
+     * [0] [next class ID]
+     * [1 / class ID] [ObjectStreamClass (class format)]
+     * [2 / class name] [ClassInfo (has 8 byte class ID)]
+     */
+    private static final byte REC_LAST_CLASS_ID = (byte) 0;
+    private static final byte REC_CLASS_FORMAT = (byte) 1;
+    private static final byte REC_CLASS_INFO = (byte) 2;
+
+    private static final byte[] LAST_CLASS_ID_KEY = {REC_LAST_CLASS_ID};
+
+    private Database db;
+    private HashMap<String, ClassInfo> classMap;
+    private HashMap<BigInteger, ObjectStreamClass> formatMap;
+    private LockMode writeLockMode;
+    private boolean cdbMode;
+    private boolean txnMode;
+
+    /**
+     * Creates a catalog based on a given database. To save resources, only a
+     * single catalog object should be used for each unique catalog database.
+     *
+     * @param database an open database to use as the class catalog.  It must
+     * be a BTREE database and must not allow duplicates.
+     *
+     * @throws DatabaseException if an error occurs accessing the database.
+     *
+     * @throws IllegalArgumentException if the database is not a BTREE database
+     * or if it is configured to allow duplicates.
+     */
+    public StoredClassCatalog(Database database)
+        throws DatabaseException, IllegalArgumentException {
+
+        db = database;
+        DatabaseConfig dbConfig = db.getConfig();
+        EnvironmentConfig envConfig = db.getEnvironment().getConfig();
+
+        writeLockMode = (DbCompat.getInitializeLocking(envConfig) ||
+                         envConfig.getTransactional()) ? LockMode.RMW
+                                                       : LockMode.DEFAULT;
+        cdbMode = DbCompat.getInitializeCDB(envConfig);
+        txnMode = dbConfig.getTransactional();
+
+        if (!DbCompat.isTypeBtree(dbConfig)) {
+            throw new IllegalArgumentException(
+                    "The class catalog must be a BTREE database.");
+        }
+        if (DbCompat.getSortedDuplicates(dbConfig) ||
+            DbCompat.getUnsortedDuplicates(dbConfig)) {
+            throw new IllegalArgumentException(
+                    "The class catalog database must not allow duplicates.");
+        }
+
+        /*
+         * Create the class format and class info maps. Note that these are not
+         * synchronized, and therefore the methods that use them are
+         * synchronized.
+         */
+        classMap = new HashMap<String, ClassInfo>();
+        formatMap = new HashMap<BigInteger, ObjectStreamClass>();
+
+        DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY);
+        DatabaseEntry data = new DatabaseEntry();
+        if (dbConfig.getReadOnly()) {
+            /* Check that the class ID record exists. */
+            OperationStatus status = db.get(null, key, data, null);
+            if (status != OperationStatus.SUCCESS) {
+                throw new IllegalStateException
+                    ("A read-only catalog database may not be empty");
+            }
+        } else {
+            /* Add the initial class ID record if it doesn't exist.  */
+            data.setData(new byte[1]); // zero ID
+            /* Use putNoOverwrite to avoid phantoms. */
+            db.putNoOverwrite(null, key, data);
+        }
+    }
+
+    // javadoc is inherited
+    public synchronized void close()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+        }
+        db = null;
+        formatMap = null;
+        classMap = null;
+    }
+
+    // javadoc is inherited
+    public synchronized byte[] getClassID(ObjectStreamClass classFormat)
+        throws DatabaseException, ClassNotFoundException {
+
+        ClassInfo classInfo = getClassInfo(classFormat);
+        return classInfo.getClassID();
+    }
+
+    // javadoc is inherited
+    public synchronized ObjectStreamClass getClassFormat(byte[] classID)
+        throws DatabaseException, ClassNotFoundException {
+
+        return getClassFormat(classID, new DatabaseEntry());
+    }
+
+    /**
+     * Internal function for getting the class format.  Allows passing the
+     * DatabaseEntry object for the data, so the bytes of the class format can
+     * be examined afterwards.
+     */
+    private ObjectStreamClass getClassFormat(byte[] classID,
+                                             DatabaseEntry data)
+        throws DatabaseException, ClassNotFoundException {
+
+        /* Check the format map first; if not cached, read the format from the database. */
+
+        BigInteger classIDObj = new BigInteger(classID);
+        ObjectStreamClass classFormat = formatMap.get(classIDObj);
+        if (classFormat == null) {
+
+            /* Make the class format key. */
+
+            byte[] keyBytes = new byte[classID.length + 1];
+            keyBytes[0] = REC_CLASS_FORMAT;
+            System.arraycopy(classID, 0, keyBytes, 1, classID.length);
+            DatabaseEntry key = new DatabaseEntry(keyBytes);
+
+            /* Read the class format. */
+
+            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
+            if (status != OperationStatus.SUCCESS) {
+                throw new ClassNotFoundException("Catalog class ID not found");
+            }
+            try {
+                ObjectInputStream ois =
+                    new ObjectInputStream(
+                        new ByteArrayInputStream(data.getData(),
+                                                 data.getOffset(),
+                                                 data.getSize()));
+                classFormat = (ObjectStreamClass) ois.readObject();
+            } catch (IOException e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+
+            /* Update the class format map. */
+
+            formatMap.put(classIDObj, classFormat);
+        }
+        return classFormat;
+    }
+
+    /**
+     * Get the ClassInfo for a given class name, adding it and its
+     * ObjectStreamClass to the database if they are not already present, and
+     * caching both of them using the class info and class format maps.  When a
+     * class is first loaded from the database, the stored ObjectStreamClass is
+     * compared to the current ObjectStreamClass loaded by the Java class
+     * loader; if they are different, a new class ID is assigned for the
+     * current format.
+     */
+    private ClassInfo getClassInfo(ObjectStreamClass classFormat)
+        throws DatabaseException, ClassNotFoundException {
+
+        /*
+         * First check for a cached copy of the class info, which, if
+         * present, always contains the class format object.
+         */
+        String className = classFormat.getName();
+        ClassInfo classInfo = classMap.get(className);
+        if (classInfo != null) {
+            return classInfo;
+        } else {
+            /* Make class info key.  */
+            char[] nameChars = className.toCharArray();
+            byte[] keyBytes = new byte[1 + UtfOps.getByteLength(nameChars)];
+            keyBytes[0] = REC_CLASS_INFO;
+            UtfOps.charsToBytes(nameChars, 0, keyBytes, 1, nameChars.length);
+            DatabaseEntry key = new DatabaseEntry(keyBytes);
+
+            /* Read class info.  */
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
+            if (status != OperationStatus.SUCCESS) {
+                /*
+                 * Not found in the database, write class info and class
+                 * format.
+                 */
+                classInfo = putClassInfo(new ClassInfo(), className, key,
+                                         classFormat);
+            } else {
+                /*
+                 * Read class info to get the class format key, then read class
+                 * format.
+                 */
+                classInfo = new ClassInfo(data);
+                DatabaseEntry formatData = new DatabaseEntry();
+                ObjectStreamClass storedClassFormat =
+                    getClassFormat(classInfo.getClassID(), formatData);
+
+                /*
+                 * Compare the stored class format to the current class format,
+                 * and if they are different then generate a new class ID.
+                 */
+                if (!areClassFormatsEqual(storedClassFormat,
+                                          getBytes(formatData),
+                                          classFormat)) {
+                    classInfo = putClassInfo(classInfo, className, key,
+                                             classFormat);
+                }
+
+                /* Update the class info map.  */
+                classInfo.setClassFormat(classFormat);
+                classMap.put(className, classInfo);
+            }
+        }
+        return classInfo;
+    }
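+
+    /*
+     * For orientation, the catalog contains three kinds of records (a
+     * summary of the logic above, not additional API):
+     *
+     *   LAST_CLASS_ID_KEY            -> bytes of the last assigned class ID
+     *   REC_CLASS_INFO + UTF(name)   -> ClassInfo record holding the class ID
+     *   REC_CLASS_FORMAT + class ID  -> serialized ObjectStreamClass
+     */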
+
+    /**
+     * Assign a new class ID (increment the current ID record), write the
+     * ObjectStreamClass record for this new ID, and update the ClassInfo
+     * record with the new ID also.  The ClassInfo passed as an argument is the
+     * one to be updated.
+     */
+    private ClassInfo putClassInfo(ClassInfo classInfo,
+                                   String className,
+                                   DatabaseEntry classKey,
+                                   ObjectStreamClass classFormat)
+        throws DatabaseException, ClassNotFoundException {
+
+        /* An intent-to-write cursor is needed for CDB. */
+        CursorConfig cursorConfig = null;
+        if (cdbMode) {
+            cursorConfig = new CursorConfig();
+            DbCompat.setWriteCursor(cursorConfig, true);
+        }
+        Cursor cursor = null;
+        Transaction txn = null;
+        try {
+            if (txnMode) {
+                txn = db.getEnvironment().beginTransaction(null, null);
+            }
+            cursor = db.openCursor(txn, cursorConfig);
+
+            /* Get the current class ID. */
+            DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY);
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status = cursor.getSearchKey(key, data,
+                                                         writeLockMode);
+            if (status != OperationStatus.SUCCESS) {
+                throw new IllegalStateException("Class ID not initialized");
+            }
+            byte[] idBytes = getBytes(data);
+
+            /* Increment the ID by one and write the updated record.  */
+            idBytes = incrementID(idBytes);
+            data.setData(idBytes);
+            cursor.put(key, data);
+
+            /*
+             * Write the new class format record whose key is the ID just
+             * assigned.
+             */
+            byte[] keyBytes = new byte[1 + idBytes.length];
+            keyBytes[0] = REC_CLASS_FORMAT;
+            System.arraycopy(idBytes, 0, keyBytes, 1, idBytes.length);
+            key.setData(keyBytes);
+
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            ObjectOutputStream oos;
+            try {
+                oos = new ObjectOutputStream(baos);
+                oos.writeObject(classFormat);
+            } catch (IOException e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+            data.setData(baos.toByteArray());
+
+            cursor.put(key, data);
+
+            /*
+             * Write the new class info record, using the key passed in; this
+             * is done last so that a reader who gets the class info record
+             * first will always find the corresponding class format record.
+             */
+            classInfo.setClassID(idBytes);
+            classInfo.toDbt(data);
+
+            cursor.put(classKey, data);
+
+            /*
+             * Update the maps before closing the cursor, so that the cursor
+             * lock prevents other writers from duplicating this entry.
+             */
+            classInfo.setClassFormat(classFormat);
+            classMap.put(className, classInfo);
+            formatMap.put(new BigInteger(idBytes), classFormat);
+            return classInfo;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (txn != null) {
+                txn.commit();
+            }
+        }
+    }
+
+    private static byte[] incrementID(byte[] key) {
+
+        BigInteger id = new BigInteger(key);
+        id = id.add(BigInteger.valueOf(1));
+        return id.toByteArray();
+    }
+
+    /**
+     * Holds the class format key for a class, maintains a reference to the
+     * ObjectStreamClass.  Other fields can be added when we need to store more
+     * information per class.
+     */
+    private static class ClassInfo implements Serializable {
+        static final long serialVersionUID = 3845446969989650562L;
+
+        private byte[] classID;
+        private transient ObjectStreamClass classFormat;
+
+        ClassInfo() {
+        }
+
+        ClassInfo(DatabaseEntry dbt) {
+
+            byte[] data = dbt.getData();
+            int len = data[0];
+            classID = new byte[len];
+            System.arraycopy(data, 1, classID, 0, len);
+        }
+
+        void toDbt(DatabaseEntry dbt) {
+
+            byte[] data = new byte[1 + classID.length];
+            data[0] = (byte) classID.length;
+            System.arraycopy(classID, 0, data, 1, classID.length);
+            dbt.setData(data);
+        }
+
+        void setClassID(byte[] classID) {
+
+            this.classID = classID;
+        }
+
+        byte[] getClassID() {
+
+            return classID;
+        }
+
+        ObjectStreamClass getClassFormat() {
+
+            return classFormat;
+        }
+
+        void setClassFormat(ObjectStreamClass classFormat) {
+
+            this.classFormat = classFormat;
+        }
+    }
+
+    /**
+     * Return whether two class formats are equal.  This determines whether a
+     * new class format is needed for an object being serialized.  Formats must
+     * be identical in all respects, or a new format is needed.
+     */
+    private static boolean areClassFormatsEqual(ObjectStreamClass format1,
+                                                byte[] format1Bytes,
+                                                ObjectStreamClass format2) {
+        try {
+            if (format1Bytes == null) { // using cached format1 object
+                format1Bytes = getObjectBytes(format1);
+            }
+            byte[] format2Bytes = getObjectBytes(format2);
+            return java.util.Arrays.equals(format2Bytes, format1Bytes);
+        } catch (IOException e) { return false; }
+    }
+
+    /*
+     * We can return the same byte[] for all zero-length arrays.
+     */
+    private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];
+
+    private static byte[] getBytes(DatabaseEntry dbt) {
+        byte[] b = dbt.getData();
+        if (b == null) {
+            return null;
+        }
+        if (dbt.getOffset() == 0 && b.length == dbt.getSize()) {
+            return b;
+        }
+        int len = dbt.getSize();
+        if (len == 0) {
+            return ZERO_LENGTH_BYTE_ARRAY;
+        } else {
+            byte[] t = new byte[len];
+            System.arraycopy(b, dbt.getOffset(), t, 0, t.length);
+            return t;
+        }
+    }
+
+    private static byte[] getObjectBytes(Object o)
+        throws IOException {
+
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        ObjectOutputStream oos = new ObjectOutputStream(baos);
+        oos.writeObject(o);
+        return baos.toByteArray();
+    }
+}
diff --git a/src/com/sleepycat/bind/serial/TupleSerialBinding.java b/src/com/sleepycat/bind/serial/TupleSerialBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..bef2302b03609c9c6fd35306d243fe35e8ddd69d
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/TupleSerialBinding.java
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialBinding.java,v 1.28 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * An abstract <code>EntityBinding</code> that treats an entity's key entry as
+ * a tuple and its data entry as a serialized object.
+ *
+ * <p>This class takes care of serializing and deserializing the data entry,
+ * and converting the key entry to/from {@link TupleInput} and {@link
+ * TupleOutput} objects.  Its three abstract methods must be implemented by a
+ * concrete subclass to convert these objects to/from an entity object.</p>
+ * <ul>
+ * <li> {@link #entryToObject(TupleInput,Object)} </li>
+ * <li> {@link #objectToKey(Object,TupleOutput)} </li>
+ * <li> {@link #objectToData(Object)} </li>
+ * </ul>
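+ *
+ * <p>A minimal subclass sketch, for illustration only (the
+ * <code>PartData</code> and <code>PartEntity</code> classes and their
+ * members are hypothetical, not part of this API):</p>
+ * <pre>
+ *  class PartBinding extends TupleSerialBinding&lt;PartData,PartEntity&gt; {
+ *      PartBinding(ClassCatalog catalog) {
+ *          super(catalog, PartData.class);
+ *      }
+ *      public PartEntity entryToObject(TupleInput keyInput, PartData dataInput) {
+ *          return new PartEntity(keyInput.readString(), dataInput);
+ *      }
+ *      public void objectToKey(PartEntity object, TupleOutput keyOutput) {
+ *          keyOutput.writeString(object.getNumber());
+ *      }
+ *      public PartData objectToData(PartEntity object) {
+ *          return object.getData();
+ *      }
+ *  }
+ * </pre>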
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleSerialBinding<D,E> extends TupleBase
+    implements EntityBinding<E> {
+
+    protected SerialBinding<D> dataBinding;
+
+    /**
+     * Creates a tuple-serial entity binding.
+     *
+     * @param classCatalog is the catalog to hold shared class information
+     * and, if stored in a database, should be a {@link StoredClassCatalog}.
+     *
+     * @param baseClass is the base class for the serialized data objects
+     * stored using this binding.
+     */
+    public TupleSerialBinding(ClassCatalog classCatalog,
+                              Class<D> baseClass) {
+
+        this(new SerialBinding<D>(classCatalog, baseClass));
+    }
+
+    /**
+     * Creates a tuple-serial entity binding.
+     *
+     * @param dataBinding is the data binding.
+     */
+    public TupleSerialBinding(SerialBinding<D> dataBinding) {
+
+        this.dataBinding = dataBinding;
+    }
+
+    // javadoc is inherited
+    public E entryToObject(DatabaseEntry key, DatabaseEntry data) {
+
+        return entryToObject(entryToInput(key),
+                             dataBinding.entryToObject(data));
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, DatabaseEntry key) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToKey(object, output);
+        outputToEntry(output, key);
+    }
+
+    // javadoc is inherited
+    public void objectToData(E object, DatabaseEntry data) {
+
+        D dataObject = objectToData(object);
+        dataBinding.objectToEntry(dataObject, data);
+    }
+
+    /**
+     * Constructs an entity object from {@link TupleInput} key entry and
+     * deserialized data entry objects.
+     *
+     * @param keyInput is the {@link TupleInput} key entry object.
+     *
+     * @param dataInput is the deserialized data entry object.
+     *
+     * @return the entity object constructed from the key and data.
+     */
+    public abstract E entryToObject(TupleInput keyInput, D dataInput);
+
+    /**
+     * Extracts a key tuple from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @param keyOutput is the {@link TupleOutput} to which the key should be
+     * written.
+     */
+    public abstract void objectToKey(E object, TupleOutput keyOutput);
+
+    /**
+     * Extracts a data object from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @return the deserialized data object.
+     */
+    public abstract D objectToData(E object);
+}
diff --git a/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java b/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..4820a901ae97499bdda6d35ea60ecb36d1f51581
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java
@@ -0,0 +1,146 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialKeyCreator.java,v 1.35 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.ForeignKeyNullifier;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * An abstract key creator that uses a tuple key and a serial data entry. This
+ * class takes care of serializing and deserializing the data entry, and
+ * converting the key entry to/from {@link TupleInput} and {@link TupleOutput}
+ * objects.
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
+ * <ul>
+ * <li> {@link #createSecondaryKey(TupleInput,Object,TupleOutput)} </li>
+ * </ul>
+ * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+ * specified when opening the secondary database, the following method must be
+ * overridden to nullify the foreign index key.  If NULLIFY was not specified,
+ * this method need not be overridden.</p>
+ * <ul>
+ * <li> {@link #nullifyForeignKey(Object)} </li>
+ * </ul>
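+ *
+ * <p>A minimal subclass sketch, for illustration only (the
+ * <code>PartData</code> class and its <code>getSupplier</code> method are
+ * hypothetical):</p>
+ * <pre>
+ *  class SupplierKeyCreator extends TupleSerialKeyCreator&lt;PartData&gt; {
+ *      SupplierKeyCreator(ClassCatalog catalog) {
+ *          super(catalog, PartData.class);
+ *      }
+ *      public boolean createSecondaryKey(TupleInput primaryKeyInput,
+ *                                        PartData dataInput,
+ *                                        TupleOutput indexKeyOutput) {
+ *          if (dataInput.getSupplier() == null) {
+ *              return false; // key not present
+ *          }
+ *          indexKeyOutput.writeString(dataInput.getSupplier());
+ *          return true;
+ *      }
+ *  }
+ * </pre>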
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleSerialKeyCreator<D> extends TupleBase
+    implements SecondaryKeyCreator, ForeignKeyNullifier {
+
+    protected SerialBinding<D> dataBinding;
+
+    /**
+     * Creates a tuple-serial key creator.
+     *
+     * @param classCatalog is the catalog to hold shared class information
+     * and, if stored in a database, should be a {@link StoredClassCatalog}.
+     *
+     * @param dataClass is the base class for the serialized data objects.
+     */
+    public TupleSerialKeyCreator(ClassCatalog classCatalog,
+                                 Class<D> dataClass) {
+
+        this(new SerialBinding<D>(classCatalog, dataClass));
+    }
+
+    /**
+     * Creates a tuple-serial key creator.
+     *
+     * @param dataBinding is the data binding.
+     */
+    public TupleSerialKeyCreator(SerialBinding<D> dataBinding) {
+
+        this.dataBinding = dataBinding;
+    }
+
+    // javadoc is inherited
+    public boolean createSecondaryKey(SecondaryDatabase db,
+                                      DatabaseEntry primaryKeyEntry,
+                                      DatabaseEntry dataEntry,
+                                      DatabaseEntry indexKeyEntry)
+        throws DatabaseException {
+
+        TupleOutput output = getTupleOutput(null);
+        TupleInput primaryKeyInput = entryToInput(primaryKeyEntry);
+        D dataInput = dataBinding.entryToObject(dataEntry);
+        if (createSecondaryKey(primaryKeyInput, dataInput, output)) {
+            outputToEntry(output, indexKeyEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    // javadoc is inherited
+    public boolean nullifyForeignKey(SecondaryDatabase db,
+                                     DatabaseEntry dataEntry)
+        throws DatabaseException {
+
+        D data = dataBinding.entryToObject(dataEntry);
+        data = nullifyForeignKey(data);
+        if (data != null) {
+            dataBinding.objectToEntry(data, dataEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Creates the index key entry from primary key tuple entry and
+     * deserialized data entry.
+     *
+     * @param primaryKeyInput is the {@link TupleInput} for the primary key
+     * entry, or null if no primary key entry is used to construct the index
+     * key.
+     *
+     * @param dataInput is the deserialized data entry, or null if no data
+     * entry is used to construct the index key.
+     *
+     * @param indexKeyOutput is the destination index key tuple.  For index
+     * keys that are optionally present, nothing should be written to the
+     * tuple output; this indicates that the key is not present or is null.
+     *
+     * @return true if a key was created, or false to indicate that the key is
+     * not present.
+     */
+    public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                               D dataInput,
+                                               TupleOutput indexKeyOutput);
+
+    /**
+     * Clears the index key in the deserialized data entry.
+     *
+     * <p>On entry the data parameter contains the index key to be cleared.  It
+     * should be changed by this method such that {@link #createSecondaryKey}
+     * will return false.  Other fields in the data object should remain
+     * unchanged.</p>
+     *
+     * @param data is the source and destination deserialized data
+     * entry.
+     *
+     * @return the destination data object, or null to indicate that the
+     * key is not present and no change is necessary.  The data returned may
+     * be the same object passed as the data parameter or a newly created
+     * object.
+     */
+    public D nullifyForeignKey(D data) {
+
+        return null;
+    }
+}
diff --git a/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java b/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..e674e5bcce154622eb1a116e01555cc12ecb6241
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialMarshalledBinding.java,v 1.28 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A concrete <code>TupleSerialBinding</code> that delegates to the
+ * <code>MarshalledTupleKeyEntity</code> interface of the entity class.
+ *
+ * <p>The {@link MarshalledTupleKeyEntity} interface must be implemented by the
+ * entity class to convert between the key/data entry and entity object.</p>
+ *
+ * <p> The binding is "tricky" in that it uses the entity class for both the
+ * stored data entry and the combined entity object.  To do this, the entity's
+ * key field(s) are transient and are set by the binding after the data object
+ * has been deserialized. This avoids the use of a "data" class completely.
+ * </p>
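+ *
+ * <p>A minimal sketch of such an entity, for illustration only (the
+ * <code>Part</code> class and its fields are hypothetical):</p>
+ * <pre>
+ *  class Part implements MarshalledTupleKeyEntity, java.io.Serializable {
+ *      private transient String number;  // primary key; not serialized
+ *      private String name;              // stored in the data entry
+ *
+ *      public void marshalPrimaryKey(TupleOutput keyOutput) {
+ *          keyOutput.writeString(number);
+ *      }
+ *      public void unmarshalPrimaryKey(TupleInput keyInput) {
+ *          number = keyInput.readString();
+ *      }
+ *      public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+ *          return false; // no secondary keys in this sketch
+ *      }
+ *      public boolean nullifyForeignKey(String keyName) {
+ *          return false; // no foreign keys in this sketch
+ *      }
+ *  }
+ * </pre>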
+ *
+ * @see MarshalledTupleKeyEntity
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public class TupleSerialMarshalledBinding<E extends MarshalledTupleKeyEntity>
+    extends TupleSerialBinding<E,E> {
+
+    /**
+     * Creates a tuple-serial marshalled binding object.
+     *
+     * @param classCatalog is the catalog to hold shared class information
+     * and, if stored in a database, should be a {@link StoredClassCatalog}.
+     *
+     * @param baseClass is the base class for serialized objects stored using
+     * this binding -- all objects using this binding must be an instance of
+     * this class.
+     */
+    public TupleSerialMarshalledBinding(ClassCatalog classCatalog,
+                                        Class<E> baseClass) {
+
+        this(new SerialBinding<E>(classCatalog, baseClass));
+    }
+
+    /**
+     * Creates a tuple-serial marshalled binding object.
+     *
+     * @param dataBinding is the binding used for serializing and deserializing
+     * the entity object.
+     */
+    public TupleSerialMarshalledBinding(SerialBinding<E> dataBinding) {
+
+        super(dataBinding);
+    }
+
+    // javadoc is inherited
+    public E entryToObject(TupleInput tupleInput, E javaInput) {
+
+        /*
+         * Creates the entity by combining the stored key and data.
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        if (tupleInput != null) { // may be null if not used by key extractor
+            javaInput.unmarshalPrimaryKey(tupleInput);
+        }
+        return javaInput;
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, TupleOutput output) {
+
+        /* Creates the stored key from the entity. */
+        object.marshalPrimaryKey(output);
+    }
+
+    // javadoc is inherited
+    public E objectToData(E object) {
+
+        /*
+         * Returns the entity as the stored data.  There is nothing to do here
+         * since the entity's key fields are transient.
+         */
+        return object;
+    }
+}
diff --git a/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java b/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..8f3784dd6ce7f3ed81120426dd3b89e46b28c1ac
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java
@@ -0,0 +1,78 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialMarshalledKeyCreator.java,v 1.29 2008/05/27 15:30:32 mark Exp $
+ */
+
+package com.sleepycat.bind.serial;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * A concrete key creator that works in conjunction with a {@link
+ * TupleSerialMarshalledBinding}.  This key creator works by calling the
+ * methods of the {@link MarshalledTupleKeyEntity} interface to create and
+ * clear the index key fields.
+ *
+ * @see <a href="SerialBinding.html#evolution">Class Evolution</a>
+ *
+ * @author Mark Hayes
+ */
+public class TupleSerialMarshalledKeyCreator<D extends
+                                             MarshalledTupleKeyEntity>
+    extends TupleSerialKeyCreator<D> {
+
+    private TupleSerialMarshalledBinding<D> binding;
+    private String keyName;
+
+    /**
+     * Creates a tuple-serial marshalled key creator.
+     *
+     * @param binding is the binding used for the tuple-serial entity.
+     *
+     * @param keyName is the key name passed to the {@link
+     * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the
+     * index key.
+     */
+    public TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding<D>
+                                           binding,
+                                           String keyName) {
+
+        super(binding.dataBinding);
+        this.binding = binding;
+        this.keyName = keyName;
+
+        if (dataBinding == null) {
+            throw new NullPointerException("dataBinding may not be null");
+        }
+    }
+
+    // javadoc is inherited
+    public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                      D dataInput,
+                                      TupleOutput indexKeyOutput) {
+
+        /*
+         * The primary key is unmarshalled before marshalling the index key, to
+         * account for cases where the index key includes fields taken from the
+         * primary key.
+         */
+        MarshalledTupleKeyEntity entity =
+            binding.entryToObject(primaryKeyInput, dataInput);
+
+        return entity.marshalSecondaryKey(keyName, indexKeyOutput);
+    }
+
+    // javadoc is inherited
+    public D nullifyForeignKey(D dataInput) {
+
+        MarshalledTupleKeyEntity entity =
+            binding.entryToObject(null, dataInput);
+
+        return entity.nullifyForeignKey(keyName) ? dataInput : null;
+    }
+}
diff --git a/src/com/sleepycat/bind/serial/package.html b/src/com/sleepycat/bind/serial/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..d37e9b81605198d02e351ce588b94b4f78c1cb23
--- /dev/null
+++ b/src/com/sleepycat/bind/serial/package.html
@@ -0,0 +1,10 @@
+<!-- $Id: package.html,v 1.12 2008/02/05 23:28:18 mark Exp $ -->
+<html>
+<body>
+Bindings that use Java serialization.
+<!-- begin JE only -->
+@see <a href="{@docRoot}/../GettingStartedGuide/bindAPI.html"
+        target="_top">[Getting Started Guide]</a>
+<!-- end JE only -->
+</body>
+</html>
diff --git a/src/com/sleepycat/bind/tuple/BigIntegerBinding.java b/src/com/sleepycat/bind/tuple/BigIntegerBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..468104aeb7f5b178dcfdc0feba1c46024173a2af
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/BigIntegerBinding.java
@@ -0,0 +1,71 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: BigIntegerBinding.java,v 1.5 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import java.math.BigInteger;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>BigInteger</code> value.
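+ *
+ * <p>A short sketch of direct usage with the static methods, for
+ * illustration:</p>
+ * <pre>
+ *  DatabaseEntry entry = new DatabaseEntry();
+ *  BigIntegerBinding.bigIntegerToEntry(new BigInteger("12345"), entry);
+ *  BigInteger value = BigIntegerBinding.entryToBigInteger(entry);
+ * </pre>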
+ */
+public class BigIntegerBinding extends TupleBinding<BigInteger> {
+
+    // javadoc is inherited
+    public BigInteger entryToObject(TupleInput input) {
+
+        return input.readBigInteger();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(BigInteger object, TupleOutput output) {
+
+        output.writeBigInteger(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(BigInteger object) {
+
+        return sizedOutput(object);
+    }
+
+    /**
+     * Converts an entry buffer into a <code>BigInteger</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static BigInteger entryToBigInteger(DatabaseEntry entry) {
+
+        return entryToInput(entry).readBigInteger();
+    }
+
+    /**
+     * Converts a <code>BigInteger</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void bigIntegerToEntry(BigInteger val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput(val).writeBigInteger(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput(BigInteger val) {
+
+        int len = TupleOutput.getBigIntegerByteLength(val);
+        return new TupleOutput(new byte[len]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/BooleanBinding.java b/src/com/sleepycat/bind/tuple/BooleanBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6d69b5a4101e11314af3f6ddc45fbc16ef8275b
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/BooleanBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: BooleanBinding.java,v 1.15 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Boolean</code> primitive
+ * wrapper or a <code>boolean</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class BooleanBinding extends TupleBinding<Boolean> {
+
+    private static final int BOOLEAN_SIZE = 1;
+
+    // javadoc is inherited
+    public Boolean entryToObject(TupleInput input) {
+
+        return input.readBoolean();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Boolean object, TupleOutput output) {
+
+        output.writeBoolean(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Boolean object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>boolean</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static boolean entryToBoolean(DatabaseEntry entry) {
+
+        return entryToInput(entry).readBoolean();
+    }
+
+    /**
+     * Converts a simple <code>boolean</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void booleanToEntry(boolean val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeBoolean(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[BOOLEAN_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/ByteBinding.java b/src/com/sleepycat/bind/tuple/ByteBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..1d691f79c30376351d389858a3e2336e4c27a7ad
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/ByteBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ByteBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Byte</code> primitive
+ * wrapper or a <code>byte</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class ByteBinding extends TupleBinding<Byte> {
+
+    private static final int BYTE_SIZE = 1;
+
+    // javadoc is inherited
+    public Byte entryToObject(TupleInput input) {
+
+        return input.readByte();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Byte object, TupleOutput output) {
+
+        output.writeByte(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Byte object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>byte</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static byte entryToByte(DatabaseEntry entry) {
+
+        return entryToInput(entry).readByte();
+    }
+
+    /**
+     * Converts a simple <code>byte</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void byteToEntry(byte val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeByte(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[BYTE_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/CharacterBinding.java b/src/com/sleepycat/bind/tuple/CharacterBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..d939953f3eff2f0f037ff5a93fb1bc267d0bac86
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/CharacterBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CharacterBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Character</code> primitive
+ * wrapper or a <code>char</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class CharacterBinding extends TupleBinding<Character> {
+
+    private static final int CHAR_SIZE = 2;
+
+    // javadoc is inherited
+    public Character entryToObject(TupleInput input) {
+
+        return input.readChar();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Character object, TupleOutput output) {
+
+        output.writeChar(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Character object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>char</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static char entryToChar(DatabaseEntry entry) {
+
+        return entryToInput(entry).readChar();
+    }
+
+    /**
+     * Converts a simple <code>char</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void charToEntry(char val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeChar(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[CHAR_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/DoubleBinding.java b/src/com/sleepycat/bind/tuple/DoubleBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..42c225966d0da9750b29eb5202363d24459c309a
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/DoubleBinding.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DoubleBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Double</code> primitive
+ * wrapper or a <code>double</code> primitive.
+ *
+ * <p><em>Note:</em> This class produces byte array values that by default
+ * (without a custom comparator) do <em>not</em> sort correctly for negative
+ * values.  Only non-negative values are sorted correctly by default.  To sort
+ * all values correctly by default, use {@link SortedDoubleBinding}.</p>
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class DoubleBinding extends TupleBinding<Double> {
+
+    private static final int DOUBLE_SIZE = 8;
+
+    // javadoc is inherited
+    public Double entryToObject(TupleInput input) {
+
+        return input.readDouble();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Double object, TupleOutput output) {
+
+        output.writeDouble(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Double object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>double</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static double entryToDouble(DatabaseEntry entry) {
+
+        return entryToInput(entry).readDouble();
+    }
+
+    /**
+     * Converts a simple <code>double</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void doubleToEntry(double val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeDouble(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[DOUBLE_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/FloatBinding.java b/src/com/sleepycat/bind/tuple/FloatBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..dbf8980ab00eb1740b74e51fe2096b99c758005e
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/FloatBinding.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: FloatBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Float</code> primitive
+ * wrapper or a <code>float</code> primitive.
+ *
+ * <p><em>Note:</em> This class produces byte array values that by default
+ * (without a custom comparator) do <em>not</em> sort correctly for negative
+ * values.  Only non-negative values are sorted correctly by default.  To sort
+ * all values correctly by default, use {@link SortedFloatBinding}.</p>
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class FloatBinding extends TupleBinding<Float> {
+
+    private static final int FLOAT_SIZE = 4;
+
+    // javadoc is inherited
+    public Float entryToObject(TupleInput input) {
+
+        return input.readFloat();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Float object, TupleOutput output) {
+
+        output.writeFloat(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Float object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>float</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static float entryToFloat(DatabaseEntry entry) {
+
+        return entryToInput(entry).readFloat();
+    }
+
+    /**
+     * Converts a simple <code>float</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void floatToEntry(float val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeFloat(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[FLOAT_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/IntegerBinding.java b/src/com/sleepycat/bind/tuple/IntegerBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..14a6c4b00b3439e3b0e611cf4d6c7fb77d8ae17b
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/IntegerBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: IntegerBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for an <code>Integer</code> primitive
+ * wrapper or an <code>int</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
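+ *
+ * <p>A short sketch of both usage styles, for illustration:</p>
+ * <pre>
+ *  // 1. Direct conversion with the static methods.
+ *  DatabaseEntry entry = new DatabaseEntry();
+ *  IntegerBinding.intToEntry(42, entry);
+ *  int value = IntegerBinding.entryToInt(entry);
+ *
+ *  // 2. A binding instance for use with stored collections.
+ *  TupleBinding&lt;Integer&gt; binding =
+ *      TupleBinding.getPrimitiveBinding(Integer.class);
+ * </pre>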
+ */
+public class IntegerBinding extends TupleBinding<Integer> {
+
+    private static final int INT_SIZE = 4;
+
+    // javadoc is inherited
+    public Integer entryToObject(TupleInput input) {
+
+        return input.readInt();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Integer object, TupleOutput output) {
+
+        output.writeInt(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Integer object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>int</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static int entryToInt(DatabaseEntry entry) {
+
+        return entryToInput(entry).readInt();
+    }
+
+    /**
+     * Converts a simple <code>int</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void intToEntry(int val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeInt(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[INT_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/LongBinding.java b/src/com/sleepycat/bind/tuple/LongBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..5da57edd1311abb04578749deac1c37ba0699b09
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/LongBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: LongBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Long</code> primitive
+ * wrapper or a <code>long</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class LongBinding extends TupleBinding<Long> {
+
+    private static final int LONG_SIZE = 8;
+
+    // javadoc is inherited
+    public Long entryToObject(TupleInput input) {
+
+        return input.readLong();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Long object, TupleOutput output) {
+
+        output.writeLong(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Long object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>long</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static long entryToLong(DatabaseEntry entry) {
+
+        return entryToInput(entry).readLong();
+    }
+
+    /**
+     * Converts a simple <code>long</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void longToEntry(long val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeLong(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[LONG_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java b/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2a083e93c1b3f2cef3bedb51d5e9b5a8df71c33
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledTupleEntry.java,v 1.23 2008/01/07 14:28:44 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+/**
+ * A marshalling interface implemented by key, data or entity classes that
+ * are represented as tuples.
+ *
+ * <p>Key classes implement this interface to marshal their key entry.  Data or
+ * entity classes implement this interface to marshal their data entry.
+ * Implementations of this interface must have a public no-argument
+ * constructor so that they can be instantiated by a binding, prior to calling
+ * the {@link #unmarshalEntry} method.</p>
+ *
+ * <p>Note that implementing this interface is not necessary when the object is
+ * a Java simple type, for example: String, Integer, etc. These types can be
+ * used with built-in bindings returned by {@link
+ * TupleBinding#getPrimitiveBinding}.</p>
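+ *
+ * <p>A minimal implementation sketch, for illustration only (the
+ * <code>PartKey</code> class and its field are hypothetical):</p>
+ * <pre>
+ *  public class PartKey implements MarshalledTupleEntry {
+ *      private String number;
+ *
+ *      public PartKey() {}  // public no-argument constructor is required
+ *
+ *      public void marshalEntry(TupleOutput dataOutput) {
+ *          dataOutput.writeString(number);
+ *      }
+ *      public void unmarshalEntry(TupleInput dataInput) {
+ *          number = dataInput.readString();
+ *      }
+ *  }
+ * </pre>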
+ *
+ * @author Mark Hayes
+ * @see TupleTupleMarshalledBinding
+ */
+public interface MarshalledTupleEntry {
+
+    /**
+     * Construct the key or data tuple entry from the key or data object.
+     *
+     * @param dataOutput is the output tuple.
+     */
+    void marshalEntry(TupleOutput dataOutput);
+
+    /**
+     * Construct the key or data object from the key or data tuple entry.
+     *
+     * @param dataInput is the input tuple.
+     */
+    void unmarshalEntry(TupleInput dataInput);
+}
diff --git a/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java b/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..555b7c98640cead7ed43b6c19beab5ee8142cca3
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java
@@ -0,0 +1,75 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledTupleKeyEntity.java,v 1.25 2008/02/05 23:28:19 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+/**
+ * A marshalling interface implemented by entity classes that represent keys as
+ * tuples. Since <code>MarshalledTupleKeyEntity</code> objects are instantiated
+ * using Java deserialization, no particular constructor is required by classes
+ * that implement this interface.
+ *
+ * <p>Note that a marshalled tuple key extractor is somewhat less efficient
+ * than a non-marshalled tuple key extractor because more conversions are
+ * needed.  A marshalled key extractor must convert the entry to an object in
+ * order to extract the key fields, while an unmarshalled key extractor does
+ * not.</p>
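+ *
+ * <p>For illustration only, a sketch of the secondary key methods for an
+ * entity with one optional key named <code>"supplier"</code> (the field name
+ * is hypothetical):</p>
+ * <pre>
+ *  public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+ *      if ("supplier".equals(keyName) &amp;&amp; supplier != null) {
+ *          keyOutput.writeString(supplier);
+ *          return true;
+ *      }
+ *      return false; // key not present
+ *  }
+ *
+ *  public boolean nullifyForeignKey(String keyName) {
+ *      if ("supplier".equals(keyName) &amp;&amp; supplier != null) {
+ *          supplier = null;
+ *          return true;
+ *      }
+ *      return false; // no change necessary
+ *  }
+ * </pre>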
+ *
+ * @author Mark Hayes
+ * @see TupleTupleMarshalledBinding
+ * @see com.sleepycat.bind.serial.TupleSerialMarshalledBinding
+ */
+public interface MarshalledTupleKeyEntity {
+
+    /**
+     * Extracts the entity's primary key and writes it to the key output.
+     *
+     * @param keyOutput is the output tuple.
+     */
+    void marshalPrimaryKey(TupleOutput keyOutput);
+
+    /**
+     * Completes construction of the entity by setting its primary key from the
+     * stored primary key.
+     *
+     * @param keyInput is the input tuple.
+     */
+    void unmarshalPrimaryKey(TupleInput keyInput);
+
+    /**
+     * Extracts the entity's secondary key and writes it to the key output.
+     *
+     * @param keyName identifies the secondary key.
+     *
+     * @param keyOutput is the output tuple.
+     *
+     * @return true if a key was created, or false to indicate that the key is
+     * not present.
+     */
+    boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput);
+
+    /**
+     * Clears the entity's secondary key fields for the given key name.
+     *
+     * <p>The specified index key should be changed by this method such that
+     * {@link #marshalSecondaryKey} for the same key name will return false.
+     * Other fields in the data object should remain unchanged.</p>
+     *
+     * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+     * specified when opening the secondary database, this method is called
+     * when the entity for this foreign key is deleted.  If NULLIFY was not
+     * specified, this method will not be called and may always return
+     * false.</p>
+     *
+     * @param keyName identifies the secondary key.
+     *
+     * @return true if the key was cleared, or false to indicate that the key
+     * is not present and no change is necessary.
+     */
+    boolean nullifyForeignKey(String keyName);
+}
diff --git a/src/com/sleepycat/bind/tuple/ShortBinding.java b/src/com/sleepycat/bind/tuple/ShortBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8a98cd57183a192001e5ebfa3391025191f144c
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/ShortBinding.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ShortBinding.java,v 1.14 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Short</code> primitive
+ * wrapper or a <code>short</code> primitive.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class ShortBinding extends TupleBinding<Short> {
+
+    private static final int SHORT_SIZE = 2;
+
+    // javadoc is inherited
+    public Short entryToObject(TupleInput input) {
+
+        return input.readShort();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(Short object, TupleOutput output) {
+
+        output.writeShort(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(Short object) {
+
+        return sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>short</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static short entryToShort(DatabaseEntry entry) {
+
+        return entryToInput(entry).readShort();
+    }
+
+    /**
+     * Converts a simple <code>short</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void shortToEntry(short val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput().writeShort(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput() {
+
+        return new TupleOutput(new byte[SHORT_SIZE]);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java b/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..4cd018057ce06c77acc13536284639917c9a741d
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SortedDoubleBinding.java,v 1.8 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Double</code> primitive
+ * wrapper or a <code>double</code> primitive.
+ *
+ * <p>This class produces byte array values that by default (without a custom
+ * comparator) sort correctly, including sorting of negative values.
+ * Therefore, this class should normally be used instead of {@link
+ * DoubleBinding} which does not by default support sorting of negative values.
+ * Please note that:</p>
+ * <ul>
+ * <li>The byte array (stored) formats used by {@link DoubleBinding} and
+ * {@link SortedDoubleBinding} are different and incompatible.  They are not
+ * interchangeable once data has been stored.</li>
+ * <li>An instance of {@link DoubleBinding}, not {@link SortedDoubleBinding},
+ * is returned by the {@link TupleBinding#getPrimitiveBinding} method.
+ * Therefore, to use {@link SortedDoubleBinding}, {@link
+ * TupleBinding#getPrimitiveBinding} should not be called.</li>
+ * </ul>
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
+ */
+public class SortedDoubleBinding extends TupleBinding<Double> {
+
+    /* javadoc is inherited */
+    public Double entryToObject(TupleInput input) {
+
+        return input.readSortedDouble();
+    }
+
+    /* javadoc is inherited */
+    public void objectToEntry(Double object, TupleOutput output) {
+
+        output.writeSortedDouble(object);
+    }
+
+    /* javadoc is inherited */
+    protected TupleOutput getTupleOutput(Double object) {
+
+        return DoubleBinding.sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>double</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static double entryToDouble(DatabaseEntry entry) {
+
+        return entryToInput(entry).readSortedDouble();
+    }
+
+    /**
+     * Converts a simple <code>double</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void doubleToEntry(double val, DatabaseEntry entry) {
+
+        outputToEntry(DoubleBinding.sizedOutput().writeSortedDouble(val),
+                      entry);
+    }
+}
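+
+/*
+ * Illustrative sketch (hypothetical code, not part of this file's API):
+ * because getPrimitiveBinding returns a DoubleBinding, a SortedDoubleBinding
+ * is constructed directly when the sorted (negative-value-friendly) stored
+ * format is wanted, as the notes above explain.
+ */
+class SortedDoubleBindingUsageSketch {
+    static void makeSortableKey() {
+        DatabaseEntry key = new DatabaseEntry();
+        SortedDoubleBinding.doubleToEntry(-1.5, key);   // sorts before 0.0
+        double restored = SortedDoubleBinding.entryToDouble(key);
+
+        /* As an EntryBinding instance for use with stored collections. */
+        TupleBinding<Double> binding = new SortedDoubleBinding();
+    }
+}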
diff --git a/src/com/sleepycat/bind/tuple/SortedFloatBinding.java b/src/com/sleepycat/bind/tuple/SortedFloatBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..d5ff412d29568d7206393d32bfd863bcc0286d26
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/SortedFloatBinding.java
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SortedFloatBinding.java,v 1.7 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a <code>Float</code> primitive
+ * wrapper or a <code>float</code> primitive.
+ *
+ * <p>This class produces byte array values that by default (without a custom
+ * comparator) sort correctly, including sorting of negative values.
+ * Therefore, this class should normally be used instead of {@link
+ * FloatBinding} which does not by default support sorting of negative values.
+ * Please note that:</p>
+ * <ul>
+ * <li>The byte array (stored) formats used by {@link FloatBinding} and
+ * {@link SortedFloatBinding} are different and incompatible.  They are not
+ * interchangeable once data has been stored.</li>
+ * <li>An instance of {@link FloatBinding}, not {@link SortedFloatBinding}, is
+ * returned by the {@link TupleBinding#getPrimitiveBinding} method.
+ * Therefore, to use {@link SortedFloatBinding}, {@link
+ * TupleBinding#getPrimitiveBinding} should not be called.</li>
+ * </ul>
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
+ */
+public class SortedFloatBinding extends TupleBinding<Float> {
+
+    /* javadoc is inherited */
+    public Float entryToObject(TupleInput input) {
+
+        return input.readSortedFloat();
+    }
+
+    /* javadoc is inherited */
+    public void objectToEntry(Float object, TupleOutput output) {
+
+        output.writeSortedFloat(object);
+    }
+
+    /* javadoc is inherited */
+    protected TupleOutput getTupleOutput(Float object) {
+
+        return FloatBinding.sizedOutput();
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>float</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static float entryToFloat(DatabaseEntry entry) {
+
+        return entryToInput(entry).readSortedFloat();
+    }
+
+    /**
+     * Converts a simple <code>float</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void floatToEntry(float val, DatabaseEntry entry) {
+
+        outputToEntry(FloatBinding.sizedOutput().writeSortedFloat(val), entry);
+    }
+}
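+
+/*
+ * Illustrative sketch (hypothetical code, not part of this file's API):
+ * demonstrating the class javadoc's sorting claim.  Keys are compared as
+ * unsigned byte arrays, and the sorted format keeps negative values ordered
+ * before positive ones.
+ */
+class SortedFloatOrderingSketch {
+    static void compareStoredBytes() {
+        DatabaseEntry neg = new DatabaseEntry();
+        DatabaseEntry pos = new DatabaseEntry();
+        SortedFloatBinding.floatToEntry(-2.0f, neg);
+        SortedFloatBinding.floatToEntry(1.0f, pos);
+
+        /*
+         * The transformed sign byte of the negative value is smaller, so
+         * -2.0f orders before 1.0f under unsigned byte comparison.
+         */
+        int first = neg.getData()[neg.getOffset()] & 0xff;    // 0x3f
+        int second = pos.getData()[pos.getOffset()] & 0xff;   // 0xbf
+        boolean negativeSortsFirst = first < second;           // true
+    }
+}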
diff --git a/src/com/sleepycat/bind/tuple/StringBinding.java b/src/com/sleepycat/bind/tuple/StringBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..94e4c01daba32fc471573c467d716ce952b513c7
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/StringBinding.java
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StringBinding.java,v 1.13 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.util.UtfOps;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>TupleBinding</code> for a simple <code>String</code> value.
+ *
+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
+ */
+public class StringBinding extends TupleBinding<String> {
+
+    // javadoc is inherited
+    public String entryToObject(TupleInput input) {
+
+        return input.readString();
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(String object, TupleOutput output) {
+
+        output.writeString(object);
+    }
+
+    // javadoc is inherited
+    protected TupleOutput getTupleOutput(String object) {
+
+        return sizedOutput(object);
+    }
+
+    /**
+     * Converts an entry buffer into a simple <code>String</code> value.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the resulting value.
+     */
+    public static String entryToString(DatabaseEntry entry) {
+
+        return entryToInput(entry).readString();
+    }
+
+    /**
+     * Converts a simple <code>String</code> value into an entry buffer.
+     *
+     * @param val is the source value.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void stringToEntry(String val, DatabaseEntry entry) {
+
+        outputToEntry(sizedOutput(val).writeString(val), entry);
+    }
+
+    /**
+     * Returns a tuple output object of the exact size needed, to avoid
+     * wasting space when a single primitive is output.
+     */
+    private static TupleOutput sizedOutput(String val) {
+
+        int stringLength =
+            (val == null) ? 1 : UtfOps.getByteLength(val.toCharArray());
+        stringLength++;           // null terminator
+        return new TupleOutput(new byte[stringLength]);
+    }
+}
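+
+/*
+ * Illustrative sketch (hypothetical code, not part of this file's API): a
+ * String round trip, including the null value, which the tuple format stores
+ * distinctly from the empty string.
+ */
+class StringBindingUsageSketch {
+    static void roundTrip() {
+        DatabaseEntry entry = new DatabaseEntry();
+        StringBinding.stringToEntry("hello", entry);
+        String s = StringBinding.entryToString(entry);     // "hello"
+
+        StringBinding.stringToEntry(null, entry);           // stored as 0xFF, 0x00
+        String n = StringBinding.entryToString(entry);      // null, not ""
+    }
+}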
diff --git a/src/com/sleepycat/bind/tuple/TupleBase.java b/src/com/sleepycat/bind/tuple/TupleBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..e1d7a3681dd5fff832a29253baddc275bfca054a
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleBase.java
@@ -0,0 +1,163 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleBase.java,v 1.8 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A base class for tuple bindings and tuple key creators that provides control
+ * over the allocation of the output buffer.
+ *
+ * <p>Tuple bindings and key creators append data to a {@link TupleOutput}
+ * instance, which is also a {@link com.sleepycat.util.FastOutputStream}
+ * instance.  This object has a byte array buffer that is resized when it is
+ * full.  The reallocation of this buffer can be a performance factor for
+ * some applications using large objects.  To manage this issue, the {@link
+ * #setTupleBufferSize} method may be used to control the initial size of the
+ * buffer, and the {@link #getTupleOutput} method may be overridden by
+ * subclasses to take over creation of the TupleOutput object.</p>
+ */
+public class TupleBase<E> {
+
+    private int outputBufferSize;
+
+    /**
+     * Initializes the initial output buffer size to zero.
+     *
+     * <p>Unless {@link #setTupleBufferSize} is called, the default {@link
+     * com.sleepycat.util.FastOutputStream#DEFAULT_INIT_SIZE} size will be
+     * used.</p>
+     */
+    public TupleBase() {
+        outputBufferSize = 0;
+    }
+
+    /**
+     * Sets the initial byte size of the output buffer that is allocated by the
+     * default implementation of {@link #getTupleOutput}.
+     *
+     * <p>If this property is zero (the default), the default {@link
+     * com.sleepycat.util.FastOutputStream#DEFAULT_INIT_SIZE} size is used.</p>
+     *
+     * @param byteSize the initial byte size of the output buffer, or zero to
+     * use the default size.
+     */
+    public void setTupleBufferSize(int byteSize) {
+        outputBufferSize = byteSize;
+    }
+
+    /**
+     * Returns the initial byte size of the output buffer.
+     *
+     * @return the initial byte size of the output buffer.
+     *
+     * @see #setTupleBufferSize
+     */
+    public int getTupleBufferSize() {
+        return outputBufferSize;
+    }
+
+    /**
+     * Returns an empty TupleOutput instance that will be used by the tuple
+     * binding or key creator.
+     *
+     * <p>The default implementation of this method creates a new TupleOutput
+     * with an initial buffer size that can be changed using the {@link
+     * #setTupleBufferSize} method.</p>
+     *
+     * <p>This method may be overridden to return a TupleOutput instance.  For
+     * example, an instance per thread could be created and returned by this
+     * method.  If a TupleOutput instance is reused, be sure to call its
+     * {@link com.sleepycat.util.FastOutputStream#reset} method before each
+     * use.</p>
+     *
+     * @param object is the object to be written to the tuple output, and may
+     * be used by subclasses to determine the size of the output buffer.
+     *
+     * @return an empty TupleOutput instance.
+     *
+     * @see #setTupleBufferSize
+     */
+    protected TupleOutput getTupleOutput(E object) {
+        int byteSize = getTupleBufferSize();
+        if (byteSize != 0) {
+            return new TupleOutput(new byte[byteSize]);
+        } else {
+            return new TupleOutput();
+        }
+    }
+
+    /**
+     * Utility method to set the data in an entry buffer to the data in a tuple
+     * output object.
+     *
+     * @param output is the source tuple output object.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void outputToEntry(TupleOutput output, DatabaseEntry entry) {
+
+        entry.setData(output.getBufferBytes(), output.getBufferOffset(),
+                      output.getBufferLength());
+    }
+
+    /**
+     * Utility method to set the data in an entry buffer to the data in a tuple
+     * input object.
+     *
+     * @param input is the source tuple input object.
+     *
+     * @param entry is the destination entry buffer.
+     */
+    public static void inputToEntry(TupleInput input, DatabaseEntry entry) {
+
+        entry.setData(input.getBufferBytes(), input.getBufferOffset(),
+                      input.getBufferLength());
+    }
+
+    /**
+     * Utility method to create a new tuple input object for reading the data
+     * from a given buffer.  If an existing input is reused, it is reset before
+     * returning it.
+     *
+     * @param entry is the source entry buffer.
+     *
+     * @return the new tuple input object.
+     */
+    public static TupleInput entryToInput(DatabaseEntry entry) {
+
+        return new TupleInput(entry.getData(), entry.getOffset(),
+                              entry.getSize());
+    }
+
+    /**
+     * Utility method for use by bindings to create a tuple output object.
+     *
+     * @return a new tuple output object.
+     *
+     * @deprecated replaced by {@link #getTupleOutput}
+     */
+    public static TupleOutput newOutput() {
+
+        return new TupleOutput();
+    }
+
+    /**
+     * Utility method for use by bindings to create a tuple output object
+     * with a specific starting size.
+     *
+     * @return a new tuple output object.
+     *
+     * @deprecated replaced by {@link #getTupleOutput}
+     */
+    public static TupleOutput newOutput(byte[] buffer) {
+
+        return new TupleOutput(buffer);
+    }
+}
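+
+/*
+ * Illustrative sketch (hypothetical class, not part of this file's API):
+ * controlling the output buffer as the class javadoc describes.  A subclass
+ * may set an initial buffer size, or override getTupleOutput entirely (for
+ * example, to return a per-thread instance that is reset before each use).
+ */
+class BufferSizedStringBindingSketch extends TupleBinding<String> {
+
+    BufferSizedStringBindingSketch() {
+        /* A large initial buffer avoids reallocation for values under 4 KB. */
+        setTupleBufferSize(4096);
+    }
+
+    public String entryToObject(TupleInput input) {
+        return input.readString();
+    }
+
+    public void objectToEntry(String object, TupleOutput output) {
+        output.writeString(object);
+    }
+}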
diff --git a/src/com/sleepycat/bind/tuple/TupleBinding.java b/src/com/sleepycat/bind/tuple/TupleBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..0f81a8ce2d76787c1a31a263cbae772411e6f262
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleBinding.java
@@ -0,0 +1,135 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleBinding.java,v 1.34 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * An abstract <code>EntryBinding</code> that treats a key or data entry as a
+ * tuple; it includes predefined bindings for Java primitive types.
+ *
+ * <p>This class takes care of converting the entries to/from {@link
+ * TupleInput} and {@link TupleOutput} objects.  Its two abstract methods must
+ * be implemented by a concrete subclass to convert between tuples and key or
+ * data objects.</p>
+ * <ul>
+ * <li> {@link #entryToObject(TupleInput)} </li>
+ * <li> {@link #objectToEntry(Object,TupleOutput)} </li>
+ * </ul>
+ *
+ * <p>For key or data entries which are Java primitive classes (String,
+ * Integer, etc.), {@link #getPrimitiveBinding} may be used to return a
+ * built-in tuple binding.  A custom tuple binding for these types is not
+ * needed.
+ * <em>Note:</em> {@link #getPrimitiveBinding} returns bindings that do not
+ * sort negative floating point numbers correctly by default.  See {@link
+ * SortedFloatBinding} and {@link SortedDoubleBinding} for details.</p>
+ *
+ * <p>When a tuple binding is used as a key binding, it produces key values
+ * with a reasonable default sort order.  For more information on the default
+ * sort order, see {@link com.sleepycat.bind.tuple.TupleOutput}.</p>
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleBinding<E>
+    extends TupleBase<E>
+    implements EntryBinding<E> {
+
+    private static final Map<Class,TupleBinding> primitives =
+        new HashMap<Class,TupleBinding>();
+    static {
+        addPrimitive(String.class, String.class, new StringBinding());
+        addPrimitive(Character.class, Character.TYPE, new CharacterBinding());
+        addPrimitive(Boolean.class, Boolean.TYPE, new BooleanBinding());
+        addPrimitive(Byte.class, Byte.TYPE, new ByteBinding());
+        addPrimitive(Short.class, Short.TYPE, new ShortBinding());
+        addPrimitive(Integer.class, Integer.TYPE, new IntegerBinding());
+        addPrimitive(Long.class, Long.TYPE, new LongBinding());
+        addPrimitive(Float.class, Float.TYPE, new FloatBinding());
+        addPrimitive(Double.class, Double.TYPE, new DoubleBinding());
+    }
+
+    private static void addPrimitive(Class cls1, Class cls2,
+                                     TupleBinding binding) {
+        primitives.put(cls1, binding);
+        primitives.put(cls2, binding);
+    }
+
+    /**
+     * Creates a tuple binding.
+     */
+    public TupleBinding() {
+    }
+
+    // javadoc is inherited
+    public E entryToObject(DatabaseEntry entry) {
+
+        return entryToObject(entryToInput(entry));
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(E object, DatabaseEntry entry) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToEntry(object, output);
+        outputToEntry(output, entry);
+    }
+
+    /**
+     * Constructs a key or data object from a {@link TupleInput} entry.
+     *
+     * @param input is the tuple key or data entry.
+     *
+     * @return the key or data object constructed from the entry.
+     */
+    public abstract E entryToObject(TupleInput input);
+
+    /**
+     * Converts a key or data object to a tuple entry.
+     *
+     * @param object is the key or data object.
+     *
+     * @param output is the tuple entry to which the key or data should be
+     * written.
+     */
+    public abstract void objectToEntry(E object, TupleOutput output);
+
+    /**
+     * Creates a tuple binding for a primitive Java class.  The following
+     * Java classes are supported.
+     * <ul>
+     * <li><code>String</code></li>
+     * <li><code>Character</code></li>
+     * <li><code>Boolean</code></li>
+     * <li><code>Byte</code></li>
+     * <li><code>Short</code></li>
+     * <li><code>Integer</code></li>
+     * <li><code>Long</code></li>
+     * <li><code>Float</code></li>
+     * <li><code>Double</code></li>
+     * </ul>
+     *
+     * <p><em>Note:</em> {@link #getPrimitiveBinding} returns bindings that do
+     * not sort negative floating point numbers correctly by default.  See
+     * {@link SortedFloatBinding} and {@link SortedDoubleBinding} for
+     * details.</p>
+     *
+     * @param cls is the primitive Java class.
+     *
+     * @return a binding for the primitive class, or null if the cls
+     * parameter is not one of the supported classes.
+     */
+    public static <T> TupleBinding<T> getPrimitiveBinding(Class<T> cls) {
+
+        return primitives.get(cls);
+    }
+}
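+
+/*
+ * Illustrative sketch (hypothetical classes, not part of this file's API): a
+ * custom binding for a two-field key, implementing the two abstract methods
+ * listed in the class javadoc.  The order in which fields are written
+ * determines the default sort order of the key.
+ */
+class PartKeyBindingSketch extends TupleBinding<PartKeyBindingSketch.PartKey> {
+
+    static class PartKey {
+        String supplier;
+        int partNumber;
+    }
+
+    public PartKey entryToObject(TupleInput input) {
+        PartKey key = new PartKey();
+        key.supplier = input.readString();
+        key.partNumber = input.readInt();
+        return key;
+    }
+
+    public void objectToEntry(PartKey object, TupleOutput output) {
+        output.writeString(object.supplier);
+        output.writeInt(object.partNumber);
+    }
+}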
diff --git a/src/com/sleepycat/bind/tuple/TupleInput.java b/src/com/sleepycat/bind/tuple/TupleInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..651ca6624caf7fa745b2a52055f6baf560808232
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleInput.java
@@ -0,0 +1,676 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleInput.java,v 1.32 2008/01/07 14:28:44 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import java.math.BigInteger;
+
+import com.sleepycat.util.FastInputStream;
+import com.sleepycat.util.PackedInteger;
+import com.sleepycat.util.UtfOps;
+
+/**
+ * An <code>InputStream</code> with <code>DataInput</code>-like methods for
+ * reading tuple fields.  It is used by <code>TupleBinding</code>.
+ *
+ * <p>This class has many methods that have the same signatures as methods in
+ * the {@link java.io.DataInput} interface.  The reason this class does not
+ * implement {@link java.io.DataInput} is because it would break the interface
+ * contract for those methods because of data format differences.</p>
+ *
+ * <p>Signed numbers are stored in the buffer in MSB (most significant byte
+ * first) order with their sign bit (high-order bit) inverted to cause negative
+ * numbers to be sorted first when comparing values as unsigned byte arrays,
+ * as done in a database.  Unsigned numbers, including characters, are stored
+ * in MSB order with no change to their sign bit.  BigInteger values are stored
+ * with a preceding length having the same sign as the value.</p>
+ *
+ * <p>Strings and character arrays are stored either as a fixed length array of
+ * unicode characters, where the length must be known by the application, or as
+ * a null-terminated UTF byte array.</p>
+ * <ul>
+ * <li>Null strings are UTF encoded as { 0xFF }, which is not allowed in a
+ * standard UTF encoding.  This allows null strings, as distinct from empty or
+ * zero length strings, to be represented in a tuple.  Using the default
+ * comparator, null strings will be ordered last.</li>
+ * <li>Zero (0x0000) character values are UTF encoded as non-zero values, and
+ * therefore embedded zeros in the string are supported.  The sequence { 0xC0,
+ * 0x80 } is used to encode a zero character.  This UTF encoding is the same
+ * one used by native Java UTF libraries.  However, this encoding of zero does
+ * impact the lexicographical ordering, and zeros will not be sorted first (the
+ * natural order) or last.  For all character values other than zero, the
+ * default UTF byte ordering is the same as the Unicode lexicographical
+ * character ordering.</li>
+ * </ul>
+ *
+ * <p>Floats and doubles are stored using two different representations: sorted
+ * representation and integer-bit (IEEE 754) representation.  If you use
+ * negative floating point numbers in a key, you should use sorted
+ * representation; alternatively you may use integer-bit representation but you
+ * will need to implement and configure a custom comparator to get correct
+ * numeric ordering for negative numbers.</p>
+ *
+ * <p>To use sorted representation use this set of methods:</p>
+ * <ul>
+ * <li>{@link TupleOutput#writeSortedFloat}</li>
+ * <li>{@link TupleInput#readSortedFloat}</li>
+ * <li>{@link TupleOutput#writeSortedDouble}</li>
+ * <li>{@link TupleInput#readSortedDouble}</li>
+ * </ul>
+ *
+ * <p>To use integer-bit representation use this set of methods:</p>
+ * <ul>
+ * <li>{@link TupleOutput#writeFloat}</li>
+ * <li>{@link TupleInput#readFloat}</li>
+ * <li>{@link TupleOutput#writeDouble}</li>
+ * <li>{@link TupleInput#readDouble}</li>
+ * </ul>
+ *
+ * @author Mark Hayes
+ */
+public class TupleInput extends FastInputStream {
+
+    /**
+     * Creates a tuple input object for reading a byte array of tuple data.  A
+     * reference to the byte array will be kept by this object (it will not be
+     * copied) and therefore the byte array should not be modified while this
+     * object is in use.
+     *
+     * @param buffer is the byte array to be read and should contain data in
+     * tuple format.
+     */
+    public TupleInput(byte[] buffer) {
+
+        super(buffer);
+    }
+
+    /**
+     * Creates a tuple input object for reading a byte array of tuple data at
+     * a given offset for a given length.  A reference to the byte array will
+     * be kept by this object (it will not be copied) and therefore the byte
+     * array should not be modified while this object is in use.
+     *
+     * @param buffer is the byte array to be read and should contain data in
+     * tuple format.
+     *
+     * @param offset is the byte offset at which to begin reading.
+     *
+     * @param length is the number of bytes to be read.
+     */
+    public TupleInput(byte[] buffer, int offset, int length) {
+
+        super(buffer, offset, length);
+    }
+
+    /**
+     * Creates a tuple input object from the data contained in a tuple output
+     * object.  A reference to the tuple output's byte array will be kept by
+     * this object (it will not be copied) and therefore the tuple output
+     * object should not be modified while this object is in use.
+     *
+     * @param output is the tuple output object containing the data to be read.
+     */
+    public TupleInput(TupleOutput output) {
+
+        super(output.getBufferBytes(), output.getBufferOffset(),
+              output.getBufferLength());
+    }
+
+    // --- begin DataInput compatible methods ---
+
+    /**
+     * Reads a null-terminated UTF string from the data buffer and converts
+     * the data from UTF to Unicode.
+     * Reads values that were written using {@link
+     * TupleOutput#writeString(String)}.
+     *
+     * @return the converted string.
+     *
+     * @throws IndexOutOfBoundsException if no null terminating byte is found
+     * in the buffer.
+     *
+     * @throws IllegalArgumentException if malformed UTF data is encountered.
+     */
+    public final String readString()
+        throws IndexOutOfBoundsException, IllegalArgumentException  {
+
+        byte[] myBuf = buf;
+        int myOff = off;
+        if (available() >= 2 &&
+            myBuf[myOff] == TupleOutput.NULL_STRING_UTF_VALUE &&
+            myBuf[myOff + 1] == 0) {
+            skip(2);
+            return null;
+        } else {
+            int byteLen = UtfOps.getZeroTerminatedByteLength(myBuf, myOff);
+            skip(byteLen + 1);
+            return UtfOps.bytesToString(myBuf, myOff, byteLen);
+        }
+    }
+
+    /**
+     * Reads a char (two byte) unsigned value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeChar}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final char readChar()
+        throws IndexOutOfBoundsException {
+
+        return (char) readUnsignedShort();
+    }
+
+    /**
+     * Reads a boolean (one byte) unsigned value from the buffer and returns
+     * true if it is non-zero and false if it is zero.
+     * Reads values that were written using {@link TupleOutput#writeBoolean}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final boolean readBoolean()
+        throws IndexOutOfBoundsException {
+
+        int c = readFast();
+        if (c < 0) {
+            throw new IndexOutOfBoundsException();
+        }
+        return (c != 0);
+    }
+
+    /**
+     * Reads a signed byte (one byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeByte}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final byte readByte()
+        throws IndexOutOfBoundsException {
+
+        return (byte) (readUnsignedByte() ^ 0x80);
+    }
+
+    /**
+     * Reads a signed short (two byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeShort}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final short readShort()
+        throws IndexOutOfBoundsException {
+
+        return (short) (readUnsignedShort() ^ 0x8000);
+    }
+
+    /**
+     * Reads a signed int (four byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeInt}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final int readInt()
+        throws IndexOutOfBoundsException {
+
+        return (int) (readUnsignedInt() ^ 0x80000000);
+    }
+
+    /**
+     * Reads a signed long (eight byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeLong}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final long readLong()
+        throws IndexOutOfBoundsException {
+
+        return readUnsignedLong() ^ 0x8000000000000000L;
+    }
+
+    /**
+     * Reads a signed float (four byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeFloat}.
+     * <code>Float.intBitsToFloat</code> is used to convert the signed int
+     * value.
+     *
+     * <p><em>Note:</em> This method operates on byte array values that by
+     * default (without a custom comparator) do <em>not</em> sort correctly for
+     * negative values.  Only non-negative values are sorted correctly by
+     * default.  To sort all values correctly by default, use {@link
+     * #readSortedFloat}.</p>
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final float readFloat()
+        throws IndexOutOfBoundsException {
+
+        return Float.intBitsToFloat((int) readUnsignedInt());
+    }
+
+    /**
+     * Reads a signed double (eight byte) value from the buffer.
+     * Reads values that were written using {@link TupleOutput#writeDouble}.
+     * <code>Double.longBitsToDouble</code> is used to convert the signed long
+     * value.
+     *
+     * <p><em>Note:</em> This method operates on byte array values that by
+     * default (without a custom comparator) do <em>not</em> sort correctly for
+     * negative values.  Only non-negative values are sorted correctly by
+     * default.  To sort all values correctly by default, use {@link
+     * #readSortedDouble}.</p>
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final double readDouble()
+        throws IndexOutOfBoundsException {
+
+        return Double.longBitsToDouble(readUnsignedLong());
+    }
+
+    /**
+     * Reads a signed float (four byte) value from the buffer, with support
+     * for correct default sorting of all values.
+     * Reads values that were written using {@link
+     * TupleOutput#writeSortedFloat}.
+     *
+     * <p><code>Float.intBitsToFloat</code> and the following bit
+     * manipulations are used to convert the stored representation to a signed
+     * float value.</p>
+     * <pre>
+     *  int val = ... // get stored bits
+     *  val ^= (val &lt; 0) ? 0x80000000 : 0xffffffff;
+     *  return Float.intBitsToFloat(val);
+     * </pre>
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final float readSortedFloat()
+        throws IndexOutOfBoundsException {
+
+        int val = (int) readUnsignedInt();
+        val ^= (val < 0) ? 0x80000000 : 0xffffffff;
+        return Float.intBitsToFloat(val);
+    }
+
+    /**
+     * Reads a signed double (eight byte) value from the buffer, with support
+     * for correct default sorting of all values.
+     * Reads values that were written using {@link
+     * TupleOutput#writeSortedDouble}.
+     *
+     * <p><code>Double.longBitsToDouble</code> and the following bit
+     * manipulations are used to convert the stored representation to a signed
+     * double value.</p>
+     * <pre>
+     *  long val = ... // get stored bits
+     *  val ^= (val &lt; 0) ? 0x8000000000000000L : 0xffffffffffffffffL;
+     *  return Double.longBitsToDouble(val);
+     * </pre>
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final double readSortedDouble()
+        throws IndexOutOfBoundsException {
+
+        long val = readUnsignedLong();
+        val ^= (val < 0) ? 0x8000000000000000L : 0xffffffffffffffffL;
+        return Double.longBitsToDouble(val);
+    }
+
+    /**
+     * Reads an unsigned byte (one byte) value from the buffer.
+     * Reads values that were written using {@link
+     * TupleOutput#writeUnsignedByte}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final int readUnsignedByte()
+        throws IndexOutOfBoundsException {
+
+        int c = readFast();
+        if (c < 0) {
+            throw new IndexOutOfBoundsException();
+        }
+        return c;
+    }
+
+    /**
+     * Reads an unsigned short (two byte) value from the buffer.
+     * Reads values that were written using {@link
+     * TupleOutput#writeUnsignedShort}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final int readUnsignedShort()
+        throws IndexOutOfBoundsException {
+
+        int c1 = readFast();
+        int c2 = readFast();
+        if ((c1 | c2) < 0) {
+             throw new IndexOutOfBoundsException();
+        }
+        return ((c1 << 8) | c2);
+    }
+
+    // --- end DataInput compatible methods ---
+
+    /**
+     * Reads an unsigned int (four byte) value from the buffer.
+     * Reads values that were written using {@link
+     * TupleOutput#writeUnsignedInt}.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final long readUnsignedInt()
+        throws IndexOutOfBoundsException {
+
+        long c1 = readFast();
+        long c2 = readFast();
+        long c3 = readFast();
+        long c4 = readFast();
+        if ((c1 | c2 | c3 | c4) < 0) {
+            throw new IndexOutOfBoundsException();
+        }
+        return ((c1 << 24) | (c2 << 16) | (c3 << 8) | c4);
+    }
+
+    /**
+     * This method is private since an unsigned long cannot be treated as
+     * such in Java, nor converted to a BigInteger of the same value.
+     */
+    private final long readUnsignedLong()
+        throws IndexOutOfBoundsException {
+
+        long c1 = readFast();
+        long c2 = readFast();
+        long c3 = readFast();
+        long c4 = readFast();
+        long c5 = readFast();
+        long c6 = readFast();
+        long c7 = readFast();
+        long c8 = readFast();
+        if ((c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8) < 0) {
+             throw new IndexOutOfBoundsException();
+        }
+        return ((c1 << 56) | (c2 << 48) | (c3 << 40) | (c4 << 32) |
+                (c5 << 24) | (c6 << 16) | (c7 << 8)  | c8);
+    }
+
+    /**
+     * Reads the specified number of bytes from the buffer, converting each
+     * unsigned byte value to a character of the resulting string.
+     * Reads values that were written using {@link TupleOutput#writeBytes}.
+     * Only characters with values below 0x100 may be read using this method.
+     *
+     * @param length is the number of bytes to be read.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final String readBytes(int length)
+        throws IndexOutOfBoundsException {
+
+        StringBuffer buf = new StringBuffer(length);
+        for (int i = 0; i < length; i++) {
+            int c = readFast();
+            if (c < 0) {
+                throw new IndexOutOfBoundsException();
+            }
+            buf.append((char) c);
+        }
+        return buf.toString();
+    }
+
+    /**
+     * Reads the specified number of characters from the buffer, converting
+     * each two byte unsigned value to a character of the resulting string.
+     * Reads values that were written using {@link TupleOutput#writeChars}.
+     *
+     * @param length is the number of characters to be read.
+     *
+     * @return the value read from the buffer.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final String readChars(int length)
+        throws IndexOutOfBoundsException {
+
+        StringBuffer buf = new StringBuffer(length);
+        for (int i = 0; i < length; i++) {
+            buf.append(readChar());
+        }
+        return buf.toString();
+    }
+
+    /**
+     * Reads the specified number of bytes from the buffer, converting each
+     * unsigned byte value to a character of the resulting array.
+     * Reads values that were written using {@link TupleOutput#writeBytes}.
+     * Only characters with values below 0x100 may be read using this method.
+     *
+     * @param chars is the array to receive the data and whose length is used
+     * to determine the number of bytes to be read.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final void readBytes(char[] chars)
+        throws IndexOutOfBoundsException {
+
+        for (int i = 0; i < chars.length; i++) {
+            int c = readFast();
+            if (c < 0) {
+                throw new IndexOutOfBoundsException();
+            }
+            chars[i] = (char) c;
+        }
+    }
+
+    /**
+     * Reads the specified number of characters from the buffer, converting
+     * each two byte unsigned value to a character of the resulting array.
+     * Reads values that were written using {@link TupleOutput#writeChars}.
+     *
+     * @param chars is the array to receive the data and whose length is used
+     * to determine the number of characters to be read.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     */
+    public final void readChars(char[] chars)
+        throws IndexOutOfBoundsException {
+
+        for (int i = 0; i < chars.length; i++) {
+            chars[i] = readChar();
+        }
+    }
+
+    /**
+     * Reads the specified number of UTF characters string from the data
+     * buffer and converts the data from UTF to Unicode.
+     * Reads values that were written using {@link
+     * TupleOutput#writeString(char[])}.
+     *
+     * @param length is the number of characters to be read.
+     *
+     * @return the converted string.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     *
+     * @throws IllegalArgumentException if malformed UTF data is encountered.
+     */
+    public final String readString(int length)
+        throws IndexOutOfBoundsException, IllegalArgumentException  {
+
+        char[] chars = new char[length];
+        readString(chars);
+        return new String(chars);
+    }
+
+    /**
+     * Reads the specified number of UTF characters from the data
+     * buffer and converts the data from UTF to Unicode.
+     * Reads values that were written using {@link
+     * TupleOutput#writeString(char[])}.
+     *
+     * @param chars is the array to receive the data and whose length is used
+     * to determine the number of characters to be read.
+     *
+     * @throws IndexOutOfBoundsException if not enough bytes are available in
+     * the buffer.
+     *
+     * @throws IllegalArgumentException if malformed UTF data is encountered.
+     */
+    public final void readString(char[] chars)
+        throws IndexOutOfBoundsException, IllegalArgumentException  {
+
+        off = UtfOps.bytesToChars(buf, off, chars, 0, chars.length, false);
+    }
+
+    /**
+     * Returns the byte length of a null-terminated UTF string in the data
+     * buffer, including the terminator.  Used with string values that were
+     * written using {@link TupleOutput#writeString(String)}.
+     *
+     * @throws IndexOutOfBoundsException if no null terminating byte is found
+     * in the buffer.
+     *
+     * @throws IllegalArgumentException if malformed UTF data is encountered.
+     */
+    public final int getStringByteLength()
+        throws IndexOutOfBoundsException, IllegalArgumentException  {
+
+        if (available() >= 2 &&
+            buf[off] == TupleOutput.NULL_STRING_UTF_VALUE &&
+            buf[off + 1] == 0) {
+            return 2;
+        } else {
+            return UtfOps.getZeroTerminatedByteLength(buf, off) + 1;
+        }
+    }
+
+    /**
+     * Reads a packed integer.  Note that packed integers are not appropriate
+     * for sorted values (keys) unless a custom comparator is used.
+     *
+     * @see PackedInteger
+     */
+    public final int readPackedInt() {
+
+        int len = PackedInteger.getReadIntLength(buf, off);
+        int val = PackedInteger.readInt(buf, off);
+
+        off += len;
+        return val;
+    }
+
+    /**
+     * Returns the byte length of a packed integer.
+     *
+     * @see PackedInteger
+     */
+    public final int getPackedIntByteLength() {
+        return PackedInteger.getReadIntLength(buf, off);
+    }
+
+    /**
+     * Reads a packed long integer.  Note that packed integers are not
+     * appropriate for sorted values (keys) unless a custom comparator is used.
+     *
+     * @see PackedInteger
+     */
+    public final long readPackedLong() {
+
+        int len = PackedInteger.getReadLongLength(buf, off);
+        long val = PackedInteger.readLong(buf, off);
+
+        off += len;
+        return val;
+    }
+
+    /**
+     * Returns the byte length of a packed long integer.
+     *
+     * @see PackedInteger
+     */
+    public final int getPackedLongByteLength() {
+        return PackedInteger.getReadLongLength(buf, off);
+    }
+
+    /**
+     * Reads a {@code BigInteger}.
+     *
+     * @see TupleOutput#writeBigInteger
+     */
+    public final BigInteger readBigInteger() {
+        int len = readShort();
+        if (len < 0) {
+            len = (- len);
+        }
+        byte[] a = new byte[len];
+        a[0] = readByte();
+        readFast(a, 1, a.length - 1);
+        return new BigInteger(a);
+    }
+
+    /**
+     * Returns the byte length of a {@code BigInteger}.
+     *
+     * @see TupleOutput#writeBigInteger
+     */
+    public final int getBigIntegerByteLength() {
+        int saveOff = off;
+        int len = readShort();
+        off = saveOff;
+        if (len < 0) {
+            len = (- len);
+        }
+        return len + 2;
+    }
+}
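+
+/*
+ * Illustrative sketch (hypothetical code, not part of this file's API): a
+ * direct TupleOutput/TupleInput round trip, using the sorted floating point
+ * representation recommended above for fields that participate in keys.
+ */
+class TupleRoundTripSketch {
+    static void roundTrip() {
+        TupleOutput out = new TupleOutput();
+        out.writeString("part-42");
+        out.writeSortedDouble(-3.25);
+        out.writeInt(17);
+
+        TupleInput in = new TupleInput(out);
+        String name = in.readString();          // "part-42"
+        double price = in.readSortedDouble();   // -3.25
+        int count = in.readInt();               // 17
+    }
+}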
diff --git a/src/com/sleepycat/bind/tuple/TupleInputBinding.java b/src/com/sleepycat/bind/tuple/TupleInputBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..aaf2bed24dbb2c756c420e6b2304fd2264144131
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleInputBinding.java
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleInputBinding.java,v 1.25 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete <code>EntryBinding</code> that uses the <code>TupleInput</code>
+ * object as the key or data object.
+ *
+ * <p>This binding is used when tuples themselves are the objects, rather than
+ * using application-defined objects.  A {@link TupleInput} must always be
+ * used.  To convert a {@link TupleOutput} to a {@link TupleInput}, use the
+ * {@link TupleInput#TupleInput(TupleOutput)} constructor.</p>
+ *
+ * @author Mark Hayes
+ */
+public class TupleInputBinding implements EntryBinding<TupleInput> {
+
+    /**
+     * Creates a tuple input binding.
+     */
+    public TupleInputBinding() {
+    }
+
+    // javadoc is inherited
+    public TupleInput entryToObject(DatabaseEntry entry) {
+
+        return TupleBinding.entryToInput(entry);
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(TupleInput object, DatabaseEntry entry) {
+
+        TupleBinding.inputToEntry(object, entry);
+    }
+}
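+
+/*
+ * Illustrative sketch (hypothetical code, not part of this file's API):
+ * storing a tuple itself as the data object.  A TupleOutput is converted to
+ * the required TupleInput with the constructor mentioned above.
+ */
+class TupleInputBindingUsageSketch {
+    static void store() {
+        TupleOutput out = new TupleOutput();
+        out.writeString("raw tuple data");
+
+        TupleInputBinding binding = new TupleInputBinding();
+        DatabaseEntry entry = new DatabaseEntry();
+        binding.objectToEntry(new TupleInput(out), entry);
+
+        TupleInput read = binding.entryToObject(entry);
+        String value = read.readString();       // "raw tuple data"
+    }
+}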
diff --git a/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java b/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..879afda84a9197300555a752c4cf95da3fd3c569
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java
@@ -0,0 +1,68 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleMarshalledBinding.java,v 1.27 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * A concrete <code>TupleBinding</code> that delegates to the
+ * <code>MarshalledTupleEntry</code> interface of the data or key object.
+ *
+ * <p>This class works by calling the methods of the {@link
+ * MarshalledTupleEntry} interface, which must be implemented by the key or
+ * data class, to convert between the key or data entry and the object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class TupleMarshalledBinding<E extends MarshalledTupleEntry>
+    extends TupleBinding<E> {
+
+    private Class<E> cls;
+
+    /**
+     * Creates a tuple marshalled binding object.
+     *
+     * <p>The given class is used to instantiate key or data objects using
+     * {@link Class#newInstance}, and therefore must be a public class and
+     * have a public no-arguments constructor.  It must also implement the
+     * {@link MarshalledTupleEntry} interface.</p>
+     *
+     * @param cls is the class of the key or data objects.
+     */
+    public TupleMarshalledBinding(Class<E> cls) {
+
+        this.cls = cls;
+
+        /* The class will be used to instantiate the object.  */
+        if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) {
+            throw new IllegalArgumentException(cls.toString() +
+                        " does not implement MarshalledTupleEntry");
+        }
+    }
+
+    // javadoc is inherited
+    public E entryToObject(TupleInput input) {
+
+        try {
+            E obj = cls.newInstance();
+            obj.unmarshalEntry(input);
+            return obj;
+        } catch (IllegalAccessException e) {
+            throw new RuntimeExceptionWrapper(e);
+        } catch (InstantiationException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(E object, TupleOutput output) {
+
+        object.marshalEntry(output);
+    }
+}
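+
+/*
+ * Illustrative sketch (hypothetical class, not part of this file's API): a
+ * data class that marshals itself, for use as
+ * new TupleMarshalledBinding<MarshalledPartSketch>(MarshalledPartSketch.class).
+ * In real use the class and its no-argument constructor must be public, as
+ * described above; it is package-private here only so the sketch can share
+ * this file.
+ */
+class MarshalledPartSketch implements MarshalledTupleEntry {
+
+    private String name;
+    private int quantity;
+
+    public MarshalledPartSketch() {    // required for Class.newInstance()
+    }
+
+    public void marshalEntry(TupleOutput output) {
+        output.writeString(name);
+        output.writeInt(quantity);
+    }
+
+    public void unmarshalEntry(TupleInput input) {
+        name = input.readString();
+        quantity = input.readInt();
+    }
+}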
diff --git a/src/com/sleepycat/bind/tuple/TupleOutput.java b/src/com/sleepycat/bind/tuple/TupleOutput.java
new file mode 100644
index 0000000000000000000000000000000000000000..860b123a20ea5942a9424719a13d1bd8cfef739f
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleOutput.java
@@ -0,0 +1,560 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleOutput.java,v 1.31 2008/01/07 14:28:44 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import java.math.BigInteger;
+
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.PackedInteger;
+import com.sleepycat.util.UtfOps;
+
+/**
+ * An <code>OutputStream</code> with <code>DataOutput</code>-like methods for
+ * writing tuple fields.  It is used by <code>TupleBinding</code>.
+ *
+ * <p>This class has many methods that have the same signatures as methods in
+ * the {@link java.io.DataOutput} interface.  The reason this class does not
+ * implement {@link java.io.DataOutput} is because it would break the interface
+ * contract for those methods because of data format differences.</p>
+ *
+ * <p>Signed numbers are stored in the buffer in MSB (most significant byte
+ * first) order with their sign bit (high-order bit) inverted to cause negative
+ * numbers to be sorted first when comparing values as unsigned byte arrays,
+ * as done in a database.  Unsigned numbers, including characters, are stored
+ * in MSB order with no change to their sign bit.  BigInteger values are stored
+ * with a preceding length having the same sign as the value.</p>
+ *
+ * <p>Strings and character arrays are stored either as a fixed length array of
+ * unicode characters, where the length must be known by the application, or as
+ * a null-terminated UTF byte array.</p>
+ * <ul>
+ * <li>Null strings are UTF encoded as { 0xFF }, which is not allowed in a
+ * standard UTF encoding.  This allows null strings, as distinct from empty or
+ * zero length strings, to be represented in a tuple.  Using the default
+ * comparator, null strings will be ordered last.</li>
+ * <li>Zero (0x0000) character values are UTF encoded as non-zero values, and
+ * therefore embedded zeros in the string are supported.  The sequence { 0xC0,
+ * 0x80 } is used to encode a zero character.  This UTF encoding is the same
+ * one used by native Java UTF libraries.  However, this encoding of zero does
+ * impact the lexicographical ordering, and zeros will not be sorted first (the
+ * natural order) or last.  For all character values other than zero, the
+ * default UTF byte ordering is the same as the Unicode lexicographical
+ * character ordering.</li>
+ * </ul>
+ *
+ * <p>Floats and doubles are stored using two different representations: sorted
+ * representation and integer-bit (IEEE 754) representation.  If you use
+ * negative floating point numbers in a key, you should use sorted
+ * representation; alternatively you may use integer-bit representation but you
+ * will need to implement and configure a custom comparator to get correct
+ * numeric ordering for negative numbers.</p>
+ *
+ * <p>To use sorted representation use this set of methods:</p>
+ * <ul>
+ * <li>{@link TupleOutput#writeSortedFloat}</li>
+ * <li>{@link TupleInput#readSortedFloat}</li>
+ * <li>{@link TupleOutput#writeSortedDouble}</li>
+ * <li>{@link TupleInput#readSortedDouble}</li>
+ * </ul>
+ *
+ * <p>To use integer-bit representation use this set of methods:</p>
+ * <ul>
+ * <li>{@link TupleOutput#writeFloat}</li>
+ * <li>{@link TupleInput#readFloat}</li>
+ * <li>{@link TupleOutput#writeDouble}</li>
+ * <li>{@link TupleInput#readDouble}</li>
+ * </ul>
+ *
+ * @author Mark Hayes
+ */
+public class TupleOutput extends FastOutputStream {
+
+    /**
+     * We represent a null string as a single FF UTF character, which cannot
+     * occur in a UTF encoded string.
+     */
+    static final int NULL_STRING_UTF_VALUE = ((byte) 0xFF);
+
+    /**
+     * Creates a tuple output object for writing a byte array of tuple data.
+     */
+    public TupleOutput() {
+
+        super();
+    }
+
+    /**
+     * Creates a tuple output object for writing a byte array of tuple data,
+     * using a given buffer.  A new buffer will be allocated only if the number
+     * of bytes needed is greater than the length of this buffer.  A reference
+     * to the byte array will be kept by this object and therefore the byte
+     * array should not be modified while this object is in use.
+     *
+     * @param buffer is the byte array to use as the buffer.
+     */
+    public TupleOutput(byte[] buffer) {
+
+        super(buffer);
+    }
+
+    // --- begin DataOutput compatible methods ---
+
+    /**
+     * Writes the specified bytes to the buffer, converting each character to
+     * an unsigned byte value.
+     * Writes values that can be read using {@link TupleInput#readBytes}.
+     * Only characters with values below 0x100 may be written using this
+     * method, since the high-order 8 bits of all characters are discarded.
+     *
+     * @param val is the string containing the values to be written.
+     *
+     * @return this tuple output object.
+     *
+     * @throws NullPointerException if the val parameter is null.
+     */
+    public final TupleOutput writeBytes(String val) {
+
+        writeBytes(val.toCharArray());
+        return this;
+    }
+
+    /**
+     * Writes the specified characters to the buffer, converting each character
+     * to a two byte unsigned value.
+     * Writes values that can be read using {@link TupleInput#readChars}.
+     *
+     * @param val is the string containing the characters to be written.
+     *
+     * @return this tuple output object.
+     *
+     * @throws NullPointerException if the val parameter is null.
+     */
+    public final TupleOutput writeChars(String val) {
+
+        writeChars(val.toCharArray());
+        return this;
+    }
+
+    /**
+     * Writes the specified characters to the buffer, converting each character
+     * to UTF format, and adding a null terminator byte.
+     * Note that zero (0x0000) character values are encoded as non-zero values
+     * and a null String parameter is encoded as 0xFF.
+     * Writes values that can be read using {@link TupleInput#readString()}.
+     *
+     * @param val is the string containing the characters to be written.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeString(String val) {
+
+        if (val != null) {
+            writeString(val.toCharArray());
+        } else {
+            writeFast(NULL_STRING_UTF_VALUE);
+        }
+        writeFast(0);
+        return this;
+    }
+
+    /**
+     * Writes a char (two byte) unsigned value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readChar}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeChar(int val) {
+
+        writeFast((byte) (val >>> 8));
+        writeFast((byte) val);
+        return this;
+    }
+
+    /**
+     * Writes a boolean (one byte) unsigned value to the buffer, writing one
+     * if the value is true and zero if it is false.
+     * Writes values that can be read using {@link TupleInput#readBoolean}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeBoolean(boolean val) {
+
+        writeFast(val ? (byte)1 : (byte)0);
+        return this;
+    }
+
+    /**
+     * Writes a signed byte (one byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readByte}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeByte(int val) {
+
+        writeUnsignedByte(val ^ 0x80);
+        return this;
+    }
+
+    /**
+     * Writes a signed short (two byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readShort}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeShort(int val) {
+
+        writeUnsignedShort(val ^ 0x8000);
+        return this;
+    }
+
+    /**
+     * Writes a signed int (four byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readInt}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeInt(int val) {
+
+        writeUnsignedInt(val ^ 0x80000000);
+        return this;
+    }
+
+    /**
+     * Writes a signed long (eight byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readLong}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeLong(long val) {
+
+        writeUnsignedLong(val ^ 0x8000000000000000L);
+        return this;
+    }
+
+    /**
+     * Writes a signed float (four byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readFloat}.
+     * <code>Float.floatToIntBits</code> is used to convert the signed float
+     * value.
+     *
+     * <p><em>Note:</em> This method produces byte array values that by default
+     * (without a custom comparator) do <em>not</em> sort correctly for
+     * negative values.  Only non-negative values are sorted correctly by
+     * default.  To sort all values correctly by default, use {@link
+     * #writeSortedFloat}.</p>
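+     *
+     * <p>For example (an illustration of the IEEE 754 encoding, not of this
+     * API), <code>Float.floatToIntBits(1.0f)</code> is 0x3f800000 while
+     * <code>Float.floatToIntBits(-1.0f)</code> is 0xbf800000, so under the
+     * default unsigned byte comparison -1.0 incorrectly sorts after 1.0.</p>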
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeFloat(float val) {
+
+        writeUnsignedInt(Float.floatToIntBits(val));
+        return this;
+    }
+
+    /**
+     * Writes a signed double (eight byte) value to the buffer.
+     * Writes values that can be read using {@link TupleInput#readDouble}.
+     * <code>Double.doubleToLongBits</code> is used to convert the signed
+     * double value.
+     *
+     * <p><em>Note:</em> This method produces byte array values that by default
+     * (without a custom comparator) do <em>not</em> sort correctly for
+     * negative values.  Only non-negative values are sorted correctly by
+     * default.  To sort all values correctly by default, use {@link
+     * #writeSortedDouble}.</p>
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeDouble(double val) {
+
+        writeUnsignedLong(Double.doubleToLongBits(val));
+        return this;
+    }
+
+    /**
+     * Writes a signed float (four byte) value to the buffer, with support for
+     * correct default sorting of all values.
+     * Writes values that can be read using {@link TupleInput#readSortedFloat}.
+     *
+     * <p><code>Float.floatToIntBits</code> and the following bit manipulations
+     * are used to convert the signed float value to a representation that is
+     * sorted correctly by default.</p>
+     * <pre>
+     *  int intVal = Float.floatToIntBits(val);
+     *  intVal ^= (intVal &lt; 0) ? 0xffffffff : 0x80000000;
+     * </pre>
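+     *
+     * <p>The XOR flips all bits of a negative value (reversing the relative
+     * order of negatives and clearing the sign bit) and sets only the sign
+     * bit of a non-negative value, so the resulting byte arrays compare
+     * correctly as unsigned bytes.</p>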
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeSortedFloat(float val) {
+
+        int intVal = Float.floatToIntBits(val);
+        intVal ^= (intVal < 0) ? 0xffffffff : 0x80000000;
+        writeUnsignedInt(intVal);
+        return this;
+    }
+
+    /**
+     * Writes a signed double (eight byte) value to the buffer, with support
+     * for correct default sorting of all values.
+     * Writes values that can be read using {@link TupleInput#readSortedDouble}.
+     *
+     * <p><code>Double.doubleToLongBits</code> and the following bit
+     * manipulations are used to convert the signed double value to a
+     * representation that is sorted correctly by default.</p>
+     * <pre>
+     *  long longVal = Double.doubleToLongBits(val);
+     *  longVal ^= (longVal &lt; 0) ? 0xffffffffffffffffL : 0x8000000000000000L;
+     * </pre>
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeSortedDouble(double val) {
+
+        long longVal = Double.doubleToLongBits(val);
+        longVal ^= (longVal < 0) ? 0xffffffffffffffffL : 0x8000000000000000L;
+        writeUnsignedLong(longVal);
+        return this;
+    }
+
+    // --- end DataOutput compatible methods ---
+
+    /**
+     * Writes the specified characters to the buffer, converting each
+     * character to an unsigned byte value.
+     * Writes values that can be read using {@link TupleInput#readBytes}.
+     * Only characters with values below 0x100 may be written using this
+     * method, since the high-order 8 bits of all characters are discarded.
+     *
+     * @param chars is the array of values to be written.
+     *
+     * @return this tuple output object.
+     *
+     * @throws NullPointerException if the chars parameter is null.
+     */
+    public final TupleOutput writeBytes(char[] chars) {
+
+        for (int i = 0; i < chars.length; i++) {
+            writeFast((byte) chars[i]);
+        }
+        return this;
+    }
+
+    /**
+     * Writes the specified characters to the buffer, converting each character
+     * to a two byte unsigned value.
+     * Writes values that can be read using {@link TupleInput#readChars}.
+     *
+     * @param chars is the array of characters to be written.
+     *
+     * @return this tuple output object.
+     *
+     * @throws NullPointerException if the chars parameter is null.
+     */
+    public final TupleOutput writeChars(char[] chars) {
+
+        for (int i = 0; i < chars.length; i++) {
+            writeFast((byte) (chars[i] >>> 8));
+            writeFast((byte) chars[i]);
+        }
+        return this;
+    }
+
+    /**
+     * Writes the specified characters to the buffer, converting each character
+     * to UTF format.
+     * Note that zero (0x0000) character values are encoded as non-zero values.
+     * Writes values that can be read using {@link TupleInput#readString(int)}
+     * or {@link TupleInput#readString(char[])}.
+     *
+     * @param chars is the array of characters to be written.
+     *
+     * @return this tuple output object.
+     *
+     * @throws NullPointerException if the chars parameter is null.
+     */
+    public final TupleOutput writeString(char[] chars) {
+
+        if (chars.length == 0) {
+            return this;
+        }
+
+        int utfLength = UtfOps.getByteLength(chars);
+
+        makeSpace(utfLength);
+        UtfOps.charsToBytes(chars, 0, getBufferBytes(), getBufferLength(),
+                            chars.length);
+        addSize(utfLength);
+        return this;
+    }
+
+    /**
+     * Writes an unsigned byte (one byte) value to the buffer.
+     * Writes values that can be read using {@link
+     * TupleInput#readUnsignedByte}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeUnsignedByte(int val) {
+
+        writeFast(val);
+        return this;
+    }
+
+    /**
+     * Writes an unsigned short (two byte) value to the buffer.
+     * Writes values that can be read using {@link
+     * TupleInput#readUnsignedShort}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeUnsignedShort(int val) {
+
+        writeFast((byte) (val >>> 8));
+        writeFast((byte) val);
+        return this;
+    }
+
+    /**
+     * Writes an unsigned int (four byte) value to the buffer.
+     * Writes values that can be read using {@link
+     * TupleInput#readUnsignedInt}.
+     *
+     * @param val is the value to write to the buffer.
+     *
+     * @return this tuple output object.
+     */
+    public final TupleOutput writeUnsignedInt(long val) {
+
+        writeFast((byte) (val >>> 24));
+        writeFast((byte) (val >>> 16));
+        writeFast((byte) (val >>> 8));
+        writeFast((byte) val);
+        return this;
+    }
+
+    /**
+     * This method is private since an unsigned long cannot be treated as
+     * such in Java, nor converted to a BigInteger of the same value.
+     */
+    private final TupleOutput writeUnsignedLong(long val) {
+
+        writeFast((byte) (val >>> 56));
+        writeFast((byte) (val >>> 48));
+        writeFast((byte) (val >>> 40));
+        writeFast((byte) (val >>> 32));
+        writeFast((byte) (val >>> 24));
+        writeFast((byte) (val >>> 16));
+        writeFast((byte) (val >>> 8));
+        writeFast((byte) val);
+        return this;
+    }
+
+    /**
+     * Writes a packed integer.  Note that packed integers are not appropriate
+     * for sorted values (keys) unless a custom comparator is used.
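+     *
+     * <p>A minimal round-trip sketch (assuming the value is read back with
+     * the corresponding <code>TupleInput.readPackedInt</code> method):</p>
+     * <pre>
+     *  TupleOutput out = new TupleOutput();
+     *  out.writePackedInt(300);           // small values occupy few bytes
+     *  TupleInput in = new TupleInput(out);
+     *  int val = in.readPackedInt();      // 300
+     * </pre>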
+     *
+     * @see PackedInteger
+     */
+    public final void writePackedInt(int val) {
+
+        makeSpace(PackedInteger.MAX_LENGTH);
+
+        int oldLen = getBufferLength();
+        int newLen = PackedInteger.writeInt(getBufferBytes(), oldLen, val);
+
+        addSize(newLen - oldLen);
+    }
+
+    /**
+     * Writes a packed long integer.  Note that packed integers are not
+     * appropriate for sorted values (keys) unless a custom comparator is used.
+     *
+     * @see PackedInteger
+     */
+    public final void writePackedLong(long val) {
+
+        makeSpace(PackedInteger.MAX_LONG_LENGTH);
+
+        int oldLen = getBufferLength();
+        int newLen = PackedInteger.writeLong(getBufferBytes(), oldLen, val);
+
+        addSize(newLen - oldLen);
+    }
+
+    /**
+     * Writes a {@code BigInteger}.  Supported {@code BigInteger} values are
+     * limited to those with a byte array ({@link BigInteger#toByteArray})
+     * representation with a size of 0x7fff bytes or less.  The maximum {@code
+     * BigInteger} value is (2<sup>0x3fff7</sup> - 1) and the minimum value is
+     * (-2<sup>0x3fff7</sup>).
+     *
+     * <p>The byte format for a {@code BigInteger} value is:</p>
+     * <ul>
+     * <li>Byte 0 and 1: The length of the following bytes, negated if the
+     * {@code BigInteger} value is negative, and written as a sorted value as
+     * if {@link #writeShort} were called.</li>
+     * <li>Byte 2: The first byte of the {@link BigInteger#toByteArray} array,
+     * written as a sorted value as if {@link #writeByte} were called.</li>
+     * <li>Byte 3 to N: The second and remaining bytes, if any, of the {@link
+     * BigInteger#toByteArray} array, written without modification.</li>
+     * </ul>
+     * <p>This format provides correct default sorting when the default
+     * byte-by-byte comparison is used.</p>
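+     *
+     * <p>As a worked example (derived from the format above):
+     * <code>BigInteger.valueOf(255).toByteArray()</code> returns the two
+     * bytes {0x00, 0xFF}, so the stored form is the sorted length 0x80 0x02,
+     * the sorted first byte 0x80, and the remaining byte 0xFF.</p>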
+     *
+     * @throws NullPointerException if val is null.
+     *
+     * @throws IllegalArgumentException if the byte array representation of val
+     * is larger than 0x7fff bytes.
+     */
+    public final TupleOutput writeBigInteger(BigInteger val) {
+        byte[] a = val.toByteArray();
+        if (a.length > Short.MAX_VALUE) {
+            throw new IllegalArgumentException
+                ("BigInteger byte array is larger than 0x7fff bytes");
+        }
+        int firstByte = a[0];
+        writeShort((firstByte < 0) ? (- a.length) : a.length);
+        writeByte(firstByte);
+        writeFast(a, 1, a.length - 1);
+        return this;
+    }
+
+    /**
+     * Returns the byte length of a given {@code BigInteger} value.
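+     *
+     * <p>For example, <code>BigInteger.valueOf(255)</code> has a bit length
+     * of 8, giving 2 + (8 + 1 + 7) / 8 = 4 bytes, which matches the four
+     * stored bytes shown for {@link #writeBigInteger}.</p>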
+     *
+     * @see TupleOutput#writeBigInteger
+     */
+    public static int getBigIntegerByteLength(BigInteger val) {
+        return 2 /* length bytes */ +
+               (val.bitLength() + 1 /* sign bit */ + 7 /* round up */) / 8;
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleTupleBinding.java b/src/com/sleepycat/bind/tuple/TupleTupleBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..4c13e8268ecd06bd93d9e4308218b02cccf3dd04
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleTupleBinding.java
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleTupleBinding.java,v 1.27 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * An abstract <code>EntityBinding</code> that treats an entity's key entry and
+ * data entry as tuples.
+ *
+ * <p>This class takes care of converting the entries to/from {@link
+ * TupleInput} and {@link TupleOutput} objects.  Its three abstract methods
+ * must be implemented by a concrete subclass to convert between tuples and
+ * entity objects.</p>
+ * <ul>
+ * <li> {@link #entryToObject(TupleInput,TupleInput)} </li>
+ * <li> {@link #objectToKey(Object,TupleOutput)} </li>
+ * <li> {@link #objectToData(Object,TupleOutput)} </li>
+ * </ul>
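+ *
+ * <p>A minimal sketch of a concrete subclass follows; the <code>Part</code>
+ * entity and its fields are illustrative assumptions, not part of this
+ * API:</p>
+ * <pre>
+ *  class PartBinding extends TupleTupleBinding&lt;Part&gt; {
+ *      public Part entryToObject(TupleInput keyInput, TupleInput dataInput) {
+ *          String number = keyInput.readString();  // key field
+ *          String name = dataInput.readString();   // data field
+ *          return new Part(number, name);
+ *      }
+ *      public void objectToKey(Part part, TupleOutput output) {
+ *          output.writeString(part.getNumber());
+ *      }
+ *      public void objectToData(Part part, TupleOutput output) {
+ *          output.writeString(part.getName());
+ *      }
+ *  }
+ * </pre>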
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleTupleBinding<E> extends TupleBase<E>
+    implements EntityBinding<E> {
+
+    /**
+     * Creates a tuple-tuple entity binding.
+     */
+    public TupleTupleBinding() {
+    }
+
+    // javadoc is inherited
+    public E entryToObject(DatabaseEntry key, DatabaseEntry data) {
+
+        return entryToObject(TupleBinding.entryToInput(key),
+                             TupleBinding.entryToInput(data));
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, DatabaseEntry key) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToKey(object, output);
+        outputToEntry(output, key);
+    }
+
+    // javadoc is inherited
+    public void objectToData(E object, DatabaseEntry data) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToData(object, output);
+        outputToEntry(output, data);
+    }
+
+    // abstract methods
+
+    /**
+     * Constructs an entity object from {@link TupleInput} key and data
+     * entries.
+     *
+     * @param keyInput is the {@link TupleInput} key entry object.
+     *
+     * @param dataInput is the {@link TupleInput} data entry object.
+     *
+     * @return the entity object constructed from the key and data.
+     */
+    public abstract E entryToObject(TupleInput keyInput, TupleInput dataInput);
+
+    /**
+     * Extracts a key tuple from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @param output is the {@link TupleOutput} to which the key should be
+     * written.
+     */
+    public abstract void objectToKey(E object, TupleOutput output);
+
+    /**
+     * Extracts a data tuple from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @param output is the {@link TupleOutput} to which the data should be
+     * written.
+     */
+    public abstract void objectToData(E object, TupleOutput output);
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java b/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..883cee5ff6eb41bf49ad973fef68aca8460e0443
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleTupleKeyCreator.java,v 1.32 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.ForeignKeyNullifier;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * An abstract key creator that uses a tuple key and a tuple data entry. This
+ * class takes care of converting the key and data entry to/from {@link
+ * TupleInput} and {@link TupleOutput} objects.
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
+ * <ul>
+ * <li> {@link #createSecondaryKey(TupleInput,TupleInput,TupleOutput)} </li>
+ * </ul>
+ * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+ * specified when opening the secondary database, the following method must
+ * also be overridden to nullify the foreign index key:</p>
+ * <ul>
+ * <li> {@link #nullifyForeignKey(TupleInput,TupleOutput)} </li>
+ * </ul>
+ * <p>That method is called when the entity for the foreign key is deleted.
+ * If NULLIFY was not specified, it will never be called and may always
+ * return false.</p>
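+ *
+ * <p>A minimal sketch of a subclass; the entry layout (a supplier name
+ * stored after the part name in the data tuple) is an illustrative
+ * assumption, not part of this API:</p>
+ * <pre>
+ *  class BySupplier extends TupleTupleKeyCreator&lt;Part&gt; {
+ *      public boolean createSecondaryKey(TupleInput primaryKeyInput,
+ *                                        TupleInput dataInput,
+ *                                        TupleOutput indexKeyOutput) {
+ *          dataInput.readString();                  // skip the part name
+ *          indexKeyOutput.writeString(dataInput.readString()); // supplier
+ *          return true;
+ *      }
+ *  }
+ * </pre>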
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleTupleKeyCreator<E> extends TupleBase<E>
+    implements SecondaryKeyCreator, ForeignKeyNullifier {
+
+    /**
+     * Creates a tuple-tuple key creator.
+     */
+    public TupleTupleKeyCreator() {
+    }
+
+    // javadoc is inherited
+    public boolean createSecondaryKey(SecondaryDatabase db,
+                                      DatabaseEntry primaryKeyEntry,
+                                      DatabaseEntry dataEntry,
+                                      DatabaseEntry indexKeyEntry)
+        throws DatabaseException {
+
+        TupleOutput output = getTupleOutput(null);
+        TupleInput primaryKeyInput = entryToInput(primaryKeyEntry);
+        TupleInput dataInput = entryToInput(dataEntry);
+        if (createSecondaryKey(primaryKeyInput, dataInput, output)) {
+            outputToEntry(output, indexKeyEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    // javadoc is inherited
+    public boolean nullifyForeignKey(SecondaryDatabase db,
+                                     DatabaseEntry dataEntry)
+        throws DatabaseException {
+
+        TupleOutput output = getTupleOutput(null);
+        if (nullifyForeignKey(entryToInput(dataEntry), output)) {
+            outputToEntry(output, dataEntry);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Creates the index key from primary key tuple and data tuple.
+     *
+     * @param primaryKeyInput is the {@link TupleInput} for the primary key
+     * entry.
+     *
+     * @param dataInput is the {@link TupleInput} for the data entry.
+     *
+     * @param indexKeyOutput is the destination index key tuple.
+     *
+     * @return true if a key was created, or false to indicate that the key is
+     * not present.
+     */
+    public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                               TupleInput dataInput,
+                                               TupleOutput indexKeyOutput);
+
+    /**
+     * Clears the index key in the tuple data entry.  The dataInput should be
+     * read and then written to the dataOutput, clearing the index key in the
+     * process.
+     *
+     * <p>The secondary key should be cleared or removed by this method so
+     * that {@link #createSecondaryKey} will return false.  Other fields in
+     * the data object should remain unchanged.</p>
+     *
+     * @param dataInput is the {@link TupleInput} for the data entry.
+     *
+     * @param dataOutput is the destination {@link TupleOutput}.
+     *
+     * @return true if the key was cleared, or false to indicate that the key
+     * is not present and no change is necessary.
+     */
+    public boolean nullifyForeignKey(TupleInput dataInput,
+                                     TupleOutput dataOutput) {
+
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..7bbbfc9d6070695bd059be003132b7923200361e
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java
@@ -0,0 +1,94 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleTupleMarshalledBinding.java,v 1.26 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * A concrete <code>TupleTupleBinding</code> that delegates to the
+ * <code>MarshalledTupleEntry</code> and
+ * <code>MarshalledTupleKeyEntity</code> interfaces of the entity class.
+ *
+ * <p>This class calls the methods of the {@link MarshalledTupleEntry}
+ * interface to convert between the data entry and entity object.  It calls the
+ * methods of the {@link MarshalledTupleKeyEntity} interface to convert between
+ * the key entry and the entity object.  These two interfaces must both be
+ * implemented by the entity class.</p>
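+ *
+ * <p>Usage is then typically a single construction, assuming a
+ * <code>Part</code> class (illustrative, not part of this API) that
+ * implements both interfaces:</p>
+ * <pre>
+ *  EntityBinding&lt;Part&gt; binding =
+ *      new TupleTupleMarshalledBinding&lt;Part&gt;(Part.class);
+ * </pre>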
+ *
+ * @author Mark Hayes
+ */
+public class TupleTupleMarshalledBinding<E extends
+    MarshalledTupleEntry & MarshalledTupleKeyEntity>
+    extends TupleTupleBinding<E> {
+
+    private Class<E> cls;
+
+    /**
+     * Creates a tuple-tuple marshalled binding object.
+     *
+     * <p>The given class is used to instantiate entity objects using {@link
+     * Class#newInstance}, and therefore must be a public class and have a
+     * public no-arguments constructor.  It must also implement the {@link
+     * MarshalledTupleEntry} and {@link MarshalledTupleKeyEntity}
+     * interfaces.</p>
+     *
+     * @param cls is the class of the entity objects.
+     */
+    public TupleTupleMarshalledBinding(Class<E> cls) {
+
+        this.cls = cls;
+
+        // The entity class will be used to instantiate the entity object.
+        //
+        if (!MarshalledTupleKeyEntity.class.isAssignableFrom(cls)) {
+            throw new IllegalArgumentException(cls.toString() +
+                        " does not implement MarshalledTupleKeyEntity");
+        }
+        if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) {
+            throw new IllegalArgumentException(cls.toString() +
+                        " does not implement MarshalledTupleEntry");
+        }
+    }
+
+    // javadoc is inherited
+    public E entryToObject(TupleInput keyInput, TupleInput dataInput) {
+
+        /*
+         * This "tricky" binding returns the stored data as the entity, but
+         * first it sets the transient key fields from the stored key.
+         */
+        E obj;
+        try {
+            obj = cls.newInstance();
+        } catch (IllegalAccessException e) {
+            throw new RuntimeExceptionWrapper(e);
+        } catch (InstantiationException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+        if (dataInput != null) { // may be null if used by key extractor
+            obj.unmarshalEntry(dataInput);
+        }
+        if (keyInput != null) { // may be null if used by key extractor
+            obj.unmarshalPrimaryKey(keyInput);
+        }
+        return obj;
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, TupleOutput output) {
+
+        object.marshalPrimaryKey(output);
+    }
+
+    // javadoc is inherited
+    public void objectToData(E object, TupleOutput output) {
+
+        object.marshalEntry(output);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..ab47b65f0bc1838f9188bab4e35409b53a1d625c
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleTupleMarshalledKeyCreator.java,v 1.31 2008/05/27 15:30:33 mark Exp $
+ */
+
+package com.sleepycat.bind.tuple;
+
+/**
+ * A concrete key creator that works in conjunction with a {@link
+ * TupleTupleMarshalledBinding}.  This key creator works by calling the
+ * methods of the {@link MarshalledTupleKeyEntity} interface to create and
+ * clear the index key.
+ *
+ * <p>Note that a marshalled tuple key creator is somewhat less efficient
+ * than a non-marshalled tuple key creator because more conversions are
+ * needed.  A marshalled key creator must convert the entry to an object in
+ * order to create the key, while an unmarshalled key creator does not.</p>
+ *
+ * @author Mark Hayes
+ */
+public class TupleTupleMarshalledKeyCreator<E extends
+    MarshalledTupleEntry & MarshalledTupleKeyEntity>
+    extends TupleTupleKeyCreator<E> {
+
+    private String keyName;
+    private TupleTupleMarshalledBinding<E> binding;
+
+    /**
+     * Creates a tuple-tuple marshalled key creator.
+     *
+     * @param binding is the binding used for the tuple-tuple entity.
+     *
+     * @param keyName is the key name passed to the {@link
+     * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the
+     * index key.
+     */
+    public TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding<E>
+                                          binding,
+                                          String keyName) {
+
+        this.binding = binding;
+        this.keyName = keyName;
+    }
+
+    // javadoc is inherited
+    public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                      TupleInput dataInput,
+                                      TupleOutput indexKeyOutput) {
+
+        /*
+         * The primary key is unmarshalled before marshalling the index key,
+         * to account for cases where the index key includes fields taken
+         * from the primary key.
+         */
+        E entity = binding.entryToObject(primaryKeyInput, dataInput);
+        return entity.marshalSecondaryKey(keyName, indexKeyOutput);
+    }
+
+    // javadoc is inherited
+    public boolean nullifyForeignKey(TupleInput dataInput,
+                                     TupleOutput dataOutput) {
+
+        E entity = binding.entryToObject(null, dataInput);
+        if (entity.nullifyForeignKey(keyName)) {
+            binding.objectToData(entity, dataOutput);
+            return true;
+        } else {
+            return false;
+        }
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/package.html b/src/com/sleepycat/bind/tuple/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..3e99263dc78667dc0e6ac83c29427e489772959b
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/package.html
@@ -0,0 +1,10 @@
+<!-- $Id: package.html,v 1.12 2008/02/05 23:28:19 mark Exp $ -->
+<html>
+<body>
+Bindings that use sequences of primitive fields, or tuples.
+<!-- begin JE only -->
+@see <a href="{@docRoot}/../GettingStartedGuide/bindAPI.html"
+        target="_top">[Getting Started Guide]</a>
+<!-- end JE only -->
+</body>
+</html>
diff --git a/src/com/sleepycat/collections/BaseIterator.java b/src/com/sleepycat/collections/BaseIterator.java
new file mode 100644
index 0000000000000000000000000000000000000000..85c197e4c5e2f298872618d73b53ede16ed1edf1
--- /dev/null
+++ b/src/com/sleepycat/collections/BaseIterator.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: BaseIterator.java,v 1.6 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.ListIterator;
+
+/**
+ * Common interface for BlockIterator and StoredIterator.
+ */
+interface BaseIterator<E> extends ListIterator<E> {
+
+    /**
+     * @hidden
+     * Duplicate a cursor.  Called by StoredCollections.iterator.
+     */
+    ListIterator<E> dup();
+
+    /**
+     * @hidden
+     * Returns whether the given data is the current iterator data.  Called by
+     * StoredMapEntry.setValue.
+     */
+    boolean isCurrentData(Object currentData);
+
+    /**
+     * @hidden
+     * Initializes a list iterator at the given index.  Called by
+     * StoredList.iterator(int).
+     */
+    boolean moveToIndex(int index);
+}
diff --git a/src/com/sleepycat/collections/BlockIterator.java b/src/com/sleepycat/collections/BlockIterator.java
new file mode 100644
index 0000000000000000000000000000000000000000..ab1d0a47b01c42ec3aa719bd2842599fe11d4065
--- /dev/null
+++ b/src/com/sleepycat/collections/BlockIterator.java
@@ -0,0 +1,788 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: BlockIterator.java,v 1.13 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRange;
+
+/**
+ * An iterator that does not need closing because a cursor is not kept open
+ * across method calls.  A cursor is opened to read a block of records at a
+ * time and then closed before the method returns.
+ *
+ * @author Mark Hayes
+ */
+class BlockIterator<E> implements BaseIterator<E> {
+
+    private StoredCollection<E> coll;
+    private boolean writeAllowed;
+
+    /**
+     * Slots for a block of record keys and values.  The priKeys array is only
+     * used for secondary databases; otherwise it is set to the keys array.
+     */
+    private byte[][] keys;
+    private byte[][] priKeys;
+    private byte[][] values;
+
+    /**
+     * The slot index of the record that would be returned by next().
+     * nextIndex is always greater than or equal to zero.  If the next record is not
+     * available, then nextIndex is equal to keys.length or keys[nextIndex] is
+     * null.
+     *
+     * If the block is empty, then either the iterator is uninitialized or the
+     * key range is empty.  Either way, nextIndex will be the array length and
+     * all array values will be null.  This is the initial state set by the
+     * constructor.  If remove() is used to delete all records in the key
+     * range, it will restore the iterator to this initial state.  The block
+     * must never be allowed to become empty when the key range is non-empty,
+     * since then the iterator's position would be lost.  [#15858]
+     */
+    private int nextIndex;
+
+    /**
+     * The slot index of the record last returned by next() or previous(), or
+     * the record inserted by add().  dataIndex is -1 if the data record is not
+     * available.  If greater than or equal to zero, the slot at dataIndex is always
+     * non-null.
+     */
+    private int dataIndex;
+
+    /**
+     * The iterator data last returned by next() or previous().  This value is
+     * set to null if dataIndex is -1, or if the state of the iterator is such
+     * that set() or remove() cannot be called.  For example, after add() this
+     * field is set to null, even though the dataIndex is still valid.
+     */
+    private E dataObject;
+
+    /**
+     * Creates an iterator.
+     */
+    BlockIterator(StoredCollection<E> coll,
+                  boolean writeAllowed,
+                  int blockSize) {
+
+        this.coll = coll;
+        this.writeAllowed = writeAllowed;
+
+        keys = new byte[blockSize][];
+        priKeys = coll.isSecondary() ? (new byte[blockSize][]) : keys;
+        values = new byte[blockSize][];
+
+        nextIndex = blockSize;
+        dataIndex = -1;
+        dataObject = null;
+    }
+
+    /**
+     * Copy constructor.
+     */
+    private BlockIterator(BlockIterator<E> o) {
+
+        coll = o.coll;
+        writeAllowed = o.writeAllowed;
+
+        keys = copyArray(o.keys);
+        priKeys = coll.isSecondary() ? copyArray(o.priKeys) : keys;
+        values = copyArray(o.values);
+
+        nextIndex = o.nextIndex;
+        dataIndex = o.dataIndex;
+        dataObject = o.dataObject;
+    }
+
+    /**
+     * Copies an array of byte arrays.
+     */
+    private byte[][] copyArray(byte[][] a) {
+
+        byte[][] b = new byte[a.length][];
+        for (int i = 0; i < b.length; i += 1) {
+            if (a[i] != null) {
+                b[i] = KeyRange.copyBytes(a[i]);
+            }
+        }
+        return b;
+    }
+
+    /**
+     * Returns whether the element at nextIndex is available.
+     */
+    private boolean isNextAvailable() {
+
+        return (nextIndex < keys.length) &&
+               (keys[nextIndex] != null);
+    }
+
+    /**
+     * Returns whether the element at nextIndex-1 is available.
+     */
+    private boolean isPrevAvailable() {
+
+        return (nextIndex > 0) &&
+               (keys[nextIndex - 1] != null);
+    }
+
+    /**
+     * Returns the record number at the given slot position.
+     */
+    private int getRecordNumber(int i) {
+
+        if (coll.view.btreeRecNumDb) {
+            DataCursor cursor = null;
+            try {
+                cursor = new DataCursor(coll.view, false);
+                if (moveCursor(i, cursor)) {
+                    return cursor.getCurrentRecordNumber();
+                } else {
+                    throw new IllegalStateException();
+                }
+            } catch (DatabaseException e) {
+                throw StoredContainer.convertException(e);
+            } finally {
+                closeCursor(cursor);
+            }
+        } else {
+            DatabaseEntry entry = new DatabaseEntry(keys[i]);
+            return DbCompat.getRecordNumber(entry);
+        }
+    }
+
+    /**
+     * Sets dataObject to the iterator data for the element at dataIndex.
+     */
+    private void makeDataObject() {
+
+        int i = dataIndex;
+        DatabaseEntry keyEntry = new DatabaseEntry(keys[i]);
+        DatabaseEntry priKeyEntry = (keys != priKeys)
+                                    ? (new DatabaseEntry(priKeys[i]))
+                                    : keyEntry;
+        DatabaseEntry valuesEntry = new DatabaseEntry(values[i]);
+
+        dataObject = coll.makeIteratorData(this, keyEntry, priKeyEntry,
+                                           valuesEntry);
+    }
+
+    /**
+     * Sets all slots to null.
+     */
+    private void clearSlots() {
+
+        for (int i = 0; i < keys.length; i += 1) {
+            keys[i] = null;
+            priKeys[i] = null;
+            values[i] = null;
+        }
+    }
+
+    /**
+     * Sets a given slot using the data in the given cursor.
+     */
+    private void setSlot(int i, DataCursor cursor) {
+
+        keys[i] = KeyRange.getByteArray(cursor.getKeyThang());
+
+        if (keys != priKeys) {
+            priKeys[i] = KeyRange.getByteArray
+                (cursor.getPrimaryKeyThang());
+        }
+
+        values[i] = KeyRange.getByteArray(cursor.getValueThang());
+    }
+
+    /**
+     * Inserts an added record at a given slot position and shifts other slots
+     * accordingly.  Also adjusts nextIndex and sets dataIndex to -1.
+     */
+    private void insertSlot(int i, DataCursor cursor) {
+
+        if (i < keys.length) {
+            for (int j = keys.length - 1; j > i; j -= 1) {
+
+                /* Shift right. */
+                keys[j] = keys[j - 1];
+                priKeys[j] = priKeys[j - 1];
+                values[j] = values[j - 1];
+
+                /* Bump key in recno-renumber database. */
+                if (coll.view.recNumRenumber && keys[j] != null) {
+                    bumpRecordNumber(j);
+                }
+            }
+            nextIndex += 1;
+        } else {
+            if (i != keys.length) {
+                throw new IllegalStateException();
+            }
+            i -= 1;
+            for (int j = 0; j < i; j += 1) {
+                /* Shift left. */
+                keys[j] = keys[j + 1];
+                priKeys[j] = priKeys[j + 1];
+                values[j] = values[j + 1];
+            }
+        }
+        setSlot(i, cursor);
+        dataIndex = -1;
+    }
+
+    /**
+     * Increments the record number key at the given slot.
+     */
+    private void bumpRecordNumber(int i) {
+
+        DatabaseEntry entry = new DatabaseEntry(keys[i]);
+        DbCompat.setRecordNumber(entry,
+                                 DbCompat.getRecordNumber(entry) + 1);
+        keys[i] = entry.getData();
+    }
+
+    /**
+     * Deletes the given slot, adjusts nextIndex and sets dataIndex to -1.
+     */
+    private void deleteSlot(int i) {
+
+        for (int j = i + 1; j < keys.length; j += 1) {
+            keys[j - 1] = keys[j];
+            priKeys[j - 1] = priKeys[j];
+            values[j - 1] = values[j];
+        }
+        int last = keys.length - 1;
+        keys[last] = null;
+        priKeys[last] = null;
+        values[last] = null;
+
+        if (nextIndex > i) {
+            nextIndex -= 1;
+        }
+        dataIndex = -1;
+    }
+
+    /**
+     * Moves the cursor to the key/data at the given slot, and returns false
+     * if the reposition (search) fails.
+     */
+    private boolean moveCursor(int i, DataCursor cursor)
+        throws DatabaseException {
+
+        return cursor.repositionExact(keys[i], priKeys[i], values[i], false);
+    }
+
+    /**
+     * Closes the given cursor if non-null.
+     */
+    private void closeCursor(DataCursor cursor) {
+
+        if (cursor != null) {
+            try {
+                cursor.close();
+            } catch (DatabaseException e) {
+                throw StoredContainer.convertException(e);
+            }
+        }
+    }
+
+    // --- begin Iterator/ListIterator methods ---
+
+    public boolean hasNext() {
+
+        if (isNextAvailable()) {
+            return true;
+        }
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(coll.view, writeAllowed);
+            int prev = nextIndex - 1;
+            boolean found = false;
+
+            if (keys[prev] == null) {
+                /* Get the first record for an uninitialized iterator. */
+                OperationStatus status = cursor.getFirst(false);
+                if (status == OperationStatus.SUCCESS) {
+                    found = true;
+                    nextIndex = 0;
+                }
+            } else {
+                /* Reposition to the last known key/data pair. */
+                int repos = cursor.repositionRange
+                    (keys[prev], priKeys[prev], values[prev], false);
+
+                if (repos == DataCursor.REPOS_EXACT) {
+
+                    /*
+                     * The last known key/data pair was found and will now be
+                     * in slot zero.
+                     */
+                    found = true;
+                    nextIndex = 1;
+
+                    /* The data object is now in slot zero or not available. */
+                    if (dataIndex == prev) {
+                        dataIndex = 0;
+                    } else {
+                        dataIndex = -1;
+                        dataObject = null;
+                    }
+                } else if (repos == DataCursor.REPOS_NEXT) {
+
+                    /*
+                     * The last known key/data pair was not found, but the
+                     * following record was found and it will be in slot zero.
+                     */
+                    found = true;
+                    nextIndex = 0;
+
+                    /* The data object is no longer available. */
+                    dataIndex = -1;
+                    dataObject = null;
+                } else {
+                    if (repos != DataCursor.REPOS_EOF) {
+                        throw new IllegalStateException();
+                    }
+                }
+            }
+
+            if (found) {
+                /* Clear all slots first in case an exception occurs below. */
+                clearSlots();
+
+                /* Attempt to fill all slots with records. */
+                int i = 0;
+                boolean done = false;
+                while (!done) {
+                    setSlot(i, cursor);
+                    i += 1;
+                    if (i < keys.length) {
+                        OperationStatus status = coll.iterateDuplicates() ?
+                                                 cursor.getNext(false) :
+                                                 cursor.getNextNoDup(false);
+                        if (status != OperationStatus.SUCCESS) {
+                            done = true;
+                        }
+                    } else {
+                        done = true;
+                    }
+                }
+            }
+
+            /*
+             * If REPOS_EXACT was returned above, make sure we retrieved
+             * the following record.
+             */
+            return isNextAvailable();
+        } catch (DatabaseException e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    public boolean hasPrevious() {
+
+        if (isPrevAvailable()) {
+            return true;
+        }
+        if (!isNextAvailable()) {
+            return false;
+        }
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(coll.view, writeAllowed);
+            int last = keys.length - 1;
+            int next = nextIndex;
+            boolean found = false;
+
+            /* Reposition to the last known key/data pair. */
+            int repos = cursor.repositionRange
+                (keys[next], priKeys[next], values[next], false);
+
+            if (repos == DataCursor.REPOS_EXACT ||
+                repos == DataCursor.REPOS_NEXT) {
+
+                /*
+                 * The last known key/data pair, or the record following it,
+                 * was found and will now be in the last slot.
+                 */
+                found = true;
+                nextIndex = last;
+
+                /* The data object is now in the last slot or not available. */
+                if (dataIndex == next && repos == DataCursor.REPOS_EXACT) {
+                    dataIndex = last;
+                } else {
+                    dataIndex = -1;
+                    dataObject = null;
+                }
+            } else {
+                if (repos != DataCursor.REPOS_EOF) {
+                    throw new IllegalStateException();
+                }
+            }
+
+            if (found) {
+                /* Clear all slots first in case an exception occurs below. */
+                clearSlots();
+
+                /* Attempt to fill all slots with records. */
+                int i = last;
+                boolean done = false;
+                while (!done) {
+                    setSlot(i, cursor);
+                    i -= 1;
+                    if (i >= 0) {
+                        OperationStatus status = coll.iterateDuplicates() ?
+                                                 cursor.getPrev(false) :
+                                                 cursor.getPrevNoDup(false);
+                        if (status != OperationStatus.SUCCESS) {
+                            done = true;
+                        }
+                    } else {
+                        done = true;
+                    }
+                }
+            }
+
+            /*
+             * Make sure we retrieved the preceding record after the reposition
+             * above.
+             */
+            return isPrevAvailable();
+        } catch (DatabaseException e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    public E next() {
+
+        if (hasNext()) {
+            dataIndex = nextIndex;
+            nextIndex += 1;
+            makeDataObject();
+            return dataObject;
+        } else {
+            throw new NoSuchElementException();
+        }
+    }
+
+    public E previous() {
+
+        if (hasPrevious()) {
+            nextIndex -= 1;
+            dataIndex = nextIndex;
+            makeDataObject();
+            return dataObject;
+        } else {
+            throw new NoSuchElementException();
+        }
+    }
+
+    public int nextIndex() {
+
+        if (!coll.view.recNumAccess) {
+            throw new UnsupportedOperationException(
+                "Record number access not supported");
+        }
+
+        return hasNext() ? (getRecordNumber(nextIndex) -
+                            coll.getIndexOffset())
+                         : Integer.MAX_VALUE;
+    }
+
+    public int previousIndex() {
+
+        if (!coll.view.recNumAccess) {
+            throw new UnsupportedOperationException(
+                "Record number access not supported");
+        }
+
+        return hasPrevious() ? (getRecordNumber(nextIndex - 1) -
+                                coll.getIndexOffset())
+                             : (-1);
+    }
+
+    public void set(E value) {
+
+        if (dataObject == null) {
+            throw new IllegalStateException();
+        }
+        if (!coll.hasValues()) {
+            throw new UnsupportedOperationException();
+        }
+        DataCursor cursor = null;
+        boolean doAutoCommit = coll.beginAutoCommit();
+        try {
+            cursor = new DataCursor(coll.view, writeAllowed);
+            if (moveCursor(dataIndex, cursor)) {
+                cursor.putCurrent(value);
+                setSlot(dataIndex, cursor);
+                coll.closeCursor(cursor);
+                coll.commitAutoCommit(doAutoCommit);
+            } else {
+                throw new IllegalStateException();
+            }
+        } catch (Exception e) {
+            coll.closeCursor(cursor);
+            throw coll.handleException(e, doAutoCommit);
+        }
+    }
+
+    public void remove() {
+
+        if (dataObject == null) {
+            throw new IllegalStateException();
+        }
+        DataCursor cursor = null;
+        boolean doAutoCommit = coll.beginAutoCommit();
+        try {
+            cursor = new DataCursor(coll.view, writeAllowed);
+            if (moveCursor(dataIndex, cursor)) {
+                cursor.delete();
+                deleteSlot(dataIndex);
+                dataObject = null;
+
+                /*
+                 * Repopulate the block after removing all records, using the
+                 * cursor position of the deleted record as a starting point.
+                 * First try moving forward, since the user was moving forward.
+                 * (It is possible to delete all records in the block only by
+                 * moving forward, i.e., when nextIndex is greater than
+                 * dataIndex.)
+                 */
+                if (nextIndex == 0 && keys[0] == null) {
+                    OperationStatus status;
+                    for (int i = 0; i < keys.length; i += 1) {
+                        status = coll.iterateDuplicates() ?
+                                 cursor.getNext(false) :
+                                 cursor.getNextNoDup(false);
+                        if (status == OperationStatus.SUCCESS) {
+                            setSlot(i, cursor);
+                        } else {
+                            break;
+                        }
+                    }
+
+                    /*
+                     * If no records are found past the cursor position, try
+                     * moving backward.  If no records are found before the
+                     * cursor position, leave nextIndex set to keys.length,
+                     * which is the same as the initial iterator state and is
+                     * appropriate for an empty key range.
+                     */
+                    if (keys[0] == null) {
+                        nextIndex = keys.length;
+                        for (int i = nextIndex - 1; i >= 0; i -= 1) {
+                            status = coll.iterateDuplicates() ?
+                                     cursor.getPrev(false) :
+                                     cursor.getPrevNoDup(false);
+                            if (status == OperationStatus.SUCCESS) {
+                                setSlot(i, cursor);
+                            } else {
+                                break;
+                            }
+                        }
+                    }
+                }
+                coll.closeCursor(cursor);
+                coll.commitAutoCommit(doAutoCommit);
+            } else {
+                throw new IllegalStateException();
+            }
+        } catch (Exception e) {
+            coll.closeCursor(cursor);
+            throw coll.handleException(e, doAutoCommit);
+        }
+    }
+
+    public void add(E value) {
+
+        /*
+         * The checkIterAddAllowed method ensures that one of the following two
+         * conditions holds and throws UnsupportedOperationException otherwise:
+         * 1- This is a list iterator for a recno-renumber database.
+         * 2- This is a collection iterator for a duplicates database.
+         */
+        coll.checkIterAddAllowed();
+        OperationStatus status = OperationStatus.SUCCESS;
+        DataCursor cursor = null;
+        boolean doAutoCommit = coll.beginAutoCommit();
+        try {
+            if (coll.view.keysRenumbered || !coll.areDuplicatesOrdered()) {
+
+                /*
+                 * This is a recno-renumber database or a btree/hash database
+                 * with unordered duplicates.
+                 */
+                boolean hasPrev = hasPrevious();
+                if (!hasPrev && !hasNext()) {
+
+                    /* The collection is empty. */
+                    if (coll.view.keysRenumbered) {
+
+                        /* Append to an empty recno-renumber database. */
+                        status = coll.view.append(value, null, null);
+
+                    } else if (coll.view.dupsAllowed &&
+                               coll.view.range.isSingleKey()) {
+
+                        /*
+                         * When inserting a duplicate into a single-key range,
+                         * the main key is fixed, so we can always insert into
+                         * an empty collection.
+                         */
+                        cursor = new DataCursor(coll.view, writeAllowed);
+                        cursor.useRangeKey();
+                        status = cursor.putNoDupData(null, value, null, true);
+                        coll.closeCursor(cursor);
+                        cursor = null;
+                    } else {
+                        throw new IllegalStateException
+                            ("Collection is empty, cannot add() duplicate");
+                    }
+
+                    /*
+                     * Move past the record just inserted (the cursor should be
+                     * closed above to prevent multiple open cursors in certain
+                     * DB core modes).
+                     */
+                    if (status == OperationStatus.SUCCESS) {
+                        next();
+                        dataIndex = nextIndex - 1;
+                    }
+                } else {
+
+                    /*
+                     * The collection is non-empty.  If hasPrev is true then
+                     * the element at (nextIndex - 1) is available; otherwise
+                     * the element at nextIndex is available.
+                     */
+                    cursor = new DataCursor(coll.view, writeAllowed);
+                    int insertIndex = hasPrev ? (nextIndex - 1) : nextIndex;
+
+                    if (!moveCursor(insertIndex, cursor)) {
+                        throw new IllegalStateException();
+                    }
+
+                    /*
+                     * For a recno-renumber database or a database with
+                     * unsorted duplicates, insert before the iterator 'next'
+                     * position, or after the 'prev' position.  Then adjust
+                     * the slots to account for the inserted record.
+                     */
+                    status = hasPrev ? cursor.putAfter(value)
+                                     : cursor.putBefore(value);
+                    if (status == OperationStatus.SUCCESS) {
+                        insertSlot(nextIndex, cursor);
+                    }
+                }
+            } else {
+                /* This is a btree/hash database with ordered duplicates. */
+                cursor = new DataCursor(coll.view, writeAllowed);
+
+                if (coll.view.range.isSingleKey()) {
+
+                    /*
+                     * When inserting a duplicate into a single-key range,
+                     * the main key is fixed.
+                     */
+                    cursor.useRangeKey();
+                } else {
+
+                    /*
+                     * When inserting into a multi-key range, the main key
+                     * is the last dataIndex accessed by next(), previous()
+                     * or add().
+                     */
+                    if (dataIndex < 0 || !moveCursor(dataIndex, cursor)) {
+                        throw new IllegalStateException();
+                    }
+                }
+
+                /*
+                 * For a hash/btree with duplicates, insert the duplicate,
+                 * put the new record in slot zero, and set the next index
+                 * to slot one (past the new record).
+                 */
+                status = cursor.putNoDupData(null, value, null, true);
+                if (status == OperationStatus.SUCCESS) {
+                    clearSlots();
+                    setSlot(0, cursor);
+                    dataIndex = 0;
+                    nextIndex = 1;
+                }
+            }
+
+            if (status == OperationStatus.KEYEXIST) {
+                throw new IllegalArgumentException("Duplicate value");
+            } else if (status != OperationStatus.SUCCESS) {
+                throw new IllegalArgumentException("Could not insert: " +
+                                                    status);
+            }
+
+            /* Prevent subsequent set() or remove() call. */
+            dataObject = null;
+
+            coll.closeCursor(cursor);
+            coll.commitAutoCommit(doAutoCommit);
+        } catch (Exception e) {
+            /* Catch RuntimeExceptions too. */
+            coll.closeCursor(cursor);
+            throw coll.handleException(e, doAutoCommit);
+        }
+    }
+
+    // --- end Iterator/ListIterator methods ---
+
+    // --- begin BaseIterator methods ---
+
+    public final ListIterator<E> dup() {
+
+        return new BlockIterator<E>(this);
+    }
+
+    public final boolean isCurrentData(Object currentData) {
+
+        return (dataObject == currentData);
+    }
+
+    public final boolean moveToIndex(int index) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(coll.view, writeAllowed);
+            OperationStatus status =
+                cursor.getSearchKey(Integer.valueOf(index), null, false);
+            if (status == OperationStatus.SUCCESS) {
+                clearSlots();
+                setSlot(0, cursor);
+                nextIndex = 0;
+                return true;
+            } else {
+                return false;
+            }
+        } catch (DatabaseException e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    // --- end BaseIterator methods ---
+}
diff --git a/src/com/sleepycat/collections/CurrentTransaction.java b/src/com/sleepycat/collections/CurrentTransaction.java
new file mode 100644
index 0000000000000000000000000000000000000000..ba7c21576178d1a0095450862a3d55d9293325dc
--- /dev/null
+++ b/src/com/sleepycat/collections/CurrentTransaction.java
@@ -0,0 +1,469 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CurrentTransaction.java,v 1.53 2008/02/05 23:28:19 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.WeakHashMap;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * Provides access to the current transaction for the current thread within the
+ * context of a Berkeley DB environment.  This class provides explicit
+ * transaction control beyond that provided by the {@link TransactionRunner}
+ * class.  However, both methods of transaction control manage per-thread
+ * transactions.
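+ *
+ * <p>A minimal sketch of explicit transaction control (the stored map and
+ * its key/value are assumed to exist; for production code, {@link
+ * TransactionRunner} adds retry handling):</p>
+ * <pre>
+ *  CurrentTransaction currentTxn = CurrentTransaction.getInstance(env);
+ *  currentTxn.beginTransaction(null);
+ *  try {
+ *      storedMap.put(key, value);     // joins the current transaction
+ *      currentTxn.commitTransaction();
+ *  } catch (Exception e) {
+ *      currentTxn.abortTransaction();
+ *      // handle or rethrow
+ *  }
+ * </pre>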
+ *
+ * @author Mark Hayes
+ */
+public class CurrentTransaction {
+
+    /* For internal use, this class doubles as an Environment wrapper. */
+
+    private static WeakHashMap<Environment,CurrentTransaction> envMap =
+        new WeakHashMap<Environment,CurrentTransaction>();
+
+    private LockMode writeLockMode;
+    private boolean cdbMode;
+    private boolean txnMode;
+    private boolean lockingMode;
+    private ThreadLocal localTrans = new ThreadLocal();
+    private ThreadLocal localCdbCursors;
+
+    /*
+     * Use a WeakReference to the Environment to avoid pinning the environment
+     * in the envMap.  The WeakHashMap envMap uses the Environment as a weak
+     * key, but this won't prevent GC of the Environment if the map's value has
+     * a hard reference to the Environment.  [#15444]
+     */
+    private WeakReference<Environment> envRef;
+
+    /**
+     * Gets the CurrentTransaction accessor for a specified Berkeley DB
+     * environment.  This method always returns the same reference when called
+     * more than once with the same environment parameter.
+     *
+     * @param env is an open Berkeley DB environment.
+     *
+     * @return the CurrentTransaction accessor for the given environment, or
+     * null if the environment is not transactional.
+     */
+    public static CurrentTransaction getInstance(Environment env) {
+
+        CurrentTransaction currentTxn = getInstanceInternal(env);
+        return currentTxn.isTxnMode() ? currentTxn : null;
+    }
+
+    /**
+     * Gets the CurrentTransaction accessor for a specified Berkeley DB
+     * environment.  Unlike getInstance(), this method never returns null.
+     *
+     * @param env is an open Berkeley DB environment.
+     */
+    static CurrentTransaction getInstanceInternal(Environment env) {
+        synchronized (envMap) {
+            CurrentTransaction ct = envMap.get(env);
+            if (ct == null) {
+                ct = new CurrentTransaction(env);
+                envMap.put(env, ct);
+            }
+            return ct;
+        }
+    }
+
+    private CurrentTransaction(Environment env) {
+        envRef = new WeakReference<Environment>(env);
+        try {
+            EnvironmentConfig config = env.getConfig();
+            txnMode = config.getTransactional();
+            lockingMode = DbCompat.getInitializeLocking(config);
+            if (txnMode || lockingMode) {
+                writeLockMode = LockMode.RMW;
+            } else {
+                writeLockMode = LockMode.DEFAULT;
+            }
+            cdbMode = DbCompat.getInitializeCDB(config);
+            if (cdbMode) {
+                localCdbCursors = new ThreadLocal();
+            }
+        } catch (DatabaseException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+
+    /**
+     * Returns whether the environment is configured for locking.
+     */
+    final boolean isLockingMode() {
+
+        return lockingMode;
+    }
+
+    /**
+     * Returns whether this is a transactional environment.
+     */
+    final boolean isTxnMode() {
+
+        return txnMode;
+    }
+
+    /**
+     * Returns whether this is a Concurrent Data Store environment.
+     */
+    final boolean isCdbMode() {
+
+        return cdbMode;
+    }
+
+    /**
+     * Returns LockMode.RMW or LockMode.DEFAULT, depending on whether locking
+     * is enabled.  LockMode.RMW will cause an error if passed when locking
+     * is not enabled.  Locking is enabled if locking or transactions were
+     * specified for this environment.
+     */
+    final LockMode getWriteLockMode() {
+
+        return writeLockMode;
+    }
+
+    /**
+     * Returns the underlying Berkeley DB environment.
+     */
+    public final Environment getEnvironment() {
+
+        return envRef.get();
+    }
+
+    /**
+     * Returns the transaction associated with the current thread for this
+     * environment, or null if no transaction is active.
+     */
+    public final Transaction getTransaction() {
+
+        Trans trans = (Trans) localTrans.get();
+        return (trans != null) ? trans.txn : null;
+    }
+
+    /**
+     * Returns whether auto-commit may be performed by the collections API.
+     * True is returned if no collections API transaction is currently active,
+     * and no XA transaction is currently active.
+     */
+    boolean isAutoCommitAllowed()
+        throws DatabaseException {
+
+        return getTransaction() == null &&
+               DbCompat.getThreadTransaction(getEnvironment()) == null;
+    }
+
+    /**
+     * Begins a new transaction for this environment and associates it with
+     * the current thread.  If a transaction is already active for this
+     * environment and thread, a nested transaction will be created.
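+     *
+     * <p>Where nested transactions are supported by the environment, a sketch
+     * of nested usage (each commit ends the innermost active transaction):
+     * <pre>
+     *  currentTxn.beginTransaction(null);   // begin the parent
+     *  currentTxn.beginTransaction(null);   // begin a child
+     *  currentTxn.commitTransaction();      // commits the child
+     *  currentTxn.commitTransaction();      // commits the parent
+     * </pre>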
+     *
+     * @param config the transaction configuration used for calling
+     * {@link Environment#beginTransaction}, or null to use the default
+     * configuration.
+     *
+     * @return the new transaction.
+     *
+     * @throws DatabaseException if the transaction cannot be started, in which
+     * case any existing transaction is not affected.
+     *
+     * @throws IllegalStateException if a transaction is already active and
+     * nested transactions are not supported by the environment.
+     */
+    public final Transaction beginTransaction(TransactionConfig config)
+        throws DatabaseException {
+
+        Environment env = getEnvironment();
+        Trans trans = (Trans) localTrans.get();
+        if (trans != null) {
+            if (trans.txn != null) {
+                if (!DbCompat.NESTED_TRANSACTIONS) {
+                    throw new IllegalStateException(
+                            "Nested transactions are not supported");
+                }
+                Transaction parentTxn = trans.txn;
+                trans = new Trans(trans, config);
+                trans.txn = env.beginTransaction(parentTxn, config);
+                localTrans.set(trans);
+            } else {
+                trans.txn = env.beginTransaction(null, config);
+                trans.config = config;
+            }
+        } else {
+            trans = new Trans(null, config);
+            trans.txn = env.beginTransaction(null, config);
+            localTrans.set(trans);
+        }
+        return trans.txn;
+    }
+
+    /**
+     * Commits the transaction that is active for the current thread for this
+     * environment and makes the parent transaction (if any) the current
+     * transaction.
+     *
+     * @return the parent transaction or null if the committed transaction was
+     * not nested.
+     *
+     * @throws DatabaseException if an error occurs committing the transaction.
+     * The transaction will still be closed and the parent transaction will
+     * become the current transaction.
+     *
+     * @throws IllegalStateException if no transaction is active for the
+     * current thread for this environment.
+     */
+    public final Transaction commitTransaction()
+        throws DatabaseException, IllegalStateException {
+
+        Trans trans = (Trans) localTrans.get();
+        if (trans != null && trans.txn != null) {
+            Transaction parent = closeTxn(trans);
+            trans.txn.commit();
+            return parent;
+        } else {
+            throw new IllegalStateException("No transaction is active");
+        }
+    }
+
+    /**
+     * Aborts the transaction that is active for the current thread for this
+     * environment and makes the parent transaction (if any) the current
+     * transaction.
+     *
+     * @return the parent transaction or null if the aborted transaction was
+     * not nested.
+     *
+     * @throws DatabaseException if an error occurs aborting the transaction.
+     * The transaction will still be closed and the parent transaction will
+     * become the current transaction.
+     *
+     * @throws IllegalStateException if no transaction is active for the
+     * current thread for this environment.
+     */
+    public final Transaction abortTransaction()
+        throws DatabaseException, IllegalStateException {
+
+        Trans trans = (Trans) localTrans.get();
+        if (trans != null && trans.txn != null) {
+            Transaction parent = closeTxn(trans);
+            trans.txn.abort();
+            return parent;
+        } else {
+            throw new IllegalStateException("No transaction is active");
+        }
+    }
+
+    /**
+     * Returns whether the current transaction is a readUncommitted
+     * transaction.
+     */
+    final boolean isReadUncommitted() {
+
+        Trans trans = (Trans) localTrans.get();
+        if (trans != null && trans.config != null) {
+            return trans.config.getReadUncommitted();
+        } else {
+            return false;
+        }
+    }
+
+    private Transaction closeTxn(Trans trans) {
+
+        localTrans.set(trans.parent);
+        return (trans.parent != null) ? trans.parent.txn : null;
+    }
+
+    private static class Trans {
+
+        private Trans parent;
+        private Transaction txn;
+        private TransactionConfig config;
+
+        private Trans(Trans parent, TransactionConfig config) {
+
+            this.parent = parent;
+            this.config = config;
+        }
+    }
+
+    /**
+     * Opens a cursor for a given database, dup'ing an existing CDB cursor if
+     * one is open for the current thread.
+     */
+    Cursor openCursor(Database db, CursorConfig cursorConfig,
+                      boolean writeCursor, Transaction txn)
+        throws DatabaseException {
+
+        if (cdbMode) {
+            CdbCursors cdbCursors = null;
+            WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get();
+            if (cdbCursorsMap == null) {
+                cdbCursorsMap = new WeakHashMap();
+                localCdbCursors.set(cdbCursorsMap);
+            } else {
+                cdbCursors = (CdbCursors) cdbCursorsMap.get(db);
+            }
+            if (cdbCursors == null) {
+                cdbCursors = new CdbCursors();
+                cdbCursorsMap.put(db, cdbCursors);
+            }
+
+            /*
+             * In CDB mode the cursorConfig specified by the user is ignored
+             * and only the writeCursor parameter is honored.  This is the only
+             * meaningful cursor attribute for CDB, and here we count on the
+             * writeCursor flag being set correctly by the caller.
+             */
+            List cursors;
+            CursorConfig cdbConfig;
+            if (writeCursor) {
+                if (cdbCursors.readCursors.size() > 0) {
+
+                    /*
+                     * Although CDB allows opening a write cursor when a read
+                     * cursor is open, a self-deadlock will occur if a write is
+                     * attempted for a record that is read-locked; we should
+                     * avoid self-deadlocks at all costs.
+                     */
+                    throw new IllegalStateException(
+                      "cannot open CDB write cursor when read cursor is open");
+                }
+                cursors = cdbCursors.writeCursors;
+                cdbConfig = new CursorConfig();
+                DbCompat.setWriteCursor(cdbConfig, true);
+            } else {
+                cursors = cdbCursors.readCursors;
+                cdbConfig = null;
+            }
+            Cursor cursor;
+            if (cursors.size() > 0) {
+                Cursor other = ((Cursor) cursors.get(0));
+                cursor = other.dup(false);
+            } else {
+                cursor = db.openCursor(null, cdbConfig);
+            }
+            cursors.add(cursor);
+            return cursor;
+        } else {
+            return db.openCursor(txn, cursorConfig);
+        }
+    }
+
+    /**
+     * Duplicates a cursor for a given database.
+     *
+     * @param writeCursor true to open a write cursor in a CDB environment, and
+     * ignored for other environments.
+     *
+     * @param samePosition is passed through to Cursor.dup().
+     *
+     * @return the open cursor.
+     *
+     * @throws DatabaseException if a database problem occurs.
+     */
+    Cursor dupCursor(Cursor cursor, boolean writeCursor, boolean samePosition)
+        throws DatabaseException {
+
+        if (cdbMode) {
+            WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get();
+            if (cdbCursorsMap != null) {
+                Database db = cursor.getDatabase();
+                CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db);
+                if (cdbCursors != null) {
+                    List cursors = writeCursor ? cdbCursors.writeCursors
+                                               : cdbCursors.readCursors;
+                    if (cursors.contains(cursor)) {
+                        Cursor newCursor = cursor.dup(samePosition);
+                        cursors.add(newCursor);
+                        return newCursor;
+                    }
+                }
+            }
+            throw new IllegalStateException("cursor to dup not tracked");
+        } else {
+            return cursor.dup(samePosition);
+        }
+    }
+
+    /**
+     * Closes a cursor.
+     *
+     * @param cursor the cursor to close.
+     *
+     * @throws DatabaseException if a database problem occurs.
+     */
+    void closeCursor(Cursor cursor)
+        throws DatabaseException {
+
+        if (cursor == null) {
+            return;
+        }
+        if (cdbMode) {
+            WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get();
+            if (cdbCursorsMap != null) {
+                Database db = cursor.getDatabase();
+                CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db);
+                if (cdbCursors != null) {
+                    if (cdbCursors.readCursors.remove(cursor) ||
+                        cdbCursors.writeCursors.remove(cursor)) {
+                        cursor.close();
+                        return;
+                    }
+                }
+            }
+            throw new IllegalStateException(
+              "closing CDB cursor that was not known to be open");
+        } else {
+            cursor.close();
+        }
+    }
+
+    /**
+     * Returns true if a CDB cursor is open and therefore a Database write
+     * operation should not be attempted since a self-deadlock may result.
+     */
+    boolean isCDBCursorOpen(Database db)
+        throws DatabaseException {
+
+        if (cdbMode) {
+            WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get();
+            if (cdbCursorsMap != null) {
+                CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db);
+
+                if (cdbCursors != null &&
+                    (cdbCursors.readCursors.size() > 0 ||
+                     cdbCursors.writeCursors.size() > 0)) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    static final class CdbCursors {
+
+        List writeCursors = new ArrayList();
+        List readCursors = new ArrayList();
+    }
+}
diff --git a/src/com/sleepycat/collections/DataCursor.java b/src/com/sleepycat/collections/DataCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..8f43079eaa652ff163cdd044c73c3b43cd798279
--- /dev/null
+++ b/src/com/sleepycat/collections/DataCursor.java
@@ -0,0 +1,888 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DataCursor.java,v 1.63 2008/01/07 14:28:44 cwl Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.JoinConfig;
+import com.sleepycat.je.JoinCursor;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+/**
+ * Represents a Berkeley DB cursor and adds support for indices, bindings and
+ * key ranges.
+ *
+ * <p>This class operates on a view and takes care of reading and updating
+ * indices, calling bindings, constraining access to a key range, etc.</p>
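+ *
+ * <p>A sketch of a typical read loop over a view, assuming an existing
+ * {@code view}:
+ * <pre>
+ *  DataCursor cursor = new DataCursor(view, false);
+ *  try {
+ *      OperationStatus status = cursor.getFirst(false);
+ *      while (status == OperationStatus.SUCCESS) {
+ *          Object key = cursor.getCurrentKey();
+ *          Object value = cursor.getCurrentValue();
+ *          // ... use key and value ...
+ *          status = cursor.getNext(false);
+ *      }
+ *  } finally {
+ *      cursor.close();
+ *  }
+ * </pre>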
+ *
+ * @author Mark Hayes
+ */
+final class DataCursor implements Cloneable {
+
+    /** Repositioned exactly to the key/data pair given. */
+    static final int REPOS_EXACT = 0;
+    /** Repositioned on a record following the key/data pair given. */
+    static final int REPOS_NEXT = 1;
+    /** Repositioning failed; no records on or after the key/data pair given. */
+    static final int REPOS_EOF = 2;
+
+    private RangeCursor cursor;
+    private JoinCursor joinCursor;
+    private DataView view;
+    private KeyRange range;
+    private boolean writeAllowed;
+    private boolean readUncommitted;
+    private DatabaseEntry keyThang;
+    private DatabaseEntry valueThang;
+    private DatabaseEntry primaryKeyThang;
+    private DatabaseEntry otherThang;
+    private DataCursor[] indexCursorsToClose;
+
+    /**
+     * Creates a cursor for a given view.
+     */
+    DataCursor(DataView view, boolean writeAllowed)
+        throws DatabaseException {
+
+        init(view, writeAllowed, null, null);
+    }
+
+    /**
+     * Creates a cursor for a given view.
+     */
+    DataCursor(DataView view, boolean writeAllowed, CursorConfig config)
+        throws DatabaseException {
+
+        init(view, writeAllowed, config, null);
+    }
+
+    /**
+     * Creates a cursor for a given view and single key range.
+     * Used by unit tests.
+     */
+    DataCursor(DataView view, boolean writeAllowed, Object singleKey)
+        throws DatabaseException {
+
+        init(view, writeAllowed, null, view.subRange(view.range, singleKey));
+    }
+
+    /**
+     * Creates a cursor for a given view and key range.
+     * Used by unit tests.
+     */
+    DataCursor(DataView view, boolean writeAllowed,
+               Object beginKey, boolean beginInclusive,
+               Object endKey, boolean endInclusive)
+        throws DatabaseException {
+
+        init(view, writeAllowed, null,
+             view.subRange
+                (view.range, beginKey, beginInclusive, endKey, endInclusive));
+    }
+
+    /**
+     * Creates a join cursor.
+     */
+    DataCursor(DataView view, DataCursor[] indexCursors,
+               JoinConfig joinConfig, boolean closeIndexCursors)
+        throws DatabaseException {
+
+        if (view.isSecondary()) {
+            throw new IllegalArgumentException(
+                "The primary collection in a join must not be a secondary " +
+                "database");
+        }
+        Cursor[] cursors = new Cursor[indexCursors.length];
+        for (int i = 0; i < cursors.length; i += 1) {
+            cursors[i] = indexCursors[i].cursor.getCursor();
+        }
+        joinCursor = view.db.join(cursors, joinConfig);
+        init(view, false, null, null);
+        if (closeIndexCursors) {
+            indexCursorsToClose = indexCursors;
+        }
+    }
+
+    /**
+     * Clones a cursor preserving the current position.
+     */
+    DataCursor cloneCursor()
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+
+        DataCursor o;
+        try {
+            o = (DataCursor) super.clone();
+        } catch (CloneNotSupportedException neverHappens) {
+            return null;
+        }
+
+        o.initThangs();
+        KeyRange.copy(keyThang, o.keyThang);
+        KeyRange.copy(valueThang, o.valueThang);
+        if (primaryKeyThang != keyThang) {
+            KeyRange.copy(primaryKeyThang, o.primaryKeyThang);
+        }
+
+        o.cursor = cursor.dup(true);
+        return o;
+    }
+
+    /**
+     * Returns the internal range cursor.
+     */
+    RangeCursor getCursor() {
+        return cursor;
+    }
+
+    /**
+     * Constructor helper.
+     */
+    private void init(DataView view,
+                      boolean writeAllowed,
+                      CursorConfig config,
+                      KeyRange range)
+        throws DatabaseException {
+
+        if (config == null) {
+            config = view.cursorConfig;
+        }
+        this.view = view;
+        this.writeAllowed = writeAllowed && view.writeAllowed;
+        this.range = (range != null) ? range : view.range;
+        readUncommitted = config.getReadUncommitted() ||
+                          view.currentTxn.isReadUncommitted();
+        initThangs();
+
+        if (joinCursor == null) {
+            cursor = new MyRangeCursor
+                (this.range, config, view, this.writeAllowed);
+        }
+    }
+
+    /**
+     * Constructor helper.
+     */
+    private void initThangs()
+        throws DatabaseException {
+
+        keyThang = new DatabaseEntry();
+        primaryKeyThang = view.isSecondary() ? (new DatabaseEntry())
+                                             : keyThang;
+        valueThang = new DatabaseEntry();
+    }
+
+    /**
+     * Set entries from given byte arrays.
+     */
+    private void setThangs(byte[] keyBytes,
+                           byte[] priKeyBytes,
+                           byte[] valueBytes) {
+
+        keyThang.setData(KeyRange.copyBytes(keyBytes));
+
+        if (keyThang != primaryKeyThang) {
+            primaryKeyThang.setData(KeyRange.copyBytes(priKeyBytes));
+        }
+
+        valueThang.setData(KeyRange.copyBytes(valueBytes));
+    }
+
+    /**
+     * Closes the associated cursor.
+     */
+    void close()
+        throws DatabaseException {
+
+        if (joinCursor != null) {
+            JoinCursor toClose = joinCursor;
+            joinCursor = null;
+            toClose.close();
+        }
+        if (cursor != null) {
+            Cursor toClose = cursor.getCursor();
+            cursor = null;
+            view.currentTxn.closeCursor(toClose);
+        }
+        if (indexCursorsToClose != null) {
+            DataCursor[] toClose = indexCursorsToClose;
+            indexCursorsToClose = null;
+            for (int i = 0; i < toClose.length; i += 1) {
+                toClose[i].close();
+            }
+        }
+    }
+
+    /**
+     * Repositions to a given raw key/data pair, or just past it if that record
+     * has been deleted.
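+     *
+     * <p>Callers switch on the result to distinguish an exact match from a
+     * following record; a sketch:
+     * <pre>
+     *  int repos = cursor.repositionRange(keyBytes, priKeyBytes, valueBytes,
+     *                                     false);
+     *  if (repos == DataCursor.REPOS_EXACT) {
+     *      // positioned on the record given
+     *  } else if (repos == DataCursor.REPOS_NEXT) {
+     *      // record was deleted; positioned on the record following it
+     *  } else {
+     *      // REPOS_EOF: no records on or after the pair given
+     *  }
+     * </pre>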
+     *
+     * @return REPOS_EXACT, REPOS_NEXT or REPOS_EOF.
+     */
+    int repositionRange(byte[] keyBytes,
+                        byte[] priKeyBytes,
+                        byte[] valueBytes,
+                        boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        OperationStatus status = null;
+
+        /* Use the given key/data byte arrays. */
+        setThangs(keyBytes, priKeyBytes, valueBytes);
+
+        /* Position on or after the given key/data pair. */
+        if (view.dupsAllowed) {
+            status = cursor.getSearchBothRange(keyThang, primaryKeyThang,
+                                               valueThang, lockMode);
+        }
+        if (status != OperationStatus.SUCCESS) {
+            status = cursor.getSearchKeyRange(keyThang, primaryKeyThang,
+                                              valueThang, lockMode);
+        }
+
+        /* Return the result of the operation. */
+        if (status == OperationStatus.SUCCESS) {
+            if (!KeyRange.equalBytes(keyBytes, 0, keyBytes.length,
+                                     keyThang.getData(),
+                                     keyThang.getOffset(),
+                                     keyThang.getSize())) {
+                return REPOS_NEXT;
+            }
+            if (view.dupsAllowed) {
+                DatabaseEntry thang = view.isSecondary() ? primaryKeyThang
+                                                         : valueThang;
+                byte[] bytes = view.isSecondary() ? priKeyBytes
+                                                  : valueBytes;
+                if (!KeyRange.equalBytes(bytes, 0, bytes.length,
+                                         thang.getData(),
+                                         thang.getOffset(),
+                                         thang.getSize())) {
+                    return REPOS_NEXT;
+                }
+            }
+            return REPOS_EXACT;
+        } else {
+            return REPOS_EOF;
+        }
+    }
+
+    /**
+     * Repositions to a given raw key/data pair.
+     *
+     * @throws IllegalStateException when the database has unordered keys or
+     * unordered duplicates.
+     *
+     * @return whether the search succeeded.
+     */
+    boolean repositionExact(byte[] keyBytes,
+                            byte[] priKeyBytes,
+                            byte[] valueBytes,
+                            boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        OperationStatus status = null;
+
+        /* Use the given key/data byte arrays. */
+        setThangs(keyBytes, priKeyBytes, valueBytes);
+
+        /* Position on the given key/data pair. */
+        if (view.recNumRenumber) {
+            /* getSearchBoth doesn't work with recno-renumber databases. */
+            status = cursor.getSearchKey(keyThang, primaryKeyThang,
+                                         valueThang, lockMode);
+        } else {
+            status = cursor.getSearchBoth(keyThang, primaryKeyThang,
+                                          valueThang, lockMode);
+        }
+
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    /**
+     * Returns the view for this cursor.
+     */
+    DataView getView() {
+
+        return view;
+    }
+
+    /**
+     * Returns the range for this cursor.
+     */
+    KeyRange getRange() {
+
+        return range;
+    }
+
+    /**
+     * Returns whether write is allowed for this cursor, as specified to the
+     * constructor.
+     */
+    boolean isWriteAllowed() {
+
+        return writeAllowed;
+    }
+
+    /**
+     * Returns the key object for the last record read.
+     */
+    Object getCurrentKey()
+        throws DatabaseException {
+
+        return view.makeKey(keyThang, primaryKeyThang);
+    }
+
+    /**
+     * Returns the value object for the last record read.
+     */
+    Object getCurrentValue()
+        throws DatabaseException {
+
+        return view.makeValue(primaryKeyThang, valueThang);
+    }
+
+    /**
+     * Returns the internal key entry.
+     */
+    DatabaseEntry getKeyThang() {
+        return keyThang;
+    }
+
+    /**
+     * Returns the internal primary key entry, which is the same object as the
+     * key entry if the cursor is not for a secondary database.
+     */
+    DatabaseEntry getPrimaryKeyThang() {
+        return primaryKeyThang;
+    }
+
+    /**
+     * Returns the internal value entry.
+     */
+    DatabaseEntry getValueThang() {
+        return valueThang;
+    }
+
+    /**
+     * Returns whether record number access is allowed.
+     */
+    boolean hasRecNumAccess() {
+
+        return view.recNumAccess;
+    }
+
+    /**
+     * Returns the record number for the last record read.
+     */
+    int getCurrentRecordNumber()
+        throws DatabaseException {
+
+        if (view.btreeRecNumDb) {
+            /* BTREE-RECNO access. */
+            if (otherThang == null) {
+                otherThang = new DatabaseEntry();
+            }
+            DbCompat.getCurrentRecordNumber(cursor.getCursor(), otherThang,
+                                            getLockMode(false));
+            return DbCompat.getRecordNumber(otherThang);
+        } else {
+            /* QUEUE or RECNO database. */
+            return DbCompat.getRecordNumber(keyThang);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getCurrent(), no join cursor allowed.
+     */
+    OperationStatus getCurrent(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        return cursor.getCurrent(keyThang, primaryKeyThang, valueThang,
+                                 getLockMode(lockForWrite));
+    }
+
+    /**
+     * Binding version of Cursor.getFirst(), join cursor is allowed.
+     */
+    OperationStatus getFirst(boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (joinCursor != null) {
+            return joinCursor.getNext(keyThang, valueThang, lockMode);
+        } else {
+            return cursor.getFirst(keyThang, primaryKeyThang, valueThang,
+                                   lockMode);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getNext(), join cursor is allowed.
+     */
+    OperationStatus getNext(boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (joinCursor != null) {
+            return joinCursor.getNext(keyThang, valueThang, lockMode);
+        } else {
+            return cursor.getNext(keyThang, primaryKeyThang, valueThang,
+                                  lockMode);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getNextNoDup(), join cursor is allowed.
+     */
+    OperationStatus getNextNoDup(boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (joinCursor != null) {
+            return joinCursor.getNext(keyThang, valueThang, lockMode);
+        } else if (view.dupsView) {
+            return cursor.getNext
+                (keyThang, primaryKeyThang, valueThang, lockMode);
+        } else {
+            return cursor.getNextNoDup
+                (keyThang, primaryKeyThang, valueThang, lockMode);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getNextDup(), no join cursor allowed.
+     */
+    OperationStatus getNextDup(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        if (view.dupsView) {
+            return null;
+        } else {
+            return cursor.getNextDup
+                (keyThang, primaryKeyThang, valueThang,
+                 getLockMode(lockForWrite));
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getLast(), no join cursor allowed.
+     */
+    OperationStatus getLast(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        return cursor.getLast(keyThang, primaryKeyThang, valueThang,
+                              getLockMode(lockForWrite));
+    }
+
+    /**
+     * Binding version of Cursor.getPrev(), no join cursor allowed.
+     */
+    OperationStatus getPrev(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        return cursor.getPrev(keyThang, primaryKeyThang, valueThang,
+                              getLockMode(lockForWrite));
+    }
+
+    /**
+     * Binding version of Cursor.getPrevNoDup(), no join cursor allowed.
+     */
+    OperationStatus getPrevNoDup(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (view.dupsView) {
+            return cursor.getPrev
+                (keyThang, primaryKeyThang, valueThang, lockMode);
+        } else {
+            return cursor.getPrevNoDup
+                (keyThang, primaryKeyThang, valueThang, lockMode);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getPrevDup(), no join cursor allowed.
+     */
+    OperationStatus getPrevDup(boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        if (view.dupsView) {
+            return null;
+        } else {
+            return cursor.getPrevDup
+                (keyThang, primaryKeyThang, valueThang,
+                 getLockMode(lockForWrite));
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getSearchKey(), no join cursor allowed.
+     * Searches by record number in a BTREE-RECNO db with RECNO access.
+     */
+    OperationStatus getSearchKey(Object key, Object value,
+                                 boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        if (view.dupsView) {
+            if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) {
+                KeyRange.copy(view.dupsKey, keyThang);
+                return cursor.getSearchBoth
+                    (keyThang, primaryKeyThang, valueThang,
+                     getLockMode(lockForWrite));
+            }
+        } else {
+            if (view.useKey(key, value, keyThang, range)) {
+                return doGetSearchKey(lockForWrite);
+            }
+        }
+        return OperationStatus.NOTFOUND;
+    }
+
+    /**
+     * Pass-thru version of Cursor.getSearchKey().
+     * Searches by record number in a BTREE-RECNO db with RECNO access.
+     */
+    private OperationStatus doGetSearchKey(boolean lockForWrite)
+        throws DatabaseException {
+
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (view.btreeRecNumAccess) {
+            return cursor.getSearchRecordNumber(keyThang, primaryKeyThang,
+                                                valueThang, lockMode);
+        } else {
+            return cursor.getSearchKey(keyThang, primaryKeyThang,
+                                       valueThang, lockMode);
+        }
+    }
+
+    /**
+     * Binding version of Cursor.getSearchKeyRange(), no join cursor allowed.
+     */
+    OperationStatus getSearchKeyRange(Object key, Object value,
+                                      boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        LockMode lockMode = getLockMode(lockForWrite);
+        if (view.dupsView) {
+            if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) {
+                KeyRange.copy(view.dupsKey, keyThang);
+                return cursor.getSearchBothRange
+                    (keyThang, primaryKeyThang, valueThang, lockMode);
+            }
+        } else {
+            if (view.useKey(key, value, keyThang, range)) {
+                return cursor.getSearchKeyRange
+                    (keyThang, primaryKeyThang, valueThang, lockMode);
+            }
+        }
+        return OperationStatus.NOTFOUND;
+    }
+
+    /**
+     * Find the given key and value using getSearchBoth if possible or a
+     * sequential scan otherwise, no join cursor allowed.
+     */
+    OperationStatus findBoth(Object key, Object value, boolean lockForWrite)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        LockMode lockMode = getLockMode(lockForWrite);
+        view.useValue(value, valueThang, null);
+        if (view.dupsView) {
+            if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) {
+                KeyRange.copy(view.dupsKey, keyThang);
+                if (otherThang == null) {
+                    otherThang = new DatabaseEntry();
+                }
+                OperationStatus status = cursor.getSearchBoth
+                    (keyThang, primaryKeyThang, otherThang, lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    KeyRange.equalBytes(otherThang, valueThang)) {
+                    return status;
+                }
+            }
+        } else if (view.useKey(key, value, keyThang, range)) {
+            if (view.isSecondary()) {
+                if (otherThang == null) {
+                    otherThang = new DatabaseEntry();
+                }
+                OperationStatus status = cursor.getSearchKey(keyThang,
+                                                             primaryKeyThang,
+                                                             otherThang,
+                                                             lockMode);
+                while (status == OperationStatus.SUCCESS) {
+                    if (KeyRange.equalBytes(otherThang, valueThang)) {
+                        return status;
+                    }
+                    status = cursor.getNextDup(keyThang, primaryKeyThang,
+                                               otherThang, lockMode);
+                }
+                /* if status != SUCCESS set range cursor to invalid? */
+            } else {
+                return cursor.getSearchBoth(keyThang, null, valueThang,
+                                            lockMode);
+            }
+        }
+        return OperationStatus.NOTFOUND;
+    }
+
+    /**
+     * Find the given value using getSearchBoth if possible or a sequential
+     * scan otherwise, no join cursor allowed.
+     */
+    OperationStatus findValue(Object value, boolean findFirst)
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+
+        if (view.entityBinding != null && !view.isSecondary() &&
+            (findFirst || !view.dupsAllowed)) {
+            return findBoth(null, value, false);
+        } else {
+            if (otherThang == null) {
+                otherThang = new DatabaseEntry();
+            }
+            view.useValue(value, otherThang, null);
+            OperationStatus status = findFirst ? getFirst(false)
+                                               : getLast(false);
+            while (status == OperationStatus.SUCCESS) {
+                if (KeyRange.equalBytes(valueThang, otherThang)) {
+                    break;
+                }
+                status = findFirst ? getNext(false) : getPrev(false);
+            }
+            return status;
+        }
+    }
+
+    /**
+     * Calls Cursor.count(), no join cursor allowed.
+     */
+    int count()
+        throws DatabaseException {
+
+        checkNoJoinCursor();
+        if (view.dupsView) {
+            return 1;
+        } else {
+            return cursor.count();
+        }
+    }
+
+    /**
+     * Binding version of Cursor.putCurrent().
+     */
+    OperationStatus putCurrent(Object value)
+        throws DatabaseException {
+
+        checkWriteAllowed(false);
+        view.useValue(value, valueThang, keyThang);
+
+        /*
+         * Workaround for a DB core problem: With HASH type a put() with
+         * different data is allowed.
+         */
+        boolean hashWorkaround = (view.dupsOrdered && !view.ordered);
+        if (hashWorkaround) {
+            if (otherThang == null) {
+                otherThang = new DatabaseEntry();
+            }
+            cursor.getCurrent(keyThang, primaryKeyThang, otherThang,
+                              LockMode.DEFAULT);
+            if (KeyRange.equalBytes(valueThang, otherThang)) {
+                return OperationStatus.SUCCESS;
+            } else {
+                throw new IllegalArgumentException(
+                  "Current data differs from put data with sorted duplicates");
+            }
+        }
+
+        return cursor.putCurrent(valueThang);
+    }
+
+    /**
+     * Binding version of Cursor.putAfter().
+     */
+    OperationStatus putAfter(Object value)
+        throws DatabaseException {
+
+        checkWriteAllowed(false);
+        view.useValue(value, valueThang, null); /* why no key check? */
+        return cursor.putAfter(keyThang, valueThang);
+    }
+
+    /**
+     * Binding version of Cursor.putBefore().
+     */
+    OperationStatus putBefore(Object value)
+        throws DatabaseException {
+
+        checkWriteAllowed(false);
+        view.useValue(value, valueThang, keyThang);
+        return cursor.putBefore(keyThang, valueThang);
+    }
+
+    /**
+     * Binding version of Cursor.put(), optionally returning the old value and
+     * optionally using the current key instead of the key parameter.
+     */
+    OperationStatus put(Object key, Object value, Object[] oldValue,
+                        boolean useCurrentKey)
+        throws DatabaseException {
+
+        initForPut(key, value, oldValue, useCurrentKey);
+        return cursor.put(keyThang, valueThang);
+    }
+
+    /**
+     * Binding version of Cursor.putNoOverwrite(), optionally using the current
+     * key instead of the key parameter.
+     */
+    OperationStatus putNoOverwrite(Object key, Object value,
+                                   boolean useCurrentKey)
+        throws DatabaseException {
+
+        initForPut(key, value, null, useCurrentKey);
+        return cursor.putNoOverwrite(keyThang, valueThang);
+    }
+
+    /**
+     * Binding version of Cursor.putNoDupData(), optionally returning the old
+     * value and optionally using the current key instead of the key parameter.
+     */
+    OperationStatus putNoDupData(Object key, Object value, Object[] oldValue,
+                                 boolean useCurrentKey)
+        throws DatabaseException {
+
+        initForPut(key, value, oldValue, useCurrentKey);
+        if (view.dupsOrdered) {
+            return cursor.putNoDupData(keyThang, valueThang);
+        } else {
+            if (view.dupsAllowed) {
+                /* Unordered duplicates. */
+                OperationStatus status =
+                        cursor.getSearchBoth(keyThang, primaryKeyThang,
+                                             valueThang,
+                                             getLockMode(false));
+                if (status == OperationStatus.SUCCESS) {
+                    return OperationStatus.KEYEXIST;
+                } else {
+                    return cursor.put(keyThang, valueThang);
+                }
+            } else {
+                /* No duplicates. */
+                return cursor.putNoOverwrite(keyThang, valueThang);
+            }
+        }
+    }
+
+    /**
+     * Do setup for a put() operation.
+     */
+    private void initForPut(Object key, Object value, Object[] oldValue,
+                            boolean useCurrentKey)
+        throws DatabaseException {
+
+        checkWriteAllowed(false);
+        if (!useCurrentKey && !view.useKey(key, value, keyThang, range)) {
+            throw new IllegalArgumentException("key out of range");
+        }
+        if (oldValue != null) {
+            oldValue[0] = null;
+            if (!view.dupsAllowed) {
+                OperationStatus status = doGetSearchKey(true);
+                if (status == OperationStatus.SUCCESS) {
+                    oldValue[0] = getCurrentValue();
+                }
+            }
+        }
+        view.useValue(value, valueThang, keyThang);
+    }
+
+    /**
+     * Sets the key entry to the begin key of a single key range, so the next
+     * time a putXxx() method is called that key will be used.
+     */
+    void useRangeKey() {
+        if (!range.isSingleKey()) {
+            throw new IllegalStateException();
+        }
+        KeyRange.copy(range.getSingleKey(), keyThang);
+    }
+
+    /**
+     * Perform an arbitrary database 'delete' operation.
+     */
+    OperationStatus delete()
+        throws DatabaseException {
+
+        checkWriteAllowed(true);
+        return cursor.delete();
+    }
+
+    /**
+     * Returns the lock mode to use for a getXxx() operation.
+     */
+    LockMode getLockMode(boolean lockForWrite) {
+
+        /* Read-uncommitted takes precedence over write-locking. */
+
+        if (readUncommitted) {
+            return LockMode.READ_UNCOMMITTED;
+        } else if (lockForWrite) {
+            return view.currentTxn.getWriteLockMode();
+        } else {
+            return LockMode.DEFAULT;
+        }
+    }
+
+    /**
+     * Throws an exception if a join cursor is in use.
+     */
+    private void checkNoJoinCursor() {
+
+        if (joinCursor != null) {
+            throw new UnsupportedOperationException
+                ("Not allowed with a join cursor");
+        }
+    }
+
+    /**
+     * Throws an exception if write is not allowed or if a join cursor is in
+     * use.
+     */
+    private void checkWriteAllowed(boolean allowSecondary) {
+
+        checkNoJoinCursor();
+
+        if (!writeAllowed || (!allowSecondary && view.isSecondary())) {
+            throw new UnsupportedOperationException
+                ("Writing is not allowed");
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/DataView.java b/src/com/sleepycat/collections/DataView.java
new file mode 100644
index 0000000000000000000000000000000000000000..605c035a4abcc42052c144ef359604fc671dcfba
--- /dev/null
+++ b/src/com/sleepycat/collections/DataView.java
@@ -0,0 +1,674 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DataView.java,v 1.74 2008/02/05 23:28:19 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.JoinConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.KeyRangeException;
+
+/**
+ * Represents a Berkeley DB database and adds support for indices, bindings and
+ * key ranges.
+ *
+ * <p>This class defines a view and takes care of reading and updating indices,
+ * calling bindings, constraining access to a key range, etc.</p>
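+ *
+ * <p>Derived views narrow or re-bind this view without copying data.  For
+ * example, a sketch of deriving a sub-range view, assuming an existing
+ * {@code view} with ordered keys:
+ * <pre>
+ *  DataView subView = view.subView(beginKey, true, endKey, false, null);
+ * </pre>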
+ *
+ * @author Mark Hayes
+ */
+final class DataView implements Cloneable {
+
+    Database db;
+    SecondaryDatabase secDb;
+    CurrentTransaction currentTxn;
+    KeyRange range;
+    EntryBinding keyBinding;
+    EntryBinding valueBinding;
+    EntityBinding entityBinding;
+    PrimaryKeyAssigner keyAssigner;
+    SecondaryKeyCreator secKeyCreator;
+    CursorConfig cursorConfig;      // Used for all operations via this view
+    boolean writeAllowed;           // Read-write view
+    boolean ordered;                // Not a HASH Db
+    boolean keyRangesAllowed;       // BTREE only
+    boolean recNumAllowed;          // QUEUE, RECNO, or BTREE-RECNUM Db
+    boolean recNumAccess;           // recNumAllowed && using a rec num binding
+    boolean btreeRecNumDb;          // BTREE-RECNUM Db
+    boolean btreeRecNumAccess;      // recNumAccess && BTREE-RECNUM Db
+    boolean recNumRenumber;         // RECNO-RENUM Db
+    boolean keysRenumbered;         // recNumRenumber || btreeRecNumAccess
+    boolean dupsAllowed;            // Dups configured
+    boolean dupsOrdered;            // Sorted dups configured
+    boolean transactional;          // Db is transactional
+    boolean readUncommittedAllowed; // Read-uncommitted is optional in DB-CORE
+
+    /*
+     * If duplicatesView is called, dupsView will be true and dupsKey will be
+     * the secondary key used as the "single key" range.  dupRange will be set
+     * as the range of the primary key values if subRange is subsequently
+     * called, to further narrow the view.
+     */
+    DatabaseEntry dupsKey;
+    boolean dupsView;
+    KeyRange dupsRange;
+
+    /**
+     * Creates a view for a given database and bindings.  The initial key range
+     * of the view will be open.
+     */
+    DataView(Database database, EntryBinding keyBinding,
+             EntryBinding valueBinding, EntityBinding entityBinding,
+             boolean writeAllowed, PrimaryKeyAssigner keyAssigner)
+        throws IllegalArgumentException {
+
+        if (database == null) {
+            throw new IllegalArgumentException("database is null");
+        }
+        db = database;
+        try {
+            currentTxn =
+                CurrentTransaction.getInstanceInternal(db.getEnvironment());
+            DatabaseConfig dbConfig;
+            if (db instanceof SecondaryDatabase) {
+                secDb = (SecondaryDatabase) database;
+                SecondaryConfig secConfig = secDb.getSecondaryConfig();
+                secKeyCreator = secConfig.getKeyCreator();
+                dbConfig = secConfig;
+            } else {
+                dbConfig = db.getConfig();
+            }
+            ordered = !DbCompat.isTypeHash(dbConfig);
+            keyRangesAllowed = DbCompat.isTypeBtree(dbConfig);
+            recNumAllowed = DbCompat.isTypeQueue(dbConfig) ||
+                            DbCompat.isTypeRecno(dbConfig) ||
+                            DbCompat.getBtreeRecordNumbers(dbConfig);
+            recNumRenumber = DbCompat.getRenumbering(dbConfig);
+            dupsAllowed = DbCompat.getSortedDuplicates(dbConfig) ||
+                          DbCompat.getUnsortedDuplicates(dbConfig);
+            dupsOrdered = DbCompat.getSortedDuplicates(dbConfig);
+            transactional = currentTxn.isTxnMode() &&
+                            dbConfig.getTransactional();
+            readUncommittedAllowed = DbCompat.getReadUncommitted(dbConfig);
+            btreeRecNumDb = recNumAllowed && DbCompat.isTypeBtree(dbConfig);
+            range = new KeyRange(dbConfig.getBtreeComparator());
+        } catch (DatabaseException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+        this.writeAllowed = writeAllowed;
+        this.keyBinding = keyBinding;
+        this.valueBinding = valueBinding;
+        this.entityBinding = entityBinding;
+        this.keyAssigner = keyAssigner;
+        cursorConfig = CursorConfig.DEFAULT;
+
+        if (valueBinding != null && entityBinding != null) {
+            throw new IllegalArgumentException(
+                "both valueBinding and entityBinding are non-null");
+        }
+
+        if (keyBinding instanceof com.sleepycat.bind.RecordNumberBinding) {
+            if (!recNumAllowed) {
+                throw new IllegalArgumentException(
+                    "RecordNumberBinding requires DB_BTREE/DB_RECNUM, " +
+                    "DB_RECNO, or DB_QUEUE");
+            }
+            recNumAccess = true;
+            if (btreeRecNumDb) {
+                btreeRecNumAccess = true;
+            }
+        }
+        keysRenumbered = recNumRenumber || btreeRecNumAccess;
+    }
+
+    /**
+     * Clones the view.
+     */
+    private DataView cloneView() {
+
+        try {
+            return (DataView) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * Return a new key-set view derived from this view by setting the
+     * entity and value binding to null.
+     *
+     * @return the derived view.
+     */
+    DataView keySetView() {
+
+        if (keyBinding == null) {
+            throw new UnsupportedOperationException("must have keyBinding");
+        }
+        DataView view = cloneView();
+        view.valueBinding = null;
+        view.entityBinding = null;
+        return view;
+    }
+
+    /**
+     * Return a new value-set view derived from this view by setting the
+     * key binding to null.
+     *
+     * @return the derived view.
+     */
+    DataView valueSetView() {
+
+        if (valueBinding == null && entityBinding == null) {
+            throw new UnsupportedOperationException(
+                "must have valueBinding or entityBinding");
+        }
+        DataView view = cloneView();
+        view.keyBinding = null;
+        return view;
+    }
+
+    /**
+     * Return a new value-set view for single key range.
+     *
+     * @param singleKey the single key value.
+     *
+     * @return the derived view.
+     *
+     * @throws DatabaseException if a database problem occurs.
+     *
+     * @throws KeyRangeException if the specified range is not within the
+     * current range.
+     */
+    DataView valueSetView(Object singleKey)
+        throws DatabaseException, KeyRangeException {
+
+        /*
+         * Must do subRange before valueSetView since the latter clears the
+         * key binding needed for the former.
+         */
+        KeyRange singleKeyRange = subRange(range, singleKey);
+        DataView view = valueSetView();
+        view.range = singleKeyRange;
+        return view;
+    }
+
+    /**
+     * Return a new value-set view for key range, optionally changing
+     * the key binding.
+     */
+    DataView subView(Object beginKey, boolean beginInclusive,
+                     Object endKey, boolean endInclusive,
+                     EntryBinding keyBinding)
+        throws DatabaseException, KeyRangeException {
+
+        DataView view = cloneView();
+        view.setRange(beginKey, beginInclusive, endKey, endInclusive);
+        if (keyBinding != null) view.keyBinding = keyBinding;
+        return view;
+    }
+
+    /**
+     * Return a new duplicates view for a given secondary key.
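+     *
+     * <p>A sketch, assuming a view on a secondary database and a secondary
+     * key {@code secKey} present in that index:
+     * <pre>
+     *  DataView dups = view.duplicatesView(secKey, primaryKeyBinding);
+     * </pre>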
+     */
+    DataView duplicatesView(Object secondaryKey,
+                            EntryBinding primaryKeyBinding)
+        throws DatabaseException, KeyRangeException {
+
+        if (!isSecondary()) {
+            throw new UnsupportedOperationException
+                ("Only allowed for maps on secondary databases");
+        }
+        if (dupsView) {
+            throw new IllegalStateException();
+        }
+        DataView view = cloneView();
+        view.range = subRange(view.range, secondaryKey);
+        view.dupsKey = view.range.getSingleKey();
+        view.dupsView = true;
+        view.keyBinding = primaryKeyBinding;
+        return view;
+    }
+
+    /**
+     * Returns a new view with a specified cursor configuration.
+     */
+    DataView configuredView(CursorConfig config) {
+
+        DataView view = cloneView();
+        view.cursorConfig = (config != null) ?
+            DbCompat.cloneCursorConfig(config) : CursorConfig.DEFAULT;
+        return view;
+    }
+
+    /**
+     * Returns the current transaction for the view or null if the environment
+     * is non-transactional.
+     */
+    CurrentTransaction getCurrentTxn() {
+
+        return transactional ? currentTxn : null;
+    }
+
+    /**
+     * Sets this view's range to a subrange with the given parameters.
+     */
+    private void setRange(Object beginKey, boolean beginInclusive,
+                          Object endKey, boolean endInclusive)
+        throws DatabaseException, KeyRangeException {
+
+        if ((beginKey != null || endKey != null) && !keyRangesAllowed) {
+            throw new UnsupportedOperationException
+                ("Key ranges allowed only for BTREE databases");
+        }
+        KeyRange useRange = useSubRange();
+        useRange = subRange
+            (useRange, beginKey, beginInclusive, endKey, endInclusive);
+        if (dupsView) {
+            dupsRange = useRange;
+        } else {
+            range = useRange;
+        }
+    }
+
+    /**
+     * Returns the key thang for a single key range, or null if a single key
+     * range is not used.
+     */
+    DatabaseEntry getSingleKeyThang() {
+
+        return range.getSingleKey();
+    }
+
+    /**
+     * Returns the environment for the database.
+     */
+    final Environment getEnv() {
+
+        return currentTxn.getEnvironment();
+    }
+
+    /**
+     * Returns whether this is a view on a secondary database rather
+     * than directly on a primary database.
+     */
+    final boolean isSecondary() {
+
+        return (secDb != null);
+    }
+
+    /**
+     * Returns whether no records are present in the view.
+     */
+    boolean isEmpty()
+        throws DatabaseException {
+
+        DataCursor cursor = new DataCursor(this, false);
+        try {
+            return cursor.getFirst(false) != OperationStatus.SUCCESS;
+        } finally {
+            cursor.close();
+        }
+    }
+
+    /**
+     * Appends a value and returns the new key.  If a key assigner is used
+     * it assigns the key, otherwise a QUEUE or RECNO database is required.
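+     *
+     * <p>A sketch of retrieving the assigned key via the out-parameter,
+     * assuming a key assigner and a primary key binding are configured:
+     * <pre>
+     *  Object[] retKey = new Object[1];
+     *  OperationStatus status = view.append(value, retKey, null);
+     *  // on SUCCESS, retKey[0] holds the newly assigned primary key
+     * </pre>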
+     */
+    OperationStatus append(Object value, Object[] retPrimaryKey,
+                           Object[] retValue)
+        throws DatabaseException {
+
+        /*
+         * Flags will be NOOVERWRITE if used with assigner, or APPEND
+         * otherwise.
+         * Requires: if value param, value or entity binding
+         * Requires: if retPrimaryKey, primary key binding (no index).
+         * Requires: if retValue, value or entity binding
+         */
+        DatabaseEntry keyThang = new DatabaseEntry();
+        DatabaseEntry valueThang = new DatabaseEntry();
+        useValue(value, valueThang, null);
+        OperationStatus status;
+        if (keyAssigner != null) {
+            keyAssigner.assignKey(keyThang);
+            if (!range.check(keyThang)) {
+                throw new IllegalArgumentException(
+                    "assigned key out of range");
+            }
+            DataCursor cursor = new DataCursor(this, true);
+            try {
+                status = cursor.getCursor().putNoOverwrite(keyThang,
+                                                           valueThang);
+            } finally {
+                cursor.close();
+            }
+        } else {
+            /* Assume QUEUE/RECNO access method. */
+            if (currentTxn.isCDBCursorOpen(db)) {
+                throw new IllegalStateException(
+                  "cannot open CDB write cursor when read cursor is open");
+            }
+            status = DbCompat.append(db, useTransaction(),
+                                     keyThang, valueThang);
+            if (status == OperationStatus.SUCCESS && !range.check(keyThang)) {
+                db.delete(useTransaction(), keyThang);
+                throw new IllegalArgumentException(
+                    "appended record number out of range");
+            }
+        }
+        if (status == OperationStatus.SUCCESS) {
+            returnPrimaryKeyAndValue(keyThang, valueThang,
+                                     retPrimaryKey, retValue);
+        }
+        return status;
+    }
+
+    /**
+     * Returns the current transaction if the database is transactional, or
+     * null if the database is not transactional or there is no current
+     * transaction.
+     */
+    Transaction useTransaction() {
+        return transactional ? currentTxn.getTransaction() : null;
+    }
+
+    /**
+     * Deletes all records in the current range.
+     */
+    void clear()
+        throws DatabaseException {
+
+        DataCursor cursor = new DataCursor(this, true);
+        try {
+            OperationStatus status = OperationStatus.SUCCESS;
+            while (status == OperationStatus.SUCCESS) {
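+                /*
+                 * When keys are renumbered, deleting a record shifts the
+                 * remaining keys, so restart at the first record on each
+                 * pass instead of advancing the cursor.
+                 */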
+                if (keysRenumbered) {
+                    status = cursor.getFirst(true);
+                } else {
+                    status = cursor.getNext(true);
+                }
+                if (status == OperationStatus.SUCCESS) {
+                    cursor.delete();
+                }
+            }
+        } finally {
+            cursor.close();
+        }
+    }
+
+    /**
+     * Returns a cursor for this view that reads only records having the
+     * specified index key values.
+     */
+    DataCursor join(DataView[] indexViews, Object[] indexKeys,
+                    JoinConfig joinConfig)
+        throws DatabaseException {
+
+        DataCursor joinCursor = null;
+        DataCursor[] indexCursors = new DataCursor[indexViews.length];
+        try {
+            for (int i = 0; i < indexViews.length; i += 1) {
+                indexCursors[i] = new DataCursor(indexViews[i], false);
+                indexCursors[i].getSearchKey(indexKeys[i], null, false);
+            }
+            joinCursor = new DataCursor(this, indexCursors, joinConfig, true);
+            return joinCursor;
+        } finally {
+            if (joinCursor == null) {
+                // An exception is being thrown, so close cursors we opened.
+                for (int i = 0; i < indexCursors.length; i += 1) {
+                    if (indexCursors[i] != null) {
+                        try {
+                            indexCursors[i].close();
+                        } catch (Exception e) {
+                            /* Ignore: an exception is already being thrown
+                             * (noted for FindBugs). */
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns a cursor for this view that reads only records having the
+     * index key values at the specified cursors.
+     */
+    DataCursor join(DataCursor[] indexCursors, JoinConfig joinConfig)
+        throws DatabaseException {
+
+        return new DataCursor(this, indexCursors, joinConfig, false);
+    }
+
+    /**
+     * Returns primary key and value if return parameters are non-null.
+     */
+    private void returnPrimaryKeyAndValue(DatabaseEntry keyThang,
+                                          DatabaseEntry valueThang,
+                                          Object[] retPrimaryKey,
+                                          Object[] retValue)
+        throws DatabaseException {
+
+        // Requires: if retPrimaryKey, primary key binding (no index).
+        // Requires: if retValue, value or entity binding
+
+        if (retPrimaryKey != null) {
+            if (keyBinding == null) {
+                throw new IllegalArgumentException(
+                    "returning key requires primary key binding");
+            } else if (isSecondary()) {
+                throw new IllegalArgumentException(
+                    "returning key requires unindexed view");
+            } else {
+                retPrimaryKey[0] = keyBinding.entryToObject(keyThang);
+            }
+        }
+        if (retValue != null) {
+            retValue[0] = makeValue(keyThang, valueThang);
+        }
+    }
+
+    /**
+     * Populates the key entry and returns whether the key is within range.
+     */
+    boolean useKey(Object key, Object value, DatabaseEntry keyThang,
+                   KeyRange checkRange)
+        throws DatabaseException {
+
+        if (key != null) {
+            if (keyBinding == null) {
+                throw new IllegalArgumentException(
+                    "non-null key with null key binding");
+            }
+            keyBinding.objectToEntry(key, keyThang);
+        } else {
+            if (value == null) {
+                throw new IllegalArgumentException(
+                    "null key and null value");
+            }
+            if (entityBinding == null) {
+                throw new IllegalStateException(
+                    "EntityBinding required to derive key from value");
+            }
+            if (!dupsView && isSecondary()) {
+                DatabaseEntry primaryKeyThang = new DatabaseEntry();
+                entityBinding.objectToKey(value, primaryKeyThang);
+                DatabaseEntry valueThang = new DatabaseEntry();
+                entityBinding.objectToData(value, valueThang);
+                secKeyCreator.createSecondaryKey(secDb, primaryKeyThang,
+                                                 valueThang, keyThang);
+            } else {
+                entityBinding.objectToKey(value, keyThang);
+            }
+        }
+        if (recNumAccess && DbCompat.getRecordNumber(keyThang) <= 0) {
+            return false;
+        }
+        if (checkRange != null && !checkRange.check(keyThang)) {
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Returns whether data keys can be derived from the value/entity binding
+     * of this view, which determines whether a value/entity object alone is
+     * sufficient for operations that require keys.
+     */
+    final boolean canDeriveKeyFromValue() {
+
+        return (entityBinding != null);
+    }
+
+    /**
+     * Populates the value entry and throws an exception if the primary key
+     * would be changed via an entity binding.
+     */
+    void useValue(Object value, DatabaseEntry valueThang,
+                  DatabaseEntry checkKeyThang)
+        throws DatabaseException {
+
+        if (value != null) {
+            if (valueBinding != null) {
+                valueBinding.objectToEntry(value, valueThang);
+            } else if (entityBinding != null) {
+                entityBinding.objectToData(value, valueThang);
+                if (checkKeyThang != null) {
+                    DatabaseEntry thang = new DatabaseEntry();
+                    entityBinding.objectToKey(value, thang);
+                    if (!KeyRange.equalBytes(thang, checkKeyThang)) {
+                        throw new IllegalArgumentException(
+                            "cannot change primary key");
+                    }
+                }
+            } else {
+                throw new IllegalArgumentException(
+                    "non-null value with null value/entity binding");
+            }
+        } else {
+            valueThang.setData(KeyRange.ZERO_LENGTH_BYTE_ARRAY);
+            valueThang.setOffset(0);
+            valueThang.setSize(0);
+        }
+    }
+
+    /**
+     * Converts a key entry to a key object.
+     */
+    Object makeKey(DatabaseEntry keyThang, DatabaseEntry priKeyThang) {
+
+        if (keyBinding == null) {
+            throw new UnsupportedOperationException();
+        } else {
+            DatabaseEntry thang = dupsView ? priKeyThang : keyThang;
+            if (thang.getSize() == 0) {
+                return null;
+            } else {
+                return keyBinding.entryToObject(thang);
+            }
+        }
+    }
+
+    /**
+     * Converts a key-value entry pair to a value object.
+     */
+    Object makeValue(DatabaseEntry primaryKeyThang, DatabaseEntry valueThang) {
+
+        Object value;
+        if (valueBinding != null) {
+            value = valueBinding.entryToObject(valueThang);
+        } else if (entityBinding != null) {
+            value = entityBinding.entryToObject(primaryKeyThang,
+                                                valueThang);
+        } else {
+            throw new UnsupportedOperationException(
+                "requires valueBinding or entityBinding");
+        }
+        return value;
+    }
+
+    /**
+     * Intersects the given key and the current range.
+     */
+    KeyRange subRange(KeyRange useRange, Object singleKey)
+        throws DatabaseException, KeyRangeException {
+
+        return useRange.subRange(makeRangeKey(singleKey));
+    }
+
+    /**
+     * Intersects the given range and the current range.
+     */
+    KeyRange subRange(KeyRange useRange,
+                      Object beginKey, boolean beginInclusive,
+                      Object endKey, boolean endInclusive)
+        throws DatabaseException, KeyRangeException {
+
+        if (beginKey == endKey && beginInclusive && endInclusive) {
+            return subRange(useRange, beginKey);
+        }
+        if (!ordered) {
+            throw new UnsupportedOperationException(
+                    "Cannot use key ranges on an unsorted database");
+        }
+        DatabaseEntry beginThang =
+            (beginKey != null) ? makeRangeKey(beginKey) : null;
+        DatabaseEntry endThang =
+            (endKey != null) ? makeRangeKey(endKey) : null;
+
+        return useRange.subRange(beginThang, beginInclusive,
+                                 endThang, endInclusive);
+    }
+
+    /**
+     * Returns the range to use for sub-ranges.  Returns range if this is not a
+     * dupsView, or the dupsRange if this is a dupsView, creating dupsRange if
+     * necessary.
+     */
+    KeyRange useSubRange()
+        throws DatabaseException {
+
+        if (dupsView) {
+            synchronized (this) {
+                if (dupsRange == null) {
+                    DatabaseConfig config =
+                        secDb.getPrimaryDatabase().getConfig();
+                    dupsRange = new KeyRange(config.getBtreeComparator());
+                }
+            }
+            return dupsRange;
+        } else {
+            return range;
+        }
+    }
+
+    /**
+     * Given a key object, make a key entry that can be used in a range.
+     */
+    private DatabaseEntry makeRangeKey(Object key)
+        throws DatabaseException {
+
+        DatabaseEntry thang = new DatabaseEntry();
+        if (keyBinding != null) {
+            useKey(key, null, thang, null);
+        } else {
+            useKey(null, key, thang, null);
+        }
+        return thang;
+    }
+}
diff --git a/src/com/sleepycat/collections/MapEntryParameter.java b/src/com/sleepycat/collections/MapEntryParameter.java
new file mode 100644
index 0000000000000000000000000000000000000000..6dbab1da28e718b1a7f6aa24b7a612ed2b757c8c
--- /dev/null
+++ b/src/com/sleepycat/collections/MapEntryParameter.java
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MapEntryParameter.java,v 1.22 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Map;
+
+/**
+ * A simple <code>Map.Entry</code> implementation that can be used as an
+ * input parameter.  Since a <code>MapEntryParameter</code> is not obtained
+ * from a map, it is not attached to any map in particular.  To emphasize that
+ * changing this object does not change the map, the {@link #setValue} method
+ * always throws <code>UnsupportedOperationException</code>.
+ *
+ * <p><b>Warning:</b> Use of this class violates the Java Collections
+ * interface contract, which states that <code>Map.Entry</code> objects
+ * should only be obtained from <code>Map.entrySet()</code> sets, while this
+ * class allows constructing them directly.  However, it is useful for
+ * performing operations on an entry set such as add(), contains(), etc.  For
+ * restrictions see {@link #getValue} and {@link #setValue}.</p>
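+ *
+ * <p>For illustration, a minimal sketch of testing membership in an entry
+ * set; the <code>map</code> variable and its types are hypothetical:</p>
+ * <pre>
+ *     Map&lt;String,String&gt; map = getMap(); // hypothetical accessor
+ *     Map.Entry&lt;String,String&gt; entry =
+ *         new MapEntryParameter&lt;String,String&gt;("color", "red");
+ *     boolean present = map.entrySet().contains(entry);
+ * </pre>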
+ *
+ * @author Mark Hayes
+ */
+public class MapEntryParameter<K,V> implements Map.Entry<K,V> {
+
+    private K key;
+    private V value;
+
+    /**
+     * Creates a map entry with a given key and value.
+     *
+     * @param key is the key to use.
+     *
+     * @param value is the value to use.
+     */
+    public MapEntryParameter(K key, V value) {
+
+        this.key = key;
+        this.value = value;
+    }
+
+    /**
+     * Computes a hash code as specified by {@link
+     * java.util.Map.Entry#hashCode}.
+     *
+     * @return the computed hash code.
+     */
+    public int hashCode() {
+
+        return ((key == null)    ? 0 : key.hashCode()) ^
+               ((value == null)  ? 0 : value.hashCode());
+    }
+
+    /**
+     * Compares this entry to a given entry as specified by {@link
+     * java.util.Map.Entry#equals}.
+     *
+     * @return true if the given object is equal to this entry.
+     */
+    public boolean equals(Object other) {
+
+        if (!(other instanceof Map.Entry)) {
+            return false;
+        }
+
+        Map.Entry e = (Map.Entry) other;
+
+        return ((key == null) ? (e.getKey() == null)
+                              : key.equals(e.getKey())) &&
+               ((value == null) ? (e.getValue() == null)
+                                : value.equals(e.getValue()));
+    }
+
+    /**
+     * Returns the key of this entry.
+     *
+     * @return the key of this entry.
+     */
+    public final K getKey() {
+
+        return key;
+    }
+
+    /**
+     * Returns the value of this entry.  Note that this will be the value
+     * passed to the constructor, since {@link #setValue} always throws an
+     * exception for this class.  It will not reflect changes made to a Map.
+     *
+     * @return the value of this entry.
+     */
+    public final V getValue() {
+
+        return value;
+    }
+
+    /**
+     * Always throws <code>UnsupportedOperationException</code> since this
+     * object is not attached to a map.
+     */
+    public V setValue(V newValue) {
+
+        throw new UnsupportedOperationException();
+    }
+
+    final void setValueInternal(V newValue) {
+
+        this.value = newValue;
+    }
+
+    /**
+     * Converts the entry to a string representation for debugging.
+     *
+     * @return the string representation.
+     */
+    public String toString() {
+
+        return "[key [" + key + "] value [" + value + ']';
+    }
+}
diff --git a/src/com/sleepycat/collections/MyRangeCursor.java b/src/com/sleepycat/collections/MyRangeCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..6f96615f083cde811eb516071d5dc827fbdc1efd
--- /dev/null
+++ b/src/com/sleepycat/collections/MyRangeCursor.java
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MyRangeCursor.java,v 1.8 2008/02/05 23:28:19 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+class MyRangeCursor extends RangeCursor {
+
+    private DataView view;
+    private boolean isRecnoOrQueue;
+    private boolean writeCursor;
+
+    MyRangeCursor(KeyRange range,
+                  CursorConfig config,
+                  DataView view,
+                  boolean writeAllowed)
+        throws DatabaseException {
+
+        super(range, view.dupsRange, view.dupsOrdered,
+              openCursor(view, config, writeAllowed));
+        this.view = view;
+        isRecnoOrQueue = view.recNumAllowed && !view.btreeRecNumDb;
+        writeCursor = isWriteCursor(config, writeAllowed);
+    }
+
+    /**
+     * Returns true if a write cursor is requested by the user via the cursor
+     * config, or if this is a writable cursor and the user has not specified a
+     * cursor config.  For CDB, a special cursor must be created for writing.
+     * See CurrentTransaction.openCursor.
+     */
+    private static boolean isWriteCursor(CursorConfig config,
+                                         boolean writeAllowed) {
+        return DbCompat.getWriteCursor(config) ||
+               (config == CursorConfig.DEFAULT && writeAllowed);
+    }
+
+    private static Cursor openCursor(DataView view,
+                                     CursorConfig config,
+                                     boolean writeAllowed)
+        throws DatabaseException {
+
+        return view.currentTxn.openCursor
+            (view.db, config, isWriteCursor(config, writeAllowed),
+             view.useTransaction());
+    }
+
+    protected Cursor dupCursor(Cursor cursor, boolean samePosition)
+        throws DatabaseException {
+
+        return view.currentTxn.dupCursor(cursor, writeCursor, samePosition);
+    }
+
+    protected void closeCursor(Cursor cursor)
+        throws DatabaseException {
+
+        view.currentTxn.closeCursor(cursor);
+    }
+
+    protected boolean checkRecordNumber() {
+        return isRecnoOrQueue;
+    }
+}
diff --git a/src/com/sleepycat/collections/PrimaryKeyAssigner.java b/src/com/sleepycat/collections/PrimaryKeyAssigner.java
new file mode 100644
index 0000000000000000000000000000000000000000..560a366933b6732c9c33a5211acbadd6bbf29045
--- /dev/null
+++ b/src/com/sleepycat/collections/PrimaryKeyAssigner.java
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: PrimaryKeyAssigner.java,v 1.30 2008/01/07 14:28:45 cwl Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * An interface implemented to assign new primary key values.
+ * An implementation of this interface is passed to the {@link StoredMap}
+ * or {@link StoredSortedMap} constructor to assign primary keys for that
+ * store. Key assignment occurs when <code>StoredMap.append()</code> is called.
+ *
+ * @author Mark Hayes
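+ * <p>A minimal sketch of an implementation that assigns sequential integer
+ * keys; the counter and the use of <code>IntegerBinding</code> are
+ * illustrative choices, not requirements of this interface:</p>
+ * <pre>
+ *     class SequentialAssigner implements PrimaryKeyAssigner {
+ *         private int next = 1;
+ *         public synchronized void assignKey(DatabaseEntry keyData)
+ *             throws DatabaseException {
+ *             // IntegerBinding is from com.sleepycat.bind.tuple.
+ *             IntegerBinding.intToEntry(next++, keyData);
+ *         }
+ *     }
+ * </pre>
+ *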
+ */
+public interface PrimaryKeyAssigner {
+
+    /**
+     * Assigns a new primary key value into the given data buffer.
+     */
+    void assignKey(DatabaseEntry keyData)
+        throws DatabaseException;
+}
diff --git a/src/com/sleepycat/collections/StoredCollection.java b/src/com/sleepycat/collections/StoredCollection.java
new file mode 100644
index 0000000000000000000000000000000000000000..8a6a01f63b288d2849c7f109e1e386c0c9ba211a
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredCollection.java
@@ -0,0 +1,598 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredCollection.java,v 1.46 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.JoinConfig;
+import com.sleepycat.je.OperationStatus;
+
+/**
+ * An abstract base class for all stored collections.  This class, and its
+ * base class {@link StoredContainer}, provide implementations of most methods
+ * in the {@link Collection} interface.  Other methods, such as {@link #add}
+ * and {@link #remove}, are provided by concrete classes that extend this
+ * class.
+ *
+ * <p>In addition, this class provides the following methods for stored
+ * collections only.  Note that the use of these methods is not compatible with
+ * the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #getIteratorBlockSize}</li>
+ * <li>{@link #setIteratorBlockSize}</li>
+ * <li>{@link #storedIterator()}</li>
+ * <li>{@link #storedIterator(boolean)}</li>
+ * <li>{@link #join}</li>
+ * <li>{@link #toList()}</li>
+ * </ul>
+ *
+ * @author Mark Hayes
+ */
+public abstract class StoredCollection<E> extends StoredContainer
+    implements Collection<E> {
+
+    /**
+     * The default number of records read at one time by iterators.
+     * @see #setIteratorBlockSize
+     */
+    public static final int DEFAULT_ITERATOR_BLOCK_SIZE = 10;
+
+    private int iteratorBlockSize = DEFAULT_ITERATOR_BLOCK_SIZE;
+
+    StoredCollection(DataView view) {
+
+        super(view);
+    }
+
+    /**
+     * Returns the number of records read at one time by iterators returned by
+     * the {@link #iterator} method.  By default this value is {@link
+     * #DEFAULT_ITERATOR_BLOCK_SIZE}.
+     */
+    public int getIteratorBlockSize() {
+
+        return iteratorBlockSize;
+    }
+
+    /**
+     * Changes the number of records read at one time by iterators returned by
+     * the {@link #iterator} method.  By default this value is {@link
+     * #DEFAULT_ITERATOR_BLOCK_SIZE}.
+     *
+     * @throws IllegalArgumentException if the blockSize is less than two.
+     */
+    public void setIteratorBlockSize(int blockSize) {
+
+        if (blockSize < 2) {
+            throw new IllegalArgumentException
+                ("blockSize is less than two: " + blockSize);
+        }
+
+        iteratorBlockSize = blockSize;
+    }
+
+    final boolean add(Object key, Object value) {
+
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            OperationStatus status =
+                cursor.putNoDupData(key, value, null, false);
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    BlockIterator blockIterator() {
+        return new BlockIterator(this, isWriteAllowed(), iteratorBlockSize);
+    }
+
+    /**
+     * Returns an iterator over the elements in this collection.
+     * The iterator will be read-only if the collection is read-only.
+     * This method conforms to the {@link Collection#iterator} interface.
+     *
+     * <p>The iterator returned by this method does not keep a database cursor
+     * open and therefore it does not need to be closed.  It reads blocks of
+     * records as needed, opening and closing a cursor to read each block of
+     * records.  The number of records per block is 10 by default and can be
+     * changed with {@link #setIteratorBlockSize}.</p>
+     *
+     * <p>Because this iterator does not keep a cursor open, if it is used
+     * without transactions, the iterator does not have <em>cursor
+     * stability</em> characteristics.  In other words, the record at the
+     * current iterator position can be changed or deleted by another thread.
+     * To prevent this from happening, call this method within a transaction or
+     * use the {@link #storedIterator()} method instead.</p>
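+     *
+     * <p>A minimal usage sketch (the element type is illustrative); this
+     * iterator requires no explicit close:</p>
+     * <pre>
+     *     for (MyEntity entity : collection) {
+     *         // process entity
+     *     }
+     * </pre>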
+     *
+     * @return a standard {@link Iterator} for this collection.
+     *
+     * @see #isWriteAllowed
+     */
+    public Iterator<E> iterator() {
+        return blockIterator();
+    }
+
+    /**
+     * Returns an iterator over the elements in this collection.
+     * The iterator will be read-only if the collection is read-only.
+     * This method does not exist in the standard {@link Collection} interface.
+     *
+     * <p>If {@code Iterator.set} or {@code Iterator.remove} will be called
+     * and the underlying Database is transactional, then a transaction must be
+     * active when calling this method and must remain active while using the
+     * iterator.</p>
+     *
+     * <p><strong>Warning:</strong> The iterator returned must be explicitly
+     * closed using {@link StoredIterator#close()} or {@link
+     * StoredIterator#close(java.util.Iterator)} to release the underlying
+     * database cursor resources.</p>
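+     *
+     * <p>A minimal sketch of safe usage with an explicit close (the element
+     * type is illustrative):</p>
+     * <pre>
+     *     StoredIterator&lt;MyEntity&gt; i = collection.storedIterator();
+     *     try {
+     *         while (i.hasNext()) {
+     *             MyEntity entity = i.next();
+     *             // process entity
+     *         }
+     *     } finally {
+     *         i.close();
+     *     }
+     * </pre>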
+     *
+     * @return a {@link StoredIterator} for this collection.
+     *
+     * @see #isWriteAllowed
+     */
+    public StoredIterator<E> storedIterator() {
+
+        return storedIterator(isWriteAllowed());
+    }
+
+    /**
+     * Returns a read or read-write iterator over the elements in this
+     * collection.
+     * This method does not exist in the standard {@link Collection} interface.
+     *
+     * <p>If {@code Iterator.set} or {@code Iterator.remove} will be called
+     * and the underlying Database is transactional, then a transaction must be
+     * active when calling this method and must remain active while using the
+     * iterator.</p>
+     *
+     * <p><strong>Warning:</strong> The iterator returned must be explicitly
+     * closed using {@link StoredIterator#close()} or {@link
+     * StoredIterator#close(java.util.Iterator)} to release the underlying
+     * database cursor resources.</p>
+     *
+     * @param writeAllowed is true to open a read-write iterator or false to
+     * open a read-only iterator.  If the collection is read-only the iterator
+     * will always be read-only.
+     *
+     * @return a {@link StoredIterator} for this collection.
+     *
+     * @throws IllegalStateException if writeAllowed is true but the collection
+     * is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     *
+     * @see #isWriteAllowed
+     */
+    public StoredIterator<E> storedIterator(boolean writeAllowed) {
+
+        try {
+            return new StoredIterator<E>
+                (this, writeAllowed && isWriteAllowed(), null);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * @deprecated Please use {@link #storedIterator()} or {@link
+     * #storedIterator(boolean)} instead.  Because the iterator returned must
+     * be closed, the method name {@code iterator} is confusing since standard
+     * Java iterators do not need to be closed.
+     */
+    public StoredIterator<E> iterator(boolean writeAllowed) {
+
+        return storedIterator(writeAllowed);
+    }
+
+    /**
+     * Returns an array of all the elements in this collection.
+     * This method conforms to the {@link Collection#toArray()} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public Object[] toArray() {
+
+        ArrayList<Object> list = new ArrayList<Object>();
+        StoredIterator i = storedIterator();
+        try {
+            while (i.hasNext()) {
+                list.add(i.next());
+            }
+        } finally {
+            i.close();
+        }
+        return list.toArray();
+    }
+
+    /**
+     * Returns an array of all the elements in this collection whose runtime
+     * type is that of the specified array.
+     * This method conforms to the {@link Collection#toArray(Object[])}
+     * interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public <T> T[] toArray(T[] a) {
+
+        int j = 0;
+        StoredIterator i = storedIterator();
+        try {
+            while (j < a.length && i.hasNext()) {
+                a[j++] = (T) i.next();
+            }
+            if (j < a.length) {
+                a[j] = null;
+            } else if (i.hasNext()) {
+                ArrayList<T> list = new ArrayList<T>(Arrays.asList(a));
+                while (i.hasNext()) {
+                    list.add((T) i.next());
+                }
+                a = list.toArray(a);
+            }
+        } finally {
+            i.close();
+        }
+        return a;
+    }
+
+    /**
+     * Returns true if this collection contains all of the elements in the
+     * specified collection.
+     * This method conforms to the {@link Collection#containsAll} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean containsAll(Collection<?> coll) {
+        Iterator<?> i = storedOrExternalIterator(coll);
+        try {
+            while (i.hasNext()) {
+                if (!contains(i.next())) {
+                    return false;
+                }
+            }
+        } finally {
+            StoredIterator.close(i);
+        }
+        return true;
+    }
+
+    /**
+     * Adds all of the elements in the specified collection to this collection
+     * (optional operation).
+     * This method calls the {@link #add(Object)} method of the concrete
+     * collection class, which may or may not be supported.
+     * This method conforms to the {@link Collection#addAll} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only, or
+     * if the collection is indexed, or if the add method is not supported by
+     * the concrete collection.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean addAll(Collection<? extends E> coll) {
+        Iterator<? extends E> i = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            i = storedOrExternalIterator(coll);
+            boolean changed = false;
+            while (i.hasNext()) {
+                if (add(i.next())) {
+                    changed = true;
+                }
+            }
+            StoredIterator.close(i);
+            commitAutoCommit(doAutoCommit);
+            return changed;
+        } catch (Exception e) {
+            StoredIterator.close(i);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Removes all this collection's elements that are also contained in the
+     * specified collection (optional operation).
+     * This method conforms to the {@link Collection#removeAll} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean removeAll(Collection<?> coll) {
+
+        return removeAll(coll, true);
+    }
+
+    /**
+     * Retains only the elements in this collection that are contained in the
+     * specified collection (optional operation).
+     * This method conforms to the {@link Collection#retainAll} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean retainAll(Collection<?> coll) {
+
+        return removeAll(coll, false);
+    }
+
+    private boolean removeAll(Collection<?> coll, boolean ifExistsInColl) {
+        StoredIterator i = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            boolean changed = false;
+            i = storedIterator();
+            while (i.hasNext()) {
+                if (ifExistsInColl == coll.contains(i.next())) {
+                    i.remove();
+                    changed = true;
+                }
+            }
+            i.close();
+            commitAutoCommit(doAutoCommit);
+            return changed;
+        } catch (Exception e) {
+            if (i != null) {
+                i.close();
+            }
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Compares the specified object with this collection for equality.
+     * A value comparison is performed by this method and the stored values
+     * are compared rather than calling the equals() method of each element.
+     * This method conforms to the {@link Collection#equals} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean equals(Object other) {
+
+        if (other instanceof Collection) {
+            Collection otherColl = StoredCollection.copyCollection(other);
+            StoredIterator i = storedIterator();
+            try {
+                while (i.hasNext()) {
+                    if (!otherColl.remove(i.next())) {
+                        return false;
+                    }
+                }
+                return otherColl.isEmpty();
+            } finally {
+                i.close();
+            }
+        } else {
+            return false;
+        }
+    }
+
+    /*
+     * Override hashCode along with equals to keep FindBugs satisfied;
+     * the identity-based default implementation is retained.
+     */
+    public int hashCode() {
+        return super.hashCode();
+    }
+
+    /**
+     * Returns a copy of this collection as an ArrayList.  This is the same as
+     * {@link #toArray()} but returns a collection instead of an array.
+     *
+     * @return an {@link ArrayList} containing a copy of all elements in this
+     * collection.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public List<E> toList() {
+
+        ArrayList<E> list = new ArrayList<E>();
+        StoredIterator<E> i = storedIterator();
+        try {
+            while (i.hasNext()) {
+                list.add(i.next());
+            }
+            return list;
+        } finally {
+            i.close();
+        }
+    }
+
+    /**
+     * Converts the collection to a string representation for debugging.
+     * WARNING: The returned string may be very large.
+     *
+     * @return the string representation.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        buf.append("[");
+        StoredIterator i = storedIterator();
+        try {
+            while (i.hasNext()) {
+                if (buf.length() > 1) {
+                    buf.append(',');
+                }
+                buf.append(i.next().toString());
+            }
+            buf.append(']');
+            return buf.toString();
+        } finally {
+            i.close();
+        }
+    }
+
+    // Inherit javadoc
+    public int size() {
+
+        boolean countDups = iterateDuplicates();
+        if (DbCompat.DATABASE_COUNT && countDups && !view.range.hasBound()) {
+            try {
+                return (int) DbCompat.getDatabaseCount(view.db);
+            } catch (Exception e) {
+                throw StoredContainer.convertException(e);
+            }
+        } else {
+            int count = 0;
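+            /*
+             * The count is non-transactional, so when locking is enabled
+             * read uncommitted data to avoid blocking on writers.
+             */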
+            CursorConfig cursorConfig = view.currentTxn.isLockingMode() ?
+                CursorConfig.READ_UNCOMMITTED : null;
+            DataCursor cursor = null;
+            try {
+                cursor = new DataCursor(view, false, cursorConfig);
+                OperationStatus status = cursor.getFirst(false);
+                while (status == OperationStatus.SUCCESS) {
+                    if (countDups) {
+                        count += cursor.count();
+                    } else {
+                        count += 1;
+                    }
+                    status = cursor.getNextNoDup(false);
+                }
+            } catch (Exception e) {
+                throw StoredContainer.convertException(e);
+            } finally {
+                closeCursor(cursor);
+            }
+            return count;
+        }
+    }
+
+    /**
+     * Returns an iterator representing an equality join of the indices and
+     * index key values specified.
+     * This method does not exist in the standard {@link Collection} interface.
+     *
+     * <p><strong>Warning:</strong> The iterator returned must be explicitly
+     * closed using {@link StoredIterator#close()} or {@link
+     * StoredIterator#close(java.util.Iterator)} to release the underlying
+     * database cursor resources.</p>
+     *
+     * <p>The returned iterator supports only the two methods: hasNext() and
+     * next().  All other methods will throw UnsupportedOperationException.</p>
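+     *
+     * <p>A hedged sketch of a two-way join; the index containers and key
+     * values here are hypothetical:</p>
+     * <pre>
+     *     StoredIterator&lt;MyEntity&gt; i = entitySet.join
+     *         (new StoredContainer[] { byColor, bySize },
+     *          new Object[] { "red", "large" },
+     *          null);
+     *     try {
+     *         while (i.hasNext()) {
+     *             MyEntity entity = i.next();
+     *         }
+     *     } finally {
+     *         i.close();
+     *     }
+     * </pre>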
+     *
+     * @param indices is an array of indices with elements corresponding to
+     * those in the indexKeys array.
+     *
+     * @param indexKeys is an array of index key values identifying the
+     * elements to be selected.
+     *
+     * @param joinConfig is the join configuration, or null to use the
+     * default configuration.
+     *
+     * @return an iterator over the elements in this collection that match
+     * all specified index key values.
+     *
+     * @throws IllegalArgumentException if this collection is indexed or if a
+     * given index does not have the same store as this collection.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredIterator<E> join(StoredContainer[] indices,
+                                  Object[] indexKeys,
+                                  JoinConfig joinConfig) {
+
+        try {
+            DataView[] indexViews = new DataView[indices.length];
+            for (int i = 0; i < indices.length; i += 1) {
+                indexViews[i] = indices[i].view;
+            }
+            DataCursor cursor = view.join(indexViews, indexKeys, joinConfig);
+            return new StoredIterator<E>(this, false, cursor);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    final E getFirstOrLast(boolean doGetFirst) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            OperationStatus status;
+            if (doGetFirst) {
+                status = cursor.getFirst(false);
+            } else {
+                status = cursor.getLast(false);
+            }
+            return (status == OperationStatus.SUCCESS) ?
+                    makeIteratorData(null, cursor) : null;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    E makeIteratorData(BaseIterator iterator, DataCursor cursor) {
+
+        return makeIteratorData(iterator,
+                                cursor.getKeyThang(),
+                                cursor.getPrimaryKeyThang(),
+                                cursor.getValueThang());
+    }
+
+    abstract E makeIteratorData(BaseIterator iterator,
+                                DatabaseEntry keyEntry,
+                                DatabaseEntry priKeyEntry,
+                                DatabaseEntry valueEntry);
+
+    abstract boolean hasValues();
+
+    boolean iterateDuplicates() {
+
+        return true;
+    }
+
+    void checkIterAddAllowed()
+        throws UnsupportedOperationException {
+
+        if (!areDuplicatesAllowed()) {
+            throw new UnsupportedOperationException("duplicates required");
+        }
+    }
+
+    int getIndexOffset() {
+
+        return 0;
+    }
+
+    private static Collection copyCollection(Object other) {
+
+        if (other instanceof StoredCollection) {
+            return ((StoredCollection) other).toList();
+        } else {
+            return new ArrayList((Collection) other);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredCollections.java b/src/com/sleepycat/collections/StoredCollections.java
new file mode 100644
index 0000000000000000000000000000000000000000..92ae581f602531296fd8e51910d73182475442dc
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredCollections.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredCollections.java,v 1.32 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+import com.sleepycat.je.CursorConfig;
+
+/**
+ * Static methods operating on collections and maps.
+ *
+ * <p>This class consists exclusively of static methods that operate on or
+ * return stored collections and maps, jointly called containers. It contains
+ * methods for changing certain properties of a container.  Because container
+ * properties are immutable, these methods always return a new container
+ * instance.  This allows stored container instances to be used safely by
+ * multiple threads.  Creating the new container instance is not expensive and
+ * creates only two new objects.</p>
+ *
+ * <p>When a container is created with a particular property, all containers
+ * and iterators derived from that container will inherit the property.  For
+ * example, if a read-uncommitted Map is created then calls to its subMap(),
+ * values(), entrySet(), and keySet() methods will create read-uncommitted
+ * containers also.</p>
+ *
+ * <p>Method names beginning with "configured" create a new container with a
+ * specified {@link CursorConfig} from a given stored container.  This allows
+ * configuring a container for read-committed isolation, read-uncommitted
+ * isolation, or any other property supported by <code>CursorConfig</code>.
+ * All operations performed with the resulting container will be performed with
+ * the specified cursor configuration.</p>
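+ *
+ * <p>For example, a read-committed view of an existing stored map might be
+ * created as sketched below; the <code>storedMap</code> variable is assumed
+ * to have been created elsewhere:</p>
+ * <pre>
+ *     Map&lt;String,String&gt; readCommittedMap =
+ *         StoredCollections.configuredMap
+ *             (storedMap, CursorConfig.READ_COMMITTED);
+ * </pre>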
+ */
+public class StoredCollections {
+
+    private StoredCollections() {}
+
+    /**
+     * Creates a configured collection from a given stored collection.
+     *
+     * @param storedCollection the base collection.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new collection instance; null may be specified to use
+     * the default configuration.
+     *
+     * @return the configured collection.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <E> Collection<E> configuredCollection(Collection<E>
+                                                         storedCollection,
+                                                         CursorConfig config) {
+        return (Collection)
+            ((StoredContainer) storedCollection).configuredClone(config);
+    }
+
+    /**
+     * Creates a configured list from a given stored list.
+     *
+     * <p>Note that this method may not be called in the JE product, since the
+     * StoredList class is not supported.</p>
+     *
+     * @param storedList the base list.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new list instance; null may be specified to use the
+     * default configuration.
+     *
+     * @return the configured list.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <E> List<E> configuredList(List<E> storedList,
+                                             CursorConfig config) {
+        return (List) ((StoredContainer) storedList).configuredClone(config);
+    }
+
+    /**
+     * Creates a configured map from a given stored map.
+     *
+     * @param storedMap the base map.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new map instance; null may be specified to use the
+     * default configuration.
+     *
+     * @return the configured map.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <K,V> Map<K,V> configuredMap(Map<K,V> storedMap,
+                                               CursorConfig config) {
+        return (Map) ((StoredContainer) storedMap).configuredClone(config);
+    }
+
+    /**
+     * Creates a configured set from a given stored set.
+     *
+     * @param storedSet the base set.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new set instance; null may be specified to use the
+     * default configuration.
+     *
+     * @return the configured set.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <E> Set<E> configuredSet(Set<E> storedSet,
+                                           CursorConfig config) {
+        return (Set) ((StoredContainer) storedSet).configuredClone(config);
+    }
+
+    /**
+     * Creates a configured sorted map from a given stored sorted map.
+     *
+     * @param storedSortedMap the base map.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new map instance; null may be specified to use the
+     * default configuration.
+     *
+     * @return the configured map.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <K,V> SortedMap<K,V> configuredSortedMap(SortedMap<K,V>
+                                                           storedSortedMap,
+                                                           CursorConfig
+                                                           config) {
+        return (SortedMap)
+            ((StoredContainer) storedSortedMap).configuredClone(config);
+    }
+
+    /**
+     * Creates a configured sorted set from a given stored sorted set.
+     *
+     * @param storedSortedSet the base set.
+     *
+     * @param config is the cursor configuration to be used for all operations
+     * performed via the new set instance; null may be specified to use the
+     * default configuration.
+     *
+     * @return the configured set.
+     *
+     * @throws ClassCastException if the given container is not a
+     * StoredContainer.
+     */
+    public static <E> SortedSet<E> configuredSortedSet(SortedSet<E>
+                                                       storedSortedSet,
+                                                       CursorConfig config) {
+        return (SortedSet)
+            ((StoredContainer) storedSortedSet).configuredClone(config);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link
+     * #configuredCollection} in order to conform to ANSI database isolation
+     * terminology.  To obtain a dirty-read collection, pass
+     * <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <E> Collection<E> dirtyReadCollection(Collection<E>
+                                                        storedCollection) {
+
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredCollection
+            (storedCollection, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link #configuredList} in
+     * order to conform to ANSI database isolation terminology.  To obtain a
+     * dirty-read list, pass <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <E> List<E> dirtyReadList(List<E> storedList) {
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredList(storedList, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link #configuredMap} in
+     * order to conform to ANSI database isolation terminology.  To obtain a
+     * dirty-read map, pass <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <K,V> Map<K,V> dirtyReadMap(Map<K,V> storedMap) {
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredMap(storedMap, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link #configuredSet} in
+     * order to conform to ANSI database isolation terminology.  To obtain a
+     * dirty-read set, pass <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <E> Set<E> dirtyReadSet(Set<E> storedSet) {
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredSet(storedSet, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link
+     * #configuredSortedMap} in order to conform to ANSI database isolation
+     * terminology.  To obtain a dirty-read map, pass
+     * <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <K,V> SortedMap<K,V> dirtyReadSortedMap(SortedMap<K,V>
+                                                          storedSortedMap) {
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredSortedMap
+            (storedSortedMap, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link
+     * #configuredSortedSet} in order to conform to ANSI database isolation
+     * terminology.  To obtain a dirty-read set, pass
+     * <code>CursorConfig.READ_UNCOMMITTED</code>
+     */
+    public static <E> SortedSet<E> dirtyReadSortedSet(SortedSet<E>
+                                                      storedSortedSet) {
+        /* We can't use READ_UNCOMMITTED until it is added to DB core. */
+        return configuredSortedSet
+            (storedSortedSet, CursorConfig.DIRTY_READ);
+    }
+
+    /**
+     * Clones an iterator preserving its current position.
+     *
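+     * <p>A minimal sketch (the element type and collection are
+     * illustrative): the duplicate starts at the same position as the
+     * original.</p>
+     * <pre>
+     *     Iterator&lt;MyEntity&gt; i = storedSet.iterator();
+     *     i.next();
+     *     Iterator&lt;MyEntity&gt; dup = StoredCollections.iterator(i);
+     * </pre>
+     *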
+     * @param iter an iterator to clone.
+     *
+     * @return a new {@code Iterator} having the same position as the given
+     * iterator.
+     *
+     * @throws ClassCastException if the given iterator was not obtained via a
+     * {@link StoredCollection} method.
+     */
+    public static <E> Iterator<E> iterator(Iterator<E> iter) {
+
+        return ((BaseIterator) iter).dup();
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredContainer.java b/src/com/sleepycat/collections/StoredContainer.java
new file mode 100644
index 0000000000000000000000000000000000000000..bbc4e1ef482c4e46ddbc44a8beb4db6f9e99d682
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredContainer.java
@@ -0,0 +1,469 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredContainer.java,v 1.56 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Collection;
+import java.util.Iterator;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * An abstract base class for all stored collections and maps.  This class
+ * provides implementations of methods that are common to the {@link
+ * java.util.Collection} and the {@link java.util.Map} interfaces, namely
+ * {@link #clear}, {@link #isEmpty} and {@link #size}.
+ *
+ * <p>In addition, this class provides the following methods for stored
+ * collections only.  Note that the use of these methods is not compatible with
+ * the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #isWriteAllowed()}</li>
+ * <li>{@link #isSecondary()}</li>
+ * <li>{@link #isOrdered()}</li>
+ * <li>{@link #areKeyRangesAllowed()}</li>
+ * <li>{@link #areDuplicatesAllowed()}</li>
+ * <li>{@link #areDuplicatesOrdered()}</li>
+ * <li>{@link #areKeysRenumbered()}</li>
+ * <li>{@link #getCursorConfig()}</li>
+ * <li>{@link #isTransactional()}</li>
+ * </ul>
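+ *
+ * <p>For instance, a hedged sketch of guarding an update by checking
+ * container properties (the container variable is illustrative):</p>
+ * <pre>
+ *     if (container.isWriteAllowed() &amp;&amp; container.isTransactional()) {
+ *         // perform transactional writes
+ *     }
+ * </pre>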
+ *
+ * @author Mark Hayes
+ */
+public abstract class StoredContainer implements Cloneable {
+
+    DataView view;
+
+    StoredContainer(DataView view) {
+
+        this.view = view;
+    }
+
+    /**
+     * Returns true if this is a read-write container or false if this is a
+     * read-only container.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * @return whether write is allowed.
+     */
+    public final boolean isWriteAllowed() {
+
+        return view.writeAllowed;
+    }
+
+    /**
+     * Returns the cursor configuration that is used for all operations
+     * performed via this container.
+     * For example, if <code>CursorConfig.getReadUncommitted</code> returns
+     * true, data will be read that is modified but not committed.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * @return the cursor configuration, or null if no configuration has been
+     * specified.
+     */
+    public final CursorConfig getCursorConfig() {
+
+        return DbCompat.cloneCursorConfig(view.cursorConfig);
+    }
+
+    /**
+     * Returns whether read-uncommitted is allowed for this container.
+     * For the JE product, read-uncommitted is always allowed; for the DB
+     * product, read-uncommitted is allowed if it was configured for the
+     * underlying database for this container.
+     * Even when read-uncommitted is allowed it must specifically be enabled by
+     * calling one of the {@link StoredCollections} methods.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * @return whether read-uncommitted is allowed.
+     *
+     * @deprecated This method is deprecated with no replacement in this class.
+     * In the DB product, <code>DatabaseConfig.getReadUncommitted</code> may be
+     * called.
+     */
+    public final boolean isDirtyReadAllowed() {
+
+        return view.readUncommittedAllowed;
+    }
+
+    /**
+     * @deprecated This method has been replaced by {@link #getCursorConfig}.
+     * <code>CursorConfig.isReadUncommitted</code> may be called to determine
+     * whether dirty-read is enabled.
+     */
+    public final boolean isDirtyRead() {
+
+        return view.cursorConfig.getReadUncommitted();
+    }
+
+    /**
+     * Returns whether the databases underlying this container are
+     * transactional.
+     * Even in a transactional environment, a database will be transactional
+     * only if it was opened within a transaction or if the auto-commit option
+     * was specified when it was opened.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * @return whether the database is transactional.
+     */
+    public final boolean isTransactional() {
+
+        return view.transactional;
+    }
+
+    /**
+     * Clones a container with a specified cursor configuration.
+     */
+    final StoredContainer configuredClone(CursorConfig config) {
+
+        try {
+            StoredContainer cont = (StoredContainer) clone();
+            cont.view = cont.view.configuredView(config);
+            cont.initAfterClone();
+            return cont;
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Override this method to initialize view-dependent fields.
+     */
+    void initAfterClone() {
+    }
+
+    /**
+     * Returns whether duplicate keys are allowed in this container.
+     * Duplicates are optionally allowed for HASH and BTREE databases.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * <p>Note that the JE product only supports BTREE databases.</p>
+     *
+     * @return whether duplicates are allowed.
+     */
+    public final boolean areDuplicatesAllowed() {
+
+        return view.dupsAllowed;
+    }
+
+    /**
+     * Returns whether duplicate keys are allowed and sorted by element value.
+     * Duplicates are optionally sorted for HASH and BTREE databases.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * <p>Note that the JE product only supports BTREE databases, and
+     * duplicates are always sorted.</p>
+     *
+     * @return whether duplicates are ordered.
+     */
+    public final boolean areDuplicatesOrdered() {
+
+        return view.dupsOrdered;
+    }
+
+    /**
+     * Returns whether keys are renumbered when insertions and deletions occur.
+     * Keys are optionally renumbered for RECNO databases.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * <p>Note that the JE product does not support RECNO databases, and
+     * therefore keys are never renumbered.</p>
+     *
+     * @return whether keys are renumbered.
+     */
+    public final boolean areKeysRenumbered() {
+
+        return view.keysRenumbered;
+    }
+
+    /**
+     * Returns whether keys are ordered in this container.
+     * Keys are ordered for BTREE, RECNO and QUEUE databases.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * <p>Note that the JE product only supports BTREE databases, and
+     * therefore keys are always ordered.</p>
+     *
+     * @return whether keys are ordered.
+     */
+    public final boolean isOrdered() {
+
+        return view.ordered;
+    }
+
+    /**
+     * Returns whether key ranges are allowed in this container.
+     * Key ranges are allowed only for BTREE databases.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * <p>Note that the JE product only supports BTREE databases, and
+     * therefore key ranges are always allowed.</p>
+     *
+     * @return whether key ranges are allowed.
+     */
+    public final boolean areKeyRangesAllowed() {
+
+        return view.keyRangesAllowed;
+    }
+
+    /**
+     * Returns whether this container is a view on a secondary database rather
+     * than directly on a primary database.
+     * This method does not exist in the standard {@link java.util.Map} or
+     * {@link java.util.Collection} interfaces.
+     *
+     * @return whether the view is for a secondary database.
+     */
+    public final boolean isSecondary() {
+
+        return view.isSecondary();
+    }
+
+    /**
+     * Returns a non-transactional count of the records in the collection or
+     * map.  This method conforms to the {@link java.util.Collection#size} and
+     * {@link java.util.Map#size} interfaces.
+     *
+     * <!-- begin JE only -->
+     * <p>This operation is faster than obtaining a count by scanning the
+     * collection manually, and will not perturb the current contents of the
+     * cache.  However, the count is not guaranteed to be accurate if there are
+     * concurrent updates.</p>
+     * <!-- end JE only -->
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public abstract int size();
+
+    /**
+     * Returns true if this map or collection contains no mappings or elements.
+     * This method conforms to the {@link java.util.Collection#isEmpty} and
+     * {@link java.util.Map#isEmpty} interfaces.
+     *
+     * @return whether the container is empty.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean isEmpty() {
+
+        try {
+            return view.isEmpty();
+        } catch (Exception e) {
+            throw convertException(e);
+        }
+    }
+
+    /**
+     * Removes all mappings or elements from this map or collection (optional
+     * operation).
+     * This method conforms to the {@link java.util.Collection#clear} and
+     * {@link java.util.Map#clear} interfaces.
+     *
+     * @throws UnsupportedOperationException if the container is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public void clear() {
+
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            view.clear();
+            commitAutoCommit(doAutoCommit);
+        } catch (Exception e) {
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    Object getValue(Object key) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            if (OperationStatus.SUCCESS ==
+                cursor.getSearchKey(key, null, false)) {
+                return cursor.getCurrentValue();
+            } else {
+                return null;
+            }
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    Object putKeyValue(final Object key, final Object value) {
+
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            Object[] oldValue = new Object[1];
+            cursor.put(key, value, oldValue, false);
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return oldValue[0];
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
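+    /**
+     * Removes all records having the given key, returning true if any were
+     * found.  If oldVal is non-null, oldVal[0] is set to the value of the
+     * first record removed.
+     */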
+    final boolean removeKey(final Object key, final Object[] oldVal) {
+
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            boolean found = false;
+            OperationStatus status = cursor.getSearchKey(key, null, true);
+            while (status == OperationStatus.SUCCESS) {
+                cursor.delete();
+                found = true;
+                if (oldVal != null && oldVal[0] == null) {
+                    oldVal[0] = cursor.getCurrentValue();
+                }
+                status = areDuplicatesAllowed() ?
+                    cursor.getNextDup(true) : OperationStatus.NOTFOUND;
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return found;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    boolean containsKey(Object key) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            return OperationStatus.SUCCESS ==
+                   cursor.getSearchKey(key, null, false);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    final boolean removeValue(Object value) {
+
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            OperationStatus status = cursor.findValue(value, true);
+            if (status == OperationStatus.SUCCESS) {
+                cursor.delete();
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    boolean containsValue(Object value) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            OperationStatus status = cursor.findValue(value, true);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    /**
+     * Returns a StoredIterator if the given collection is a StoredCollection,
+     * else returns a regular/external Iterator.  The iterator returned should
+     * be closed with the static method StoredIterator.close(Iterator).
+     */
+    final Iterator storedOrExternalIterator(Collection coll) {
+
+        if (coll instanceof StoredCollection) {
+            return ((StoredCollection) coll).storedIterator();
+        } else {
+            return coll.iterator();
+        }
+    }
+
+    final void closeCursor(DataCursor cursor) {
+
+        if (cursor != null) {
+            try {
+                cursor.close();
+            } catch (Exception e) {
+                throw StoredContainer.convertException(e);
+            }
+        }
+    }
+
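+    /**
+     * Begins a transaction for auto-commit when the view is transactional and
+     * auto-commit is currently allowed.  Returns true if a transaction was
+     * begun, in which case the caller must later call commitAutoCommit or
+     * handleException.
+     */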
+    final boolean beginAutoCommit() {
+
+        if (view.transactional) {
+            CurrentTransaction currentTxn = view.getCurrentTxn();
+            try {
+                if (currentTxn.isAutoCommitAllowed()) {
+                    currentTxn.beginTransaction(null);
+                    return true;
+                }
+            } catch (DatabaseException e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+        }
+        return false;
+    }
+
+    final void commitAutoCommit(boolean doAutoCommit)
+        throws DatabaseException {
+
+        if (doAutoCommit) view.getCurrentTxn().commitTransaction();
+    }
+
+    final RuntimeException handleException(Exception e, boolean doAutoCommit) {
+
+        if (doAutoCommit) {
+            try {
+                view.getCurrentTxn().abortTransaction();
+            } catch (DatabaseException ignored) {
+		/* Klockwork - ok */
+            }
+        }
+        return StoredContainer.convertException(e);
+    }
+
+    static RuntimeException convertException(Exception e) {
+
+        if (e instanceof RuntimeException) {
+            return (RuntimeException) e;
+        } else {
+            return new RuntimeExceptionWrapper(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredEntrySet.java b/src/com/sleepycat/collections/StoredEntrySet.java
new file mode 100644
index 0000000000000000000000000000000000000000..20a2ca91d24d12cea0884ea9c1c9c33159ce601d
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredEntrySet.java
@@ -0,0 +1,168 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredEntrySet.java,v 1.33 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * The Set returned by Map.entrySet().  This class may not be instantiated
+ * directly.  Contrary to what is stated by {@link Map#entrySet}, this class
+ * does support the {@link #add} and {@link #addAll} methods.
+ *
+ * <p>The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects
+ * that are returned by this class and its iterators behaves just as the {@link
+ * StoredIterator#set} method does.</p>
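+ *
+ * <p>A minimal usage sketch (illustrative only; <code>map</code> is assumed
+ * to be an existing <code>StoredMap</code> with <code>String</code> keys and
+ * values):</p>
+ * <pre>
+ *  Iterator&lt;Map.Entry&lt;String,String&gt;&gt; i = map.entrySet().iterator();
+ *  try {
+ *      while (i.hasNext()) {
+ *          Map.Entry&lt;String,String&gt; entry = i.next();
+ *          entry.setValue(entry.getValue().toUpperCase());
+ *      }
+ *  } finally {
+ *      StoredIterator.close(i);
+ *  }
+ * </pre>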
+ *
+ * @author Mark Hayes
+ */
+public class StoredEntrySet<K,V>
+    extends StoredCollection<Map.Entry<K,V>>
+    implements Set<Map.Entry<K,V>> {
+
+    StoredEntrySet(DataView mapView) {
+
+        super(mapView);
+    }
+
+    /**
+     * Adds the specified element to this set if it is not already present
+     * (optional operation).
+     * This method conforms to the {@link Set#add} interface.
+     *
+     * @param mapEntry must be a {@link java.util.Map.Entry} instance.
+     *
+     * @return true if the key-value pair was added to the set (and was not
+     * previously present).
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws ClassCastException if the mapEntry is not a {@link
+     * java.util.Map.Entry} instance.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean add(Map.Entry<K,V> mapEntry) {
+
+        return add(mapEntry.getKey(), mapEntry.getValue());
+    }
+
+    /**
+     * Removes the specified element from this set if it is present (optional
+     * operation).
+     * This method conforms to the {@link Set#remove} interface.
+     *
+     * @param mapEntry is a {@link java.util.Map.Entry} instance to be removed.
+     *
+     * @return true if the key-value pair was removed from the set, or false if
+     * the mapEntry is not a {@link java.util.Map.Entry} instance or is not
+     * present in the set.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean remove(Object mapEntry) {
+
+        if (!(mapEntry instanceof Map.Entry)) {
+            return false;
+        }
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            Map.Entry entry = (Map.Entry) mapEntry;
+            OperationStatus status =
+                cursor.findBoth(entry.getKey(), entry.getValue(), true);
+            if (status == OperationStatus.SUCCESS) {
+                cursor.delete();
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Returns true if this set contains the specified element.
+     * This method conforms to the {@link Set#contains} interface.
+     *
+     * @param mapEntry is a {@link java.util.Map.Entry} instance to be checked.
+     *
+     * @return true if the key-value pair is present in the set, or false if
+     * the mapEntry is not a {@link java.util.Map.Entry} instance or is not
+     * present in the set.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean contains(Object mapEntry) {
+
+        if (!(mapEntry instanceof Map.Entry)) {
+            return false;
+        }
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            Map.Entry entry = (Map.Entry) mapEntry;
+            OperationStatus status =
+                cursor.findBoth(entry.getKey(), entry.getValue(), false);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    // javadoc is inherited
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        buf.append("[");
+        StoredIterator i = storedIterator();
+        try {
+            while (i.hasNext()) {
+                Map.Entry entry = (Map.Entry) i.next();
+                if (buf.length() > 1) buf.append(',');
+                Object key = entry.getKey();
+                Object val = entry.getValue();
+                if (key != null) buf.append(key.toString());
+                buf.append('=');
+                if (val != null) buf.append(val.toString());
+            }
+            buf.append(']');
+            return buf.toString();
+        }
+        finally {
+            i.close();
+        }
+    }
+
+    Map.Entry<K,V> makeIteratorData(BaseIterator iterator,
+                                    DatabaseEntry keyEntry,
+                                    DatabaseEntry priKeyEntry,
+                                    DatabaseEntry valueEntry) {
+
+        return new StoredMapEntry(view.makeKey(keyEntry, priKeyEntry),
+                                  view.makeValue(priKeyEntry, valueEntry),
+                                  this, iterator);
+    }
+
+    boolean hasValues() {
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredIterator.java b/src/com/sleepycat/collections/StoredIterator.java
new file mode 100644
index 0000000000000000000000000000000000000000..75a5a8200ec61b752f29924d6913ed3c0a83940d
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredIterator.java
@@ -0,0 +1,633 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredIterator.java,v 1.54 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Iterator;
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * The Iterator returned by all stored collections.
+ *
+ * <p>While in general this class conforms to the {@link Iterator} interface,
+ * it is important to note that all iterators for stored collections must be
+ * explicitly closed with {@link #close()}.  The static method {@link
+ * #close(java.util.Iterator)} allows calling close for all iterators without
+ * harm to iterators that are not from stored collections, and also avoids
+ * casting.  If a stored iterator is not closed, unpredictable behavior
+ * including process death may result.</p>
+ *
+ * <p>This class implements the {@link Iterator} interface for all stored
+ * iterators.  It also implements {@link ListIterator} because some list
+ * iterator methods apply to all stored iterators, for example, {@link
+ * #previous} and {@link #hasPrevious}.  Other list iterator methods are always
+ * supported for lists, but for other types of collections are only supported
+ * under certain conditions.  See {@link #nextIndex}, {@link #previousIndex},
+ * {@link #add} and {@link #set} for details.</p>
+ *
+ * <p>In addition, this class provides the following methods for stored
+ * collection iterators only.  Note that the use of these methods is not
+ * compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #close()}</li>
+ * <li>{@link #close(Iterator)}</li>
+ * <li>{@link #count()}</li>
+ * <li>{@link #getCollection}</li>
+ * <li>{@link #setReadModifyWrite}</li>
+ * <li>{@link #isReadModifyWrite}</li>
+ * </ul>
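+ *
+ * <p>A typical usage sketch (illustrative only; <code>set</code> is assumed
+ * to be any stored collection, for example a <code>StoredKeySet</code> with
+ * <code>String</code> keys):</p>
+ * <pre>
+ *  Iterator&lt;String&gt; i = set.iterator();
+ *  try {
+ *      while (i.hasNext()) {
+ *          String key = i.next();
+ *          // process key
+ *      }
+ *  } finally {
+ *      StoredIterator.close(i);
+ *  }
+ * </pre>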
+ *
+ * @author Mark Hayes
+ */
+public class StoredIterator<E>
+    implements ListIterator<E>, BaseIterator<E>, Cloneable {
+
+    /**
+     * Closes the given iterator using {@link #close()} if it is a {@link
+     * StoredIterator}.  If the given iterator is not a {@link StoredIterator},
+     * this method does nothing.
+     *
+     * @param i is the iterator to close.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public static void close(Iterator<?> i) {
+
+        if (i instanceof StoredIterator) {
+            ((StoredIterator) i).close();
+        }
+    }
+
+    private static final int MOVE_NEXT = 1;
+    private static final int MOVE_PREV = 2;
+    private static final int MOVE_FIRST = 3;
+
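+    /*
+     * toNext, toPrevious and toCurrent hold a pending MOVE_* direction that
+     * is applied to the cursor before the corresponding operation; a value of
+     * zero means the cursor is already positioned for that operation.
+     */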
+    private boolean lockForWrite;
+    private StoredCollection<E> coll;
+    private DataCursor cursor;
+    private int toNext;
+    private int toPrevious;
+    private int toCurrent;
+    private boolean writeAllowed;
+    private boolean setAndRemoveAllowed;
+    private E currentData;
+
+    StoredIterator(StoredCollection<E> coll,
+                   boolean writeAllowed,
+                   DataCursor joinCursor) {
+        try {
+            this.coll = coll;
+            this.writeAllowed = writeAllowed;
+            if (joinCursor == null)
+                this.cursor = new DataCursor(coll.view, writeAllowed);
+            else
+                this.cursor = joinCursor;
+            reset();
+        } catch (Exception e) {
+            try {
+                /* Ensure that the cursor is closed.  [#10516] */
+                close();
+            } catch (Exception ignored) {
+		/* Klockwork - ok */
+	    }
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Returns whether write-locks will be obtained when reading with this
+     * cursor.
+     * Obtaining write-locks can prevent deadlocks when reading and then
+     * modifying data.
+     *
+     * @return the write-lock setting.
+     */
+    public final boolean isReadModifyWrite() {
+
+        return lockForWrite;
+    }
+
+    /**
+     * Changes whether write-locks will be obtained when reading with this
+     * cursor.
+     * Obtaining write-locks can prevent deadlocks when reading and then
+     * modifying data.
+     *
+     * @param lockForWrite the write-lock setting.
+     */
+    public void setReadModifyWrite(boolean lockForWrite) {
+
+        this.lockForWrite = lockForWrite;
+    }
+
+    // --- begin Iterator/ListIterator methods ---
+
+    /**
+     * Returns true if this iterator has more elements when traversing in the
+     * forward direction.  False is returned if the iterator has been closed.
+     * This method conforms to the {@link Iterator#hasNext} interface.
+     *
+     * @return whether {@link #next()} will succeed.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean hasNext() {
+
+        if (cursor == null) {
+            return false;
+        }
+        try {
+            if (toNext != 0) {
+                OperationStatus status = move(toNext);
+                if (status == OperationStatus.SUCCESS) {
+                    toNext = 0;
+                    toPrevious = MOVE_PREV;
+                    toCurrent = MOVE_PREV;
+                }
+            }
+            return (toNext == 0);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Returns true if this iterator has more elements when traversing in the
+     * reverse direction.  It returns false if the iterator has been closed.
+     * This method conforms to the {@link ListIterator#hasPrevious} interface.
+     *
+     * @return whether {@link #previous()} will succeed.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is thrown.
+     */
+    public boolean hasPrevious() {
+
+        if (cursor == null) {
+            return false;
+        }
+        try {
+            if (toPrevious != 0) {
+                OperationStatus status = move(toPrevious);
+                if (status == OperationStatus.SUCCESS) {
+                    toPrevious = 0;
+                    toNext = MOVE_NEXT;
+                    toCurrent = MOVE_NEXT;
+                }
+            }
+            return (toPrevious == 0);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Returns the next element in the iteration.
+     * This method conforms to the {@link Iterator#next} interface.
+     *
+     * @return the next element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public E next() {
+
+        try {
+            if (toNext != 0) {
+                OperationStatus status = move(toNext);
+                if (status == OperationStatus.SUCCESS) {
+                    toNext = 0;
+                }
+            }
+            if (toNext == 0) {
+                currentData = coll.makeIteratorData(this, cursor);
+                toNext = MOVE_NEXT;
+                toPrevious = 0;
+                toCurrent = 0;
+                setAndRemoveAllowed = true;
+                return currentData;
+            }
+            // else throw NoSuchElementException below
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+        throw new NoSuchElementException();
+    }
+
+    /**
+     * Returns the previous element in the iteration.
+     * This method conforms to the {@link ListIterator#previous} interface.
+     *
+     * @return the previous element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public E previous() {
+
+        try {
+            if (toPrevious != 0) {
+                OperationStatus status = move(toPrevious);
+                if (status == OperationStatus.SUCCESS) {
+                    toPrevious = 0;
+                }
+            }
+            if (toPrevious == 0) {
+                currentData = coll.makeIteratorData(this, cursor);
+                toPrevious = MOVE_PREV;
+                toNext = 0;
+                toCurrent = 0;
+                setAndRemoveAllowed = true;
+                return currentData;
+            }
+            // else throw NoSuchElementException below
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+        throw new NoSuchElementException();
+    }
+
+    /**
+     * Returns the index of the element that would be returned by a subsequent
+     * call to next.
+     * This method conforms to the {@link ListIterator#nextIndex} interface
+     * except that it returns Integer.MAX_VALUE for stored lists when
+     * positioned at the end of the list, rather than returning the list size
+     * as specified by the ListIterator interface. This is because the database
+     * size is not available.
+     *
+     * @return the next index.
+     *
+     * @throws UnsupportedOperationException if this iterator's collection does
+     * not use record number keys.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public int nextIndex() {
+
+        if (!coll.view.recNumAccess) {
+            throw new UnsupportedOperationException(
+                "Record number access not supported");
+        }
+        try {
+            return hasNext() ? (cursor.getCurrentRecordNumber() -
+                                coll.getIndexOffset())
+                             : Integer.MAX_VALUE;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Returns the index of the element that would be returned by a subsequent
+     * call to previous.
+     * This method conforms to the {@link ListIterator#previousIndex}
+     * interface.
+     *
+     * @return the previous index.
+     *
+     * @throws UnsupportedOperationException if this iterator's collection does
+     * not use record number keys.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public int previousIndex() {
+
+        if (!coll.view.recNumAccess) {
+            throw new UnsupportedOperationException(
+                "Record number access not supported");
+        }
+        try {
+            return hasPrevious() ? (cursor.getCurrentRecordNumber() -
+                                    coll.getIndexOffset())
+                                 : (-1);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Replaces the last element returned by next or previous with the
+     * specified element (optional operation).
+     * This method conforms to the {@link ListIterator#set} interface.
+     *
+     * <p>In order to call this method, if the underlying Database is
+     * transactional then a transaction must be active when creating the
+     * iterator.</p>
+     *
+     * @param value the new value.
+     *
+     * @throws UnsupportedOperationException if the collection is a {@link
+     * StoredKeySet} (the set returned by {@link java.util.Map#keySet}), or if
+     * duplicates are sorted since this would change the iterator position, or
+     * if the collection is indexed, or if the collection is read-only.
+     *
+     * @throws IllegalArgumentException if an entity value binding is used and
+     * the primary key of the value given is different than the existing stored
+     * primary key.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public void set(E value) {
+
+        if (!coll.hasValues()) throw new UnsupportedOperationException();
+        if (!setAndRemoveAllowed) throw new IllegalStateException();
+        try {
+            moveToCurrent();
+            cursor.putCurrent(value);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Removes the last element that was returned by next or previous (optional
+     * operation).
+     * This method conforms to the {@link ListIterator#remove} interface except
+     * that when the collection is a list and the RECNO-RENUMBER access method
+     * is not used, list indices will not be renumbered.
+     *
+     * <p>In order to call this method, if the underlying Database is
+     * transactional then a transaction must be active when creating the
+     * iterator.</p>
+     *
+     * <p>Note that for the JE product, RECNO-RENUMBER databases are not
+     * supported, and therefore list indices are never renumbered by this
+     * method.</p>
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public void remove() {
+
+        if (!setAndRemoveAllowed) throw new IllegalStateException();
+        try {
+            moveToCurrent();
+            cursor.delete();
+            setAndRemoveAllowed = false;
+            toNext = MOVE_NEXT;
+            toPrevious = MOVE_PREV;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Inserts the specified element into the list or inserts a duplicate into
+     * other types of collections (optional operation).
+     * This method conforms to the {@link ListIterator#add} interface when
+     * the collection is a list and the RECNO-RENUMBER access method is used.
+     * Otherwise, this method may only be called when duplicates are allowed.
+     * If duplicates are unsorted, the new value will be inserted in the same
+     * manner as list elements.
+     * If duplicates are sorted, the new value will be inserted in sort order.
+     *
+     * <p>Note that for the JE product, RECNO-RENUMBER databases are not
+     * supported, and therefore this method may only be used to add
+     * duplicates.</p>
+     *
+     * @param value the new value.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is indexed, or if the collection is read-only, or if
+     * the collection is a list and the RECNO-RENUMBER access method was not
+     * used, or if the collection is not a list and duplicates are not allowed.
+     *
+     * @throws IllegalStateException if the collection is empty and is not a
+     * list with RECNO-RENUMBER access.
+     *
+     * @throws IllegalArgumentException if a duplicate value is being added
+     * that already exists and duplicates are sorted.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public void add(E value) {
+
+        coll.checkIterAddAllowed();
+        try {
+            OperationStatus status = OperationStatus.SUCCESS;
+            if (toNext != 0 && toPrevious != 0) { // database is empty
+                if (coll.view.keysRenumbered) { // recno-renumber database
+                    /*
+                     * Close cursor during append and then reopen to support
+                     * CDB restriction that append may not be called with a
+                     * cursor open; note the append will still fail if the
+                     * application has another cursor open.
+                     */
+                    close();
+                    status = coll.view.append(value, null, null);
+                    cursor = new DataCursor(coll.view, writeAllowed);
+                    reset();
+                    next(); // move past new record
+                } else { // hash/btree with duplicates
+                    throw new IllegalStateException(
+                        "Collection is empty, cannot add() duplicate");
+                }
+            } else { // database is not empty
+                boolean putBefore = false;
+                if (coll.view.keysRenumbered) { // recno-renumber database
+                    moveToCurrent();
+                    if (hasNext()) {
+                        status = cursor.putBefore(value);
+                        putBefore = true;
+                    } else {
+                        status = cursor.putAfter(value);
+                    }
+                } else { // hash/btree with duplicates
+                    if (coll.areDuplicatesOrdered()) {
+                        status = cursor.putNoDupData(null, value, null, true);
+                    } else if (toNext == 0) {
+                        status = cursor.putBefore(value);
+                        putBefore = true;
+                    } else {
+                        status = cursor.putAfter(value);
+                    }
+                }
+                if (putBefore) {
+                    toPrevious = 0;
+                    toNext = MOVE_NEXT;
+                }
+            }
+            if (status == OperationStatus.KEYEXIST) {
+                throw new IllegalArgumentException("Duplicate value");
+            } else if (status != OperationStatus.SUCCESS) {
+                throw new IllegalArgumentException("Could not insert: " +
+                                                    status);
+            }
+            setAndRemoveAllowed = false;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    // --- end Iterator/ListIterator methods ---
+
+    /**
+     * Resets cursor to an uninitialized state.
+     */
+    private void reset() {
+
+        toNext = MOVE_FIRST;
+        toPrevious = MOVE_PREV;
+        toCurrent = 0;
+        currentData = null;
+        /*
+         * Initialize cursor at beginning to avoid "initial previous == last"
+         * behavior when cursor is uninitialized.
+         *
+         * FindBugs whines about us ignoring the return value from hasNext().
+         */
+        hasNext();
+    }
+
+    /**
+     * Returns the number of elements having the same key value as the key
+     * value of the element last returned by next() or previous().  If no
+     * duplicates are allowed, 1 is always returned.
+     * This method does not exist in the standard {@link Iterator} or {@link
+     * ListIterator} interfaces.
+     *
+     * @return the number of duplicates.
+     *
+     * @throws IllegalStateException if next() or previous() has not been
+     * called for this iterator, or if remove() or add() were called after
+     * the last call to next() or previous().
+     */
+    public int count() {
+
+        if (!setAndRemoveAllowed) throw new IllegalStateException();
+        try {
+            moveToCurrent();
+            return cursor.count();
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Closes this iterator.
+     * This method does not exist in the standard {@link Iterator} or {@link
+     * ListIterator} interfaces.
+     *
+     * <p>After being closed, only the {@link #hasNext} and {@link
+     * #hasPrevious} methods may be called and these will return false.  {@link
+     * #close()} may also be called again and will do nothing.  If other
+     * methods are called a <code>NullPointerException</code> will generally be
+     * thrown.</p>
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public void close() {
+
+        if (cursor != null) {
+            coll.closeCursor(cursor);
+            cursor = null;
+        }
+    }
+
+    /**
+     * Returns the collection associated with this iterator.
+     * This method does not exist in the standard {@link Iterator} or {@link
+     * ListIterator} interfaces.
+     *
+     * @return the collection associated with this iterator.
+     */
+    public final StoredCollection<E> getCollection() {
+
+        return coll;
+    }
+
+    // --- begin BaseIterator methods ---
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public final ListIterator<E> dup() {
+
+        try {
+            StoredIterator o = (StoredIterator) super.clone();
+            o.cursor = cursor.cloneCursor();
+            return o;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public final boolean isCurrentData(Object currentData) {
+
+        return (this.currentData == currentData);
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public final boolean moveToIndex(int index) {
+
+        try {
+            OperationStatus status =
+                cursor.getSearchKey(Integer.valueOf(index),
+                                    null, lockForWrite);
+            setAndRemoveAllowed = (status == OperationStatus.SUCCESS);
+            return setAndRemoveAllowed;
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    // --- end BaseIterator methods ---
+
+    private void moveToCurrent()
+        throws DatabaseException {
+
+        if (toCurrent != 0) {
+            move(toCurrent);
+            toCurrent = 0;
+        }
+    }
+
+    private OperationStatus move(int direction)
+        throws DatabaseException {
+
+        switch (direction) {
+            case MOVE_NEXT:
+                if (coll.iterateDuplicates()) {
+                    return cursor.getNext(lockForWrite);
+                } else {
+                    return cursor.getNextNoDup(lockForWrite);
+                }
+            case MOVE_PREV:
+                if (coll.iterateDuplicates()) {
+                    return cursor.getPrev(lockForWrite);
+                } else {
+                    return cursor.getPrevNoDup(lockForWrite);
+                }
+            case MOVE_FIRST:
+                return cursor.getFirst(lockForWrite);
+            default:
+                throw new IllegalArgumentException(String.valueOf(direction));
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredKeySet.java b/src/com/sleepycat/collections/StoredKeySet.java
new file mode 100644
index 0000000000000000000000000000000000000000..5db11f07d52d08ea13e80397ffd2f41f6fe2dffb
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredKeySet.java
@@ -0,0 +1,136 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredKeySet.java,v 1.34 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Set;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+
+/**
+ * The Set returned by Map.keySet(), which can also be constructed directly
+ * if a Map is not needed.
+ * Since this collection is a set, it contains only one element for each key,
+ * even when duplicates are allowed.  Key set iterators are therefore
+ * particularly useful for enumerating the unique keys of a store or index that
+ * allows duplicates.
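+ *
+ * <p>A minimal construction sketch (illustrative only; <code>db</code> is
+ * assumed to be an open <code>Database</code> and <code>keyBinding</code> an
+ * <code>EntryBinding</code> for its keys):</p>
+ * <pre>
+ *  Set&lt;String&gt; keys = new StoredKeySet&lt;String&gt;(db, keyBinding, false);
+ *  Iterator&lt;String&gt; i = keys.iterator();
+ *  try {
+ *      while (i.hasNext()) {
+ *          String key = i.next();  // each distinct key is returned once
+ *      }
+ *  } finally {
+ *      StoredIterator.close(i);
+ *  }
+ * </pre>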
+ *
+ * @author Mark Hayes
+ */
+public class StoredKeySet<K> extends StoredCollection<K> implements Set<K> {
+
+    /**
+     * Creates a key set view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredKeySet(Database database,
+                        EntryBinding<K> keyBinding,
+                        boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, null, null,
+                           writeAllowed, null));
+    }
+
+    StoredKeySet(DataView keySetView) {
+
+        super(keySetView);
+    }
+
+    /**
+     * Adds the specified key to this set if it is not already present
+     * (optional operation).
+     * When a key is added, the value in the underlying data store will be
+     * empty.
+     * This method conforms to the {@link Set#add} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is indexed, or
+     * if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean add(K key) {
+
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            OperationStatus status = cursor.putNoOverwrite(key, null, false);
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return (status == OperationStatus.SUCCESS);
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Removes the specified key from this set if it is present (optional
+     * operation).
+     * If duplicates are allowed, this method removes all duplicates for the
+     * given key.
+     * This method conforms to the {@link Set#remove} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean remove(Object key) {
+
+        return removeKey(key, null);
+    }
+
+    /**
+     * Returns true if this set contains the specified key.
+     * This method conforms to the {@link Set#contains} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean contains(Object key) {
+
+        return containsKey(key);
+    }
+
+    boolean hasValues() {
+
+        return false;
+    }
+
+    K makeIteratorData(BaseIterator iterator,
+                       DatabaseEntry keyEntry,
+                       DatabaseEntry priKeyEntry,
+                       DatabaseEntry valueEntry) {
+
+        return (K) view.makeKey(keyEntry, priKeyEntry);
+    }
+
+    boolean iterateDuplicates() {
+
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredList.java b/src/com/sleepycat/collections/StoredList.java
new file mode 100644
index 0000000000000000000000000000000000000000..c53e7a5b0cf04491ada67c075cc4e10cfc403b8e
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredList.java
@@ -0,0 +1,622 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredList.java,v 1.53 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRangeException;
+
+/**
+ * <!-- begin JE only -->
+ * @hidden
+ * <!-- end JE only -->
+ * A List view of a {@link Database}.
+ *
+ * <p>For all stored lists the keys of the underlying Database
+ * must have record number format, and therefore the store or index must be a
+ * RECNO, RECNO-RENUMBER, QUEUE, or BTREE-RECNUM database.  Only RECNO-RENUMBER
+ * allows true list behavior where record numbers are renumbered following the
+ * position of an element that is added or removed.  For the other access
+ * methods (RECNO, QUEUE, and BTREE-RECNUM), stored Lists are most useful as
+ * read-only collections where record numbers are not required to be
+ * sequential.</p>
+ *
+ * <p>In addition to the standard List methods, this class provides the
+ * following methods for stored lists only.  Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #append(Object)}</li>
+ * </ul>
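+ *
+ * <p>A minimal read-only sketch (illustrative only; <code>db</code> is
+ * assumed to be an open record-number keyed <code>Database</code> and
+ * <code>valueBinding</code> an <code>EntryBinding</code> for its values):</p>
+ * <pre>
+ *  List&lt;String&gt; list = new StoredList&lt;String&gt;(db, valueBinding, false);
+ *  ListIterator&lt;String&gt; i = list.listIterator();
+ *  try {
+ *      while (i.hasNext()) {
+ *          String value = i.next();
+ *          // process value
+ *      }
+ *  } finally {
+ *      StoredIterator.close(i);
+ *  }
+ * </pre>
+ *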
+ * @author Mark Hayes
+ */
+public class StoredList<E> extends StoredCollection<E> implements List<E> {
+
+    private static final EntryBinding DEFAULT_KEY_BINDING =
+        new IndexKeyBinding(1);
+
+    private int baseIndex = 1;
+    private boolean isSubList;
+
+    /**
+     * Creates a list view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredList(Database database,
+                      EntryBinding<E> valueBinding,
+                      boolean writeAllowed) {
+
+        super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding, null,
+                           writeAllowed, null));
+    }
+
+    /**
+     * Creates a list entity view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredList(Database database,
+                      EntityBinding<E> valueEntityBinding,
+                      boolean writeAllowed) {
+
+        super(new DataView(database, DEFAULT_KEY_BINDING, null,
+                           valueEntityBinding, writeAllowed, null));
+    }
+
+    /**
+     * Creates a list view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created list.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param keyAssigner is used by the {@link #add} and {@link #append}
+     * methods to assign primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredList(Database database,
+                      EntryBinding<E> valueBinding,
+                      PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding,
+                           null, true, keyAssigner));
+    }
+
+    /**
+     * Creates a list entity view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created list.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param keyAssigner is used by the {@link #add} and {@link #append}
+     * methods to assign primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredList(Database database,
+                      EntityBinding<E> valueEntityBinding,
+                      PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, DEFAULT_KEY_BINDING, null,
+                           valueEntityBinding, true, keyAssigner));
+    }
+
+    private StoredList(DataView view, int baseIndex) {
+
+        super(view);
+        this.baseIndex = baseIndex;
+        this.isSubList = true;
+    }
+
+    /**
+     * Inserts the specified element at the specified position in this list
+     * (optional operation).
+     * This method conforms to the {@link List#add(int, Object)} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is indexed, or if the collection is read-only, or if
+     * the RECNO-RENUMBER access method was not used.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public void add(int index, E value) {
+
+        checkIterAddAllowed();
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            OperationStatus status =
+                cursor.getSearchKey(Long.valueOf(index), null, false);
+            if (status == OperationStatus.SUCCESS) {
+                cursor.putBefore(value);
+                closeCursor(cursor);
+            } else {
+                closeCursor(cursor);
+                cursor = null;
+                view.append(value, null, null);
+            }
+            commitAutoCommit(doAutoCommit);
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Appends the specified element to the end of this list (optional
+     * operation).
+     * This method conforms to the {@link List#add(Object)} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is indexed, or if the collection is read-only, or if
+     * the RECNO-RENUMBER access method was not used.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean add(E value) {
+
+        checkIterAddAllowed();
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            view.append(value, null, null);
+            commitAutoCommit(doAutoCommit);
+            return true;
+        } catch (Exception e) {
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Appends a given value, returning the newly assigned index.
+     * If a {@link com.sleepycat.collections.PrimaryKeyAssigner} is associated
+     * with the Store for this list, it will be used to assign the returned
+     * index.  Otherwise, the Store must be a QUEUE or RECNO database and the
+     * next available record number is assigned as the index.  This method does
+     * not exist in the standard {@link List} interface.
+     *
+     * @param value the value to be appended.
+     *
+     * @return the assigned index.
+     *
+     * @throws UnsupportedOperationException if the collection is indexed, or
+     * if the collection is read-only, or if the Store has no {@link
+     * com.sleepycat.collections.PrimaryKeyAssigner} and is not a QUEUE or
+     * RECNO database.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public int append(E value) {
+
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            Object[] key = new Object[1];
+            view.append(value, key, null);
+            commitAutoCommit(doAutoCommit);
+            return ((Number) key[0]).intValue();
+        } catch (Exception e) {
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    void checkIterAddAllowed()
+        throws UnsupportedOperationException {
+
+        if (isSubList) {
+            throw new UnsupportedOperationException("cannot add to subList");
+        }
+        if (!view.keysRenumbered) { // requires RECNO-RENUMBER access
+            throw new UnsupportedOperationException(
+                "requires renumbered keys");
+        }
+    }
+
+    /**
+     * Inserts all of the elements in the specified collection into this list
+     * at the specified position (optional operation).
+     * This method conforms to the {@link List#addAll(int, Collection)}
+     * interface.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is indexed, or if the collection is read-only, or if
+     * the RECNO-RENUMBER access method was not used.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean addAll(int index, Collection<? extends E> coll) {
+
+        checkIterAddAllowed();
+        DataCursor cursor = null;
+	Iterator<? extends E> i = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            i = storedOrExternalIterator(coll);
+            if (!i.hasNext()) {
+                return false;
+            }
+            cursor = new DataCursor(view, true);
+            OperationStatus status =
+                cursor.getSearchKey(Long.valueOf(index), null, false);
+            if (status == OperationStatus.SUCCESS) {
+                while (i.hasNext()) {
+                    cursor.putBefore(i.next());
+                }
+                closeCursor(cursor);
+            } else {
+                closeCursor(cursor);
+                cursor = null;
+                while (i.hasNext()) {
+                    view.append(i.next(), null, null);
+                }
+            }
+            StoredIterator.close(i);
+            commitAutoCommit(doAutoCommit);
+            return true;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            StoredIterator.close(i);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Returns true if this list contains the specified element.
+     * This method conforms to the {@link List#contains} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean contains(Object value) {
+
+        return containsValue(value);
+    }
+
+    /**
+     * Returns the element at the specified position in this list.
+     * This method conforms to the {@link List#get} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public E get(int index) {
+
+        return (E) getValue(Long.valueOf(index));
+    }
+
+    /**
+     * Returns the index in this list of the first occurrence of the specified
+     * element, or -1 if this list does not contain this element.
+     * This method conforms to the {@link List#indexOf} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public int indexOf(Object value) {
+
+        return indexOf(value, true);
+    }
+
+    /**
+     * Returns the index in this list of the last occurrence of the specified
+     * element, or -1 if this list does not contain this element.
+     * This method conforms to the {@link List#lastIndexOf} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public int lastIndexOf(Object value) {
+
+        return indexOf(value, false);
+    }
+
+    private int indexOf(Object value, boolean findFirst) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            OperationStatus status = cursor.findValue(value, findFirst);
+            return (status == OperationStatus.SUCCESS)
+                    ? (cursor.getCurrentRecordNumber() - baseIndex)
+                    : (-1);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    int getIndexOffset() {
+
+        return baseIndex;
+    }
+
+    /**
+     * Returns a list iterator of the elements in this list (in proper
+     * sequence).
+     * The iterator will be read-only if the collection is read-only.
+     * This method conforms to the {@link List#listIterator()} interface.
+     *
+     * <p>For information on cursor stability and iterator block size, see
+     * {@link #iterator()}.</p>
+     *
+     * @return a {@link ListIterator} for this collection.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
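+     *
+     * <p>A sketch of safe iteration; stored iterators must be closed, and
+     * {@code list} and the {@code process} callback are hypothetical:</p>
+     * <pre>
+     *    ListIterator&lt;E&gt; i = list.listIterator();
+     *    try {
+     *        while (i.hasNext()) {
+     *            process(i.next());
+     *        }
+     *    } finally {
+     *        StoredIterator.close(i);
+     *    }
+     * </pre>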
+     *
+     * @see #isWriteAllowed
+     */
+    public ListIterator<E> listIterator() {
+
+        return blockIterator();
+    }
+
+    /**
+     * Returns a list iterator of the elements in this list (in proper
+     * sequence), starting at the specified position in this list.
+     * The iterator will be read-only if the collection is read-only.
+     * This method conforms to the {@link List#listIterator(int)} interface.
+     *
+     * <p>For information on cursor stability and iterator block size, see
+     * {@link #iterator()}.</p>
+     *
+     * @return a {@link ListIterator} for this collection.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     *
+     * @see #isWriteAllowed
+     */
+    public ListIterator<E> listIterator(int index) {
+
+        BlockIterator i = blockIterator();
+        if (i.moveToIndex(index)) {
+            return i;
+        } else {
+            throw new IndexOutOfBoundsException(String.valueOf(index));
+        }
+    }
+
+    /**
+     * Removes the element at the specified position in this list (optional
+     * operation).
+     * This method conforms to the {@link List#remove(int)} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public E remove(int index) {
+
+        try {
+            Object[] oldVal = new Object[1];
+            removeKey(Long.valueOf(index), oldVal);
+            return (E) oldVal[0];
+        } catch (IllegalArgumentException e) {
+            throw new IndexOutOfBoundsException(e.getMessage());
+        }
+    }
+
+    /**
+     * Removes the first occurrence in this list of the specified element
+     * (optional operation).
+     * This method conforms to the {@link List#remove(Object)} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is a sublist, or
+     * if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean remove(Object value) {
+
+        return removeValue(value);
+    }
+
+    /**
+     * Replaces the element at the specified position in this list with the
+     * specified element (optional operation).
+     * This method conforms to the {@link List#set} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is indexed, or
+     * if the collection is read-only.
+     *
+     * @throws IllegalArgumentException if an entity value binding is used and
+     * the primary key of the value given is different than the existing stored
+     * primary key.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public E set(int index, E value) {
+
+        try {
+            return (E) putKeyValue(Long.valueOf(index), value);
+        } catch (IllegalArgumentException e) {
+            throw new IndexOutOfBoundsException(e.getMessage());
+        }
+    }
+
+    /**
+     * Returns a view of the portion of this list between the specified
+     * fromIndex, inclusive, and toIndex, exclusive.
+     * Note that add() and remove() may not be called for the returned sublist.
+     * This method conforms to the {@link List#subList} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
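+     *
+     * <p>A minimal sketch (the indexes are illustrative):</p>
+     * <pre>
+     *    List&lt;E&gt; middle = list.subList(10, 20);
+     *    E first = middle.get(0);  // element at index 10 of this list
+     * </pre>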
+     */
+    public List<E> subList(int fromIndex, int toIndex) {
+
+        if (fromIndex < 0 || fromIndex > toIndex) {
+            throw new IndexOutOfBoundsException(String.valueOf(fromIndex));
+        }
+        try {
+            int newBaseIndex = baseIndex + fromIndex;
+            return new StoredList<E>(
+                view.subView(Long.valueOf(fromIndex), true,
+                             Long.valueOf(toIndex), false,
+                             new IndexKeyBinding(newBaseIndex)),
+                newBaseIndex);
+        } catch (KeyRangeException e) {
+            throw new IndexOutOfBoundsException(e.getMessage());
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Compares the specified object with this list for equality.
+     * A value comparison is performed: the stored values are compared rather
+     * than calling the equals() method of each element.
+     * This method conforms to the {@link List#equals} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean equals(Object other) {
+
+        if (!(other instanceof List)) return false;
+        List otherList = (List) other;
+        StoredIterator i1 = null;
+        ListIterator i2 = null;
+        try {
+            i1 = storedIterator();
+            i2 = storedOrExternalListIterator(otherList);
+            while (i1.hasNext()) {
+                if (!i2.hasNext()) return false;
+                if (i1.nextIndex() != i2.nextIndex()) return false;
+                Object o1 = i1.next();
+                Object o2 = i2.next();
+                if (o1 == null) {
+                    if (o2 != null) return false;
+                } else {
+                    if (!o1.equals(o2)) return false;
+                }
+            }
+            if (i2.hasNext()) return false;
+            return true;
+        } finally {
+            if (i1 != null) {
+                i1.close();
+            }
+            StoredIterator.close(i2);
+        }
+    }
+
+    /**
+     * Returns a StoredIterator if the given collection is a StoredCollection,
+     * else returns a regular/external ListIterator.  The iterator returned
+     * should be closed with the static method StoredIterator.close(Iterator).
+     */
+    final ListIterator storedOrExternalListIterator(List list) {
+
+        if (list instanceof StoredCollection) {
+            return ((StoredCollection) list).storedIterator();
+        } else {
+            return list.listIterator();
+        }
+    }
+
+    /*
+     * Defined so that FindBugs does not warn about implementing equals()
+     * without hashCode().
+     */
+    public int hashCode() {
+        return super.hashCode();
+    }
+
+    E makeIteratorData(BaseIterator iterator,
+                       DatabaseEntry keyEntry,
+                       DatabaseEntry priKeyEntry,
+                       DatabaseEntry valueEntry) {
+
+        return (E) view.makeValue(priKeyEntry, valueEntry);
+    }
+
+    boolean hasValues() {
+
+        return true;
+    }
+
+    private static class IndexKeyBinding extends RecordNumberBinding {
+
+        private int baseIndex;
+
+        private IndexKeyBinding(int baseIndex) {
+
+            this.baseIndex = baseIndex;
+        }
+
+        public Long entryToObject(DatabaseEntry data) {
+
+            return Long.valueOf(entryToRecordNumber(data) - baseIndex);
+        }
+
+        public void objectToEntry(Long object, DatabaseEntry data) {
+
+            recordNumberToEntry(((Number) object).intValue() + baseIndex,
+                                data);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredMap.java b/src/com/sleepycat/collections/StoredMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8340225779457f013873f605ba5005be1bb5cf6
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredMap.java
@@ -0,0 +1,693 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredMap.java,v 1.55.2.1 2008/07/15 19:08:41 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRangeException;
+
+/**
+ * A Map view of a {@link Database}.
+ *
+ * <p>In addition to the standard Map methods, this class provides the
+ * following methods for stored maps only.  Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #duplicates}</li>
+ * <li>{@link #duplicatesMap}</li>
+ * <li>{@link #append}</li>
+ * </ul>
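+ *
+ * <p>A minimal construction sketch; the database handle and bindings shown
+ * are hypothetical:</p>
+ * <pre>
+ *    Database db = ...;
+ *    EntryBinding&lt;String&gt; keyBinding = ...;
+ *    EntryBinding&lt;Long&gt; valueBinding = ...;
+ *    StoredMap&lt;String,Long&gt; map =
+ *        new StoredMap&lt;String,Long&gt;(db, keyBinding, valueBinding, true);
+ * </pre>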
+ *
+ * @author Mark Hayes
+ */
+public class StoredMap<K,V> extends StoredContainer
+    implements ConcurrentMap<K,V> {
+
+    private StoredKeySet<K> keySet;
+    private StoredEntrySet<K,V> entrySet;
+    private StoredValueSet<V> valueSet;
+
+    /**
+     * Creates a map view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredMap(Database database,
+                     EntryBinding<K> keyBinding,
+                     EntryBinding<V> valueBinding,
+                     boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, valueBinding, null,
+                           writeAllowed, null));
+        initView();
+    }
+
+    /**
+     * Creates a map view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created map.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param keyAssigner is used by the {@link #append} method to assign
+     * primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredMap(Database database,
+                     EntryBinding<K> keyBinding,
+                     EntryBinding<V> valueBinding,
+                     PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, keyBinding, valueBinding, null,
+                           true, keyAssigner));
+        initView();
+    }
+
+    /**
+     * Creates a map entity view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredMap(Database database,
+                     EntryBinding<K> keyBinding,
+                     EntityBinding<V> valueEntityBinding,
+                     boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, null, valueEntityBinding,
+                           writeAllowed, null));
+        initView();
+    }
+
+    /**
+     * Creates a map entity view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created map.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param keyAssigner is used by the {@link #append} method to assign
+     * primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredMap(Database database,
+                     EntryBinding<K> keyBinding,
+                     EntityBinding<V> valueEntityBinding,
+                     PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, keyBinding, null, valueEntityBinding,
+                           true, keyAssigner));
+        initView();
+    }
+
+    StoredMap(DataView view) {
+
+        super(view);
+        initView();
+    }
+
+    /**
+     * Override this method to initialize view-dependent fields.
+     */
+    void initAfterClone() {
+        initView();
+    }
+
+    /**
+     * The keySet, entrySet and valueSet are created during Map construction
+     * rather than lazily when requested (as done with the java.util.Map
+     * implementations).  This is done to avoid synchronization every time they
+     * are requested.  Since they are requested often but a StoredMap is
+     * created infrequently, this gives the best performance.  The additional
+     * views are small objects and are cheap to construct.
+     */
+    private void initView() {
+
+        /* entrySet */
+        if (areKeyRangesAllowed()) {
+            entrySet = new StoredSortedEntrySet<K,V>(view);
+        } else {
+            entrySet = new StoredEntrySet<K,V>(view);
+        }
+
+        /* keySet */
+        DataView newView = view.keySetView();
+        if (areKeyRangesAllowed()) {
+            keySet = new StoredSortedKeySet<K>(newView);
+        } else {
+            keySet = new StoredKeySet<K>(newView);
+        }
+
+        /* valueSet */
+        newView = view.valueSetView();
+        if (areKeyRangesAllowed() && newView.canDeriveKeyFromValue()) {
+            valueSet = new StoredSortedValueSet<V>(newView);
+        } else {
+            valueSet = new StoredValueSet<V>(newView);
+        }
+    }
+
+    /**
+     * Returns the value to which this map maps the specified key.  If
+     * duplicates are allowed, this method returns the first duplicate, in the
+     * configured duplicate order, for the specified key.
+     *
+     * This method conforms to the {@link Map#get} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public V get(Object key) {
+
+        return (V) getValue(key);
+    }
+
+    /**
+     * Associates the specified value with the specified key in this map
+     * (optional operation).  If duplicates are allowed and the specified key
+     * is already mapped to a value, this method appends the new duplicate
+     * after the existing duplicates.  This method conforms to the {@link
+     * Map#put} interface.
+     *
+     * <p>The key parameter may be null if an entity binding is used and the
+     * key will be derived from the value (entity) parameter.  If an entity
+     * binding is used and the key parameter is non-null, then the key
+     * parameter must be equal to the key derived from the value parameter.</p>
+     *
+     * @return the previous value associated with the specified key, or null
+     * if there was no mapping for the key or if duplicates are allowed.
+     *
+     * @throws UnsupportedOperationException if the collection is indexed, or
+     * if the collection is read-only.
+     *
+     * @throws IllegalArgumentException if an entity value binding is used and
+     * the primary key of the value given is different than the existing stored
+     * primary key.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public V put(K key, V value) {
+
+        return (V) putKeyValue(key, value);
+    }
+
+    /**
+     * Appends a given value, returning the newly assigned key.  If a {@link
+     * PrimaryKeyAssigner} is associated with the Store for this map, it is
+     * used to assign the returned key.  Otherwise, the Store must be a QUEUE
+     * or RECNO database and the next available record number is assigned as
+     * the key.  This method does not exist in the standard {@link Map}
+     * interface.
+     *
+     * <p>Note that for the JE product, QUEUE and RECNO databases are not
+     * supported, and therefore a PrimaryKeyAssigner must be associated with
+     * the map in order to call this method.</p>
+     *
+     * @param value the value to be appended.
+     *
+     * @return the assigned key.
+     *
+     * @throws UnsupportedOperationException if the collection is indexed, or
+     * if the collection is read-only, or if the Store has no {@link
+     * PrimaryKeyAssigner} and is not a QUEUE or RECNO database.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
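+     *
+     * <p>A minimal sketch, assuming the map was constructed with a
+     * hypothetical {@link PrimaryKeyAssigner}:</p>
+     * <pre>
+     *    K key = map.append(value);  // key chosen by the assigner
+     * </pre>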
+     */
+    public K append(V value) {
+
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            Object[] key = new Object[1];
+            view.append(value, key, null);
+            commitAutoCommit(doAutoCommit);
+            return (K) key[0];
+        } catch (Exception e) {
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Removes the mapping for this key from this map if present (optional
+     * operation).  If duplicates are allowed, this method removes all
+     * duplicates for the given key.  This method conforms to the {@link
+     * Map#remove} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public V remove(Object key) {
+
+        Object[] oldVal = new Object[1];
+        removeKey(key, oldVal);
+        return (V) oldVal[0];
+    }
+
+    /**
+     * If the specified key is not already associated with a value, associates
+     * it with the given value.  This method conforms to the {@link
+     * ConcurrentMap#putIfAbsent} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
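+     *
+     * <p>A minimal get-or-create sketch (names are illustrative):</p>
+     * <pre>
+     *    V existing = map.putIfAbsent(key, newValue);
+     *    V current = (existing != null) ? existing : newValue;
+     * </pre>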
+     */
+    public V putIfAbsent(K key, V value) {
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true);
+            V oldValue;
+            while (true) {
+                OperationStatus status =
+                    cursor.putNoOverwrite(key, value, false /*useCurrentKey*/);
+                if (status == OperationStatus.SUCCESS) {
+                    /* We inserted the key.  Return null.  */
+                    oldValue = null;
+                    break;
+                } else {
+                    status = cursor.getSearchKey(key, null /*value*/,
+                                                 false /*lockForWrite*/);
+                    if (status == OperationStatus.SUCCESS) {
+                        /* The key is present. Return the current value. */
+                        oldValue = (V) cursor.getCurrentValue();
+                        break;
+                    } else {
+
+                        /*
+                         * If Serializable isolation is not configured, another
+                         * thread can delete the record after our attempt to
+                         * insert it failed above.  Loop back and try again.
+                         */
+                        continue;
+                    }
+                }
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return oldValue;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Removes the entry for the given key only if it is currently mapped to
+     * the given value.  This method conforms to the {@link
+     * ConcurrentMap#remove(Object,Object)} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public boolean remove(Object key, Object value) {
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true, key);
+            OperationStatus status = cursor.getFirst(true /*lockForWrite*/);
+            boolean removed;
+            if (status == OperationStatus.SUCCESS &&
+                cursor.getCurrentValue().equals(value)) {
+                cursor.delete();
+                removed = true;
+            } else {
+                removed = false;
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return removed;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Replaces the entry for the given key only if it is currently mapped to
+     * some value.  This method conforms to the {@link
+     * ConcurrentMap#replace(Object,Object)} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public V replace(K key, V value) {
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true, key);
+            OperationStatus status = cursor.getFirst(true /*lockForWrite*/);
+            V oldValue;
+            if (status == OperationStatus.SUCCESS) {
+                oldValue = (V) cursor.getCurrentValue();
+                cursor.putCurrent(value);
+            } else {
+                oldValue = null;
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return oldValue;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Replaces the entry for the given key only if it is currently mapped to
+     * the given value.  This method conforms to the {@link
+     * ConcurrentMap#replace(Object,Object,Object)} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public boolean replace(K key, V oldValue, V newValue) {
+        DataCursor cursor = null;
+        boolean doAutoCommit = beginAutoCommit();
+        try {
+            cursor = new DataCursor(view, true, key);
+            OperationStatus status = cursor.getFirst(true /*lockForWrite*/);
+            boolean replaced;
+            if (status == OperationStatus.SUCCESS &&
+                cursor.getCurrentValue().equals(oldValue)) {
+                cursor.putCurrent(newValue);
+                replaced = true;
+            } else {
+                replaced = false;
+            }
+            closeCursor(cursor);
+            commitAutoCommit(doAutoCommit);
+            return replaced;
+        } catch (Exception e) {
+            closeCursor(cursor);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Returns true if this map contains the specified key.  This method
+     * conforms to the {@link Map#containsKey} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public boolean containsKey(Object key) {
+
+        return super.containsKey(key);
+    }
+
+    /**
+     * Returns true if this map contains the specified value.  When an entity
+     * binding is used, this method returns whether the map contains the
+     * primary key and value mapping of the entity.  This method conforms to
+     * the {@link Map#containsValue} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public boolean containsValue(Object value) {
+
+        return super.containsValue(value);
+    }
+
+    /**
+     * Copies all of the mappings from the specified map to this map (optional
+     * operation).  When duplicates are allowed, the mappings in the specified
+     * map are effectively appended to the existing mappings in this map; that
+     * is, no previously existing mappings in this map are replaced.  This
+     * method conforms to the {@link Map#putAll} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only, or
+     * if the collection is indexed.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public void putAll(Map<? extends K, ? extends V> map) {
+
+        boolean doAutoCommit = beginAutoCommit();
+        Iterator i = null;
+        try {
+            Collection coll = map.entrySet();
+            i = storedOrExternalIterator(coll);
+            while (i.hasNext()) {
+                Map.Entry entry = (Map.Entry) i.next();
+                putKeyValue(entry.getKey(), entry.getValue());
+            }
+            StoredIterator.close(i);
+            commitAutoCommit(doAutoCommit);
+        } catch (Exception e) {
+            StoredIterator.close(i);
+            throw handleException(e, doAutoCommit);
+        }
+    }
+
+    /**
+     * Returns a set view of the keys contained in this map.  A {@link
+     * java.util.SortedSet} is returned if the map supports key ranges.  The
+     * returned collection will be read-only if the map is read-only.  This
+     * method conforms to the {@link Map#keySet()} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @return a {@link StoredKeySet} or a {@link StoredSortedKeySet} for this
+     * map.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     *
+     * @see #areKeyRangesAllowed
+     * @see #isWriteAllowed
+     */
+    public Set<K> keySet() {
+
+        return keySet;
+    }
+
+    /**
+     * Returns a set view of the mappings contained in this map.  A {@link
+     * java.util.SortedSet} is returned if the map supports key ranges.  The
+     * returned collection will be read-only if the map is read-only.  This
+     * method conforms to the {@link Map#entrySet()} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @return a {@link StoredEntrySet} or a {@link StoredSortedEntrySet} for
+     * this map.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     *
+     * @see #areKeyRangesAllowed
+     * @see #isWriteAllowed
+     */
+    public Set<Map.Entry<K,V>> entrySet() {
+
+        return entrySet;
+    }
+
+    /**
+     * Returns a collection view of the values contained in this map.  A {@link
+     * java.util.SortedSet} is returned if the map supports key ranges and the
+     * value/entity binding can be used to derive the map's key from its
+     * value/entity object.  The returned collection will be read-only if the
+     * map is read-only.  This method conforms to the {@link Map#values()}
+     * interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @return a {@link StoredValueSet} or a {@link StoredSortedValueSet} for
+     * this map.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     *
+     * @see #areKeyRangesAllowed
+     * @see #isWriteAllowed
+     */
+    public Collection<V> values() {
+
+        return valueSet;
+    }
+
+    /**
+     * Returns a new collection containing the values mapped to the given key
+     * in this map.  This collection's iterator() method is particularly useful
+     * for iterating over the duplicates for a given key, since this is not
+     * supported by the standard Map interface.  This method does not exist in
+     * the standard {@link Map} interface.
+     *
+     * <p>If no mapping for the given key is present, an empty collection is
+     * returned.  If duplicates are not allowed, at most a single value will be
+     * in the collection returned.  If duplicates are allowed, the returned
+     * collection's add() method may be used to add values for the given
+     * key.</p>
+     *
+     * @param key is the key for which values are to be returned.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
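+     *
+     * <p>A sketch of iterating the duplicates for a key; names are
+     * illustrative, and the iterator must be closed:</p>
+     * <pre>
+     *    Iterator&lt;V&gt; i = map.duplicates(key).iterator();
+     *    try {
+     *        while (i.hasNext()) {
+     *            V value = i.next();
+     *            // process the duplicate value
+     *        }
+     *    } finally {
+     *        StoredIterator.close(i);
+     *    }
+     * </pre>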
+     */
+    public Collection<V> duplicates(K key) {
+
+        try {
+            DataView newView = view.valueSetView(key);
+            return new StoredValueSet<V>(newView);
+        } catch (KeyRangeException e) {
+            return Collections.<V>emptySet();
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Returns a new map from primary key to value for the subset of records
+     * having a given secondary key (duplicates).  This method does not exist
+     * in the standard {@link Map} interface.
+     *
+     * <p>If no mapping for the given secondary key is present, an empty map
+     * is returned.  If duplicates are not allowed, at most a single mapping
+     * will be in the returned map.  If duplicates are allowed, the returned
+     * map may be used to add mappings for the given secondary key.</p>
+     *
+     * @param secondaryKey is the secondary key whose duplicate values are
+     * represented by the returned map.
+     *
+     * @param primaryKeyBinding is the binding used for keys in the returned
+     * map.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
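+     *
+     * <p>A minimal sketch for a secondary index view; the index map and
+     * binding shown are hypothetical:</p>
+     * <pre>
+     *    Map&lt;Long,V&gt; byPrimaryKey =
+     *        indexMap.duplicatesMap(secondaryKey, primaryKeyBinding);
+     * </pre>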
+     */
+    public <PK> Map<PK,V> duplicatesMap(K secondaryKey,
+                                        EntryBinding primaryKeyBinding) {
+        try {
+            DataView newView =
+                view.duplicatesView(secondaryKey, primaryKeyBinding);
+            if (isOrdered()) {
+                return new StoredSortedMap<PK,V>(newView);
+            } else {
+                return new StoredMap<PK,V>(newView);
+            }
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+
+    /**
+     * Compares the specified object with this map for equality.  A value
+     * comparison is performed: the stored values are compared rather than
+     * calling the equals() method of each element.  This method conforms to
+     * the {@link Map#equals} interface.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public boolean equals(Object other) {
+
+        if (other instanceof Map) {
+            return entrySet().equals(((Map) other).entrySet());
+        } else {
+            return false;
+        }
+    }
+
+    /*
+     * Defined so that FindBugs does not warn about implementing equals()
+     * without hashCode().
+     */
+    public int hashCode() {
+        return super.hashCode();
+    }
+
+    // Inherit javadoc
+    public int size() {
+        return values().size();
+    }
+
+    /**
+     * Converts the map to a string representation for debugging.  WARNING:
+     * All mappings will be converted to strings, so the returned string may
+     * be very large.
+     *
+     * @return the string representation.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public String toString() {
+
+        return entrySet().toString();
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredMapEntry.java b/src/com/sleepycat/collections/StoredMapEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0f8f3f2e56694068a4143a9154e3ac8e5e5393a
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredMapEntry.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredMapEntry.java,v 1.19 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+/**
+ * @author Mark Hayes
+ */
+final class StoredMapEntry extends MapEntryParameter {
+
+    private BaseIterator iter;
+    private StoredCollection coll;
+
+    StoredMapEntry(Object key,
+                   Object value,
+                   StoredCollection coll,
+                   BaseIterator iter) {
+
+        super(key, value);
+        this.coll = coll;
+        this.iter = iter;
+    }
+
+    public Object setValue(Object newValue) {
+
+        Object oldValue;
+        if (iter != null && iter.isCurrentData(this)) {
+            oldValue = getValue();
+            iter.set(newValue);
+        } else {
+            if (coll.view.dupsAllowed) {
+                throw new IllegalStateException("May not insert duplicates");
+            }
+            oldValue = coll.putKeyValue(getKey(), newValue);
+        }
+        setValueInternal(newValue);
+        return oldValue;
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredSortedEntrySet.java b/src/com/sleepycat/collections/StoredSortedEntrySet.java
new file mode 100644
index 0000000000000000000000000000000000000000..f9828d4c7174b6dddbc298e6b4d1e17bbd0b5321
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredSortedEntrySet.java
@@ -0,0 +1,231 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredSortedEntrySet.java,v 1.27 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Comparator;
+import java.util.Map;
+import java.util.SortedSet;
+
+/**
+ * The SortedSet returned by Map.entrySet().  This class may not be
+ * instantiated directly.  Contrary to what is stated by {@link Map#entrySet},
+ * this class does support the {@link #add} and {@link #addAll} methods.
+ *
+ * <p>The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects
+ * that are returned by this class and its iterators behaves just as the {@link
+ * StoredIterator#set} method does.</p>
+ *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted sets only.  Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Map.Entry, boolean)}</li>
+ * <li>{@link #tailSet(Map.Entry, boolean)}</li>
+ * <li>{@link #subSet(Map.Entry, boolean, Map.Entry, boolean)}</li>
+ * </ul>
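+ *
+ * <p>A minimal range-view sketch; {@code entrySet} and {@code bound} are
+ * hypothetical, and only the key of the bound entry is used to define the
+ * range:</p>
+ * <pre>
+ *    SortedSet&lt;Map.Entry&lt;K,V&gt;&gt; head = entrySet.headSet(bound);
+ * </pre>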
+ *
+ * @author Mark Hayes
+ */
+public class StoredSortedEntrySet<K,V>
+    extends StoredEntrySet<K,V>
+    implements SortedSet<Map.Entry<K,V>> {
+
+    StoredSortedEntrySet(DataView mapView) {
+
+        super(mapView);
+    }
+
+    /**
+     * Returns null since comparators are not supported.  The natural ordering
+     * of a stored collection is data byte order, whether the data classes
+     * implement the {@link java.lang.Comparable} interface or not.
+     * This method does not conform to the {@link SortedSet#comparator}
+     * interface.
+     *
+     * @return null.
+     */
+    public Comparator<? super Map.Entry<K,V>> comparator() {
+
+        return null;
+    }
+
+    /**
+     * Returns the first (lowest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#first} interface.
+     *
+     * @return the first element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public Map.Entry<K,V> first() {
+
+        return getFirstOrLast(true);
+    }
+
+    /**
+     * Returns the last (highest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#last} interface.
+     *
+     * @return the last element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public Map.Entry<K,V> last() {
+
+        return getFirstOrLast(false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toMapEntry.
+     * This method conforms to the {@link SortedSet#headSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toMapEntry the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> headSet(Map.Entry<K,V> toMapEntry) {
+
+        return subSet(null, false, toMapEntry, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toMapEntry, optionally including toMapEntry.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toMapEntry is the upper bound.
+     *
+     * @param toInclusive is true to include toMapEntry.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> headSet(Map.Entry<K,V> toMapEntry,
+                                             boolean toInclusive) {
+
+        return subSet(null, false, toMapEntry, toInclusive);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * greater than or equal to fromMapEntry.
+     * This method conforms to the {@link SortedSet#tailSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromMapEntry is the lower bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> tailSet(Map.Entry<K,V> fromMapEntry) {
+
+        return subSet(fromMapEntry, true, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromMapEntry, optionally including fromMapEntry.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromMapEntry is the lower bound.
+     *
+     * @param fromInclusive is true to include fromMapEntry.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> tailSet(Map.Entry<K,V> fromMapEntry,
+                                             boolean fromInclusive) {
+
+        return subSet(fromMapEntry, fromInclusive, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements range
+     * from fromMapEntry, inclusive, to toMapEntry, exclusive.
+     * This method conforms to the {@link SortedSet#subSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromMapEntry is the lower bound.
+     *
+     * @param toMapEntry is the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> subSet(Map.Entry<K,V> fromMapEntry,
+                                            Map.Entry<K,V> toMapEntry) {
+
+        return subSet(fromMapEntry, true, toMapEntry, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromMapEntry and strictly less than toMapEntry,
+     * optionally including fromMapEntry and toMapEntry.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromMapEntry is the lower bound.
+     *
+     * @param fromInclusive is true to include fromMapEntry.
+     *
+     * @param toMapEntry is the upper bound.
+     *
+     * @param toInclusive is true to include toMapEntry.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<Map.Entry<K,V>> subSet(Map.Entry<K,V> fromMapEntry,
+                                            boolean fromInclusive,
+                                            Map.Entry<K,V> toMapEntry,
+                                            boolean toInclusive) {
+
+        Object fromKey = (fromMapEntry != null) ? fromMapEntry.getKey() : null;
+        Object toKey = (toMapEntry != null) ? toMapEntry.getKey() : null;
+        try {
+            return new StoredSortedEntrySet<K,V>(
+               view.subView(fromKey, fromInclusive, toKey, toInclusive, null));
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredSortedKeySet.java b/src/com/sleepycat/collections/StoredSortedKeySet.java
new file mode 100644
index 0000000000000000000000000000000000000000..35d851e39764e9bc2a7b93c010b6443dabf1e474
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredSortedKeySet.java
@@ -0,0 +1,251 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredSortedKeySet.java,v 1.30 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Comparator;
+import java.util.SortedSet;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Database;
+
+/**
+ * The SortedSet returned by Map.keySet(), which may also be constructed
+ * directly if a Map is not needed.
+ * Since this collection is a set it only contains one element for each key,
+ * even when duplicates are allowed.  Key set iterators are therefore
+ * particularly useful for enumerating the unique keys of a store or index that
+ * allows duplicates.
+ *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted sets only.  Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Object, boolean)}</li>
+ * <li>{@link #tailSet(Object, boolean)}</li>
+ * <li>{@link #subSet(Object, boolean, Object, boolean)}</li>
+ * </ul>
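+ *
+ * <p>A minimal range-view sketch (the bounds are illustrative); the subset
+ * is itself a stored collection, so its iterators must be closed:</p>
+ * <pre>
+ *    SortedSet&lt;String&gt; range = keySet.subSet("a", true, "b", false);
+ * </pre>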
+ *
+ * @author Mark Hayes
+ */
+public class StoredSortedKeySet<K>
+    extends StoredKeySet<K>
+    implements SortedSet<K> {
+
+    /**
+     * Creates a sorted key set view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedKeySet(Database database,
+                              EntryBinding<K> keyBinding,
+                              boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, null, null,
+                           writeAllowed, null));
+    }
+
+    StoredSortedKeySet(DataView keySetView) {
+
+        super(keySetView);
+    }
+
+    /**
+     * Returns null since comparators are not supported.  The natural ordering
+     * of a stored collection is data byte order, whether the data classes
+     * implement the {@link java.lang.Comparable} interface or not.
+     * This method does not conform to the {@link SortedSet#comparator}
+     * interface.
+     *
+     * @return null.
+     */
+    public Comparator<? super K> comparator() {
+
+        return null;
+    }
+
+    /**
+     * Returns the first (lowest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#first} interface.
+     *
+     * @return the first element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public K first() {
+
+        return getFirstOrLast(true);
+    }
+
+    /**
+     * Returns the last (highest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#last} interface.
+     *
+     * @return the last element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public K last() {
+
+        return getFirstOrLast(false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toKey.
+     * This method conforms to the {@link SortedSet#headSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toKey is the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> headSet(K toKey) {
+
+        return subSet(null, false, toKey, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toKey, optionally including toKey.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toKey is the upper bound.
+     *
+     * @param toInclusive is true to include toKey.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> headSet(K toKey, boolean toInclusive) {
+
+        return subSet(null, false, toKey, toInclusive);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * greater than or equal to fromKey.
+     * This method conforms to the {@link SortedSet#tailSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> tailSet(K fromKey) {
+
+        return subSet(fromKey, true, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromKey, optionally including fromKey.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param fromInclusive is true to include fromKey.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> tailSet(K fromKey, boolean fromInclusive) {
+
+        return subSet(fromKey, fromInclusive, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements range
+     * from fromKey, inclusive, to toKey, exclusive.
+     * This method conforms to the {@link SortedSet#subSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param toKey is the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> subSet(K fromKey, K toKey) {
+
+        return subSet(fromKey, true, toKey, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromKey and strictly less than toKey,
+     * optionally including fromKey and toKey.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param fromInclusive is true to include fromKey.
+     *
+     * @param toKey is the upper bound.
+     *
+     * @param toInclusive is true to include toKey.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<K> subSet(K fromKey,
+                               boolean fromInclusive,
+                               K toKey,
+                               boolean toInclusive) {
+        try {
+            return new StoredSortedKeySet<K>(
+               view.subView(fromKey, fromInclusive, toKey, toInclusive, null));
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredSortedMap.java b/src/com/sleepycat/collections/StoredSortedMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..d1b4bdac105f85c53583c231ddb7ed8cfac6a369
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredSortedMap.java
@@ -0,0 +1,361 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredSortedMap.java,v 1.35 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Comparator;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.OperationStatus;
+
+/**
+ * A SortedMap view of a {@link Database}.
+ *
+ * <p>In addition to the standard SortedMap methods, this class provides the
+ * following methods for stored sorted maps only.  Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headMap(Object, boolean)}</li>
+ * <li>{@link #tailMap(Object, boolean)}</li>
+ * <li>{@link #subMap(Object, boolean, Object, boolean)}</li>
+ * </ul>
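+ *
+ * <p>A minimal range-view sketch; {@code sortedMap} and the key bounds are
+ * illustrative:</p>
+ * <pre>
+ *    SortedMap&lt;String,V&gt; head = sortedMap.headMap("m");
+ *    SortedMap&lt;String,V&gt; tail = sortedMap.tailMap("m", false);
+ * </pre>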
+ *
+ * @author Mark Hayes
+ */
+public class StoredSortedMap<K,V>
+    extends StoredMap<K,V>
+    implements SortedMap<K,V> {
+
+    /**
+     * Creates a sorted map view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedMap(Database database,
+                           EntryBinding<K> keyBinding,
+                           EntryBinding<V> valueBinding,
+                           boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, valueBinding, null,
+                           writeAllowed, null));
+    }
+
+    /**
+     * Creates a sorted map view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created map.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param keyAssigner is used by the {@link #append} method to assign
+     * primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedMap(Database database,
+                           EntryBinding<K> keyBinding,
+                           EntryBinding<V> valueBinding,
+                           PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, keyBinding, valueBinding, null,
+                           true, keyAssigner));
+    }
+
+    /**
+     * Creates a sorted map entity view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedMap(Database database,
+                           EntryBinding<K> keyBinding,
+                           EntityBinding<V> valueEntityBinding,
+                           boolean writeAllowed) {
+
+        super(new DataView(database, keyBinding, null, valueEntityBinding,
+                           writeAllowed, null));
+    }
+
+    /**
+     * Creates a sorted map entity view of a {@link Database} with a {@link
+     * PrimaryKeyAssigner}.  Writing is allowed for the created map.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param keyBinding is the binding used to translate between key buffers
+     * and key objects.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param keyAssigner is used by the {@link #append} method to assign
+     * primary keys.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedMap(Database database,
+                           EntryBinding<K> keyBinding,
+                           EntityBinding<V> valueEntityBinding,
+                           PrimaryKeyAssigner keyAssigner) {
+
+        super(new DataView(database, keyBinding, null, valueEntityBinding,
+                           true, keyAssigner));
+    }
+
+    StoredSortedMap(DataView mapView) {
+
+        super(mapView);
+    }
+
+    /**
+     * Returns null since comparators are not supported.  The natural ordering
+     * of a stored collection is data byte order, whether the data classes
+     * implement the {@link java.lang.Comparable} interface or not.
+     * This method does not conform to the {@link SortedMap#comparator}
+     * interface.
+     *
+     * @return null.
+     */
+    public Comparator<? super K> comparator() {
+
+        return null;
+    }
+
+    /**
+     * Returns the first (lowest) key currently in this sorted map.
+     * This method conforms to the {@link SortedMap#firstKey} interface.
+     *
+     * @return the first key.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public K firstKey() {
+
+        return getFirstOrLastKey(true);
+    }
+
+    /**
+     * Returns the last (highest) key currently in this sorted map.
+     * This method conforms to the {@link SortedMap#lastKey} interface.
+     *
+     * @return the last key.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public K lastKey() {
+
+        return getFirstOrLastKey(false);
+    }
+
+    private K getFirstOrLastKey(boolean doGetFirst) {
+
+        DataCursor cursor = null;
+        try {
+            cursor = new DataCursor(view, false);
+            OperationStatus status;
+            if (doGetFirst) {
+                status = cursor.getFirst(false);
+            } else {
+                status = cursor.getLast(false);
+            }
+            return (K) ((status == OperationStatus.SUCCESS) ?
+                        cursor.getCurrentKey() : null);
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        } finally {
+            closeCursor(cursor);
+        }
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys are
+     * strictly less than toKey.
+     * This method conforms to the {@link SortedMap#headMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toKey is the upper bound.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> headMap(K toKey) {
+
+        return subMap(null, false, toKey, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys are
+     * strictly less than toKey, optionally including toKey.
+     * This method does not exist in the standard {@link SortedMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toKey is the upper bound.
+     *
+     * @param toInclusive is true to include toKey.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> headMap(K toKey, boolean toInclusive) {
+
+        return subMap(null, false, toKey, toInclusive);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys are
+     * greater than or equal to fromKey.
+     * This method conforms to the {@link SortedMap#tailMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> tailMap(K fromKey) {
+
+        return subMap(fromKey, true, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys are
+     * strictly greater than fromKey, optionally including fromKey.
+     * This method does not exist in the standard {@link SortedMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param fromInclusive is true to include fromKey.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> tailMap(K fromKey, boolean fromInclusive) {
+
+        return subMap(fromKey, fromInclusive, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys range
+     * from fromKey, inclusive, to toKey, exclusive.
+     * This method conforms to the {@link SortedMap#subMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param toKey is the upper bound.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> subMap(K fromKey, K toKey) {
+
+        return subMap(fromKey, true, toKey, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted map whose keys are
+     * strictly greater than fromKey and strictly less than toKey,
+     * optionally including fromKey and toKey.
+     * This method does not exist in the standard {@link SortedMap} interface.
+     *
+     * <p>Note that the return value is a StoredSortedMap and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromKey is the lower bound.
+     *
+     * @param fromInclusive is true to include fromKey.
+     *
+     * @param toKey is the upper bound.
+     *
+     * @param toInclusive is true to include toKey.
+     *
+     * @return the submap.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedMap<K,V> subMap(K fromKey,
+                                 boolean fromInclusive,
+                                 K toKey,
+                                 boolean toInclusive) {
+        try {
+            return new StoredSortedMap<K,V>(
+               view.subView(fromKey, fromInclusive, toKey, toInclusive, null));
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredSortedValueSet.java b/src/com/sleepycat/collections/StoredSortedValueSet.java
new file mode 100644
index 0000000000000000000000000000000000000000..a1286d6738c0afe009d552d721c76c2e5eac0b0b
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredSortedValueSet.java
@@ -0,0 +1,263 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredSortedValueSet.java,v 1.32 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Comparator;
+import java.util.SortedSet;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.je.Database;
+
+/**
+ * The SortedSet returned by Map.values(), which can also be constructed
+ * directly if a Map is not needed.
+ * Although this collection is a set, it may contain duplicate values.  Only if
+ * an entity value binding is used are all elements guaranteed to be unique.
+ *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted value sets only.  Note that the use of
+ * these methods is not compatible with the standard Java collections
+ * interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Object, boolean)}</li>
+ * <li>{@link #tailSet(Object, boolean)}</li>
+ * <li>{@link #subSet(Object, boolean, Object, boolean)}</li>
+ * </ul>
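+ *
+ * <p>A minimal construction sketch (illustrative only; the already-open
+ * <code>db</code>, the entity binding, and the hypothetical
+ * <code>MyEntity</code> class are assumptions, not part of this class):</p>
+ * <pre>
+ *     Database db = ...;                       // an already-open Database
+ *     EntityBinding&lt;MyEntity&gt; entityBinding = ...;
+ *
+ *     SortedSet&lt;MyEntity&gt; set =
+ *         new StoredSortedValueSet&lt;MyEntity&gt;(db, entityBinding, true);
+ *
+ *     // Range views use the byte ordering of the stored keys.
+ *     SortedSet&lt;MyEntity&gt; head = set.headSet(boundEntity, true);
+ * </pre>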
+ *
+ * @author Mark Hayes
+ */
+public class StoredSortedValueSet<E>
+    extends StoredValueSet<E>
+    implements SortedSet<E> {
+
+    /*
+     * No valueBinding ctor is possible since key cannot be derived.
+     */
+
+    /**
+     * Creates a sorted value set entity view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public StoredSortedValueSet(Database database,
+                                EntityBinding<E> valueEntityBinding,
+                                boolean writeAllowed) {
+
+        super(new DataView(database, null, null, valueEntityBinding,
+                           writeAllowed, null));
+        checkKeyDerivation();
+    }
+
+    StoredSortedValueSet(DataView valueSetView) {
+
+        super(valueSetView);
+        checkKeyDerivation();
+    }
+
+    private void checkKeyDerivation() {
+
+        if (!view.canDeriveKeyFromValue()) {
+            throw new IllegalArgumentException("Cannot derive key from value");
+        }
+    }
+
+    /**
+     * Returns null since comparators are not supported.  The natural ordering
+     * of a stored collection is data byte order, whether the data classes
+     * implement the {@link java.lang.Comparable} interface or not.
+     * This method does not conform to the {@link SortedSet#comparator}
+     * interface.
+     *
+     * @return null.
+     */
+    public Comparator<? super E> comparator() {
+
+        return null;
+    }
+
+    /**
+     * Returns the first (lowest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#first} interface.
+     *
+     * @return the first element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public E first() {
+
+        return getFirstOrLast(true);
+    }
+
+    /**
+     * Returns the last (highest) element currently in this sorted set.
+     * This method conforms to the {@link SortedSet#last} interface.
+     *
+     * @return the last element.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public E last() {
+
+        return getFirstOrLast(false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toValue.
+     * This method conforms to the {@link SortedSet#headSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toValue the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> headSet(E toValue) {
+
+        return subSet(null, false, toValue, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly less than toValue, optionally including toValue.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param toValue is the upper bound.
+     *
+     * @param toInclusive is true to include toValue.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> headSet(E toValue, boolean toInclusive) {
+
+        return subSet(null, false, toValue, toInclusive);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * greater than or equal to fromValue.
+     * This method conforms to the {@link SortedSet#tailSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromValue is the lower bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> tailSet(E fromValue) {
+
+        return subSet(fromValue, true, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromValue, optionally including fromValue.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromValue is the lower bound.
+     *
+     * @param fromInclusive is true to include fromValue.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> tailSet(E fromValue, boolean fromInclusive) {
+
+        return subSet(fromValue, fromInclusive, null, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements range
+     * from fromValue, inclusive, to toValue, exclusive.
+     * This method conforms to the {@link SortedSet#subSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromValue is the lower bound.
+     *
+     * @param toValue is the upper bound.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> subSet(E fromValue, E toValue) {
+
+        return subSet(fromValue, true, toValue, false);
+    }
+
+    /**
+     * Returns a view of the portion of this sorted set whose elements are
+     * strictly greater than fromValue and strictly less than toValue,
+     * optionally including fromValue and toValue.
+     * This method does not exist in the standard {@link SortedSet} interface.
+     *
+     * <p>Note that the return value is a StoredCollection and must be treated
+     * as such; for example, its iterators must be explicitly closed.</p>
+     *
+     * @param fromValue is the lower bound.
+     *
+     * @param fromInclusive is true to include fromValue.
+     *
+     * @param toValue is the upper bound.
+     *
+     * @param toInclusive is true to include toValue.
+     *
+     * @return the subset.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link
+     * com.sleepycat.je.DatabaseException} is thrown.
+     */
+    public SortedSet<E> subSet(E fromValue,
+                               boolean fromInclusive,
+                               E toValue,
+                               boolean toInclusive) {
+        try {
+            return new StoredSortedValueSet<E>(view.subView
+                (fromValue, fromInclusive, toValue, toInclusive, null));
+        } catch (Exception e) {
+            throw StoredContainer.convertException(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/StoredValueSet.java b/src/com/sleepycat/collections/StoredValueSet.java
new file mode 100644
index 0000000000000000000000000000000000000000..000d353195eb26ad02da3704d4181aaaa2c87f5e
--- /dev/null
+++ b/src/com/sleepycat/collections/StoredValueSet.java
@@ -0,0 +1,181 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredValueSet.java,v 1.43 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.util.Set;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+
+/**
+ * The Set returned by Map.values() and Map.duplicates(), which can also be
+ * constructed directly if a Map is not needed.
+ * Although this collection is a set, it may contain duplicate values.  Only if
+ * an entity value binding is used are all elements guaranteed to be unique.
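+ *
+ * <p>A minimal construction sketch (illustrative only; the already-open
+ * <code>db</code> is assumed, and the value binding shown is just one possible
+ * choice):</p>
+ * <pre>
+ *     Database db = ...;                      // an already-open Database
+ *     EntryBinding&lt;Long&gt; valueBinding =
+ *         TupleBinding.getPrimitiveBinding(Long.class);
+ *
+ *     Set&lt;Long&gt; values = new StoredValueSet&lt;Long&gt;(db, valueBinding, true);
+ * </pre>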
+ *
+ * @author Mark Hayes
+ */
+public class StoredValueSet<E> extends StoredCollection<E> implements Set<E> {
+
+    /*
+     * This class is also used internally for the set returned by duplicates().
+     */
+
+    /**
+     * Creates a value set view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueBinding is the binding used to translate between value
+     * buffers and value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredValueSet(Database database,
+                          EntryBinding<E> valueBinding,
+                          boolean writeAllowed) {
+
+        super(new DataView(database, null, valueBinding, null,
+                           writeAllowed, null));
+    }
+
+    /**
+     * Creates a value set entity view of a {@link Database}.
+     *
+     * @param database is the Database underlying the new collection.
+     *
+     * @param valueEntityBinding is the binding used to translate between
+     * key/value buffers and entity value objects.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     *
+     * @throws IllegalArgumentException if formats are not consistently
+     * defined or a parameter is invalid.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public StoredValueSet(Database database,
+                          EntityBinding<E> valueEntityBinding,
+                          boolean writeAllowed) {
+
+        super(new DataView(database, null, null, valueEntityBinding,
+                           writeAllowed, null));
+    }
+
+    StoredValueSet(DataView valueSetView) {
+
+        super(valueSetView);
+    }
+
+    /**
+     * Adds the specified entity to this set if it is not already present
+     * (optional operation).
+     * This method conforms to the {@link Set#add} interface.
+     *
+     * @param entity is the entity to be added.
+     *
+     * @return true if the entity was added, that is, the key-value pair
+     * represented by the entity was not previously present in the collection.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only,
+     * if the collection is indexed, or if an entity binding is not used.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean add(E entity) {
+
+        if (view.isSecondary()) {
+            throw new UnsupportedOperationException(
+                "add() not allowed with index");
+        } else if (view.range.isSingleKey()) {
+            /* entity is actually just a value in this case */
+            if (!view.dupsAllowed) {
+                throw new UnsupportedOperationException("duplicates required");
+            }
+            DataCursor cursor = null;
+            boolean doAutoCommit = beginAutoCommit();
+            try {
+                cursor = new DataCursor(view, true);
+                cursor.useRangeKey();
+                OperationStatus status =
+                    cursor.putNoDupData(null, entity, null, true);
+                closeCursor(cursor);
+                commitAutoCommit(doAutoCommit);
+                return (status == OperationStatus.SUCCESS);
+            } catch (Exception e) {
+                closeCursor(cursor);
+                throw handleException(e, doAutoCommit);
+            }
+        } else if (view.entityBinding == null) {
+            throw new UnsupportedOperationException(
+                "add() requires entity binding");
+        } else {
+            return add(null, entity);
+        }
+    }
+
+    /**
+     * Returns true if this set contains the specified element.
+     * This method conforms to the {@link java.util.Set#contains}
+     * interface.
+     *
+     * @param value the value to check.
+     *
+     * @return whether the set contains the given value.
+     */
+    public boolean contains(Object value) {
+
+        return containsValue(value);
+    }
+
+    /**
+     * Removes the specified value from this set if it is present (optional
+     * operation).
+     * If an entity binding is used, the key-value pair represented by the
+     * given entity is removed.  If an entity binding is not used, the first
+     * occurrence of a key-value pair with the given value is removed.
+     * This method conforms to the {@link Set#remove} interface.
+     *
+     * @throws UnsupportedOperationException if the collection is read-only.
+     *
+     * @throws RuntimeExceptionWrapper if a {@link DatabaseException} is
+     * thrown.
+     */
+    public boolean remove(Object value) {
+
+        return removeValue(value);
+    }
+
+    E makeIteratorData(BaseIterator iterator,
+                       DatabaseEntry keyEntry,
+                       DatabaseEntry priKeyEntry,
+                       DatabaseEntry valueEntry) {
+
+        return (E) view.makeValue(priKeyEntry, valueEntry);
+    }
+
+    boolean hasValues() {
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/collections/TransactionRunner.java b/src/com/sleepycat/collections/TransactionRunner.java
new file mode 100644
index 0000000000000000000000000000000000000000..7eb6828acfbb0378a754d6500c650fee845f122e
--- /dev/null
+++ b/src/com/sleepycat/collections/TransactionRunner.java
@@ -0,0 +1,274 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TransactionRunner.java,v 1.48 2008/01/07 14:28:45 cwl Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.util.ExceptionUnwrapper;
+
+/**
+ * Starts a transaction, calls {@link TransactionWorker#doWork}, and handles
+ * transaction retry and exceptions.  To perform a transaction, the user
+ * implements the {@link TransactionWorker} interface and passes an instance of
+ * that class to the {@link #run} method.
+ *
+ * <p>A single TransactionRunner instance may be used by any number of threads
+ * for any number of transactions.</p>
+ *
+ * <p>The behavior of the run() method depends on whether the environment is
+ * transactional, whether nested transactions are enabled, and whether a
+ * transaction is already active.</p>
+ *
+ * <ul>
+ * <li>When the run() method is called in a transactional environment and no
+ * transaction is active for the current thread, a new transaction is started
+ * before calling doWork().  If DeadlockException is thrown by doWork(), the
+ * transaction will be aborted and the process will be repeated up to the
+ * maximum number of retries.  If another exception is thrown by doWork() or
+ * the maximum number of retries has occurred, the transaction will be aborted
+ * and the exception will be rethrown by the run() method.  If no exception is
+ * thrown by doWork(), the transaction will be committed.  The run() method
+ * will not attempt to commit or abort a transaction if it has already been
+ * committed or aborted by doWork().</li>
+ *
+ * <li>When the run() method is called and a transaction is active for the
+ * current thread, and nested transactions are enabled, a nested transaction is
+ * started before calling doWork().  The transaction that is active when
+ * calling the run() method will become the parent of the nested transaction.
+ * The nested transaction will be committed or aborted by the run() method
+ * following the same rules described above.  Note that nested transactions may
+ * not be enabled for the JE product, since JE does not support nested
+ * transactions.</li>
+ *
+ * <li>When the run() method is called in a non-transactional environment, the
+ * doWork() method is called without starting a transaction.  The run() method
+ * will return without committing or aborting a transaction, and any exceptions
+ * thrown by the doWork() method will be thrown by the run() method.</li>
+ *
+ * <li>When the run() method is called and a transaction is active for the
+ * current thread and nested transactions are not enabled (the default), the
+ * same rules as above apply.  All the operations performed by the doWork()
+ * method will be part of the currently active transaction.</li>
+ * </ul>
+ *
+ * <p>In a transactional environment, the rules described above support nested
+ * calls to the run() method and guarantee that the outermost call will cause
+ * the transaction to be committed or aborted.  This is true whether or not
+ * nested transactions are supported or enabled.  Note that nested transactions
+ * are provided as an optimization for improving concurrency but do not change
+ * the meaning of the outermost transaction.  Nested transactions are not
+ * currently supported by the JE product.</p>
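+ *
+ * <p>A minimal usage sketch (illustrative only; the already-open transactional
+ * <code>env</code> and the work performed inside doWork() are assumed):</p>
+ * <pre>
+ *     Environment env = ...;   // an already-open, transactional Environment
+ *     TransactionRunner runner = new TransactionRunner(env);
+ *     runner.run(new TransactionWorker() {
+ *         public void doWork() throws Exception {
+ *             // perform reads and writes here; a DeadlockException causes a
+ *             // retry (up to getMaxRetries()), any other exception an abort
+ *         }
+ *     });
+ * </pre>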
+ *
+ * @author Mark Hayes
+ */
+public class TransactionRunner {
+
+    /** The default maximum number of retries. */
+    public static final int DEFAULT_MAX_RETRIES = 10;
+
+    private CurrentTransaction currentTxn;
+    private int maxRetries;
+    private TransactionConfig config;
+    private boolean allowNestedTxn;
+
+    /**
+     * Creates a transaction runner for a given Berkeley DB environment.
+     * The default maximum number of retries ({@link #DEFAULT_MAX_RETRIES}) and
+     * a null (default) {@link TransactionConfig} will be used.
+     *
+     * @param env is the environment for running transactions.
+     */
+    public TransactionRunner(Environment env) {
+
+        this(env, DEFAULT_MAX_RETRIES, null);
+    }
+
+    /**
+     * Creates a transaction runner for a given Berkeley DB environment and
+     * with a given number of maximum retries.
+     *
+     * @param env is the environment for running transactions.
+     *
+     * @param maxRetries is the maximum number of retries that will be
+     * performed when deadlocks are detected.
+     *
+     * @param config the transaction configuration used for calling
+     * {@link Environment#beginTransaction}, or null to use the default
+     * configuration.  The configuration object is not cloned, and
+     * any modifications to it will impact subsequent transactions.
+     */
+    public TransactionRunner(Environment env,
+                             int maxRetries,
+                             TransactionConfig config) {
+
+        this.currentTxn = CurrentTransaction.getInstance(env);
+        this.maxRetries = maxRetries;
+        this.config = config;
+    }
+
+    /**
+     * Returns the maximum number of retries that will be performed when
+     * deadlocks are detected.
+     */
+    public int getMaxRetries() {
+
+        return maxRetries;
+    }
+
+    /**
+     * Changes the maximum number of retries that will be performed when
+     * deadlocks are detected.
+     * Calling this method does not impact transactions already running.
+     */
+    public void setMaxRetries(int maxRetries) {
+
+        this.maxRetries = maxRetries;
+    }
+
+    /**
+     * Returns whether nested transactions will be created if
+     * <code>run()</code> is called when a transaction is already active for
+     * the current thread.
+     * By default this property is false.
+     *
+     * <p>Note that this method always returns false in the JE product, since
+     * nested transactions are not supported by JE.</p>
+     */
+    public boolean getAllowNestedTransactions() {
+
+        return allowNestedTxn;
+    }
+
+    /**
+     * Changes whether nested transactions will be created if
+     * <code>run()</code> is called when a transaction is already active for
+     * the current thread.
+     * Calling this method does not impact transactions already running.
+     *
+     * <p>Note that true may not be passed to this method in the JE product,
+     * since nested transactions are not supported by JE.</p>
+     */
+    public void setAllowNestedTransactions(boolean allowNestedTxn) {
+
+        if (allowNestedTxn && !DbCompat.NESTED_TRANSACTIONS) {
+            throw new UnsupportedOperationException(
+                    "Nested transactions are not supported.");
+        }
+        this.allowNestedTxn = allowNestedTxn;
+    }
+
+    /**
+     * Returns the transaction configuration used for calling
+     * {@link Environment#beginTransaction}.
+     *
+     * <p>If this property is null, the default configuration is used.  The
+     * configuration object is not cloned, and any modifications to it will
+     * impact subsequent transactions.</p>
+     *
+     * @return the transaction configuration.
+     */
+    public TransactionConfig getTransactionConfig() {
+
+        return config;
+    }
+
+    /**
+     * Changes the transaction configuration used for calling
+     * {@link Environment#beginTransaction}.
+     *
+     * <p>If this property is null, the default configuration is used.  The
+     * configuration object is not cloned, and any modifications to it will
+     * impact subsequent transactions.</p>
+     *
+     * @param config the transaction configuration.
+     */
+    public void setTransactionConfig(TransactionConfig config) {
+
+        this.config = config;
+    }
+
+    /**
+     * Calls the {@link TransactionWorker#doWork} method and, for transactional
+     * environments, may begin and end a transaction.  If the environment given
+     * is non-transactional, a transaction will not be used but the doWork()
+     * method will still be called.  See the class description for more
+     * information.
+     *
+     * @throws DeadlockException when it is thrown by doWork() and the
+     * maximum number of retries has occurred.  The transaction will have been
+     * aborted by this method.
+     *
+     * @throws Exception when any other exception is thrown by doWork().  The
+     * exception will first be unwrapped by calling {@link
+     * ExceptionUnwrapper#unwrap}.  The transaction will have been aborted by
+     * this method.
+     */
+    public void run(TransactionWorker worker)
+        throws DatabaseException, Exception {
+
+        if (currentTxn != null &&
+            (allowNestedTxn || currentTxn.getTransaction() == null)) {
+
+            /*
+             * Transactional and (not nested or nested txns allowed).
+             */
+            for (int i = 0;; i += 1) {
+                Transaction txn = null;
+                try {
+                    txn = currentTxn.beginTransaction(config);
+                    worker.doWork();
+                    if (txn != null && txn == currentTxn.getTransaction()) {
+                        currentTxn.commitTransaction();
+                    }
+                    return;
+                } catch (Throwable e) {
+                    e = ExceptionUnwrapper.unwrapAny(e);
+                    if (txn != null && txn == currentTxn.getTransaction()) {
+                        try {
+                            currentTxn.abortTransaction();
+                        } catch (Throwable e2) {
+
+                            /*
+                             * We print this stack trace so that the
+                             * information is not lost when we throw the
+                             * original exception.
+                             */
+                            if (DbCompat.TRANSACTION_RUNNER_PRINT_STACK_TRACES) {
+                                e2.printStackTrace();
+                            }
+                            /* Force the original exception to be thrown. */
+                            i = maxRetries + 1;
+                        }
+                    }
+                    if (i >= maxRetries || !(e instanceof DeadlockException)) {
+                        if (e instanceof Exception) {
+                            throw (Exception) e;
+                        } else {
+                            throw (Error) e;
+                        }
+                    }
+                }
+            }
+        } else {
+
+            /*
+             * Non-transactional or (nested and no nested txns allowed).
+             */
+            try {
+                worker.doWork();
+            } catch (Exception e) {
+                throw ExceptionUnwrapper.unwrap(e);
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/collections/TransactionWorker.java b/src/com/sleepycat/collections/TransactionWorker.java
new file mode 100644
index 0000000000000000000000000000000000000000..8e9238731a808b9c131bb96c0a4b1e1a6c0da4bb
--- /dev/null
+++ b/src/com/sleepycat/collections/TransactionWorker.java
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TransactionWorker.java,v 1.17 2008/01/07 14:28:45 cwl Exp $
+ */
+
+package com.sleepycat.collections;
+
+/**
+ * The interface implemented to perform the work within a transaction.
+ * To run a transaction, an instance of this interface is passed to the
+ * {@link TransactionRunner#run} method.
+ *
+ * @author Mark Hayes
+ */
+public interface TransactionWorker {
+
+    /**
+     * Perform the work for a single transaction.
+     *
+     * @see TransactionRunner#run
+     */
+    void doWork()
+        throws Exception;
+}
diff --git a/src/com/sleepycat/collections/TupleSerialFactory.java b/src/com/sleepycat/collections/TupleSerialFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..72859358d5f970f90eeb05624ff294b03d5cfb9e
--- /dev/null
+++ b/src/com/sleepycat/collections/TupleSerialFactory.java
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialFactory.java,v 1.42 2008/05/27 15:30:34 mark Exp $
+ */
+
+package com.sleepycat.collections;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
+import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;
+import com.sleepycat.bind.tuple.MarshalledTupleEntry; // for javadoc
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleMarshalledBinding;
+import com.sleepycat.je.Database;
+
+/**
+ * Creates stored collections having tuple keys and serialized entity values.
+ * The entity classes must be Serializable and must implement the
+ * MarshalledTupleKeyEntity interface.  The key classes must either implement
+ * the MarshalledTupleEntry interface or be one of the Java primitive type
+ * classes.  Underlying binding objects are created automatically.
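+ *
+ * <p>A minimal usage sketch (illustrative only; the catalog, the already-open
+ * <code>db</code>, and the hypothetical <code>MyEntity</code> class, which
+ * must implement MarshalledTupleKeyEntity, are assumed):</p>
+ * <pre>
+ *     ClassCatalog catalog = ...;             // e.g. a StoredClassCatalog
+ *     Database db = ...;                      // an already-open Database
+ *
+ *     TupleSerialFactory factory = new TupleSerialFactory(catalog);
+ *     StoredSortedMap&lt;String,MyEntity&gt; map =
+ *         factory.newSortedMap(db, String.class, MyEntity.class, true);
+ * </pre>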
+ *
+ * @author Mark Hayes
+ */
+public class TupleSerialFactory {
+
+    private ClassCatalog catalog;
+
+    /**
+     * Creates a tuple-serial factory for a given class catalog.
+     */
+    public TupleSerialFactory(ClassCatalog catalog) {
+
+        this.catalog = catalog;
+    }
+
+    /**
+     * Returns the class catalog associated with this factory.
+     */
+    public final ClassCatalog getCatalog() {
+
+        return catalog;
+    }
+
+    /**
+     * Creates a map from a previously opened Database object.
+     *
+     * @param db the previously opened Database object.
+     *
+     * @param keyClass is the class used for map keys.  It must implement the
+     * {@link MarshalledTupleEntry} interface or be one of the Java primitive
+     * type classes.
+     *
+     * @param valueBaseClass the base class of the entity values for this
+     * store.  It must implement the  {@link MarshalledTupleKeyEntity}
+     * interface.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     */
+    public <K, V extends MarshalledTupleKeyEntity> StoredMap<K,V>
+        newMap(Database db,
+               Class<K> keyClass,
+               Class<V> valueBaseClass,
+               boolean writeAllowed) {
+
+        return new StoredMap<K,V>(db,
+                        getKeyBinding(keyClass),
+                        getEntityBinding(valueBaseClass),
+                        writeAllowed);
+    }
+
+    /**
+     * Creates a sorted map from a previously opened Database object.
+     *
+     * @param db the previously opened Database object.
+     *
+     * @param keyClass is the class used for map keys.  It must implement the
+     * {@link MarshalledTupleEntry} interface or be one of the Java primitive
+     * type classes.
+     *
+     * @param valueBaseClass the base class of the entity values for this
+     * store.  It must implement the  {@link MarshalledTupleKeyEntity}
+     * interface.
+     *
+     * @param writeAllowed is true to create a read-write collection or false
+     * to create a read-only collection.
+     */
+    public <K, V extends MarshalledTupleKeyEntity> StoredSortedMap<K,V>
+        newSortedMap(Database db,
+                     Class<K> keyClass,
+                     Class<V> valueBaseClass,
+                     boolean writeAllowed) {
+
+        return new StoredSortedMap<K,V>(db,
+                        getKeyBinding(keyClass),
+                        getEntityBinding(valueBaseClass),
+                        writeAllowed);
+    }
+
+    /**
+     * Creates a <code>SecondaryKeyCreator</code> object for use in configuring
+     * a <code>SecondaryDatabase</code>.  The returned object implements
+     * the {@link com.sleepycat.je.SecondaryKeyCreator} interface.
+     *
+     * @param valueBaseClass the base class of the entity values for this
+     * store.  It must implement the  {@link MarshalledTupleKeyEntity}
+     * interface.
+     *
+     * @param keyName is the key name passed to the {@link
+     * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the
+     * secondary key.
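+     *
+     * <p>Illustrative sketch only (the factory instance, the entity class and
+     * the key name shown are assumptions):</p>
+     * <pre>
+     *     SecondaryConfig secConfig = new SecondaryConfig();
+     *     secConfig.setKeyCreator(factory.getKeyCreator(MyEntity.class, "dept"));
+     * </pre>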
+     */
+    public <V extends MarshalledTupleKeyEntity>
+        TupleSerialMarshalledKeyCreator<V>
+        getKeyCreator(Class<V> valueBaseClass, String keyName) {
+
+        return new TupleSerialMarshalledKeyCreator<V>
+            (getEntityBinding(valueBaseClass), keyName);
+    }
+
+    public <V extends MarshalledTupleKeyEntity>
+        TupleSerialMarshalledBinding<V>
+        getEntityBinding(Class<V> baseClass) {
+
+        return new TupleSerialMarshalledBinding<V>(catalog, baseClass);
+    }
+
+    private <K> EntryBinding<K> getKeyBinding(Class<K> keyClass) {
+
+        EntryBinding<K> binding = TupleBinding.getPrimitiveBinding(keyClass);
+        if (binding == null) {
+
+            /*
+             * Cannot use type param <K> here because it does not implement
+             * MarshalledTupleEntry if it is a primitive class.
+             */
+            binding = new TupleMarshalledBinding(keyClass);
+        }
+        return binding;
+    }
+}
diff --git a/src/com/sleepycat/collections/package.html b/src/com/sleepycat/collections/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..3b628d828d2d554eac364cf03109bab375eacd66
--- /dev/null
+++ b/src/com/sleepycat/collections/package.html
@@ -0,0 +1,21 @@
+<!-- $Id: package.html,v 1.24.2.2 2010/01/04 15:30:27 cwl Exp $ -->
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.24.2.2 2010/01/04 15:30:27 cwl Exp $
+
+-->
+</head>
+<body>
+Data access based on the standard Java collections API.
+<!-- begin JE only -->
+Examples can be found in je/examples/collections. Build and run directions are
+in the installation notes.
+<!-- end JE only -->
+</body>
+</html>
diff --git a/src/com/sleepycat/compat/DbCompat.java b/src/com/sleepycat/compat/DbCompat.java
new file mode 100644
index 0000000000000000000000000000000000000000..754c9227e442959bdc5dca692554e5eda8b351e3
--- /dev/null
+++ b/src/com/sleepycat/compat/DbCompat.java
@@ -0,0 +1,397 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DbCompat.java,v 1.26 2008/05/19 17:52:16 linda Exp $
+ */
+
+package com.sleepycat.compat;
+
+import java.io.FileNotFoundException;
+import java.util.Comparator;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+/**
+ * A minimal set of DB-JE compatibility methods for internal use only.
+ * Two versions are maintained in parallel in the DB and JE source trees.
+ * Used by the collections package.
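+ *
+ * <p>Illustrative sketch only: collections code typically gates optional
+ * behavior on the capability flags and routes database opens through this
+ * class (the already-open <code>env</code> below is assumed):</p>
+ * <pre>
+ *     DatabaseConfig dbConfig = new DatabaseConfig();
+ *     dbConfig.setAllowCreate(true);
+ *     // the fileName argument is always null for JE
+ *     Database db = DbCompat.openDatabase(env, null, null, "myDb", dbConfig);
+ *
+ *     if (DbCompat.SECONDARIES) {
+ *         // secondary databases are available in this product
+ *     }
+ * </pre>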
+ */
+public class DbCompat {
+
+    /* Capabilities */
+
+    public static final boolean CDB = false;
+    public static final boolean JOIN = true;
+    public static final boolean NESTED_TRANSACTIONS = false;
+    public static final boolean INSERTION_ORDERED_DUPLICATES = false;
+    public static final boolean SEPARATE_DATABASE_FILES = false;
+    public static final boolean MEMORY_SUBSYSTEM = false;
+    public static final boolean LOCK_SUBSYSTEM = false;
+    public static final boolean HASH_METHOD = false;
+    public static final boolean RECNO_METHOD = false;
+    public static final boolean QUEUE_METHOD = false;
+    public static final boolean BTREE_RECNUM_METHOD = false;
+    public static final boolean OPTIONAL_READ_UNCOMMITTED = false;
+    public static final boolean SECONDARIES = true;
+    public static boolean TRANSACTION_RUNNER_PRINT_STACK_TRACES = true;
+    public static final boolean DATABASE_COUNT = true;
+
+    /* Methods used by the collections package. */
+
+    public static boolean getInitializeCache(EnvironmentConfig config) {
+        return true;
+    }
+
+    public static boolean getInitializeLocking(EnvironmentConfig config) {
+        return config.getLocking();
+    }
+
+    public static boolean getInitializeCDB(EnvironmentConfig config) {
+        return false;
+    }
+
+    public static boolean isTypeBtree(DatabaseConfig dbConfig) {
+        return true;
+    }
+
+    public static boolean isTypeHash(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean isTypeQueue(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean isTypeRecno(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean getBtreeRecordNumbers(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean getReadUncommitted(DatabaseConfig dbConfig) {
+        return true;
+    }
+
+    public static boolean getRenumbering(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean getSortedDuplicates(DatabaseConfig dbConfig) {
+        return dbConfig.getSortedDuplicates();
+    }
+
+    public static boolean getUnsortedDuplicates(DatabaseConfig dbConfig) {
+        return false;
+    }
+
+    public static boolean getDeferredWrite(DatabaseConfig dbConfig) {
+        return dbConfig.getDeferredWrite();
+    }
+
+    // XXX Remove this when DB and JE support CursorConfig.cloneConfig
+    public static CursorConfig cloneCursorConfig(CursorConfig config) {
+        CursorConfig newConfig = new CursorConfig();
+        newConfig.setReadCommitted(config.getReadCommitted());
+        newConfig.setReadUncommitted(config.getReadUncommitted());
+        return newConfig;
+    }
+
+    public static boolean getWriteCursor(CursorConfig config) {
+        return false;
+    }
+
+    public static void setWriteCursor(CursorConfig config, boolean write) {
+        if (write) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setRecordNumber(DatabaseEntry entry, int recNum) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static int getRecordNumber(DatabaseEntry entry) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static String getDatabaseFile(Database db)
+        throws DatabaseException {
+
+        return null;
+    }
+
+    public static long getDatabaseCount(Database db)
+        throws DatabaseException {
+
+        return db.count();
+    }
+
+    public static void syncDeferredWrite(Database db, boolean flushLog)
+        throws DatabaseException {
+
+        DbInternal.dbGetDatabaseImpl(db).sync(flushLog);
+    }
+
+    public static OperationStatus getCurrentRecordNumber(Cursor cursor,
+                                                         DatabaseEntry key,
+                                                         LockMode lockMode)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static OperationStatus getSearchRecordNumber(Cursor cursor,
+                                                        DatabaseEntry key,
+                                                        DatabaseEntry data,
+                                                        LockMode lockMode)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static OperationStatus getSearchRecordNumber(SecondaryCursor cursor,
+                                                        DatabaseEntry key,
+                                                        DatabaseEntry pKey,
+                                                        DatabaseEntry data,
+                                                        LockMode lockMode)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static OperationStatus putAfter(Cursor cursor,
+                                           DatabaseEntry key,
+                                           DatabaseEntry data)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static OperationStatus putBefore(Cursor cursor,
+                                            DatabaseEntry key,
+                                            DatabaseEntry data)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static OperationStatus append(Database db,
+                                         Transaction txn,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public static Transaction getThreadTransaction(Environment env)
+        throws DatabaseException {
+
+        return env.getThreadTransaction();
+    }
+
+    /* Methods used by the collections tests. */
+
+    public static void setInitializeCache(EnvironmentConfig config,
+                                          boolean val) {
+        if (!val) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setInitializeLocking(EnvironmentConfig config,
+                                            boolean val) {
+        if (!val) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setInitializeCDB(EnvironmentConfig config,
+                                        boolean val) {
+        if (val) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setLockDetectModeOldest(EnvironmentConfig config) {
+        /* JE does this by default, since it uses timeouts. */
+    }
+
+    public static void setSerializableIsolation(TransactionConfig config,
+                                                boolean val) {
+        config.setSerializableIsolation(val);
+    }
+
+    public static void setBtreeComparator(DatabaseConfig dbConfig,
+                                          Comparator<byte[]> comparator) {
+        dbConfig.setBtreeComparator(comparator);
+    }
+
+    public static void setTypeBtree(DatabaseConfig dbConfig) {
+    }
+
+    public static void setTypeHash(DatabaseConfig dbConfig) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static void setTypeRecno(DatabaseConfig dbConfig) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static void setTypeQueue(DatabaseConfig dbConfig) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static void setBtreeRecordNumbers(DatabaseConfig dbConfig,
+                                             boolean val) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static void setReadUncommitted(DatabaseConfig dbConfig,
+                                          boolean val) {
+    }
+
+    public static void setRenumbering(DatabaseConfig dbConfig,
+                                      boolean val) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static void setSortedDuplicates(DatabaseConfig dbConfig,
+                                           boolean val) {
+        dbConfig.setSortedDuplicates(val);
+    }
+
+    public static void setUnsortedDuplicates(DatabaseConfig dbConfig,
+                                             boolean val) {
+        if (val) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setDeferredWrite(DatabaseConfig dbConfig, boolean val) {
+        dbConfig.setDeferredWrite(val);
+    }
+
+    public static void setRecordLength(DatabaseConfig dbConfig, int val) {
+        if (val != 0) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    public static void setRecordPad(DatabaseConfig dbConfig, int val) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static Database openDatabase(Environment env,
+                                        Transaction txn,
+                                        String fileName,
+                                        String dbName,
+                                        DatabaseConfig config)
+        throws DatabaseException, FileNotFoundException {
+
+        assert fileName == null;
+        return env.openDatabase(txn, dbName, config);
+    }
+
+    public static SecondaryDatabase openSecondaryDatabase(
+                                        Environment env,
+                                        Transaction txn,
+                                        String fileName,
+                                        String dbName,
+                                        Database primaryDatabase,
+                                        SecondaryConfig config)
+        throws DatabaseException, FileNotFoundException {
+
+        assert fileName == null;
+        return env.openSecondaryDatabase(txn, dbName, primaryDatabase, config);
+    }
+
+    public static long truncateDatabase(Environment env,
+                                        Transaction txn,
+                                        String fileName,
+                                        String dbName,
+                                        boolean returnCount)
+        throws DatabaseException, FileNotFoundException {
+
+        assert fileName == null;
+        return env.truncateDatabase(txn, dbName, returnCount);
+    }
+
+    public static void removeDatabase(Environment env,
+                                      Transaction txn,
+                                      String fileName,
+                                      String dbName)
+        throws DatabaseException, FileNotFoundException {
+
+        assert fileName == null;
+        env.removeDatabase(txn, dbName);
+    }
+
+    public static void renameDatabase(Environment env,
+                                      Transaction txn,
+                                      String oldFileName,
+                                      String oldDbName,
+                                      String newFileName,
+                                      String newDbName)
+        throws DatabaseException, FileNotFoundException {
+
+        assert oldFileName == null;
+        assert newFileName == null;
+        env.renameDatabase(txn, oldDbName, newDbName);
+    }
+
+    public static Database testOpenDatabase(Environment env,
+                                            Transaction txn,
+                                            String file,
+                                            String name,
+                                            DatabaseConfig config)
+        throws DatabaseException, FileNotFoundException {
+
+        return env.openDatabase(txn, makeTestDbName(file, name), config);
+    }
+
+    public static SecondaryDatabase
+                  testOpenSecondaryDatabase(Environment env,
+                                            Transaction txn,
+                                            String file,
+                                            String name,
+                                            Database primary,
+                                            SecondaryConfig config)
+        throws DatabaseException, FileNotFoundException {
+
+        return env.openSecondaryDatabase(txn, makeTestDbName(file, name),
+                                         primary, config);
+    }
+
+    private static String makeTestDbName(String file, String name) {
+        if (file == null) {
+            return name;
+        } else {
+            if (name != null) {
+                return file + '.' + name;
+            } else {
+                return file;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/APILockedException.java b/src/com/sleepycat/je/APILockedException.java
new file mode 100644
index 0000000000000000000000000000000000000000..a07985d37e77fa7093e9d538c5a7784c1add8a20
--- /dev/null
+++ b/src/com/sleepycat/je/APILockedException.java
@@ -0,0 +1,33 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: APILockedException.java,v 1.5.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * @hidden
+ * An APILockedException is thrown when a replicated environment
+ * does not permit application level operations.
+ */
+public class APILockedException extends DatabaseException {
+
+    public APILockedException() {
+        super();
+    }
+
+    public APILockedException(Throwable t) {
+        super(t);
+    }
+
+    public APILockedException(String message) {
+        super(message);
+    }
+
+    public APILockedException(String message, Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/src/com/sleepycat/je/BtreeStats.java b/src/com/sleepycat/je/BtreeStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..bc5c88925fa61d7dd32420171734af09e5f78794
--- /dev/null
+++ b/src/com/sleepycat/je/BtreeStats.java
@@ -0,0 +1,407 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BtreeStats.java,v 1.15.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The BtreeStats object is used to return Btree database statistics.
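+ *
+ * <p>As an illustrative sketch of retrieving these statistics (assuming an
+ * open {@code Database} handle named {@code myDatabase}; a null
+ * {@code StatsConfig} uses the default settings):</p>
+ *
+ * <blockquote><pre>
+ *     BtreeStats stats = (BtreeStats) myDatabase.getStats(null);
+ *     System.out.println("leaf nodes: " + stats.getLeafNodeCount());
+ * </pre></blockquote>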
+ */
+public class BtreeStats extends DatabaseStats {
+
+    /* Number of Bottom Internal Nodes in the database's btree. */
+    private long binCount;
+
+    /* Number of Duplicate Bottom Internal Nodes in the database's btree. */
+    private long dbinCount;
+
+    /* Number of deleted Leaf Nodes in the database's btree. */
+    private long deletedLNCount;
+
+    /* Number of duplicate Leaf Nodes in the database's btree. */
+    private long dupCountLNCount;
+
+    /*
+     * Number of Internal Nodes in the database's btree.  BINs are not
+     * included.
+     */
+    private long inCount;
+
+    /*
+     * Number of Duplicate Internal Nodes in the database's btree.  DBINs are
+     * not included.
+     */
+    private long dinCount;
+
+    /* Number of Leaf Nodes in the database's btree. */
+    private long lnCount;
+
+    /* Maximum depth of the in-memory main tree. */
+    private int mainTreeMaxDepth;
+
+    /* Maximum depth of the in-memory duplicate trees. */
+    private int duplicateTreeMaxDepth;
+
+    /* Histogram of INs by level. */
+    private long[] insByLevel;
+
+    /* Histogram of BINs by level. */
+    private long[] binsByLevel;
+
+    /* Histogram of DINs by level. */
+    private long[] dinsByLevel;
+
+    /* Histogram of DBINs by level. */
+    private long[] dbinsByLevel;
+
+    /**
+     * Returns the number of Bottom Internal Nodes in the database tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of Bottom Internal Nodes in the database tree.
+     */
+    public long getBottomInternalNodeCount() {
+        return binCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setBottomInternalNodeCount(long val) {
+        binCount = val;
+    }
+
+    /**
+     * Returns the number of Duplicate Bottom Internal Nodes in the database
+     * tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of Duplicate Bottom Internal Nodes in the database tree.
+     */
+    public long getDuplicateBottomInternalNodeCount() {
+        return dbinCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDuplicateBottomInternalNodeCount(long val) {
+        dbinCount = val;
+    }
+
+    /**
+     * Returns the number of deleted data records in the database tree that
+     * are pending removal by the compressor.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of deleted data records in the database tree that are
+     * pending removal by the compressor.
+     */
+    public long getDeletedLeafNodeCount() {
+        return deletedLNCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDeletedLeafNodeCount(long val) {
+        deletedLNCount = val;
+    }
+
+    /**
+     * Returns the number of duplicate count leaf nodes in the database tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of duplicate count leaf nodes in the database tree.
+     */
+    public long getDupCountLeafNodeCount() {
+        return dupCountLNCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDupCountLeafNodeCount(long val) {
+        dupCountLNCount = val;
+    }
+
+    /**
+     * Returns the number of Internal Nodes in the database tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of Internal Nodes in the database tree.
+     */
+    public long getInternalNodeCount() {
+        return inCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setInternalNodeCount(long val) {
+        inCount = val;
+    }
+
+    /**
+     * Returns the number of Duplicate Internal Nodes in the database tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of Duplicate Internal Nodes in the database tree.
+     */
+    public long getDuplicateInternalNodeCount() {
+        return dinCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDuplicateInternalNodeCount(long val) {
+        dinCount = val;
+    }
+
+    /**
+     * Returns the number of leaf nodes in the database tree, which can equal
+     * the number of records. This is calculated without locks or transactions,
+     * and therefore is only an accurate count of the current number of records
+     * when the database is quiescent.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return number of leaf nodes in the database tree, which can equal the
+     * number of records. This is calculated without locks or transactions, and
+     * therefore is only an accurate count of the current number of records
+     * when the database is quiescent.
+     */
+    public long getLeafNodeCount() {
+        return lnCount;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLeafNodeCount(long val) {
+        lnCount = val;
+    }
+
+    /**
+     * Returns the maximum depth of the main database tree.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return maximum depth of the main database tree.
+     */
+    public int getMainTreeMaxDepth() {
+        return mainTreeMaxDepth;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setMainTreeMaxDepth(int val) {
+        mainTreeMaxDepth = val;
+    }
+
+    /**
+     * Returns the maximum depth of the duplicate database trees.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return maximum depth of the duplicate database trees.
+     */
+    public int getDuplicateTreeMaxDepth() {
+        return duplicateTreeMaxDepth;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDuplicateTreeMaxDepth(int val) {
+        duplicateTreeMaxDepth = val;
+    }
+
+    /**
+     * Returns the count of Internal Nodes per level, indexed by level.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return count of Internal Nodes per level, indexed by level.
+     */
+    public long[] getINsByLevel() {
+        return insByLevel;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setINsByLevel(long[] insByLevel) {
+        this.insByLevel = insByLevel;
+    }
+
+    /**
+     * Returns the count of Bottom Internal Nodes per level, indexed by level.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return count of Bottom Internal Nodes per level, indexed by level.
+     */
+    public long[] getBINsByLevel() {
+        return binsByLevel;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setBINsByLevel(long[] binsByLevel) {
+        this.binsByLevel = binsByLevel;
+    }
+
+    /**
+     * Returns the count of Duplicate Internal Nodes per level, indexed by
+     * level.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return count of Duplicate Internal Nodes per level, indexed by level.
+     */
+    public long[] getDINsByLevel() {
+        return dinsByLevel;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDINsByLevel(long[] dinsByLevel) {
+        this.dinsByLevel = dinsByLevel;
+    }
+
+    /**
+     * Returns the count of Duplicate Bottom Internal Nodes per level, indexed
+     * by level.
+     *
+     * <p>The information is included only if the {@link
+     * com.sleepycat.je.Database#getStats Database.getStats} call was not
+     * configured by the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method.</p>
+     *
+     * @return count of Duplicate Bottom Internal Nodes per level, indexed by
+     * level.
+     */
+    public long[] getDBINsByLevel() {
+        return dbinsByLevel;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDBINsByLevel(long[] dbinsByLevel) {
+        this.dbinsByLevel = dbinsByLevel;
+    }
+
+    private void arrayToString(long[] arr, StringBuffer sb) {
+        for (int i = 0; i < arr.length; i++) {
+            long count = arr[i];
+            if (count > 0) {
+                sb.append("  level ").append(i);
+                sb.append(": count=").append(count).append("\n");
+            }
+        }
+    }
+
+    /**
+     * For convenience, the BtreeStats class has a toString method that lists
+     * all the data fields.
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        if (binCount > 0) {
+            sb.append("numBottomInternalNodes=");
+            sb.append(binCount).append("\n");
+            arrayToString(binsByLevel, sb);
+        }
+        if (inCount > 0) {
+            sb.append("numInternalNodes=");
+            sb.append(inCount).append("\n");
+            arrayToString(insByLevel, sb);
+        }
+        if (dinCount > 0) {
+            sb.append("numDuplicateInternalNodes=");
+            sb.append(dinCount).append("\n");
+            arrayToString(dinsByLevel, sb);
+        }
+        if (dbinCount > 0) {
+            sb.append("numDuplicateBottomInternalNodes=");
+            sb.append(dbinCount).append("\n");
+            arrayToString(dbinsByLevel, sb);
+        }
+        sb.append("numLeafNodes=").append(lnCount).append("\n");
+        sb.append("numDeletedLeafNodes=").
+            append(deletedLNCount).append("\n");
+        sb.append("numDuplicateCountLeafNodes=").
+            append(dupCountLNCount).append("\n");
+        sb.append("mainTreeMaxDepth=").
+            append(mainTreeMaxDepth).append("\n");
+        sb.append("duplicateTreeMaxDepth=").
+            append(duplicateTreeMaxDepth).append("\n");
+
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/CacheMode.java b/src/com/sleepycat/je/CacheMode.java
new file mode 100644
index 0000000000000000000000000000000000000000..4ceb994f9fbb5995f4f5e044bbaf710cdf4303aa
--- /dev/null
+++ b/src/com/sleepycat/je/CacheMode.java
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CacheMode.java,v 1.2.2.4 2010/03/26 13:23:54 mark Exp $
+ */
+package com.sleepycat.je;
+
+/**
+ * Modes that can be specified for control over caching of records in the JE
+ * in-memory cache.  When a record is stored or retrieved, the cache mode
+ * determines how long the record is subsequently retained in the JE in-memory
+ * cache, relative to other records in the cache.
+ *
+ * <p>When the cache overflows, JE must evict some records from the cache.  By
+ * default, JE uses a Least Recently Used (LRU) algorithm for determining which
+ * records to evict.  With the LRU algorithm, JE makes a best effort to evict
+ * the "coldest" (least recently used or accessed) records and to retain the
+ * "hottest" records in the cache for as long as possible.</p>
+ *
+ * <p>A non-default cache mode may be explicitly specified to override the
+ * default behavior of the LRU algorithm.  See {@link #KEEP_HOT} and {@link
+ * #UNCHANGED} for more information.  When no cache mode is explicitly
+ * specified, the default cache mode is {@link #DEFAULT}.  The default mode
+ * causes the normal LRU algorithm to be used.</p>
+ *
+ * <p>Note that JE makes a best effort to implement an approximation of an LRU
+ * algorithm, and the very coldest record is not always evicted from the cache
+ * first.  In addition, hotness and coldness are applied to the portion of the
+ * in-memory BTree that is accessed to perform the operation, not just to the
+ * record itself.</p>
+ *
+ * <p>The cache mode for cursor operations can be specified by calling {@link
+ * Cursor#setCacheMode Cursor.setCacheMode} after opening a {@link Cursor}.
+ * The cache mode applies to all operations subsequently performed with that
+ * cursor until the cursor is closed or its cache mode is changed.  The cache
+ * mode for {@link Database} methods may not be specified and the default cache
+ * mode is always used.  To override the default cache mode, you must open a
+ * Cursor.</p>
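+ *
+ * <p>As an illustrative sketch (assuming an open {@code Database} handle
+ * named {@code myDatabase}), a read-only scan that should not displace
+ * hotter records from the cache might use {@link #UNCHANGED}:</p>
+ *
+ * <blockquote><pre>
+ *     Cursor cursor = myDatabase.openCursor(null, null);
+ *     cursor.setCacheMode(CacheMode.UNCHANGED);
+ *     // ... perform read operations with the cursor ...
+ *     cursor.close();
+ * </pre></blockquote>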
+ */
+public enum CacheMode {
+
+    /**
+     * The record's hotness is changed to "most recently used" by the operation
+     * where this cache mode is specified.
+     *
+     * <p>The record will be colder than other records accessed with a {@code
+     * KEEP_HOT} cache mode.  Otherwise, the record will be hotter than
+     * other records accessed before it and colder than other records accessed
+     * after it.</p>
+     *
+     * <p>This cache mode is used when the application does not need explicit
+     * control over the cache and a standard LRU implementation is
+     * sufficient.</p>
+     */
+    DEFAULT,
+
+    /**
+     * The record's hotness or coldness is unchanged by the operation where
+     * this cache mode is specified.
+     *
+     * <p>If the record was present in the cache prior to this operation, then
+     * its pre-existing hotness or coldness will not be changed.  If the record
+     * was added to the cache by this operation, it will have "maximum
+     * coldness" and will therefore be colder than other records.</p>
+     *
+     * <p>This cache mode is normally used when the application does not intend
+     * to access this record again soon.</p>
+     */
+    UNCHANGED,
+
+    /**
+     * The record is assigned "maximum hotness" by the operation where this
+     * cache mode is specified.
+     *
+     * <p>The record will have the same hotness as other records accessed with
+     * this cache mode.  Its relative hotness will not be reduced over time as
+     * other records are accessed.  It can only become colder over time if it
+     * is subsequently accessed with the {@code DEFAULT} or {@code MAKE_COLD}
+     * cache mode.</p>
+     *
+     * <p>This cache mode is normally used when the application intends to
+     * access this record again soon.</p>
+     */
+    KEEP_HOT,
+
+    /**
+     * The record is assigned "maximum coldness" by the operation where this
+     * cache mode is specified.
+     *
+     * <p>The record will have the same coldness as other records accessed with
+     * this cache mode.  It is very likely that this record will be evicted
+     * from the cache if space is needed.  It can only become warmer over time
+     * if it is subsequently accessed with the {@code DEFAULT} or
+     * {@code KEEP_HOT} cache mode.</p>
+     *
+     * <p>This cache mode is normally used when the application does not intend
+     * to access this record again soon.</p>
+     */
+    MAKE_COLD,
+
+    /**
+     * The record (leaf node) is evicted as soon as possible after the
+     * operation where this cache mode is specified.  If the record cannot be
+     * evicted immediately, it is assigned "maximum coldness" as if {@code
+     * MAKE_COLD} were used.
+     *
+     * <p>This cache mode is normally used when the application prefers to read
+     * the record from the log file when it is accessed again, rather than have
+     * it take up space in the JE cache and potentially cause expensive Java
+     * GC.</p>
+     */
+    EVICT_LN
+}
diff --git a/src/com/sleepycat/je/CheckpointConfig.java b/src/com/sleepycat/je/CheckpointConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..549a3890a72b9145e08d1c07c2fd43440c22b5ed
--- /dev/null
+++ b/src/com/sleepycat/je/CheckpointConfig.java
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CheckpointConfig.java,v 1.20.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Specifies the attributes of a checkpoint operation invoked from {@link
+ * com.sleepycat.je.Environment#checkpoint Environment.checkpoint}.
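+ *
+ * <p>As an illustrative sketch of forcing an immediate checkpoint (assuming
+ * an open {@code Environment} handle named {@code env}):</p>
+ *
+ * <blockquote><pre>
+ *     CheckpointConfig config = new CheckpointConfig();
+ *     config.setForce(true);
+ *     env.checkpoint(config);
+ * </pre></blockquote>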
+ */
+public class CheckpointConfig  {
+
+    /**
+     * Default configuration used if null is passed to {@link
+     * com.sleepycat.je.Environment#checkpoint Environment.checkpoint}.
+     */
+    public static final CheckpointConfig DEFAULT = new CheckpointConfig();
+
+    private boolean force = false;
+    private int kbytes = 0;
+    private int minutes = 0;
+    private boolean minimizeRecoveryTime = false;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public CheckpointConfig() {
+    }
+
+    /**
+     * Configures the checkpoint log data threshold, in kilobytes.
+     *
+     * <p>The default is 0 for this class and the database environment.</p>
+     *
+     * @param kbytes If the kbytes parameter is non-zero, a checkpoint will
+     * be performed if more than the specified number of kilobytes of log
+     * data have been written since the last checkpoint.
+     */
+    public void setKBytes(int kbytes) {
+        this.kbytes = kbytes;
+    }
+
+    /**
+     * Returns the checkpoint log data threshold, in kilobytes.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The checkpoint log data threshold, in kilobytes.
+     */
+    public int getKBytes() {
+        return kbytes;
+    }
+
+    /**
+     * Configures the checkpoint time threshold, in minutes.
+     *
+     * <p>The default is 0 for this class and the database environment.</p>
+     *
+     * @param minutes If the minutes parameter is non-zero, a checkpoint is
+     * performed if more than the specified number of minutes have passed
+     * since the last checkpoint.
+     */
+    public void setMinutes(int minutes) {
+        this.minutes = minutes;
+    }
+
+    /**
+     * Returns the checkpoint time threshold, in minutes.
+     *
+     * @return The checkpoint time threshold, in minutes.
+     */
+    public int getMinutes() {
+        return minutes;
+    }
+
+    /**
+     * Configures the checkpoint force option.
+     *
+     * <p>The default is false for this class and the BDB JE environment.</p>
+     *
+     * @param force If set to true, force a checkpoint, even if there has
+     * been no activity since the last checkpoint.
+     */
+    public void setForce(boolean force) {
+        this.force = force;
+    }
+
+    /**
+     * Returns the configuration of the checkpoint force option.
+     *
+     * @return The configuration of the checkpoint force option.
+     */
+    public boolean getForce() {
+        return force;
+    }
+
+    /**
+     * Configures the minimize recovery time option.
+     *
+     * <p>The default is false for this class and the BDB JE environment.</p>
+     *
+     * @param minimizeRecoveryTime If set to true, the checkpoint will itself
+     * take longer but will cause a subsequent recovery (Environment.open) to
+     * finish more quickly.
+     */
+    public void setMinimizeRecoveryTime(boolean minimizeRecoveryTime) {
+        this.minimizeRecoveryTime = minimizeRecoveryTime;
+    }
+
+    /**
+     * Returns the configuration of the minimize recovery time option.
+     *
+     * @return The configuration of the minimize recovery time option.
+     */
+    public boolean getMinimizeRecoveryTime() {
+        return minimizeRecoveryTime;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "minutes=" + minutes +
+            "\nkBytes=" + kbytes +
+            "\nforce=" + force +
+            "\nminimizeRecoveryTime=" + minimizeRecoveryTime +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/Cursor.java b/src/com/sleepycat/je/Cursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..b8893972718e0d176e3eed71d7c8c45a0114af1c
--- /dev/null
+++ b/src/com/sleepycat/je/Cursor.java
@@ -0,0 +1,2565 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Cursor.java,v 1.216.2.5 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.dbi.PutMode;
+import com.sleepycat.je.dbi.RangeRestartException;
+import com.sleepycat.je.dbi.CursorImpl.KeyChangeStatus;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.txn.BuddyLocker;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.LockerFactory;
+import com.sleepycat.je.utilint.DatabaseUtil;
+import com.sleepycat.je.utilint.InternalException;
+
+/**
+ * A database cursor. Cursors are used for operating on collections of records,
+ * for iterating over a database, and for saving handles to individual records,
+ * so that they can be modified after they have been read.
+ *
+ * <p>Cursors which are opened with a transaction instance are transactional
+ * cursors and may be used by multiple threads, but only serially.  That is,
+ * the application must serialize access to the handle. Non-transactional
+ * cursors, opened with a null transaction instance, may not be used by
+ * multiple threads.</p>
+ *
+ * <p>If the cursor is to be used to perform operations on behalf of a
+ * transaction, the cursor must be opened and closed within the context of that
+ * single transaction.</p>
+ *
+ * <p>Once the cursor close method has been called, the handle may not be
+ * accessed again, regardless of the close method's success or failure.</p>
+ *
+ * <p>To obtain a cursor with default attributes:</p>
+ *
+ * <blockquote><pre>
+ *     Cursor cursor = myDatabase.openCursor(txn, null);
+ * </pre></blockquote>
+ *
+ * <p>To customize the attributes of a cursor, use a CursorConfig object.</p>
+ *
+ * <blockquote><pre>
+ *     CursorConfig config = new CursorConfig();
+ *     config.setReadUncommitted(true);
+ *     Cursor cursor = myDatabase.openCursor(txn, config);
+ * </pre></blockquote>
+ *
+ * Modifications to the database during a sequential scan will be reflected in
+ * the scan; that is, records inserted behind a cursor will not be returned
+ * while records inserted in front of a cursor will be returned.
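+ *
+ * <p>As an illustrative sketch of a sequential scan (assuming the cursor and
+ * database opened above):</p>
+ *
+ * <blockquote><pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // process the record
+ *     }
+ *     cursor.close();
+ * </pre></blockquote>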
+ */
+public class Cursor {
+
+    /**
+     * The underlying cursor.
+     */
+    CursorImpl cursorImpl; // Used by subclasses.
+
+    /**
+     * The CursorConfig used to configure this cursor.
+     */
+    CursorConfig config;
+
+    /**
+     * True if update operations are prohibited through this cursor.  Update
+     * operations are prohibited if the database is read-only or:
+     *
+     * (1) The database is transactional,
+     *
+     * and
+     *
+     * (2) The user did not supply a txn to the cursor ctor (meaning, the
+     * locker is non-transactional).
+     */
+    private boolean updateOperationsProhibited;
+
+    /**
+     * Handle under which this cursor was created; may be null.
+     */
+    private Database dbHandle;
+
+    /**
+     * Database implementation.
+     */
+    private DatabaseImpl dbImpl;
+
+    /* Attributes */
+    private boolean readUncommittedDefault;
+    private boolean serializableIsolationDefault;
+
+    private Logger logger;
+
+    /**
+     * Creates a cursor for a given user transaction with
+     * retainNonTxnLocks=false.
+     *
+     * <p>If txn is null, a non-transactional cursor will be created that
+     * releases locks for the prior operation when the next operation
+     * succeeds.</p>
+     */
+    Cursor(Database dbHandle, Transaction txn, CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        if (cursorConfig == null) {
+            cursorConfig = CursorConfig.DEFAULT;
+        }
+
+        Locker locker = LockerFactory.getReadableLocker
+            (dbHandle.getEnvironment(),
+             txn,
+             dbHandle.isTransactional(),
+             false /*retainNonTxnLocks*/,
+             cursorConfig.getReadCommitted());
+
+        init(dbHandle, locker, cursorConfig, false /*retainNonTxnLocks*/);
+    }
+
+    /**
+     * Creates a cursor for a given locker with retainNonTxnLocks=false.
+     *
+     * <p>If locker is null or is non-transactional, a non-transactional cursor
+     * will be created that releases locks for the prior operation when the
+     * next operation succeeds.</p>
+     */
+    Cursor(Database dbHandle, Locker locker, CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        if (cursorConfig == null) {
+            cursorConfig = CursorConfig.DEFAULT;
+        }
+
+        locker = LockerFactory.getReadableLocker
+            (dbHandle.getEnvironment(),
+             dbHandle,
+             locker,
+             false /*retainNonTxnLocks*/,
+             cursorConfig.getReadCommitted());
+
+        init(dbHandle, locker, cursorConfig, false /*retainNonTxnLocks*/);
+    }
+
+    /**
+     * Creates a cursor for a given locker and retainNonTxnLocks parameter.
+     *
+     * <p>The locker parameter must be non-null.  With this constructor, we use
+     * the given locker and retainNonTxnLocks parameter without applying any
+     * special rules for different lockers -- the caller must supply the
+     * correct locker and retainNonTxnLocks combination.</p>
+     */
+    Cursor(Database dbHandle,
+           Locker locker,
+           CursorConfig cursorConfig,
+           boolean retainNonTxnLocks)
+        throws DatabaseException {
+
+        if (cursorConfig == null) {
+            cursorConfig = CursorConfig.DEFAULT;
+        }
+
+        init(dbHandle, locker, cursorConfig, retainNonTxnLocks);
+    }
+
+    private void init(Database dbHandle,
+                      Locker locker,
+                      CursorConfig cursorConfig,
+                      boolean retainNonTxnLocks)
+        throws DatabaseException {
+
+        assert locker != null;
+
+        DatabaseImpl databaseImpl = dbHandle.getDatabaseImpl();
+        cursorImpl = new CursorImpl(databaseImpl, locker, retainNonTxnLocks);
+
+        /* Perform eviction for user cursors. */
+        cursorImpl.setAllowEviction(true);
+
+        readUncommittedDefault =
+            cursorConfig.getReadUncommitted() ||
+            locker.isReadUncommittedDefault();
+
+        serializableIsolationDefault =
+            cursorImpl.getLocker().isSerializableIsolation();
+
+        updateOperationsProhibited =
+            (databaseImpl.isTransactional() && !locker.isTransactional()) ||
+            !dbHandle.isWritable();
+
+        this.dbImpl = databaseImpl;
+        this.dbHandle = dbHandle;
+        dbHandle.addCursor(this);
+        this.config = cursorConfig;
+        this.logger = databaseImpl.getDbEnvironment().getLogger();
+    }
+
+    /**
+     * Copy constructor.
+     */
+    Cursor(Cursor cursor, boolean samePosition)
+        throws DatabaseException {
+
+        readUncommittedDefault = cursor.readUncommittedDefault;
+        serializableIsolationDefault = cursor.serializableIsolationDefault;
+        updateOperationsProhibited = cursor.updateOperationsProhibited;
+
+        cursorImpl = cursor.cursorImpl.dup(samePosition);
+        dbImpl = cursor.dbImpl;
+        dbHandle = cursor.dbHandle;
+        if (dbHandle != null) {
+            dbHandle.addCursor(this);
+        }
+        config = cursor.config;
+        logger = dbImpl.getDbEnvironment().getLogger();
+    }
+
+    /**
+     * Internal entrypoint.
+     */
+    CursorImpl getCursorImpl() {
+        return cursorImpl;
+    }
+
+    /**
+     * Returns the Database handle associated with this Cursor.
+     *
+     * @return The Database handle associated with this Cursor.
+     */
+    public Database getDatabase() {
+        return dbHandle;
+    }
+
+    /**
+     * Always returns non-null, while getDatabase() returns null if no handle
+     * is associated with this cursor.
+     */
+    DatabaseImpl getDatabaseImpl() {
+        return dbImpl;
+    }
+
+    /**
+     * Returns this cursor's configuration.
+     *
+     * <p>This may differ from the configuration used to open this object if
+     * the cursor existed previously.</p>
+     *
+     * @return This cursor's configuration.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public CursorConfig getConfig() {
+        try {
+            return config.cloneConfig();
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Returns the {@code CacheMode} used for operations performed using this
+     * cursor.  For a newly opened cursor, the default is {@link
+     * CacheMode#DEFAULT}.
+     *
+     * @see CacheMode
+     * @return the CacheMode object used for operations performed with this
+     * cursor.
+     */
+    public CacheMode getCacheMode() {
+        return cursorImpl.getCacheMode();
+    }
+
+    /**
+     * Changes the {@code CacheMode} used for operations performed using this
+     * cursor.  For a newly opened cursor, the default is {@link
+     * CacheMode#DEFAULT}.
+     *
+     * @param cacheMode is the {@code CacheMode} to use for subsequent
+     * operations using this cursor.
+     *
+     * @see CacheMode
+     */
+    public void setCacheMode(CacheMode cacheMode) {
+        assert cursorImpl != null;
+        cursorImpl.setCacheMode(cacheMode);
+    }
+
+    void setNonCloning(boolean nonCloning) {
+        cursorImpl.setNonCloning(nonCloning);
+    }
+
+    /**
+     * Discards the cursor.
+     *
+     * <p>This method may throw a DeadlockException, signaling the enclosing
+     * transaction should be aborted.  If the application is already intending
+     * to abort the transaction, this error can be ignored, and the application
+     * should proceed.</p>
+     *
+     * <p>The cursor handle may not be used again after this method has been
+     * called, regardless of the method's success or failure.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void close()
+        throws DatabaseException {
+
+        try {
+            checkState(false);
+            cursorImpl.close();
+            if (dbHandle != null) {
+                dbHandle.removeCursor(this);
+            }
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Returns a count of the number of data items for the key to which the
+     * cursor refers.
+     *
+     * @return A count of the number of data items for the key to which the
+     * cursor refers.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public int count()
+        throws DatabaseException {
+
+        checkState(true);
+        trace(Level.FINEST, "Cursor.count: ", null);
+
+        /*
+         * Specify a null LockMode to use default locking.  The API doesn't
+         * allow specifying a lock mode, but we should at least honor the
+         * configured default.
+         */
+        return countInternal(null);
+    }
+
+    /**
+     * Returns a new cursor with the same transaction and locker ID as the
+     * original cursor.
+     *
+     * <p>This is useful when an application is using locking and requires
+     * two or more cursors in the same thread of control.</p>
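+     *
+     * <p>As an illustrative sketch (assuming an open, positioned cursor named
+     * {@code cursor}):</p>
+     *
+     * <blockquote><pre>
+     *     Cursor other = cursor.dup(true); // same position and locks
+     *     // ... use both cursors serially within this thread ...
+     *     other.close();
+     * </pre></blockquote>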
+     *
+     * @param samePosition If true, the newly created cursor is initialized
+     * to refer to the same position in the database as the original cursor
+     * (if any) and hold the same locks (if any). If false, or the original
+     * cursor does not hold a database position and locks, the returned
+     * cursor is uninitialized and will behave like a newly created cursor.
+     *
+     * @return A new cursor with the same transaction and locker ID as the
+     * original cursor.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public Cursor dup(boolean samePosition)
+        throws DatabaseException {
+
+        try {
+            checkState(false);
+            return new Cursor(this, samePosition);
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Deletes the key/data pair to which the cursor refers.
+     *
+     * <p>When called on a cursor opened on a database that has been made into
+     * a secondary index, this method deletes the key/data pair from the
+     * primary database and all secondary indices.</p>
+     *
+     * <p>The cursor position is unchanged after a delete, and subsequent calls
+     * to cursor functions expecting the cursor to refer to an existing key
+     * will fail.</p>
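+     *
+     * <p>As an illustrative sketch of a positioned delete (assuming {@code
+     * key} and {@code data} entries as in the class example):</p>
+     *
+     * <blockquote><pre>
+     *     if (cursor.getNext(key, data, LockMode.DEFAULT) ==
+     *         OperationStatus.SUCCESS) {
+     *         cursor.delete();
+     *     }
+     * </pre></blockquote>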
+     *
+     * @return an OperationStatus for the operation.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus delete()
+        throws DatabaseException {
+
+        checkState(true);
+        checkUpdatesAllowed("delete");
+        trace(Level.FINEST, "Cursor.delete: ", null);
+
+        return deleteInternal();
+    }
+
+    /**
+     * Stores a key/data pair into the database.
+     *
+     * <p>If the put method succeeds, the cursor is always positioned to refer
+     * to the newly inserted item.  If the put method fails for any reason, the
+     * state of the cursor will be unchanged.</p>
+     *
+     * <p>If the key already appears in the database and duplicates are
+     * supported, the new data value is inserted at the correct sorted
+     * location.  If the key already appears in the database and duplicates are
+     * not supported, the data associated with the key will be replaced.</p>
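+     *
+     * <p>As an illustrative sketch (assuming an open cursor and the {@code
+     * com.sleepycat.bind.tuple.StringBinding} helper for filling the
+     * entries):</p>
+     *
+     * <blockquote><pre>
+     *     DatabaseEntry key = new DatabaseEntry();
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     StringBinding.stringToEntry("aKey", key);
+     *     StringBinding.stringToEntry("aData", data);
+     *     OperationStatus status = cursor.put(key, data);
+     * </pre></blockquote>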
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return an OperationStatus for the operation.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus put(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkUpdatesAllowed("put");
+        trace(Level.FINEST, "Cursor.put: ", key, data, null);
+
+        return putInternal(key, data, PutMode.OVERWRITE);
+    }
+
+    /**
+     * Stores a key/data pair into the database.
+     *
+     * <p>If the putNoOverwrite method succeeds, the cursor is always
+     * positioned to refer to the newly inserted item.  If the putNoOverwrite
+     * method fails for any reason, the state of the cursor will be
+     * unchanged.</p>
+     *
+     * <p>If the key already appears in the database, putNoOverwrite will
+     * return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+     * OperationStatus.KEYEXIST}.</p>
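+     *
+     * <p>As an illustrative sketch of an insert-if-absent (assuming {@code
+     * key} and {@code data} entries prepared as for {@link #put put}):</p>
+     *
+     * <blockquote><pre>
+     *     if (cursor.putNoOverwrite(key, data) == OperationStatus.KEYEXIST) {
+     *         // a record with this key already exists; nothing was stored
+     *     }
+     * </pre></blockquote>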
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return an OperationStatus for the operation.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus putNoOverwrite(DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkUpdatesAllowed("putNoOverwrite");
+        trace(Level.FINEST, "Cursor.putNoOverwrite: ", key, data, null);
+
+        return putInternal(key, data, PutMode.NOOVERWRITE);
+    }
+
+    /**
+     * Stores a key/data pair into the database.
+     *
+     * <p>If the putNoDupData method succeeds, the cursor is always positioned
+     * to refer to the newly inserted item.  If the putNoDupData method fails
+     * for any reason, the state of the cursor will be unchanged.</p>
+     *
+     * <p>Insert the specified key/data pair into the database, unless a
+     * key/data pair comparing equally to it already exists in the database.
+     * If a matching key/data pair already exists in the database, {@link
+     * com.sleepycat.je.OperationStatus#KEYEXIST OperationStatus.KEYEXIST} is
+     * returned.</p>
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry DatabaseEntry}
+     * operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return an OperationStatus for the operation.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkUpdatesAllowed("putNoDupData");
+        trace(Level.FINEST, "Cursor.putNoDupData: ", key, data, null);
+
+        return putInternal(key, data, PutMode.NODUP);
+    }
+
+    /**
+     * Replaces the data in the key/data pair at the current cursor position.
+     *
+     * <p>Whether the putCurrent method succeeds or fails for any reason, the
+     * state of the cursor will be unchanged.</p>
+     *
+     * <p>Overwrite the data of the key/data pair to which the cursor refers
+     * with the specified data item. This method will return
+     * OperationStatus.NOTFOUND if the cursor currently refers to an
+     * already-deleted key/data pair.</p>
+     *
+     * <p>For a database that does not support duplicates, the data may be
+     * changed by this method.  If duplicates are supported, the data may be
+     * changed only if a custom partial comparator is configured and the
+     * comparator considers the old and new data to be equal (that is, the
+     * comparator returns zero).  For more information on partial comparators
+     * see {@link DatabaseConfig#setDuplicateComparator}.</p>
+     *
+     * <p>If the old and new data are unequal according to the comparator, a
+     * {@code DatabaseException} is thrown.  Changing the data in this case
+     * would change the sort order of the record, which would change the cursor
+     * position, and this is not allowed.  To change the sort order of a
+     * record, delete it and then re-insert it.</p>
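+     *
+     * <p>As an illustrative sketch of updating the record at the cursor
+     * position (assuming the cursor was just positioned, for example by
+     * {@code getNext}):</p>
+     *
+     * <blockquote><pre>
+     *     DatabaseEntry newData = new DatabaseEntry();
+     *     StringBinding.stringToEntry("newValue", newData);
+     *     OperationStatus status = cursor.putCurrent(newData);
+     * </pre></blockquote>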
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return an OperationStatus for the operation.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was
+     * specified.
+     *
+     * @throws DatabaseException if the old and new data are not equal
+     * according to the configured duplicate comparator or default comparator,
+     * or if a failure occurs.
+     */
+    public OperationStatus putCurrent(DatabaseEntry data)
+        throws DatabaseException {
+
+        checkState(true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        checkUpdatesAllowed("putCurrent");
+        trace(Level.FINEST, "Cursor.putCurrent: ", null, data, null);
+
+        return putInternal(null, data, PutMode.CURRENT);
+    }
+
+    /**
+     * Returns the key/data pair to which the cursor refers.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or
+     * does not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY
+     * OperationStatus.KEYEMPTY} if the key/data pair at the cursor position
+     * has been deleted; otherwise, {@link
+     * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getCurrent(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        try {
+            checkState(true);
+            checkArgsNoValRequired(key, data);
+            trace(Level.FINEST, "Cursor.getCurrent: ", lockMode);
+
+            return getCurrentInternal(key, data, lockMode);
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Moves the cursor to the first key/data pair of the database, and returns
+     * that pair.  If the first key has duplicate values, the first data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getFirst(DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getFirst: ", lockMode);
+
+        return position(key, data, lockMode, true);
+    }
+
+    /**
+     * Moves the cursor to the last key/data pair of the database, and returns
+     * that pair.  If the last key has duplicate values, the last data item in
+     * the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getLast(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getLast: ", lockMode);
+
+        return position(key, data, lockMode, false);
+    }
+
+    /**
+     * Moves the cursor to the next key/data pair and returns that pair.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next key/data pair of the database, and that pair
+     * is returned.  In the presence of duplicate key values, the value of the
+     * key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNext(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getNext: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, data, lockMode, true);
+        } else {
+            return retrieveNext(key, data, lockMode, GetMode.NEXT);
+        }
+    }
+
+    /**
+     * If the next key/data pair of the database is a duplicate data record for
+     * the current key/data pair, moves the cursor to the next key/data pair of
+     * the database and returns that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNextDup(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(true);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getNextDup: ", lockMode);
+
+        return retrieveNext(key, data, lockMode, GetMode.NEXT_DUP);
+    }
+
+    /**
+     * Moves the cursor to the next non-duplicate key/data pair and returns
+     * that pair.  If the matching key has duplicate values, the first data
+     * item in the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next non-duplicate key of the database, and that
+     * key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNextNoDup(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getNextNoDup: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, data, lockMode, true);
+        } else {
+            return retrieveNext(key, data, lockMode, GetMode.NEXT_NODUP);
+        }
+    }
+
+    /**
+     * Moves the cursor to the previous key/data pair and returns that pair.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous key/data pair of the database, and that
+     * pair is returned. In the presence of duplicate key values, the value of
+     * the key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrev(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getPrev: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, data, lockMode, false);
+        } else {
+            return retrieveNext(key, data, lockMode, GetMode.PREV);
+        }
+    }
+
+    /**
+     * If the previous key/data pair of the database is a duplicate data record
+     * for the current key/data pair, moves the cursor to the previous key/data
+     * pair of the database and returns that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrevDup(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(true);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getPrevDup: ", lockMode);
+
+        return retrieveNext(key, data, lockMode, GetMode.PREV_DUP);
+    }
+
+    /**
+     * Moves the cursor to the previous non-duplicate key/data pair and returns
+     * that pair.  If the matching key has duplicate values, the last data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, it is moved to the last
+     * key/data pair of the database, and that pair is returned.  Otherwise,
+     * the cursor is moved to the previous non-duplicate key of the database,
+     * and that key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key returned as output.  Its byte array does not need to
+     * be initialized by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getPrevNoDup: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, data, lockMode, false);
+        } else {
+            return retrieveNext(key, data, lockMode, GetMode.PREV_NODUP);
+        }
+    }
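+
+    /*
+     * Usage sketch: a reverse scan works the same way, since an
+     * uninitialized cursor is positioned at the last record by its first
+     * getPrev (or getPrevNoDup) call.  The "db" handle is hypothetical.
+     *
+     *   Cursor cursor = db.openCursor(null, null);
+     *   try {
+     *       DatabaseEntry key = new DatabaseEntry();
+     *       DatabaseEntry data = new DatabaseEntry();
+     *       while (cursor.getPrev(key, data, LockMode.DEFAULT) ==
+     *              OperationStatus.SUCCESS) {
+     *           // records are returned in descending key order
+     *       }
+     *   } finally {
+     *       cursor.close();
+     *   }
+     */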
+
+    /**
+     * Moves the cursor to the given key of the database, and returns the data
+     * item associated with the given key.  If the matching key has duplicate
+     * values, the first data item in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null
+     * or does not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchKey(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "Cursor.getSearchKey: ", key, null, lockMode);
+
+        return search(key, data, lockMode, SearchMode.SET);
+    }
+
+    /**
+     * Moves the cursor to the closest matching key of the database, and
+     * returns the data item associated with the matching key.  If the matching
+     * key has duplicate values, the first data item in the set of duplicates
+     * is returned.
+     *
+     * <p>The returned key/data pair is for the smallest key greater than or
+     * equal to the specified key (as determined by the key comparison
+     * function), permitting partial key matches and range searches.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key used as input and returned as output.  It must be
+     * initialized with a non-null byte array by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes
+     * are used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "Cursor.getSearchKeyRange: ", key, null, lockMode);
+
+        return search(key, data, lockMode, SearchMode.SET_RANGE);
+    }
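+
+    /*
+     * Usage sketch: getSearchKeyRange is commonly combined with getNext to
+     * scan all records whose keys are greater than or equal to a starting
+     * key.  The "db" handle and "startKeyBytes" are hypothetical.
+     *
+     *   DatabaseEntry key = new DatabaseEntry(startKeyBytes);
+     *   DatabaseEntry data = new DatabaseEntry();
+     *   Cursor cursor = db.openCursor(null, null);
+     *   try {
+     *       OperationStatus status =
+     *           cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+     *       while (status == OperationStatus.SUCCESS) {
+     *           // key holds the smallest key >= the starting key
+     *           status = cursor.getNext(key, data, LockMode.DEFAULT);
+     *       }
+     *   } finally {
+     *       cursor.close();
+     *   }
+     */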
+
+    /**
+     * Moves the cursor to the specified key/data pair, where both the key and
+     * data items must match.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param data the data used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchBoth(DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getSearchBoth: ", key, data, lockMode);
+
+        return search(key, data, lockMode, SearchMode.BOTH);
+    }
+
+    /**
+     * Moves the cursor to the specified key and closest matching data item of
+     * the database.
+     *
+     * <p>In the case of any database supporting sorted duplicate sets, the
+     * returned key/data pair is for the smallest data item greater than or
+     * equal to the specified data item (as determined by the duplicate
+     * comparison function), permitting partial matches and range searches in
+     * duplicate data sets.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the key used as input and returned as output.  It must be
+     * initialized with a non-null byte array by the caller.
+     *
+     * @param data the data used as input and returned as output.  It must be
+     * initialized with a non-null byte array by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                              DatabaseEntry data,
+                                              LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsValRequired(key, data);
+        trace(Level.FINEST, "Cursor.getSearchBothRange: ", key, data,
+              lockMode);
+
+        return search(key, data, lockMode, SearchMode.BOTH_RANGE);
+    }
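+
+    /*
+     * Usage sketch: getSearchBoth looks up an exact key/data pair, while
+     * getSearchBothRange positions at the smallest duplicate greater than or
+     * equal to the given data item for the given key.  The entries and the
+     * "cursor" handle are hypothetical.
+     *
+     *   DatabaseEntry key = new DatabaseEntry(keyBytes);
+     *   DatabaseEntry data = new DatabaseEntry(dataBytes);
+     *   if (cursor.getSearchBoth(key, data, LockMode.DEFAULT) ==
+     *       OperationStatus.SUCCESS) {
+     *       // the exact key/data pair exists
+     *   }
+     *
+     *   DatabaseEntry dataStart = new DatabaseEntry(dataStartBytes);
+     *   if (cursor.getSearchBothRange(key, dataStart, LockMode.DEFAULT) ==
+     *       OperationStatus.SUCCESS) {
+     *       // dataStart holds the smallest duplicate >= the input data
+     *   }
+     */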
+
+    /**
+     * Counts duplicates without parameter checking.
+     */
+    int countInternal(LockMode lockMode)
+        throws DatabaseException {
+
+        try {
+            CursorImpl original = null;
+            CursorImpl dup = null;
+
+            /*
+             * We depart from the usual beginRead/endRead sequence because
+             * count() should not retain locks unless transactions are used.
+             * Therefore we always close the dup cursor after using it.
+             */
+            try {
+                original = cursorImpl;
+                dup = original.cloneCursor(true);
+                return dup.count(getLockType(lockMode, false));
+            } finally {
+                if (dup != original &&
+                    dup != null) {
+                    dup.close();
+                }
+            }
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Internal version of delete() that does no parameter checking.  Calls
+     * deleteNoNotify() and notifies triggers (performs secondary updates).
+     */
+    OperationStatus deleteInternal()
+        throws DatabaseException {
+
+        try {
+            /* Get existing data if updating secondaries. */
+            DatabaseEntry oldKey = null;
+            DatabaseEntry oldData = null;
+            boolean doNotifyTriggers =
+                dbHandle != null && dbHandle.hasTriggers();
+            if (doNotifyTriggers) {
+                oldKey = new DatabaseEntry();
+                oldData = new DatabaseEntry();
+                OperationStatus status = getCurrentInternal(oldKey, oldData,
+                                                            LockMode.RMW);
+                if (status != OperationStatus.SUCCESS) {
+                    return OperationStatus.KEYEMPTY;
+                }
+            }
+
+            /*
+             * Notify triggers before the actual deletion so that a primary
+             * record never exists while secondary keys refer to it.  This is
+             * relied on by secondary read-uncommitted.
+             */
+            if (doNotifyTriggers) {
+                dbHandle.notifyTriggers(cursorImpl.getLocker(),
+                                        oldKey, oldData, null);
+            }
+
+            /* The actual deletion. */
+            OperationStatus status = deleteNoNotify();
+            return status;
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Clones the cursor, deletes at the current position and, if successful,
+     * swaps cursors.  Does not notify triggers (does not perform secondary
+     * updates).
+     */
+    OperationStatus deleteNoNotify()
+        throws DatabaseException {
+
+        CursorImpl original = null;
+        CursorImpl dup = null;
+        OperationStatus status = OperationStatus.KEYEMPTY;
+        try {
+            /* Clone, add dup to cursor. */
+            original = cursorImpl;
+            dup = original.cloneCursor(true);
+
+            /* Latch the BINs and do the delete with the dup. */
+            dup.latchBINs();
+            status = dup.delete(dbImpl.getRepContext());
+
+            return status;
+        } finally {
+            if (original != null) {
+                original.releaseBINs();
+            }
+            if (dup != null) {
+                dup.releaseBINs();
+            }
+
+            /* Swap if it was a success. */
+            boolean success = (status == OperationStatus.SUCCESS);
+            if (cursorImpl == dup) {
+                if (!success) {
+                    cursorImpl.reset();
+                }
+            } else {
+                if (success) {
+                    original.close();
+                    cursorImpl = dup;
+                } else {
+                    dup.close();
+                }
+            }
+        }
+    }
+
+    /**
+     * Internal version of put() that does no parameter checking.  Calls
+     * putNoNotify() and notifies triggers (performs secondary updates).
+     * Prevents phantoms.
+     */
+    OperationStatus putInternal(DatabaseEntry key,
+                                DatabaseEntry data,
+                                PutMode putMode)
+        throws DatabaseException {
+
+        try {
+            /* Need to get existing data if updating secondaries. */
+            DatabaseEntry oldData = null;
+            boolean doNotifyTriggers =
+                dbHandle != null && dbHandle.hasTriggers();
+            if (doNotifyTriggers && (putMode == PutMode.CURRENT ||
+                                     putMode == PutMode.OVERWRITE)) {
+                oldData = new DatabaseEntry();
+                if (key == null && putMode == PutMode.CURRENT) {
+                    /* Key is returned by CursorImpl.putCurrent as foundKey. */
+                    key = new DatabaseEntry();
+                }
+            }
+
+            /* Perform put. */
+            OperationStatus commitStatus =
+                putNoNotify(key, data, putMode, oldData);
+
+            /* Notify triggers (update secondaries). */
+            if (doNotifyTriggers && commitStatus == OperationStatus.SUCCESS) {
+                if (oldData != null && oldData.getData() == null) {
+                    oldData = null;
+                }
+                dbHandle.notifyTriggers(cursorImpl.getLocker(), key,
+                                        oldData, data);
+            }
+            return commitStatus;
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Performs the put operation but does not notify triggers (does not
+     * perform secondary updates).  Prevents phantoms.
+     */
+    OperationStatus putNoNotify(DatabaseEntry key,
+                                DatabaseEntry data,
+                                PutMode putMode,
+                                DatabaseEntry returnOldData)
+        throws DatabaseException {
+
+        Locker nextKeyLocker = null;
+        CursorImpl nextKeyCursor = null;
+        try {
+            /* If other transactions are serializable, lock the next key. */
+            Locker cursorLocker = cursorImpl.getLocker();
+            if (putMode != PutMode.CURRENT &&
+                dbImpl.getDbEnvironment().getTxnManager().
+                    areOtherSerializableTransactionsActive(cursorLocker)) {
+                nextKeyLocker = BuddyLocker.createBuddyLocker
+                    (dbImpl.getDbEnvironment(), cursorLocker);
+                nextKeyCursor = new CursorImpl(dbImpl, nextKeyLocker);
+                /* Perform eviction for user cursors. */
+                nextKeyCursor.setAllowEviction(true);
+                nextKeyCursor.lockNextKeyForInsert(key, data);
+            }
+
+            /* Perform the put operation. */
+            return putAllowPhantoms
+                (key, data, putMode, returnOldData, nextKeyCursor);
+        } finally {
+            /* Release the next-key lock. */
+            if (nextKeyCursor != null) {
+                nextKeyCursor.close();
+            }
+            if (nextKeyLocker != null) {
+                nextKeyLocker.operationEnd();
+            }
+        }
+    }
+
+    /**
+     * Clones the cursor, puts key/data according to PutMode and, if
+     * successful, swaps cursors.  Does not notify triggers (does not perform
+     * secondary updates).  Does not prevent phantoms.
+     *
+     * @param nextKeyCursor is the cursor used to lock the next key during
+     * phantom prevention.  If this cursor is non-null and initialized, its
+     * BIN will be used to initialize the dup cursor used to perform insertion.
+     * This enables an optimization that skips the search for the BIN.
+     */
+    private OperationStatus putAllowPhantoms(DatabaseEntry key,
+                                             DatabaseEntry data,
+                                             PutMode putMode,
+                                             DatabaseEntry returnOldData,
+                                             CursorImpl nextKeyCursor)
+        throws DatabaseException {
+
+        if (data == null) {
+            throw new NullPointerException
+                ("put passed a null DatabaseEntry arg");
+        }
+
+        if (putMode != PutMode.CURRENT && key == null) {
+            throw new IllegalArgumentException
+                ("put passed a null DatabaseEntry arg");
+        }
+
+        CursorImpl original = null;
+        OperationStatus status = OperationStatus.NOTFOUND;
+        CursorImpl dup = null;
+        try {
+            /* Latch and clone. */
+            original = cursorImpl;
+
+            if (putMode == PutMode.CURRENT) {
+                /* Call addCursor for putCurrent. */
+                dup = original.cloneCursor(true);
+            } else {
+
+                /*
+                 * Do not call addCursor when inserting.  Copy the position of
+                 * nextKeyCursor if available.
+                 */
+                dup = original.cloneCursor(false, nextKeyCursor);
+            }
+
+            /* Perform operation. */
+            if (putMode == PutMode.CURRENT) {
+                status = dup.putCurrent
+                    (data, key, returnOldData, dbImpl.getRepContext());
+            } else if (putMode == PutMode.OVERWRITE) {
+                status = dup.put(key, data, returnOldData);
+            } else if (putMode == PutMode.NOOVERWRITE) {
+                status = dup.putNoOverwrite(key, data);
+            } else if (putMode == PutMode.NODUP) {
+                status = dup.putNoDupData(key, data);
+            } else {
+                throw new InternalException("unknown PutMode");
+            }
+
+            return status;
+        } finally {
+            if (original != null) {
+                original.releaseBINs();
+            }
+
+            boolean success = (status == OperationStatus.SUCCESS);
+            if (cursorImpl == dup) {
+                if (!success) {
+                    cursorImpl.reset();
+                }
+            } else {
+                if (success) {
+                    original.close();
+                    cursorImpl = dup;
+                } else {
+                    if (dup != null) {
+                        dup.close();
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Positions the cursor at the first or last record of the database.
+     * Prevents phantoms.
+     */
+    OperationStatus position(DatabaseEntry key,
+                             DatabaseEntry data,
+                             LockMode lockMode,
+                             boolean first)
+        throws DatabaseException {
+
+        try {
+            if (!isSerializableIsolation(lockMode)) {
+                return positionAllowPhantoms
+                    (key, data, getLockType(lockMode, false), first);
+            }
+
+            /*
+             * Perform range locking to prevent phantoms and handle restarts.
+             */
+            while (true) {
+                try {
+                    /* Range lock the EOF node before getLast. */
+                    if (!first) {
+                        cursorImpl.lockEofNode(LockType.RANGE_READ);
+                    }
+
+                    /* Use a range lock for getFirst. */
+                    LockType lockType = getLockType(lockMode, first);
+
+                    /* Perform operation. */
+                    OperationStatus status =
+                        positionAllowPhantoms(key, data, lockType, first);
+
+                    /*
+                     * Range lock the EOF node when getFirst returns NOTFOUND.
+                     */
+                    if (first && status != OperationStatus.SUCCESS) {
+                        cursorImpl.lockEofNode(LockType.RANGE_READ);
+                    }
+
+                    return status;
+                } catch (RangeRestartException e) {
+                    continue;
+                }
+            }
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Positions without preventing phantoms.
+     */
+    private OperationStatus positionAllowPhantoms(DatabaseEntry key,
+                                                  DatabaseEntry data,
+                                                  LockType lockType,
+                                                  boolean first)
+        throws DatabaseException {
+
+        assert (key != null && data != null);
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+        CursorImpl dup = null;
+        try {
+
+            /*
+             * Pass false: no need to call addCursor here because
+             * CursorImpl.position will be adding it after it finds the bin.
+             */
+            dup = beginRead(false);
+
+            /* Search for first or last. */
+            if (!dup.positionFirstOrLast(first, null)) {
+                /* Tree is empty. */
+                status = OperationStatus.NOTFOUND;
+                assert LatchSupport.countLatchesHeld() == 0:
+                    LatchSupport.latchesHeldToString();
+            } else {
+                /* Found something in this tree. */
+                assert LatchSupport.countLatchesHeld() == 1:
+                    LatchSupport.latchesHeldToString();
+
+                status = dup.getCurrentAlreadyLatched
+                    (key, data, lockType, first);
+
+                if (status != OperationStatus.SUCCESS) {
+                    /* The record we're pointing at may be deleted. */
+                    status = dup.getNext(key, data, lockType, first, false);
+                }
+            }
+        } finally {
+
+            /*
+             * positionFirstOrLast returns with the target BIN latched, so it
+             * is the responsibility of this method to make sure the latches
+             * are released.
+             */
+            cursorImpl.releaseBINs();
+            endRead(dup, status == OperationStatus.SUCCESS);
+        }
+        return status;
+    }
+
+    /**
+     * Performs search by key, data, or both.  Prevents phantoms.
+     */
+    OperationStatus search(DatabaseEntry key,
+                           DatabaseEntry data,
+                           LockMode lockMode,
+                           SearchMode searchMode)
+        throws DatabaseException {
+
+        try {
+            if (!isSerializableIsolation(lockMode)) {
+                LockType lockType = getLockType(lockMode, false);
+                KeyChangeStatus result = searchAllowPhantoms
+                    (key, data, lockType, lockType, searchMode);
+                return result.status;
+            }
+
+            /*
+             * Perform range locking to prevent phantoms and handle restarts.
+             */
+            while (true) {
+                try {
+                    /* Do not use a range lock for the initial search. */
+                    LockType searchLockType = getLockType(lockMode, false);
+
+                    /* Switch to a range lock when advancing forward. */
+                    LockType advanceLockType = getLockType(lockMode, true);
+
+                    /* Do not modify key/data params until SUCCESS. */
+                    DatabaseEntry tryKey = new DatabaseEntry
+                        (key.getData(), key.getOffset(), key.getSize());
+                    DatabaseEntry tryData = new DatabaseEntry
+                        (data.getData(), data.getOffset(), data.getSize());
+                    KeyChangeStatus result;
+
+                    if (searchMode.isExactSearch()) {
+
+                        /*
+                         * Artificial range search to range lock the next key.
+                         */
+                        result = searchExactAndRangeLock
+                            (tryKey, tryData, searchLockType, advanceLockType,
+                             searchMode);
+                    } else {
+                        /* Normal range search. */
+                        result = searchAllowPhantoms
+                            (tryKey, tryData, searchLockType, advanceLockType,
+                             searchMode);
+
+                        /* Lock the EOF node if no records follow the key. */
+                        if (result.status != OperationStatus.SUCCESS) {
+                            cursorImpl.lockEofNode(LockType.RANGE_READ);
+                        }
+                    }
+
+                    /*
+                     * Only overwrite key/data on SUCCESS, after all locking.
+                     */
+                    if (result.status == OperationStatus.SUCCESS) {
+                        key.setData(tryKey.getData(), 0, tryKey.getSize());
+                        data.setData(tryData.getData(), 0, tryData.getSize());
+                    }
+
+                    return result.status;
+                } catch (RangeRestartException e) {
+                    continue;
+                }
+            }
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
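+
+    /*
+     * Note: the range-locking branch above is taken only when the locker
+     * uses serializable isolation.  A minimal sketch of how an application
+     * might request that mode (standard JE configuration, assumed here; the
+     * "env" and "db" handles are hypothetical):
+     *
+     *   TransactionConfig txnConfig = new TransactionConfig();
+     *   txnConfig.setSerializableIsolation(true);
+     *   Transaction txn = env.beginTransaction(null, txnConfig);
+     *   Cursor cursor = db.openCursor(txn, null);
+     *   // searches through this cursor now range lock to prevent phantoms
+     */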
+
+    /**
+     * For an exact search, performs a range search and returns NOTFOUND if the
+     * key changes (or if the data changes for BOTH) during the search.
+     * If no exact match is found, the range search will range lock the
+     * following key for phantom prevention.  Importantly, the cursor position
+     * is not changed if an exact match is not found, even though we advance to
+     * the following key in order to range lock it.
+     */
+    private KeyChangeStatus searchExactAndRangeLock(DatabaseEntry key,
+                                                    DatabaseEntry data,
+                                                    LockType searchLockType,
+                                                    LockType advanceLockType,
+                                                    SearchMode searchMode)
+        throws DatabaseException {
+
+        /* Convert exact search to range search. */
+        searchMode = (searchMode == SearchMode.SET) ?
+            SearchMode.SET_RANGE : SearchMode.BOTH_RANGE;
+
+        KeyChangeStatus result = null;
+
+        CursorImpl dup =
+            beginRead(false /* searchAndPosition will add cursor */);
+
+        try {
+
+            /*
+             * Perform a range search and return NOTFOUND if an exact match is
+             * not found.  Pass advanceAfterRangeSearch=true to advance even if
+             * the key is not matched, to lock the following key.
+             */
+            result = searchInternal
+                (dup, key, data, searchLockType, advanceLockType, searchMode,
+                 true /*advanceAfterRangeSearch*/);
+
+            /* If the key changed, then we do not have an exact match. */
+            if (result.keyChange && result.status == OperationStatus.SUCCESS) {
+                result.status = OperationStatus.NOTFOUND;
+            }
+        } finally {
+            endRead(dup, result != null &&
+                         result.status == OperationStatus.SUCCESS);
+        }
+
+        /*
+         * Lock the EOF node if there was no exact match and we did not
+         * range-lock the next record.
+         */
+        if (result.status != OperationStatus.SUCCESS && !result.keyChange) {
+            cursorImpl.lockEofNode(LockType.RANGE_READ);
+        }
+
+        return result;
+    }
+
+    /**
+     * Performs search without preventing phantoms.
+     */
+    private KeyChangeStatus searchAllowPhantoms(DatabaseEntry key,
+                                                DatabaseEntry data,
+                                                LockType searchLockType,
+                                                LockType advanceLockType,
+                                                SearchMode searchMode)
+        throws DatabaseException {
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+
+        CursorImpl dup =
+            beginRead(false /* searchAndPosition will add cursor */);
+
+        try {
+            KeyChangeStatus result = searchInternal
+                (dup, key, data, searchLockType, advanceLockType, searchMode,
+                 false /*advanceAfterRangeSearch*/);
+
+            status = result.status;
+            return result;
+        } finally {
+            endRead(dup, status == OperationStatus.SUCCESS);
+        }
+    }
+
+    /**
+     * Performs search for a given CursorImpl.
+     */
+    private KeyChangeStatus searchInternal(CursorImpl dup,
+                                           DatabaseEntry key,
+                                           DatabaseEntry data,
+                                           LockType searchLockType,
+                                           LockType advanceLockType,
+                                           SearchMode searchMode,
+                                           boolean advanceAfterRangeSearch)
+        throws DatabaseException {
+
+        assert key != null && data != null;
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+        boolean keyChange = false;
+
+        try {
+            /* search */
+            int searchResult =
+                dup.searchAndPosition(key, data, searchMode, searchLockType);
+            if ((searchResult & CursorImpl.FOUND) != 0) {
+
+                /*
+                 * The search found a possibly valid record.
+                 * CursorImpl.searchAndPosition's job is to settle the cursor
+                 * at a particular location on a BIN. In some cases, the
+                 * current position may not actually hold a valid record, so
+                 * it's this layer's responsibility to judge if it might need to
+                 * bump the cursor along and search more. For example, we might
+                 * have to do so if the position holds a deleted record.
+                 *
+                 * Advance the cursor if:
+                 *
+                 * 1. This is a range type search and there was no match on the
+                 * search criteria (the key or key and data depending on the
+                 * type of search). Then we search forward until there's a
+                 * match.
+                 *
+                 * 2. If this is not a range type search, check the record at
+                 * the current position. If this is not a duplicate set,
+                 * CursorImpl.searchAndPosition gave us an exact answer.
+                 * However since it doesn't peer into the duplicate set, we may
+                 * need to probe further in if there are deleted records in the
+                 * duplicate set, i.e., we have to be able to find k1/d2 even if
+                 * there's k1/d1(deleted), k1/d2, k1/d3, etc in a duplicate
+                 * set.
+                 *
+                 * Note that searchResult has four bits possibly set:
+                 *
+                 * FOUND has already been checked above.
+                 *
+                 * EXACT_KEY means an exact match on the key portion was made.
+                 *
+                 * EXACT_DATA means that if searchMode was BOTH or BOTH_RANGE
+                 * then an exact match was made on the data (in addition to the
+                 * key).
+                 *
+                 * FOUND_LAST means that the cursor is positioned at the last
+                 * record in the database.
+                 */
+                boolean exactKeyMatch =
+                    ((searchResult & CursorImpl.EXACT_KEY) != 0);
+                boolean exactDataMatch =
+                    ((searchResult & CursorImpl.EXACT_DATA) != 0);
+                boolean foundLast =
+                    ((searchResult & CursorImpl.FOUND_LAST) != 0);
+
+                /*
+                 * rangeMatch means that a range match of some sort (either
+                 * SET_RANGE or BOTH_RANGE) was specified and there wasn't a
+                 * complete match.  If SET_RANGE was spec'd and EXACT_KEY was
+                 * not returned as set, then the key didn't match exactly.  If
+                 * BOTH_RANGE was spec'd and EXACT_DATA was not returned as
+                 * set, then the data didn't match exactly.
+                 */
+                boolean rangeMatch = false;
+                if (searchMode == SearchMode.SET_RANGE &&
+                    !exactKeyMatch) {
+                    rangeMatch = true;
+                }
+
+                if (searchMode == SearchMode.BOTH_RANGE &&
+                    (!exactKeyMatch || !exactDataMatch)) {
+                    rangeMatch = true;
+                }
+
+                /*
+                 * Pass null for key to getCurrentAlreadyLatched if searchMode
+                 * is SET since the key is not supposed to be set in that case.
+                 */
+                DatabaseEntry useKey =
+                    (searchMode == SearchMode.SET) ?
+                    null : key;
+
+                /*
+                 * rangeMatch => an exact match was not found so we need to
+                 * advance the cursor to a real item using getNextXXX.  If
+                 * rangeMatch is true, then cursor is currently on some entry,
+                 * but that entry is either deleted or is prior to the target
+                 * key/data.  It is also possible that rangeMatch is false (we
+                 * have an exact match) but the entry is deleted.  So we test
+                 * for rangeMatch or a deleted entry, and if either is true
+                 * then we advance to the next non-deleted entry.
+                 */
+                if (rangeMatch ||
+                    (status = dup.getCurrentAlreadyLatched
+                     (useKey, data, searchLockType, true)) ==
+                    OperationStatus.KEYEMPTY) {
+
+                    if (foundLast) {
+                        status = OperationStatus.NOTFOUND;
+                    } else if (searchMode == SearchMode.SET) {
+
+                        /*
+                         * SET is an exact operation, so this isn't a
+                         * rangeMatch, it's a deleted record.  We should
+                         * advance, but only to duplicates for the same key.
+                         */
+                        status = dup.getNextDuplicate
+                            (key, data, advanceLockType, true, rangeMatch);
+                    } else if (searchMode == SearchMode.BOTH) {
+
+                        /*
+                         * BOTH is also an exact operation, but we should not
+                         * advance past a deleted record because the data match
+                         * is exact.  However, this API should return NOTFOUND
+                         * instead of KEYEMPTY (which may have been set above).
+                         */
+                        if (status == OperationStatus.KEYEMPTY) {
+                            status = OperationStatus.NOTFOUND;
+                        }
+                    } else {
+                        assert !searchMode.isExactSearch();
+
+                        /* Save the search key for a BOTH_RANGE search. */
+                        byte[] searchKey = null;
+                        if (searchMode.isDataSearch()) {
+                            searchKey = Key.makeKey(key);
+                        }
+
+                        /*
+                         * This may be a deleted record or a rangeMatch, and in
+                         * either case we should advance.  We must determine
+                         * whether the key changes when we advance.
+                         */
+                        if (exactKeyMatch) {
+                            KeyChangeStatus result =
+                                dup.getNextWithKeyChangeStatus
+                                (key, data, advanceLockType, true, rangeMatch);
+                            status = result.status;
+
+                            /*
+                             * For BOTH_RANGE, advancing always causes a data
+                             * change, which is considered a key change.  For
+                             * SET_RANGE, getNextWithKeyChangeStatus determined
+                             * the key change status.
+                             */
+                            keyChange = searchMode.isDataSearch() ?
+                                (status == OperationStatus.SUCCESS) :
+                                result.keyChange;
+
+                        } else if (searchMode.isDataSearch() &&
+                                   !advanceAfterRangeSearch) {
+
+                            /*
+                             * If we did not match the key (exactly) for
+                             * BOTH_RANGE, and advanceAfterRangeSearch is
+                             * false, then return NOTFOUND.
+                             */
+                            status = OperationStatus.NOTFOUND;
+                        } else {
+
+                            /*
+                             * If we didn't match the key, skip over duplicates
+                             * to the next key with getNextNoDup.
+                             */
+                            status = dup.getNextNoDup
+                                (key, data, advanceLockType, true, rangeMatch);
+
+                            /* getNextNoDup always causes a key change. */
+                            keyChange = (status == OperationStatus.SUCCESS);
+                        }
+
+                        /*
+                         * If we moved past the search key after a BOTH_RANGE
+                         * search, return NOTFOUND.  Leave the keyChange value
+                         * intact, since we want to return this accurately
+                         * regardless of the status return.
+                         */
+                        if (status == OperationStatus.SUCCESS &&
+                            searchMode.isDataSearch()) {
+                            if (Key.compareKeys
+                                (key.getData(), searchKey,
+                                 dbImpl.getBtreeComparator()) != 0) {
+                                status = OperationStatus.NOTFOUND;
+                            }
+                        }
+                    }
+                }
+            }
+        } finally {
+
+            /*
+             * searchAndPosition returns with the target BIN latched, so it is
+             * the responsibility of this method to make sure the latches are
+             * released.
+             */
+            cursorImpl.releaseBINs();
+            if (status != OperationStatus.SUCCESS && dup != cursorImpl) {
+                dup.releaseBINs();
+            }
+        }
+
+        return new KeyChangeStatus(status, keyChange);
+    }
+
+    /**
+     * Retrieves the next or previous record.  Prevents phantoms.
+     */
+    OperationStatus retrieveNext(DatabaseEntry key,
+                                 DatabaseEntry data,
+                                 LockMode lockMode,
+                                 GetMode getMode)
+        throws DatabaseException {
+
+        try {
+            if (!isSerializableIsolation(lockMode)) {
+                return retrieveNextAllowPhantoms
+                    (key, data, getLockType(lockMode, false), getMode);
+            }
+
+            /*
+             * Perform range locking to prevent phantoms and handle restarts.
+             */
+            while (true) {
+                try {
+                    OperationStatus status;
+                    if (getMode == GetMode.NEXT_DUP) {
+
+                        /*
+                         * Special case to lock the next key if no more dups.
+                         */
+                        status = getNextDupAndRangeLock(key, data, lockMode);
+                    } else {
+
+                        /* Get a range lock for 'prev' operations. */
+                        if (!getMode.isForward()) {
+                            rangeLockCurrentPosition(getMode);
+                        }
+
+                        /*
+                         * Use a range lock if performing a 'next' operation.
+                         */
+                        LockType lockType =
+                            getLockType(lockMode, getMode.isForward());
+
+                        /* Perform the operation. */
+                        status = retrieveNextAllowPhantoms
+                            (key, data, lockType, getMode);
+
+                        if (getMode.isForward() &&
+                            status != OperationStatus.SUCCESS) {
+                            /* NEXT, NEXT_NODUP: lock the EOF node. */
+                            cursorImpl.lockEofNode(LockType.RANGE_READ);
+                        }
+                    }
+
+                    return status;
+                } catch (RangeRestartException e) {
+                    continue;
+                }
+            }
+        } catch (Error E) {
+            dbImpl.getDbEnvironment().invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Retrieves the next dup; if no next dup is found, range locks the
+     * following key for phantom prevention.  Importantly, the cursor position
+     * is not changed if there are no more dups, even though we advance to the
+     * following key in order to range lock it.
+     */
+    private OperationStatus getNextDupAndRangeLock(DatabaseEntry key,
+                                                   DatabaseEntry data,
+                                                   LockMode lockMode)
+        throws DatabaseException {
+
+        /* Do not modify key/data params until SUCCESS. */
+        DatabaseEntry tryKey = new DatabaseEntry();
+        DatabaseEntry tryData = new DatabaseEntry();
+
+        /* Get a range lock. */
+        LockType lockType = getLockType(lockMode, true);
+        OperationStatus status;
+        boolean noNextKeyFound;
+
+        /*
+         * Perform a NEXT and return NOTFOUND if the key changes
+         * during the search.
+         */
+        while (true) {
+            assert LatchSupport.countLatchesHeld() == 0;
+            CursorImpl dup = beginRead(true);
+
+            try {
+                KeyChangeStatus result = dup.getNextWithKeyChangeStatus
+                    (tryKey, tryData, lockType, true, false);
+                status = result.status;
+                noNextKeyFound = (status != OperationStatus.SUCCESS);
+                if (result.keyChange && status == OperationStatus.SUCCESS) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } catch (DatabaseException DBE) {
+                endRead(dup, false);
+                throw DBE;
+            }
+
+            if (checkForInsertion(GetMode.NEXT, cursorImpl, dup)) {
+                endRead(dup, false);
+                continue;
+            } else {
+                endRead(dup, status == OperationStatus.SUCCESS);
+                assert LatchSupport.countLatchesHeld() == 0;
+                break;
+            }
+        }
+
+        /* Lock the EOF node if no more records, whether or not more dups. */
+        if (noNextKeyFound) {
+            cursorImpl.lockEofNode(LockType.RANGE_READ);
+        }
+
+        /* Only overwrite key/data on SUCCESS. */
+        if (status == OperationStatus.SUCCESS) {
+            key.setData(tryKey.getData(), 0, tryKey.getSize());
+            data.setData(tryData.getData(), 0, tryData.getSize());
+        }
+
+        return status;
+    }
+
+    /**
+     * For 'prev' operations, upgrades to a range lock at the current position.
+     * For PREV_NODUP, range locks the first duplicate instead.  If there are
+     * no records at the current position, gets a range lock on the next
+     * record or, if not found, on the logical EOF node.  The current cursor
+     * position is not modified; a separate cursor is used.
+     */
+    private void rangeLockCurrentPosition(GetMode getMode)
+        throws DatabaseException {
+
+        DatabaseEntry tempKey = new DatabaseEntry();
+        DatabaseEntry tempData = new DatabaseEntry();
+        tempKey.setPartial(0, 0, true);
+        tempData.setPartial(0, 0, true);
+
+        OperationStatus status;
+        CursorImpl dup = cursorImpl.cloneCursor(true);
+        try {
+            if (getMode == GetMode.PREV_NODUP) {
+                status = dup.getFirstDuplicate
+                    (tempKey, tempData, LockType.RANGE_READ);
+            } else {
+                status = dup.getCurrent
+                    (tempKey, tempData, LockType.RANGE_READ);
+            }
+            if (status != OperationStatus.SUCCESS) {
+                while (true) {
+                    assert LatchSupport.countLatchesHeld() == 0;
+
+                    status = dup.getNext
+                        (tempKey, tempData, LockType.RANGE_READ, true, false);
+
+                    if (checkForInsertion(GetMode.NEXT, cursorImpl, dup)) {
+                        dup.close();
+                        dup = cursorImpl.cloneCursor(true);
+                        continue;
+                    } else {
+                        assert LatchSupport.countLatchesHeld() == 0;
+                        break;
+                    }
+                }
+            }
+        } finally {
+            if (cursorImpl == dup) {
+                dup.reset();
+            } else {
+                dup.close();
+            }
+        }
+
+        if (status != OperationStatus.SUCCESS) {
+            cursorImpl.lockEofNode(LockType.RANGE_READ);
+        }
+    }
+
+    /**
+     * Retrieves without preventing phantoms.
+     */
+    private OperationStatus retrieveNextAllowPhantoms(DatabaseEntry key,
+                                                      DatabaseEntry data,
+                                                      LockType lockType,
+                                                      GetMode getMode)
+        throws DatabaseException {
+
+        assert (key != null && data != null);
+
+        OperationStatus status;
+
+        while (true) {
+            assert LatchSupport.countLatchesHeld() == 0;
+            CursorImpl dup = beginRead(true);
+
+            try {
+                if (getMode == GetMode.NEXT) {
+                    status = dup.getNext
+                        (key, data, lockType, true, false);
+                } else if (getMode == GetMode.PREV) {
+                    status = dup.getNext
+                        (key, data, lockType, false, false);
+                } else if (getMode == GetMode.NEXT_DUP) {
+                    status = dup.getNextDuplicate
+                        (key, data, lockType, true, false);
+                } else if (getMode == GetMode.PREV_DUP) {
+                    status = dup.getNextDuplicate
+                        (key, data, lockType, false, false);
+                } else if (getMode == GetMode.NEXT_NODUP) {
+                    status = dup.getNextNoDup
+                        (key, data, lockType, true, false);
+                } else if (getMode == GetMode.PREV_NODUP) {
+                    status = dup.getNextNoDup
+                        (key, data, lockType, false, false);
+                } else {
+                    throw new InternalException("unknown GetMode");
+                }
+            } catch (DatabaseException DBE) {
+                endRead(dup, false);
+                throw DBE;
+            }
+
+            if (checkForInsertion(getMode, cursorImpl, dup)) {
+                endRead(dup, false);
+                continue;
+            } else {
+                endRead(dup, status == OperationStatus.SUCCESS);
+                assert LatchSupport.countLatchesHeld() == 0;
+                break;
+            }
+        }
+        return status;
+    }
+
+    /**
+     * Returns the current key and data.  There is no need to prevent phantoms.
+     */
+    OperationStatus getCurrentInternal(DatabaseEntry key,
+                                       DatabaseEntry data,
+                                       LockMode lockMode)
+        throws DatabaseException {
+
+        /* Do not use a range lock. */
+        LockType lockType = getLockType(lockMode, false);
+
+        return cursorImpl.getCurrent(key, data, lockType);
+    }
+
+    /*
+     * Something may have been added to the original cursor (cursorImpl) while
+     * we were getting the next BIN.  cursorImpl would have been adjusted
+     * properly but we would have skipped a BIN in the process.
+     *
+     * Note that when we call LN.isDeleted(), we do not need to lock the LN.
+     * If we see a non-committed deleted entry, we'll just iterate around in
+     * the caller.  So a false positive is ok.
+     *
+     * @return true if an unaccounted-for insertion happened.
+     */
+    private boolean checkForInsertion(GetMode getMode,
+                                      CursorImpl origCursor,
+                                      CursorImpl dupCursor)
+        throws DatabaseException {
+
+        BIN origBIN = origCursor.getBIN();
+        BIN dupBIN = dupCursor.getBIN();
+        DBIN origDBIN = origCursor.getDupBIN();
+
+        /* If fetchTarget returns null below, a deleted LN was cleaned. */
+
+        boolean forward = true;
+        if (getMode == GetMode.PREV ||
+            getMode == GetMode.PREV_DUP ||
+            getMode == GetMode.PREV_NODUP) {
+            forward = false;
+        }
+        boolean ret = false;
+        if (origBIN != dupBIN) {
+            /* We jumped to the next BIN during getNext(). */
+            origCursor.latchBINs();
+
+            try {
+                if (origDBIN == null) {
+                    if (forward) {
+                        if (origBIN.getNEntries() - 1 >
+                            origCursor.getIndex()) {
+
+                            /*
+                             * We were adjusted to something other than the
+                             * last entry so some insertion happened.
+                             */
+                            for (int i = origCursor.getIndex() + 1;
+                                 i < origBIN.getNEntries();
+                                 i++) {
+                                if (!origBIN.isEntryKnownDeleted(i)) {
+                                    Node n = origBIN.fetchTarget(i);
+                                    if (n != null && !n.containsDuplicates()) {
+                                        LN ln = (LN) n;
+                                        /* See comment above about locking. */
+                                        if (!ln.isDeleted()) {
+                                            ret = true;
+                                            break;
+                                        }
+                                    }
+                                } else {
+                                    /* Need to check the DupCountLN. */
+                                }
+                            }
+                        }
+                    } else {
+                        if (origCursor.getIndex() > 0) {
+
+                            /*
+                             * We were adjusted to something other than the
+                             * first entry so some insertion happened.
+                             */
+                            for (int i = 0; i < origCursor.getIndex(); i++) {
+                                if (!origBIN.isEntryKnownDeleted(i)) {
+                                    Node n = origBIN.fetchTarget(i);
+                                    if (n != null && !n.containsDuplicates()) {
+                                        LN ln = (LN) n;
+                                        /* See comment above about locking. */
+                                        if (!ln.isDeleted()) {
+                                            ret = true;
+                                            break;
+                                        }
+                                    } else {
+                                        /* Need to check the DupCountLN. */
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            } finally {
+                origCursor.releaseBINs();
+            }
+            return ret;
+        }
+
+        if (origDBIN != dupCursor.getDupBIN() &&
+            origCursor.getIndex() == dupCursor.getIndex() &&
+            getMode != GetMode.NEXT_NODUP &&
+            getMode != GetMode.PREV_NODUP) {
+            /* Same as above, only for the dupBIN. */
+            origCursor.latchBINs();
+            try {
+                if (forward) {
+                    if (origDBIN.getNEntries() - 1 >
+                        origCursor.getDupIndex()) {
+
+                        /*
+                         * We were adjusted to something other than the last
+                         * entry so some insertion happened.
+                         */
+                        for (int i = origCursor.getDupIndex() + 1;
+                             i < origDBIN.getNEntries();
+                             i++) {
+                            if (!origDBIN.isEntryKnownDeleted(i)) {
+                                Node n = origDBIN.fetchTarget(i);
+                                /* See comment above about locking. */
+                                if (n != null && !((LN) n).isDeleted()) {
+                                    ret = true;
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                } else {
+                    if (origCursor.getDupIndex() > 0) {
+
+                        /*
+                         * We were adjusted to something other than the first
+                         * entry so some insertion happened.
+                         */
+                        for (int i = 0; i < origCursor.getDupIndex(); i++) {
+                            if (!origDBIN.isEntryKnownDeleted(i)) {
+                                Node n = origDBIN.fetchTarget(i);
+                                /* See comment above about locking. */
+                                if (n != null && !((LN) n).isDeleted()) {
+                                    ret = true;
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                }
+            } finally {
+                origCursor.releaseBINs();
+            }
+            return ret;
+        }
+        return false;
+    }
+
+    /**
+     * If the cursor is initialized, dups it and returns the dup; otherwise,
+     * returns the original.  This avoids the overhead of duping when the
+     * original is uninitialized.  The cursor returned must be passed to
+     * endRead() to close the correct cursor.
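+     *
+     * <p>A sketch of the intended calling pattern (the read in the middle
+     * stands for whatever internal operation the caller performs):</p>
+     *
+     * <pre>
+     *     CursorImpl dup = beginRead(true);
+     *     OperationStatus status = ...; // perform the read using dup
+     *     endRead(dup, status == OperationStatus.SUCCESS);
+     * </pre>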
+     */
+    private CursorImpl beginRead(boolean addCursor)
+        throws DatabaseException {
+
+        CursorImpl dup;
+        if (cursorImpl.isNotInitialized()) {
+            dup = cursorImpl;
+        } else {
+            dup = cursorImpl.cloneCursor(addCursor);
+        }
+        return dup;
+    }
+
+    /**
+     * If the operation is successful, swaps cursors and closes the original
+     * cursor; otherwise, closes the duped cursor.  In the case where the
+     * original cursor was not duped by beginRead because it was uninitialized,
+     * just resets the original cursor if the operation did not succeed.
+     */
+    private void endRead(CursorImpl dup, boolean success)
+        throws DatabaseException {
+
+        if (dup == cursorImpl) {
+            if (!success) {
+                cursorImpl.reset();
+            }
+        } else {
+            if (success) {
+                cursorImpl.close();
+                cursorImpl = dup;
+            } else {
+                dup.close();
+            }
+        }
+    }
+
+    boolean advanceCursor(DatabaseEntry key, DatabaseEntry data) {
+        return cursorImpl.advanceCursor(key, data);
+    }
+
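+    /**
+     * Maps the public LockMode to the internal LockType, choosing the
+     * range-lock variant when rangeLock is true: read-uncommitted modes map
+     * to NONE, null/DEFAULT to READ or RANGE_READ, and RMW to WRITE or
+     * RANGE_WRITE.  READ_COMMITTED is rejected because that isolation level
+     * is specified via CursorConfig rather than a per-operation lock mode.
+     */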
+    private LockType getLockType(LockMode lockMode, boolean rangeLock) {
+
+        if (isReadUncommittedMode(lockMode)) {
+            return LockType.NONE;
+        } else if (lockMode == null || lockMode == LockMode.DEFAULT) {
+            return rangeLock ? LockType.RANGE_READ : LockType.READ;
+        } else if (lockMode == LockMode.RMW) {
+            return rangeLock ? LockType.RANGE_WRITE : LockType.WRITE;
+        } else if (lockMode == LockMode.READ_COMMITTED) {
+            throw new IllegalArgumentException
+                (lockMode.toString() + " not allowed with Cursor methods");
+        } else {
+            assert false : lockMode;
+            return LockType.NONE;
+        }
+    }
+
+    /**
+     * Returns whether the given lock mode will cause a read-uncommitted when
+     * used with this cursor, taking into account the default cursor
+     * configuration.
+     */
+    boolean isReadUncommittedMode(LockMode lockMode) {
+
+        return (lockMode == LockMode.READ_UNCOMMITTED ||
+                (readUncommittedDefault &&
+                 (lockMode == null || lockMode == LockMode.DEFAULT)));
+    }
+
+    private boolean isSerializableIsolation(LockMode lockMode) {
+
+        return serializableIsolationDefault &&
+               !isReadUncommittedMode(lockMode);
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     */
+    protected void checkUpdatesAllowed(String operation)
+        throws DatabaseException {
+
+        if (updateOperationsProhibited) {
+            throw new DatabaseException
+                ("A transaction was not supplied when opening this cursor: " +
+                 operation);
+        }
+    }
+
+    /**
+     * Note that this flavor of checkArgs doesn't require that the key and data
+     * are not null.
+     */
+    private void checkArgsNoValRequired(DatabaseEntry key,
+                                        DatabaseEntry data) {
+        DatabaseUtil.checkForNullDbt(key, "key", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+    }
+
+    /**
+     * Note that this flavor of checkArgs requires that the key and data are
+     * not null.
+     */
+    private void checkArgsValRequired(DatabaseEntry key,
+                                      DatabaseEntry data) {
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+    }
+
+    /**
+     * Checks the environment and cursor state.
+     */
+    void checkState(boolean mustBeInitialized)
+        throws DatabaseException {
+
+        checkEnv();
+        cursorImpl.checkCursorState(mustBeInitialized);
+    }
+
+    /**
+     * @throws RunRecoveryException if the underlying environment is invalid.
+     */
+    void checkEnv()
+        throws RunRecoveryException {
+
+        cursorImpl.checkEnv();
+    }
+
+    /**
+     * Sends trace messages to the java.util.logger.  Don't rely on the logger
+     * alone to decide whether to send this message; we don't even want to
+     * construct the message if the level is not enabled.
+     */
+    void trace(Level level,
+               String methodName,
+               DatabaseEntry key,
+               DatabaseEntry data,
+               LockMode lockMode) {
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(methodName);
+            traceCursorImpl(sb);
+            if (key != null) {
+                sb.append(" key=").append(key.dumpData());
+            }
+            if (data != null) {
+                sb.append(" data=").append(data.dumpData());
+            }
+            if (lockMode != null) {
+                sb.append(" lockMode=").append(lockMode);
+            }
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Sends trace messages to the java.util.logger.  Don't rely on the logger
+     * alone to decide whether to send this message; we don't even want to
+     * construct the message if the level is not enabled.
+     */
+    void trace(Level level, String methodName, LockMode lockMode) {
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(methodName);
+            traceCursorImpl(sb);
+            if (lockMode != null) {
+                sb.append(" lockMode=").append(lockMode);
+            }
+            logger.log(level, sb.toString());
+        }
+    }
+
+    private void traceCursorImpl(StringBuffer sb) {
+        sb.append(" locker=").append(cursorImpl.getLocker().getId());
+        if (cursorImpl.getBIN() != null) {
+            sb.append(" bin=").append(cursorImpl.getBIN().getNodeId());
+        }
+        sb.append(" idx=").append(cursorImpl.getIndex());
+
+        if (cursorImpl.getDupBIN() != null) {
+            sb.append(" Dbin=").append(cursorImpl.getDupBIN().getNodeId());
+        }
+        sb.append(" dupIdx=").append(cursorImpl.getDupIndex());
+    }
+}
diff --git a/src/com/sleepycat/je/CursorConfig.java b/src/com/sleepycat/je/CursorConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..5dcc79b961198126af000f86ff4d22bc25510675
--- /dev/null
+++ b/src/com/sleepycat/je/CursorConfig.java
@@ -0,0 +1,169 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorConfig.java,v 1.25.2.4 2010/03/22 21:53:33 mark Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Specifies the attributes of a database cursor.  An instance created with the
+ * default constructor is initialized with the system's default settings.
+ */
+public class CursorConfig implements Cloneable {
+
+    /**
+     * Default configuration used if null is passed to methods that create a
+     * cursor.
+     */
+    public static final CursorConfig DEFAULT = new CursorConfig();
+
+    /**
+     * A convenience instance to configure read operations performed by the
+     * cursor to return modified but not yet committed data.
+     */
+    public static final CursorConfig READ_UNCOMMITTED = new CursorConfig();
+
+    /**
+     * A convenience instance to configure read operations performed by the
+     * cursor to return modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #READ_UNCOMMITTED} to
+     * conform to ANSI database isolation terminology.
+     */
+    public static final CursorConfig DIRTY_READ = READ_UNCOMMITTED;
+
+    /**
+     * A convenience instance to configure a cursor for read committed
+     * isolation.
+     *
+     * This ensures the stability of the current data item read by the cursor
+     * but permits data read by this cursor to be modified or deleted prior to
+     * the commit of the transaction.
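+     *
+     * <p>For example, a minimal sketch of opening a cursor with this
+     * configuration (assuming an open <code>Database</code> handle
+     * <code>db</code> and a <code>Transaction</code> <code>txn</code>):</p>
+     *
+     * <blockquote><pre>
+     *     Cursor cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED);
+     * </pre></blockquote>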
+     */
+    public static final CursorConfig READ_COMMITTED = new CursorConfig();
+
+    static {
+        READ_UNCOMMITTED.setReadUncommitted(true);
+        READ_COMMITTED.setReadCommitted(true);
+    }
+
+    private boolean readUncommitted = false;
+    private boolean readCommitted = false;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public CursorConfig() {
+    }
+
+    /**
+     * Configures read operations performed by the cursor to return modified
+     * but not yet committed data.
+     *
+     * @param readUncommitted If true, configure read operations performed by
+     * the cursor to return modified but not yet committed data.
+     *
+     * @see LockMode#READ_UNCOMMITTED
+     */
+    public void setReadUncommitted(boolean readUncommitted) {
+        this.readUncommitted = readUncommitted;
+    }
+
+    /**
+     * Returns true if read operations performed by the cursor are configured
+     * to return modified but not yet committed data.
+     *
+     * @return true if read operations performed by the cursor are configured
+     * to return modified but not yet committed data.
+     *
+     * @see LockMode#READ_UNCOMMITTED
+     */
+    public boolean getReadUncommitted() {
+        return readUncommitted;
+    }
+
+    /**
+     * Configures read operations performed by the cursor to return modified
+     * but not yet committed data.
+     *
+     * @param dirtyRead If true, configure read operations performed by the
+     * cursor to return modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #setReadUncommitted} to
+     * conform to ANSI database isolation terminology.
+     */
+    public void setDirtyRead(boolean dirtyRead) {
+        setReadUncommitted(dirtyRead);
+    }
+
+    /**
+     * Returns true if read operations performed by the cursor are configured
+     * to return modified but not yet committed data.
+     *
+     * @return true if read operations performed by the cursor are configured
+     * to return modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #getReadUncommitted} to
+     * conform to ANSI database isolation terminology.
+     */
+    public boolean getDirtyRead() {
+        return getReadUncommitted();
+    }
+
+    /**
+     * Configures read operations performed by the cursor to obey read
+     * committed isolation. Read committed isolation provides for cursor
+     * stability but not repeatable reads. Data items which have been
+     * previously read by this transaction may be deleted or modified by other
+     * transactions before the cursor is closed or the transaction completes.
+     *
+     * @param readCommitted If true, configure read operations performed by
+     * the cursor to obey read committed isolation.
+     *
+     * @see LockMode#READ_COMMITTED
+     */
+    public void setReadCommitted(boolean readCommitted) {
+        this.readCommitted = readCommitted;
+    }
+
+    /**
+     * Returns true if read operations performed by the cursor are configured
+     * to obey read committed isolation.
+     *
+     * @return true if read operations performed by the cursor are configured
+     * to obey read committed isolation.
+     *
+     * @see LockMode#READ_COMMITTED
+     */
+    public boolean getReadCommitted() {
+        return readCommitted;
+    }
+
+    /**
+     * Internal method used by Cursor to create a copy of the application
+     * supplied configuration. Done this way to provide non-public cloning.
+     */
+    CursorConfig cloneConfig() {
+        try {
+            return (CursorConfig) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "readUncommitted=" + readUncommitted +
+            "\nreadCommitted=" + readCommitted +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/Database.java b/src/com/sleepycat/je/Database.java
new file mode 100644
index 0000000000000000000000000000000000000000..433b76c26251815b96d06618b3f13f65762f1554
--- /dev/null
+++ b/src/com/sleepycat/je/Database.java
@@ -0,0 +1,1742 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Database.java,v 1.242.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.dbi.PutMode;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.LockerFactory;
+import com.sleepycat.je.utilint.DatabaseUtil;
+import com.sleepycat.je.utilint.TinyHashSet;
+
+/**
+ * A database handle.
+ *
+ * <p>Database attributes are specified in the {@link
+ * com.sleepycat.je.DatabaseConfig DatabaseConfig} class. Database handles are
+ * free-threaded and may be used concurrently by multiple threads.</p>
+ *
+ * <p>To open an existing database with default attributes:</p>
+ *
+ * <blockquote><pre>
+ *     Environment env = new Environment(home, null);
+ *     Database myDatabase = env.openDatabase(null, "mydatabase", null);
+ * </pre></blockquote>
+ *
+ * <p>To create a transactional database that supports duplicates:</p>
+ *
+ * <blockquote><pre>
+ *     DatabaseConfig dbConfig = new DatabaseConfig();
+ *     dbConfig.setTransactional(true);
+ *     dbConfig.setAllowCreate(true);
+ *     dbConfig.setSortedDuplicates(true);
+ *     Database newlyCreatedDb = env.openDatabase(txn, "mydatabase", dbConfig);
+ * </pre></blockquote>
+ */
+public class Database {
+
+    /*
+     * DbState embodies the Database handle state.
+     */
+    static class DbState {
+        private String stateName;
+
+        DbState(String stateName) {
+            this.stateName = stateName;
+        }
+
+        @Override
+        public String toString() {
+            return "DbState." + stateName;
+        }
+    }
+
+    static DbState OPEN = new DbState("OPEN");
+    static DbState CLOSED = new DbState("CLOSED");
+    static DbState INVALID = new DbState("INVALID");
+
+    /* The current state of the handle. */
+    private volatile DbState state;
+
+    /* Handles onto the owning environment and the databaseImpl object. */
+    Environment envHandle;            // used by subclasses
+    private DatabaseImpl databaseImpl;
+
+    DatabaseConfig configuration;     // properties used at execution
+
+    /* True if this handle permits write operations. */
+    private boolean isWritable;
+
+    /* Transaction that owns the db lock held while the Database is open. */
+    Locker handleLocker;
+
+    /* Set of cursors open against this db handle. */
+    private TinyHashSet<Cursor> cursors = new TinyHashSet<Cursor>();
+
+    /*
+     * DatabaseTrigger list.  The list is null if empty, and is checked for
+     * null to avoid read locking overhead when no triggers are present.
+     * Access to this list is protected by the shared trigger latch in
+     * EnvironmentImpl.
+     */
+    private List<DatabaseTrigger> triggerList;
+
+    private Logger logger;
+
+    /**
+     * Creates a database but does not open or fully initialize it.  This
+     * constructor is protected for use in the compat package.
+     * @param env
+     */
+    protected Database(Environment env) {
+        this.envHandle = env;
+        handleLocker = null;
+        logger = envHandle.getEnvironmentImpl().getLogger();
+    }
+
+    /**
+     * Creates a database, called by Environment.
+     */
+    void initNew(Environment env,
+                 Locker locker,
+                 String databaseName,
+                 DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        dbConfig.validateForNewDb();
+
+        init(env, dbConfig);
+
+        /* Make the databaseImpl. */
+        EnvironmentImpl environmentImpl =
+            DbInternal.envGetEnvironmentImpl(envHandle);
+        databaseImpl = environmentImpl.getDbTree().createDb
+            (locker, databaseName, dbConfig, this);
+        databaseImpl.addReferringHandle(this);
+
+        /*
+         * Copy the replicated setting into the cloned handle configuration.
+         */
+        configuration.setReplicated(databaseImpl.isReplicated());
+    }
+
+    /**
+     * Opens a database, called by Environment.
+     */
+    void initExisting(Environment env,
+                      Locker locker,
+                      DatabaseImpl databaseImpl,
+                      DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        /*
+         * Make sure the configuration used for the open is compatible with the
+         * existing databaseImpl.
+         */
+        validateConfigAgainstExistingDb(dbConfig, databaseImpl);
+
+        init(env, dbConfig);
+        this.databaseImpl = databaseImpl;
+        databaseImpl.addReferringHandle(this);
+
+        /*
+         * Copy the duplicates, transactional and replicated properties of the
+         * underlying database, in case the useExistingConfig property is set.
+         */
+        configuration.setSortedDuplicates(databaseImpl.getSortedDuplicates());
+        configuration.setTransactional(databaseImpl.isTransactional());
+        configuration.setReplicated(databaseImpl.isReplicated());
+    }
+
+    private void init(Environment env,
+                      DatabaseConfig config)
+        throws DatabaseException {
+
+        handleLocker = null;
+
+        envHandle = env;
+        configuration = config.cloneConfig();
+        isWritable = !configuration.getReadOnly();
+        state = OPEN;
+    }
+
+    /**
+     * Checks that this new handle's configuration is compatible with the
+     * pre-existing database.
+     */
+    private void validateConfigAgainstExistingDb(DatabaseConfig config,
+                                                 DatabaseImpl databaseImpl)
+        throws DatabaseException {
+
+        /*
+         * The sortedDuplicates, temporary, and replicated properties are
+         * persistent and immutable.  But they do not need to be specified if
+         * the useExistingConfig property is set.
+         */
+        if (!config.getUseExistingConfig()) {
+            validatePropertyMatches
+                ("sortedDuplicates", databaseImpl.getSortedDuplicates(),
+                 config.getSortedDuplicates());
+            validatePropertyMatches
+                ("temporary", databaseImpl.isTemporary(),
+                 config.getTemporary());
+            /* Only check replicated if the environment is replicated. */
+            if (envHandle.getEnvironmentImpl().isReplicated()) {
+                if (databaseImpl.unknownReplicated()) {
+                    throw new UnsupportedOperationException("Conversion " +
+                          "of standalone environments to replicated " +
+                          "environments isn't supported yet");
+                }
+                validatePropertyMatches
+                    ("replicated", databaseImpl.isReplicated(),
+                     DbInternal.getDbConfigReplicated(config));
+            }
+        }
+
+        /*
+         * The transactional and deferredWrite properties are kept constant
+         * while any handles are open, and set when the first handle is opened.
+         * But if an existing handle is open and the useExistingConfig property
+         * is set, then they do not need to be specified.
+         */
+        if (databaseImpl.hasOpenHandles()) {
+            if (!config.getUseExistingConfig()) {
+                validatePropertyMatches
+                    ("transactional", databaseImpl.isTransactional(),
+                     config.getTransactional());
+                validatePropertyMatches
+                    ("deferredWrite", databaseImpl.isDurableDeferredWrite(),
+                     config.getDeferredWrite());
+            }
+        } else {
+            databaseImpl.setTransactional(config.getTransactional());
+            databaseImpl.setDeferredWrite(config.getDeferredWrite());
+        }
+
+        /*
+         * Only re-set the comparators if the override is allowed.
+         */
+        boolean dbImplModified = false;
+        if (config.getOverrideBtreeComparator()) {
+            dbImplModified |= databaseImpl.setBtreeComparator
+                (config.getBtreeComparator(),
+                 config.getBtreeComparatorByClassName());
+        }
+
+        if (config.getOverrideDuplicateComparator()) {
+            dbImplModified |= databaseImpl.setDuplicateComparator
+                (config.getDuplicateComparator(),
+                 config.getDuplicateComparatorByClassName());
+        }
+
+        boolean newKeyPrefixing = config.getKeyPrefixing();
+        if (newKeyPrefixing != databaseImpl.getKeyPrefixing()) {
+            dbImplModified = true;
+            if (newKeyPrefixing) {
+                databaseImpl.setKeyPrefixing();
+            } else {
+                databaseImpl.clearKeyPrefixing();
+            }
+        }
+
+        /* [#15743] */
+        if (dbImplModified) {
+            EnvironmentImpl envImpl = envHandle.getEnvironmentImpl();
+
+            /* Dirty the root. */
+            envImpl.getDbTree().modifyDbRoot(databaseImpl);
+        }
+    }
+
+    private void validatePropertyMatches(String propName,
+                                         boolean existingValue,
+                                         boolean newValue)
+        throws IllegalArgumentException {
+
+        if (newValue != existingValue) {
+            throw new IllegalArgumentException
+                ("You can't open a Database with a " + propName +
+                 " configuration of " + newValue +
+                 " if the underlying database was created with a " +
+                 propName + " setting of " + existingValue + '.');
+        }
+    }
+
+    /**
+     * Discards the database handle.
+     * <p>
+     * When closing the last open handle for a deferred-write database, any
+     * cached database information is flushed to disk as if {@link #sync} were
+     * called.
+     * <p>
+     * The database handle should not be closed while any other handle that
+     * refers to it is not yet closed; for example, database handles should not
+     * be closed while cursor handles into the database remain open, or
+     * transactions that include operations on the database have not yet been
+     * committed or aborted.  Specifically, this includes {@link
+     * com.sleepycat.je.Cursor Cursor} and {@link com.sleepycat.je.Transaction
+     * Transaction} handles.
+     * <p>
+     * When multiple threads are using the {@link com.sleepycat.je.Database
+     * Database} handle concurrently, only a single thread may call this
+     * method.
+     * <p>
+     * The database handle may not be accessed again after this method is
+     * called, regardless of the method's success or failure.
+     * <p>
+     * When called on a database that is the primary database for a secondary
+     * index, the primary database should be closed only after all secondary
+     * indices which reference it have been closed.
+     *
+     * @see DatabaseConfig#setDeferredWrite DatabaseConfig.setDeferredWrite
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void close()
+        throws DatabaseException {
+
+        try {
+            closeInternal(true /* doSyncDw */);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * This method is private for now because it is incomplete.  To fully
+     * implement it we must clear all dirty nodes for the database that is
+     * closed, since otherwise they will be flushed during the next checkpoint.
+     */
+    @SuppressWarnings("unused")
+    private void closeNoSync()
+        throws DatabaseException {
+
+        try {
+            closeInternal(false /* doSyncDw */);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    private void closeInternal(boolean doSyncDw)
+        throws DatabaseException {
+
+        StringBuffer errors = null;
+        DatabaseImpl dbClosed = null;
+
+        synchronized (this) {
+            checkEnv();
+            checkProhibitedDbState(CLOSED, "Can't close Database:");
+
+            trace(Level.FINEST, "Database.close: ", null, null);
+
+            /* Disassociate triggers before closing. */
+            removeAllTriggers();
+
+            envHandle.removeReferringHandle(this);
+            if (cursors.size() > 0) {
+                errors = new StringBuffer
+                    ("There are open cursors against the database.\n");
+                errors.append("They will be closed.\n");
+
+                /*
+                 * Copy the cursors set before iterating since the dbc.close()
+                 * mutates the set.
+                 */
+                Iterator<Cursor> iter = cursors.copy().iterator();
+                while (iter.hasNext()) {
+                    Cursor dbc = iter.next();
+
+                    try {
+                        dbc.close();
+                    } catch (DatabaseException DBE) {
+                        errors.append("Exception while closing cursors:\n");
+                        errors.append(DBE.toString());
+                    }
+                }
+            }
+
+            if (databaseImpl != null) {
+                dbClosed = databaseImpl;
+                databaseImpl.removeReferringHandle(this);
+                envHandle.getEnvironmentImpl().
+                    getDbTree().releaseDb(databaseImpl);
+                databaseImpl = null;
+
+                /*
+                 * Tell our protecting txn that we're closing. If this type of
+                 * transaction doesn't live beyond the life of the handle, it
+                 * will release the db handle lock.
+                 */
+                handleLocker.setHandleLockOwner(true, this, true);
+                handleLocker.operationEnd(true);
+                state = CLOSED;
+            }
+        }
+
+        /*
+         * Notify the database when a handle is closed.  This should not be
+         * done while synchronized since it may perform database removal or
+         * sync.  The statements above are synchronized but this section must
+         * not be.
+         */
+        if (dbClosed != null) {
+            dbClosed.handleClosed(doSyncDw);
+        }
+
+        if (errors != null) {
+            throw new DatabaseException(errors.toString());
+        }
+    }
+
+    /**
+     * Flushes any cached information for this database to disk; only
+     * applicable for deferred-write databases.
+     * <p> Note that deferred-write databases are automatically flushed to disk
+     * when the {@link #close} method is called.
+     *
+     * @see DatabaseConfig#setDeferredWrite DatabaseConfig.setDeferredWrite
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void sync()
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.sync:");
+        checkWritable("sync");
+        trace(Level.FINEST, "Database.sync", null, null, null, null);
+
+        databaseImpl.sync(true);
+    }
+
+    /**
+     * Opens a sequence in the database.
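+     *
+     * <p>A sketch of typical use; the key <code>seqKey</code> and the
+     * allow-create setting are assumptions for illustration:</p>
+     *
+     * <blockquote><pre>
+     *     SequenceConfig seqConfig = new SequenceConfig();
+     *     seqConfig.setAllowCreate(true);
+     *     Sequence seq = db.openSequence(null, seqKey, seqConfig);
+     *     long next = seq.get(null, 1);   // obtain the next value
+     *     seq.close();
+     * </pre></blockquote>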
+     *
+     * @param txn For a transactional database, an explicit transaction may
+     * be specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key The key {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} of the sequence.
+     *
+     * @param config The sequence attributes.  If null, default
+     * attributes are used.
+     *
+     * @return A sequence handle.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public Sequence openSequence(Transaction txn,
+                                 DatabaseEntry key,
+                                 SequenceConfig config)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            DatabaseUtil.checkForNullDbt(key, "key", true);
+            checkRequiredDbState(OPEN, "Can't call Database.openSequence:");
+            checkWritable("openSequence");
+            trace(Level.FINEST, "Database.openSequence", txn, key, null, null);
+
+            return new Sequence(this, txn, key, config);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Removes the sequence from the database.  This method should not be
+     * called if there are open handles on this sequence.
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key The key {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} of the sequence.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void removeSequence(Transaction txn, DatabaseEntry key)
+        throws DatabaseException {
+
+        try {
+            delete(txn, key);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Returns a cursor into the database.
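+     *
+     * <p>For example, a sketch of a full scan over all records (assuming an
+     * open <code>Database</code> handle <code>db</code>; a null transaction
+     * is used for brevity):</p>
+     *
+     * <blockquote><pre>
+     *     Cursor cursor = db.openCursor(null, null);
+     *     try {
+     *         DatabaseEntry key = new DatabaseEntry();
+     *         DatabaseEntry data = new DatabaseEntry();
+     *         while (cursor.getNext(key, data, null) ==
+     *                OperationStatus.SUCCESS) {
+     *             // process key.getData() and data.getData()
+     *         }
+     *     } finally {
+     *         cursor.close();
+     *     }
+     * </pre></blockquote>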
+     *
+     * @param txn To use a cursor for writing to a transactional database, an
+     * explicit transaction must be specified.  For read-only access to a
+     * transactional database, the transaction may be null.  For a
+     * non-transactional database, the transaction must be null.
+     *
+     * <p>To transaction-protect cursor operations, cursors must be opened and
+     * closed within the context of a transaction, and the txn parameter
+     * specifies the transaction context in which the cursor will be used.</p>
+     *
+     * @param cursorConfig The cursor attributes.  If null, default
+     * attributes are used.
+     *
+     * @return A database cursor.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public synchronized Cursor openCursor(Transaction txn,
+                                          CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            checkRequiredDbState(OPEN, "Can't open a cursor");
+            CursorConfig useConfig =
+                (cursorConfig == null) ? CursorConfig.DEFAULT : cursorConfig;
+
+            if (useConfig.getReadUncommitted() &&
+                useConfig.getReadCommitted()) {
+                throw new IllegalArgumentException
+                    ("Only one may be specified: " +
+                     "ReadCommitted or ReadUncommitted");
+            }
+
+            trace(Level.FINEST, "Database.openCursor", txn, cursorConfig);
+            Cursor ret = newDbcInstance(txn, useConfig);
+
+            return ret;
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Is overridden by SecondaryDatabase.
+     */
+    Cursor newDbcInstance(Transaction txn,
+                          CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        return new Cursor(this, txn, cursorConfig);
+    }
+
+    /**
+     * Removes key/data pairs from the database.
+     *
+     * <p>The key/data pair associated with the specified key is discarded
+     * from the database.  In the presence of duplicate key values, all
+     * records associated with the designated key will be discarded.</p>
+     *
+     * <p>The key/data pair is also deleted from any associated secondary
+     * databases.</p>
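+     *
+     * <p>For example, a sketch of deleting all records for one key under an
+     * explicit transaction (<code>keyBytes</code> is an assumed, previously
+     * stored key):</p>
+     *
+     * <blockquote><pre>
+     *     OperationStatus status =
+     *         db.delete(txn, new DatabaseEntry(keyBytes));
+     * </pre></blockquote>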
+     *
+     * @param txn For a transactional database, an explicit transaction may
+     * be specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry DatabaseEntry}
+     * operated on.
+     *
+     * @return The method will return {@link
+     * com.sleepycat.je.OperationStatus#NOTFOUND OperationStatus.NOTFOUND} if
+     * the specified key is not found in the database; otherwise the method
+     * will return {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus delete(Transaction txn, DatabaseEntry key)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            DatabaseUtil.checkForNullDbt(key, "key", true);
+            checkRequiredDbState(OPEN, "Can't call Database.delete:");
+            checkWritable("delete");
+            trace(Level.FINEST, "Database.delete", txn, key, null, null);
+
+            OperationStatus commitStatus = OperationStatus.NOTFOUND;
+            Locker locker = null;
+            try {
+                locker = LockerFactory.getWritableLocker
+                    (envHandle, txn, isTransactional(),
+                     databaseImpl.isReplicated()); // autoTxnIsReplicated
+                commitStatus = deleteInternal(locker, key, null);
+                return commitStatus;
+            } finally {
+                if (locker != null) {
+                    locker.operationEnd(commitStatus);
+                }
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * This is commented out until we agree on whether this should even be in
+     * the API.  See [14264].
+    private OperationStatus delete(Transaction txn,
+                                   DatabaseEntry key,
+                                   DatabaseEntry data)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            DatabaseUtil.checkForNullDbt(key, "key", true);
+            DatabaseUtil.checkForNullDbt(data, "data", true);
+            checkRequiredDbState(OPEN, "Can't call Database.delete:");
+            checkWritable("delete");
+            trace(Level.FINEST, "Database.delete", txn, key, data, null);
+
+            OperationStatus commitStatus = OperationStatus.NOTFOUND;
+            Locker locker = null;
+            try {
+                locker = LockerFactory.getWritableLocker
+                    (envHandle, txn, isTransactional());
+                commitStatus = deleteInternal(locker, key, data);
+                return commitStatus;
+            } finally {
+                if (locker != null) {
+                    locker.operationEnd(commitStatus);
+                }
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+    */
+
+    /**
+     * Internal version of delete() that does no parameter checking.  Notifies
+     * triggers and deletes all duplicates.
+     */
+    OperationStatus deleteInternal(Locker locker,
+                                   DatabaseEntry key,
+                                   DatabaseEntry data)
+        throws DatabaseException {
+
+        Cursor cursor = null;
+        try {
+            cursor = new Cursor(this, locker, null);
+            cursor.setNonCloning(true);
+            OperationStatus commitStatus = OperationStatus.NOTFOUND;
+
+            /* Position a cursor at the specified data record. */
+            DatabaseEntry oldData;
+            OperationStatus searchStatus;
+            if (data == null) {
+                oldData = new DatabaseEntry();
+                searchStatus =
+                    cursor.search(key, oldData, LockMode.RMW, SearchMode.SET);
+            } else {
+                oldData = data;
+                searchStatus =
+                    cursor.search(key, oldData, LockMode.RMW, SearchMode.BOTH);
+            }
+
+            /* Delete all records with that key. */
+            if (searchStatus == OperationStatus.SUCCESS) {
+                do {
+
+                    /*
+                     * Notify triggers before the actual deletion so that a
+                     * primary record never exists while secondary keys refer
+                     * to it.  This is relied on by secondary read-uncommitted.
+                     */
+                    if (hasTriggers()) {
+                        notifyTriggers(locker, key, oldData, null);
+                    }
+                    /* The actual deletion. */
+                    commitStatus = cursor.deleteNoNotify();
+                    if (commitStatus != OperationStatus.SUCCESS) {
+                        return commitStatus;
+                    }
+
+                    if (data != null) {
+                        /* delete(key, data) called so only delete one item. */
+                        break;
+                    }
+
+                    /* Get another duplicate. */
+                    if (databaseImpl.getSortedDuplicates()) {
+                        searchStatus =
+                            cursor.retrieveNext(key, oldData,
+                                                LockMode.RMW,
+                                                GetMode.NEXT_DUP);
+                    } else {
+                        searchStatus = OperationStatus.NOTFOUND;
+                    }
+                } while (searchStatus == OperationStatus.SUCCESS);
+                commitStatus = OperationStatus.SUCCESS;
+            }
+            return commitStatus;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    /**
+     * Retrieves the key/data pair with the given key.  If the matching key has
+     * duplicate values, the first data item in the set of duplicates is
+     * returned. Retrieval of duplicates requires the use of {@link Cursor}
+     * operations.
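+     *
+     * <p>For example, a sketch of a simple lookup (<code>keyBytes</code> is
+     * an assumed, previously stored key):</p>
+     *
+     * <blockquote><pre>
+     *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     if (db.get(null, key, data, null) == OperationStatus.SUCCESS) {
+     *         byte[] value = data.getData();
+     *     }
+     * </pre></blockquote>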
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified to transaction-protect the operation, or null may be specified
+     * to perform the operation without transaction protection.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param data the data returned as output.  Its byte array does not need
+     * to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus get(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            DatabaseUtil.checkForNullDbt(key, "key", true);
+            DatabaseUtil.checkForNullDbt(data, "data", false);
+            checkRequiredDbState(OPEN, "Can't call Database.get:");
+            trace(Level.FINEST, "Database.get", txn, key, null, lockMode);
+
+            CursorConfig cursorConfig = CursorConfig.DEFAULT;
+            if (lockMode == LockMode.READ_COMMITTED) {
+                cursorConfig = CursorConfig.READ_COMMITTED;
+                lockMode = null;
+            }
+
+            Cursor cursor = null;
+            try {
+                cursor = new Cursor(this, txn, cursorConfig);
+                cursor.setNonCloning(true);
+                return cursor.search(key, data, lockMode, SearchMode.SET);
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Retrieves the key/data pair with the given key and data value, that is,
+     * both the key and data items must match.
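+     *
+     * <p>For example, a sketch of testing for an exact key/data pair
+     * (<code>keyBytes</code> and <code>dataBytes</code> are assumed
+     * values):</p>
+     *
+     * <blockquote><pre>
+     *     OperationStatus status = db.getSearchBoth
+     *         (null, new DatabaseEntry(keyBytes),
+     *          new DatabaseEntry(dataBytes), null);
+     * </pre></blockquote>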
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified to transaction-protect the operation, or null may be specified
+     * to perform the operation without transaction protection.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     * byte array by the caller.
+     *
+     * @param data the data used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus getSearchBoth(Transaction txn,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            DatabaseUtil.checkForNullDbt(key, "key", true);
+            DatabaseUtil.checkForNullDbt(data, "data", true);
+            checkRequiredDbState(OPEN, "Can't call Database.getSearchBoth:");
+            trace(Level.FINEST, "Database.getSearchBoth", txn, key, data,
+                  lockMode);
+
+            CursorConfig cursorConfig = CursorConfig.DEFAULT;
+            if (lockMode == LockMode.READ_COMMITTED) {
+                cursorConfig = CursorConfig.READ_COMMITTED;
+                lockMode = null;
+            }
+
+            Cursor cursor = null;
+            try {
+                cursor = new Cursor(this, txn, cursorConfig);
+                cursor.setNonCloning(true);
+                return cursor.search(key, data, lockMode, SearchMode.BOTH);
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Stores the key/data pair into the database.
+     *
+     * <p>If the key already appears in the database and duplicates are not
+     * configured, the data associated with the key will be replaced.  If the
+     * key already appears in the database and sorted duplicates are
+     * configured, the new data value is inserted at the correct sorted
+     * location.</p>
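+     *
+     * <p>For example, a sketch of storing one record with auto-commit
+     * (<code>keyBytes</code> and <code>valueBytes</code> are assumed
+     * values):</p>
+     *
+     * <blockquote><pre>
+     *     OperationStatus status = db.put
+     *         (null, new DatabaseEntry(keyBytes),
+     *          new DatabaseEntry(valueBytes));
+     * </pre></blockquote>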
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry DatabaseEntry}
+     * operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     * 
+     * @return {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS} if the operation succeeds.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus put(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry data)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkRequiredDbState(OPEN, "Can't call Database.put");
+        checkWritable("put");
+        trace(Level.FINEST, "Database.put", txn, key, data, null);
+
+        return putInternal(txn, key, data, PutMode.OVERWRITE);
+    }
+
+    /**
+     * Stores the key/data pair into the database if the key does not already
+     * appear in the database.
+     *
+     * <p>This method will return {@link
+     * com.sleepycat.je.OperationStatus#KEYEXIST OperationStatus.KEYEXIST} if
+     * the key already exists in the database, even if the database supports
+     * duplicates.</p>
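+     *
+     * <p>For example, a sketch of an insert-if-absent (<code>txn</code>,
+     * <code>key</code> and <code>data</code> are assumed values):</p>
+     *
+     * <blockquote><pre>
+     *     OperationStatus status = db.putNoOverwrite(txn, key, data);
+     *     if (status == OperationStatus.KEYEXIST) {
+     *         // a record with this key already exists; nothing was written
+     *     }
+     * </pre></blockquote>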
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry DatabaseEntry}
+     * operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+     * OperationStatus.KEYEXIST} if the key already appears in the database,
+     * else {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if any other failure occurs.
+     */
+    public OperationStatus putNoOverwrite(Transaction txn,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkRequiredDbState(OPEN, "Can't call Database.putNoOverWrite");
+        checkWritable("putNoOverwrite");
+        trace(Level.FINEST, "Database.putNoOverwrite", txn, key, data, null);
+
+        return putInternal(txn, key, data, PutMode.NOOVERWRITE);
+    }
+
+    /**
+     * Stores the key/data pair into the database if it does not already appear
+     * in the database.
+     *
+     * <p>This method may only be called if the underlying database has been
+     * configured to support sorted duplicates.</p>
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the key {@link com.sleepycat.je.DatabaseEntry DatabaseEntry}
+     * operated on.
+     *
+     * @param data the data {@link com.sleepycat.je.DatabaseEntry
+     * DatabaseEntry} stored.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+     * OperationStatus.KEYEXIST} if the key/data pair already appears in the
+     * database, else {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus putNoDupData(Transaction txn,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(data, "data", true);
+        DatabaseUtil.checkForPartialKey(key);
+        checkRequiredDbState(OPEN, "Can't call Database.putNoDupData");
+        checkWritable("putNoDupData");
+        trace(Level.FINEST, "Database.putNoDupData", txn, key, data, null);
+
+        return putInternal(txn, key, data, PutMode.NODUP);
+    }
+
+    /**
+     * Internal version of put() that does no parameter checking.
+     */
+    OperationStatus putInternal(Transaction txn,
+                                DatabaseEntry key,
+                                DatabaseEntry data,
+                                PutMode putMode)
+        throws DatabaseException {
+
+        try {
+            Locker locker = null;
+            Cursor cursor = null;
+            OperationStatus commitStatus = OperationStatus.KEYEXIST;
+            try {
+                locker = LockerFactory.getWritableLocker
+                    (envHandle, txn, isTransactional(),
+                     databaseImpl.isReplicated()); // autoTxnIsReplicated
+
+                cursor = new Cursor(this, locker, null);
+                cursor.setNonCloning(true);
+                commitStatus = cursor.putInternal(key, data, putMode);
+                return commitStatus;
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+                if (locker != null) {
+                    locker.operationEnd(commitStatus);
+                }
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Creates a specialized join cursor for use in performing equality or
+     * natural joins on secondary indices.
+     *
+     * <p>Each cursor in the <code>cursors</code> array must have been
+     * initialized to refer to the key on which the underlying database should
+     * be joined.  Typically, this initialization is done by calling {@link
+     * Cursor#getSearchKey Cursor.getSearchKey}.</p>
+     *
+     * <p>Once the cursors have been passed to this method, they should not be
+     * accessed or modified until the newly created join cursor has been
+     * closed, or else inconsistent results may be returned.  However, the
+     * position of the cursors will not be changed by this method or by the
+     * methods of the join cursor.</p>
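+     *
+     * <p>A sketch of a two-index equality join; the secondary cursors
+     * <code>c1</code> and <code>c2</code> are assumed to be positioned via
+     * <code>getSearchKey</code> on their respective indices:</p>
+     *
+     * <blockquote><pre>
+     *     JoinCursor joinCursor =
+     *         primaryDb.join(new Cursor[] { c1, c2 }, null);
+     *     try {
+     *         DatabaseEntry key = new DatabaseEntry();
+     *         DatabaseEntry data = new DatabaseEntry();
+     *         while (joinCursor.getNext(key, data, null) ==
+     *                OperationStatus.SUCCESS) {
+     *             // each pair matches the keys of both secondary cursors
+     *         }
+     *     } finally {
+     *         joinCursor.close();
+     *     }
+     * </pre></blockquote>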
+     *
+     * @param cursors an array of cursors associated with this primary
+     * database.
+     *
+     * @param config The join attributes.  If null, default attributes are
+     * used.
+     *
+     * @return a specialized cursor that returns the results of the equality
+     * join operation.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @see JoinCursor
+     */
+    public JoinCursor join(Cursor[] cursors, JoinConfig config)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            checkRequiredDbState(OPEN, "Can't call Database.join");
+            DatabaseUtil.checkForNullParam(cursors, "cursors");
+            if (cursors.length == 0) {
+                throw new IllegalArgumentException
+                    ("At least one cursor is required.");
+            }
+
+            /*
+             * Check that all cursors use the same locker, if any cursor is
+             * transactional.  And if non-transactional, that all databases are
+             * in the same environment.
+             */
+            Locker locker = cursors[0].getCursorImpl().getLocker();
+            if (!locker.isTransactional()) {
+                EnvironmentImpl env = envHandle.getEnvironmentImpl();
+                for (int i = 1; i < cursors.length; i += 1) {
+                    Locker locker2 = cursors[i].getCursorImpl().getLocker();
+                    if (locker2.isTransactional()) {
+                        throw new IllegalArgumentException
+                            ("All cursors must use the same transaction.");
+                    }
+                    EnvironmentImpl env2 = cursors[i].getDatabaseImpl()
+                        .getDbEnvironment();
+                    if (env != env2) {
+                        throw new IllegalArgumentException
+                            ("All cursors must use the same environment.");
+                    }
+                }
+                locker = null; /* Don't reuse a non-transactional locker. */
+            } else {
+                for (int i = 1; i < cursors.length; i += 1) {
+                    Locker locker2 = cursors[i].getCursorImpl().getLocker();
+                    if (locker.getTxnLocker() != locker2.getTxnLocker()) {
+                        throw new IllegalArgumentException
+                            ("All cursors must use the same transaction.");
+                    }
+                }
+            }
+
+            /* Create the join cursor. */
+            return new JoinCursor(locker, this, cursors, config);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Preloads the cache.  This method should only be called when there are no
+     * operations being performed on the database in other threads.  Executing
+     * preload during concurrent updates may result in some or all of the tree
+     * not being loaded into the JE cache.  Executing preload during any other
+     * types of operations may result in JE exceeding its allocated cache
+     * size. preload() effectively locks the entire database and therefore will
+     * lock out the checkpointer, cleaner, and compressor, as well as not allow
+     * eviction to occur.
+     *
+     * @deprecated As of JE 2.0.83, replaced by {@link
+     * Database#preload(PreloadConfig)}.
+     *
+     * @param maxBytes The maximum number of bytes to load.  If maxBytes is 0,
+     * je.evictor.maxMemory is used.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void preload(long maxBytes)
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.preload");
+        databaseImpl.checkIsDeleted("preload");
+
+        PreloadConfig config = new PreloadConfig();
+        config.setMaxBytes(maxBytes);
+        databaseImpl.preload(config);
+    }
+
+    /**
+     * Preloads the cache.  This method should only be called when there are no
+     * operations being performed on the database in other threads.  Executing
+     * preload during concurrent updates may result in some or all of the tree
+     * not being loaded into the JE cache.  Executing preload during any other
+     * types of operations may result in JE exceeding its allocated cache
+     * size. preload() effectively locks the entire database and therefore will
+     * lock out the checkpointer, cleaner, and compressor, as well as not allow
+     * eviction to occur.
+     *
+     * @deprecated As of JE 2.0.101, replaced by {@link
+     * Database#preload(PreloadConfig)}.
+     *
+     * @param maxBytes The maximum number of bytes to load.  If maxBytes is 0,
+     * je.evictor.maxMemory is used.
+     *
+     * @param maxMillisecs The maximum time in milliseconds to use when
+     * preloading.  Preloading stops once this limit has been reached.  If
+     * maxMillisecs is 0, preloading can go on indefinitely or until maxBytes
+     * (if non-0) is reached.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void preload(long maxBytes, long maxMillisecs)
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.preload");
+        databaseImpl.checkIsDeleted("preload");
+
+        PreloadConfig config = new PreloadConfig();
+        config.setMaxBytes(maxBytes);
+        config.setMaxMillisecs(maxMillisecs);
+        databaseImpl.preload(config);
+    }
+
+    /**
+     * Preloads the cache.  This method should only be called when there are no
+     * operations being performed on the database in other threads.  Executing
+     * preload during concurrent updates may result in some or all of the tree
+     * not being loaded into the JE cache.  Executing preload during any other
+     * types of operations may result in JE exceeding its allocated cache
+     * size. preload() effectively locks the entire database and therefore will
+     * lock out the checkpointer, cleaner, and compressor, as well as not allow
+     * eviction to occur.
+     *
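+     * <p>For illustration only, a minimal sketch (the handle name
+     * <code>db</code> and the byte limit are hypothetical):</p>
+     * <pre>
+     *     PreloadConfig preloadConfig = new PreloadConfig();
+     *     preloadConfig.setMaxBytes(50 * 1024 * 1024);
+     *     PreloadStats preloadStats = db.preload(preloadConfig);
+     * </pre>
+     *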
+     * @param config The PreloadConfig object that specifies the parameters
+     * of the preload.
+     *
+     * @return A PreloadStats object with various statistics about the
+     * preload() operation.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public PreloadStats preload(PreloadConfig config)
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.preload");
+        databaseImpl.checkIsDeleted("preload");
+
+        return databaseImpl.preload(config);
+    }
+
+    /**
+     * Counts the key/data pairs in the database. This operation is faster than
+     * obtaining a count from a cursor based scan of the database, and will not
+     * perturb the current contents of the cache. However, the count is not
+     * guaranteed to be accurate if there are concurrent updates.
+     *
+     * <p>A count of the key/data pairs in the database is returned without
+     * adding to the cache.  The count may not be accurate in the face of
+     * concurrent update operations in the database.</p>
+     *
+     * @return The count of key/data pairs in the database.
+     */
+    public long count()
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.count");
+        databaseImpl.checkIsDeleted("count");
+
+        return databaseImpl.count();
+    }
+
+    /**
+     * Returns database statistics.
+     *
+     * <p>If this method has not been configured to avoid expensive operations
+     * (using the {@link com.sleepycat.je.StatsConfig#setFast
+     * StatsConfig.setFast} method), it will access some or all of the pages in
+     * the database, incurring a severe performance penalty as well as possibly
+     * flushing the underlying cache.</p>
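+     *
+     * <p>For illustration only, a sketch of requesting the less expensive
+     * statistics (the handle name <code>db</code> is hypothetical):</p>
+     * <pre>
+     *     StatsConfig statsConfig = new StatsConfig();
+     *     statsConfig.setFast(true);
+     *     DatabaseStats stats = db.getStats(statsConfig);
+     * </pre>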
+     *
+     * <p>In the presence of multiple threads or processes accessing an active
+     * database, the information returned by this method may be
+     * out-of-date.</p>
+     *
+     * @param config Configures the statistics operation; if null, default
+     * attributes are used.
+     *
+     * @return Database statistics.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public DatabaseStats getStats(StatsConfig config)
+        throws DatabaseException {
+
+        checkEnv();
+        checkRequiredDbState(OPEN, "Can't call Database.stat");
+        StatsConfig useConfig =
+            (config == null) ? StatsConfig.DEFAULT : config;
+
+        if (databaseImpl != null) {
+            databaseImpl.checkIsDeleted("stat");
+            return databaseImpl.stat(useConfig);
+        }
+        return null;
+    }
+
+    /**
+     * Verifies the integrity of the database.
+     *
+     * <p>Verification is an expensive operation that should normally only be
+     * used for troubleshooting and debugging.</p>
+     *
+     * @param config Configures the verify operation; if null, the default
+     * operation is performed.
+     *
+     * @return Database statistics.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public DatabaseStats verify(VerifyConfig config)
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            checkRequiredDbState(OPEN, "Can't call Database.verify");
+            databaseImpl.checkIsDeleted("verify");
+            VerifyConfig useConfig =
+                (config == null) ? VerifyConfig.DEFAULT : config;
+
+            DatabaseStats stats = databaseImpl.getEmptyStats();
+            databaseImpl.verify(useConfig, stats);
+            return stats;
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Returns the database name.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The database name.
+     */
+    public String getDatabaseName()
+        throws DatabaseException {
+
+        try {
+            checkEnv();
+            if (databaseImpl != null) {
+                return databaseImpl.getName();
+            } else {
+                return null;
+            }
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * Non-transactional database name, safe to access when creating error
+     * messages.
+     */
+    String getDebugName() {
+        if (databaseImpl != null) {
+            return databaseImpl.getDebugName();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Returns this Database object's configuration.
+     *
+     * <p>This may differ from the configuration used to open this object if
+     * the database existed previously.</p>
+     *
+     * @return This Database object's configuration.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public DatabaseConfig getConfig()
+        throws DatabaseException {
+
+        try {
+            DatabaseConfig showConfig = configuration.cloneConfig();
+
+            /*
+             * Set the comparators from the database impl, they might have
+             * changed from another handle.
+             */
+            Comparator<byte[]> btComp = null;
+            Comparator<byte[]> dupComp = null;
+            boolean btCompByClass = false;
+            boolean dupCompByClass = false;
+            if (databaseImpl != null) {
+                btComp = databaseImpl.getBtreeComparator();
+                dupComp = databaseImpl.getDuplicateComparator();
+                btCompByClass = databaseImpl.getBtreeComparatorByClass();
+                dupCompByClass = databaseImpl.getDuplicateComparatorByClass();
+            }
+            showConfig.setBtreeComparatorInternal(btComp, btCompByClass);
+            showConfig.setDuplicateComparatorInternal(dupComp, dupCompByClass);
+            return showConfig;
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Equivalent to getConfig().getTransactional() but cheaper.
+     */
+    boolean isTransactional()
+        throws DatabaseException {
+
+        return databaseImpl.isTransactional();
+    }
+
+    /**
+     * Returns the {@link com.sleepycat.je.Environment Environment} handle for
+     * the database environment underlying the {@link
+     * com.sleepycat.je.Database Database}.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The {@link com.sleepycat.je.Environment Environment} handle
+     * for the database environment underlying the {@link
+     * com.sleepycat.je.Database Database}.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public Environment getEnvironment()
+        throws DatabaseException {
+
+        return envHandle;
+    }
+
+    /**
+     * Returns a list of all {@link com.sleepycat.je.SecondaryDatabase
+     * SecondaryDatabase} objects associated with a primary database.
+     *
+     * <p>If no secondaries are associated or this is itself a secondary
+     * database, an empty list is returned.</p>
+     *
+     * @return A list of all {@link com.sleepycat.je.SecondaryDatabase
+     * SecondaryDatabase} objects associated with a primary database.
+     */
+    public List<SecondaryDatabase> getSecondaryDatabases()
+        throws DatabaseException {
+
+        try {
+            List<SecondaryDatabase> list = new ArrayList<SecondaryDatabase>();
+            if (hasTriggers()) {
+                acquireTriggerListReadLock();
+                try {
+                    for (int i = 0; i < triggerList.size(); i += 1) {
+                        DatabaseTrigger t = triggerList.get(i);
+                        if (t instanceof SecondaryTrigger) {
+                            list.add(((SecondaryTrigger) t).getDb());
+                        }
+                    }
+                } finally {
+                    releaseTriggerListReadLock();
+                }
+            }
+            return list;
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * Helpers, not part of the public API
+     */
+
+    /**
+     * Returns true if the Database was opened read/write.
+     *
+     * @return true if the Database was opened read/write.
+     */
+    boolean isWritable() {
+        return isWritable;
+    }
+
+    /**
+     * Returns the databaseImpl object instance.
+     */
+    DatabaseImpl getDatabaseImpl() {
+        return databaseImpl;
+    }
+
+    /**
+     * The handleLocker is the one that holds the db handle lock.
+     */
+    void setHandleLocker(Locker locker) {
+        handleLocker = locker;
+    }
+
+    synchronized void removeCursor(Cursor dbc) {
+        cursors.remove(dbc);
+    }
+
+    synchronized void addCursor(Cursor dbc) {
+        cursors.add(dbc);
+    }
+
+    /**
+     * @throws DatabaseException if the Database state is not this value.
+     */
+    void checkRequiredDbState(DbState required, String msg)
+        throws DatabaseException {
+
+        if (state != required) {
+            throw new DatabaseException
+                (msg + " Database state can't be " + state +
+                 ", must be " + required);
+        }
+    }
+
+    /**
+     * @throws DatabaseException if the Database state is this value.
+     */
+    void checkProhibitedDbState(DbState prohibited, String msg)
+        throws DatabaseException {
+
+        if (state == prohibited) {
+            throw new DatabaseException
+                (msg + " Database state must not be " + prohibited);
+        }
+    }
+
+    /**
+     * @throws RunRecoveryException if the underlying environment is
+     * invalid
+     */
+    void checkEnv()
+        throws RunRecoveryException {
+
+        EnvironmentImpl env = envHandle.getEnvironmentImpl();
+        if (env != null) {
+            env.checkIfInvalid();
+        }
+    }
+
+    /**
+     * Invalidates the handle, called by txn.abort by way of DbInternal.
+     *
+     * Note that this method (unlike close) does not call handleClosed, which
+     * performs sync and removal of DW DBs.  A DW DB cannot be transactional. 
+     */
+    synchronized void invalidate() {
+        state = INVALID;
+        envHandle.removeReferringHandle(this);
+        if (databaseImpl != null) {
+            databaseImpl.removeReferringHandle(this);
+            envHandle.getEnvironmentImpl().
+                getDbTree().releaseDb(databaseImpl);
+
+            /*
+             * Database.close may be called after an abort.  By setting the
+             * databaseImpl field to null we ensure that close won't call
+             * releaseDb or endOperation. [#13415]
+             */
+            databaseImpl = null;
+        }
+    }
+
+    /**
+     * Checks that write operations aren't used on a readonly Database.
+     */
+    private void checkWritable(String operation)
+        throws DatabaseException {
+
+        if (!isWritable) {
+            throw new UnsupportedOperationException
+                ("Database is Read Only: " + operation);
+        }
+    }
+
+    /**
+     * Sends trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to decide whether to send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    void trace(Level level,
+               String methodName,
+               Transaction txn,
+               DatabaseEntry key,
+               DatabaseEntry data,
+               LockMode lockMode)
+        throws DatabaseException {
+
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(methodName);
+            if (txn != null) {
+                sb.append(" txnId=").append(txn.getId());
+            }
+            sb.append(" key=").append(key.dumpData());
+            if (data != null) {
+                sb.append(" data=").append(data.dumpData());
+            }
+            if (lockMode != null) {
+                sb.append(" lockMode=").append(lockMode);
+            }
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Sends trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to decide whether to send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    void trace(Level level,
+               String methodName,
+               Transaction txn,
+               CursorConfig config)
+        throws DatabaseException {
+
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(methodName);
+            sb.append(" name=" + getDebugName());
+            if (txn != null) {
+                sb.append(" txnId=").append(txn.getId());
+            }
+            if (config != null) {
+                sb.append(" config=").append(config);
+            }
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /*
+     * Manage triggers.
+     */
+
+    /**
+     * Returns whether any triggers are currently associated with this primary.
+     * Note that an update of the trigger list may be in progress and this
+     * method does not wait for that update to be completed.
+     */
+    boolean hasTriggers() {
+
+        return triggerList != null;
+    }
+
+    /**
+     * Gets a read-lock on the list of triggers.  releaseTriggerListReadLock()
+     * must be called to release the lock.  Called by all primary put and
+     * delete operations.
+     */
+    private void acquireTriggerListReadLock()
+        throws DatabaseException {
+
+        EnvironmentImpl env = envHandle.getEnvironmentImpl();
+        env.getTriggerLatch().acquireShared();
+        if (triggerList == null) {
+            triggerList = new ArrayList<DatabaseTrigger>();
+        }
+    }
+
+    /**
+     * Releases a lock acquired by calling acquireTriggerListReadLock().
+     */
+    private void releaseTriggerListReadLock()
+        throws DatabaseException {
+
+        EnvironmentImpl env = envHandle.getEnvironmentImpl();
+        env.getTriggerLatch().release();
+    }
+
+    /**
+     * Gets a write lock on the list of triggers.  An empty list is created if
+     * necessary, so null is never returned.  releaseTriggerListWriteLock()
+     * must always be called to release the lock.
+     */
+    private void acquireTriggerListWriteLock()
+        throws DatabaseException {
+
+        EnvironmentImpl env = envHandle.getEnvironmentImpl();
+        env.getTriggerLatch().acquireExclusive();
+        if (triggerList == null) {
+            triggerList = new ArrayList<DatabaseTrigger>();
+        }
+    }
+
+    /**
+     * Releases a lock acquired by calling acquireTriggerListWriteLock().  If
+     * the list is now empty then it is set to null, that is, hasTriggers()
+     * will subsequently return false.
+     */
+    private void releaseTriggerListWriteLock()
+        throws DatabaseException {
+
+        if (triggerList.size() == 0) {
+            triggerList = null;
+        }
+        EnvironmentImpl env = envHandle.getEnvironmentImpl();
+        env.getTriggerLatch().release();
+    }
+
+    /**
+     * Adds a given trigger to the list of triggers.  Called while opening
+     * a SecondaryDatabase.
+     *
+     * @param insertAtFront true to insert at the front, or false to append.
+     */
+    void addTrigger(DatabaseTrigger trigger, boolean insertAtFront)
+        throws DatabaseException {
+
+        acquireTriggerListWriteLock();
+        try {
+            if (insertAtFront) {
+                triggerList.add(0, trigger);
+            } else {
+                triggerList.add(trigger);
+            }
+            trigger.triggerAdded(this);
+        } finally {
+            releaseTriggerListWriteLock();
+        }
+    }
+
+    /**
+     * Removes a given trigger from the list of triggers.  Called by
+     * SecondaryDatabase.close().
+     */
+    void removeTrigger(DatabaseTrigger trigger)
+        throws DatabaseException {
+
+        acquireTriggerListWriteLock();
+        try {
+            triggerList.remove(trigger);
+            trigger.triggerRemoved(this);
+        } finally {
+            releaseTriggerListWriteLock();
+        }
+    }
+
+    /**
+     * Clears the list of triggers.  Called by close(), this allows closing the
+     * primary before its secondaries, although we document that secondaries
+     * should be closed first.
+     */
+    private void removeAllTriggers()
+        throws DatabaseException {
+
+        acquireTriggerListWriteLock();
+        try {
+            for (int i = 0; i < triggerList.size(); i += 1) {
+                DatabaseTrigger trigger = triggerList.get(i);
+                trigger.triggerRemoved(this);
+            }
+            triggerList.clear();
+        } finally {
+            releaseTriggerListWriteLock();
+        }
+    }
+
+    /**
+     * Notifies associated triggers when a put() or delete() is performed on
+     * the primary.  This method is normally called only if hasTriggers() has
+     * returned true earlier.  This avoids acquiring a shared latch for
+     * primaries with no triggers.  If a trigger is added during the update
+     * process, there is no requirement to immediately start updating it.
+     *
+     * @param locker the internal locker.
+     *
+     * @param priKey the primary key.
+     *
+     * @param oldData the primary data before the change, or null if the record
+     * did not previously exist.
+     *
+     * @param newData the primary data after the change, or null if the record
+     * has been deleted.
+     */
+    void notifyTriggers(Locker locker,
+                        DatabaseEntry priKey,
+                        DatabaseEntry oldData,
+                        DatabaseEntry newData)
+        throws DatabaseException {
+
+        acquireTriggerListReadLock();
+        try {
+            for (int i = 0; i < triggerList.size(); i += 1) {
+                DatabaseTrigger trigger = triggerList.get(i);
+
+                /* Notify trigger. */
+                trigger.databaseUpdated
+                    (this, locker, priKey, oldData, newData);
+            }
+        } finally {
+            releaseTriggerListReadLock();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/DatabaseConfig.java b/src/com/sleepycat/je/DatabaseConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..cd38c0e8ab5bfc5de4e8223160a500d21012067b
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseConfig.java
@@ -0,0 +1,1051 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseConfig.java,v 1.48.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Arrays;
+import java.util.Comparator;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+
+/**
+ * Specifies the attributes of a database.
+ */
+public class DatabaseConfig implements Cloneable {
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public static final DatabaseConfig DEFAULT = new DatabaseConfig();
+
+    private boolean allowCreate = false;
+    private boolean exclusiveCreate = false;
+    private boolean transactional = false;
+    private boolean readOnly = false;
+    private boolean duplicatesAllowed = false;
+    private boolean deferredWrite = false;
+    private boolean temporary = false;
+    private boolean keyPrefixingEnabled = false;
+
+    /*
+     * An internal attribute indicating that the database is replicated in an
+     * HA system. Not yet publicly settable.
+     */
+    private boolean replicated = true;
+
+    /* User-defined Btree and duplicate comparison functions, if specified. */
+    private int nodeMax;
+    private int nodeMaxDupTree;
+    private Comparator<byte[]> btreeComparator = null;
+    private Comparator<byte[]> duplicateComparator = null;
+    private boolean btreeComparatorByClassName = false;
+    private boolean duplicateComparatorByClassName = false;
+    private boolean overrideBtreeComparator = false;
+    private boolean overrideDupComparator = false;
+    private boolean useExistingConfig = false;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public DatabaseConfig() {
+    }
+
+    /**
+     * Configures the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method to create the database if it does not
+     * already exist.
+     *
+     * @param allowCreate If true, configure the {@link
+     * com.sleepycat.je.Environment#openDatabase Environment.openDatabase}
+     * method to create the database if it does not already exist.
+     */
+    public void setAllowCreate(boolean allowCreate) {
+        this.allowCreate = allowCreate;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method is configured to create the database
+     * if it does not already exist.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method is configured to create the database
+     * if it does not already exist.
+     */
+    public boolean getAllowCreate() {
+        return allowCreate;
+    }
+
+    /**
+     * Configure the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method to fail if the database already exists.
+     *
+     * <p>The exclusiveCreate mode is only meaningful if specified with the
+     * allowCreate mode.</p>
+     *
+     * @param exclusiveCreate If true, configure the {@link
+     * com.sleepycat.je.Environment#openDatabase Environment.openDatabase}
+     * method to fail if the database already exists.
+     */
+    public void setExclusiveCreate(boolean exclusiveCreate) {
+        this.exclusiveCreate = exclusiveCreate;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method is configured to fail if the database
+     * already exists.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method is configured to fail if the database
+     * already exists.
+     */
+    public boolean getExclusiveCreate() {
+        return exclusiveCreate;
+    }
+
+    /**
+     * Configure the database to support sorted, duplicate data items.
+     *
+     * <p>Insertion when the key of the key/data pair being inserted already
+     * exists in the database will be successful.  The ordering of duplicates
+     * in the database is determined by the duplicate comparison function.</p>
+     *
+     * <p>If the application does not specify a duplicate data item comparison
+     * function, a default lexical comparison will be used.</p>
+     *
+     * <p>If a primary database is to be associated with one or more secondary
+     * databases, it may not be configured for duplicates.</p>
+     *
+     * <p>Calling this method affects the database, including all threads of
+     * control accessing the database.</p>
+     *
+     * <p>If the database already exists when the database is opened, any
+     * database configuration specified by this method must be the same as the
+     * existing database or an error will be returned.</p>
+     *
+     * @param duplicatesAllowed If true, configure the database to support
+     * duplicate data items. A value of false may not be passed to this
+     * method; that is, once set, the configuration cannot be cleared.
+     */
+    public void setSortedDuplicates(boolean duplicatesAllowed) {
+        this.duplicatesAllowed = duplicatesAllowed;
+    }
+
+    /**
+     * Returns true if the database is configured to support sorted duplicate
+     * data items.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database is configured to support sorted duplicate
+     * data items.
+     */
+    public boolean getSortedDuplicates() {
+        return duplicatesAllowed;
+    }
+
+    /**
+     * Returns the key prefixing configuration.
+     *
+     * @return true if key prefixing has been enabled in this database.
+     */
+    public boolean getKeyPrefixing() {
+        return keyPrefixingEnabled;
+    }
+
+    /**
+     * Configure the database to support key prefixing. Key prefixing causes
+     * the representation of keys in the b-tree internal nodes to be split
+     * between the common prefix of all keys and the suffixes.  Using this
+     * may result in a more space-efficient representation in both the
+     * in-memory and on-disk formats, but at some possible performance cost.
+     *
+     * @param keyPrefixingEnabled If true, enables keyPrefixing for the
+     * database.
+     */
+    public void setKeyPrefixing(boolean keyPrefixingEnabled) {
+        this.keyPrefixingEnabled = keyPrefixingEnabled;
+    }
+
+    /**
+     * Encloses the database open within a transaction.
+     *
+     * <p>If the call succeeds, the open operation will be recoverable.  If the
+     * call fails, no database will have been created.</p>
+     *
+     * <p>All future operations on this database, which are not explicitly
+     * enclosed in a transaction by the application, will be enclosed in a
+     * transaction within the library.</p>
+     *
+     * @param transactional If true, enclose the database open within a
+     * transaction.
+     */
+    public void setTransactional(boolean transactional) {
+        this.transactional = transactional;
+    }
+
+    /**
+     * Returns true if the database open is enclosed within a transaction.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database open is enclosed within a transaction.
+     */
+    public boolean getTransactional() {
+        return transactional;
+    }
+
+    /**
+     * Configures the database in read-only mode.
+     *
+     * <p>Any attempt to modify items in the database will fail, regardless of
+     * the actual permissions of any underlying files.</p>
+     *
+     * @param readOnly If true, configure the database in read-only mode.
+     */
+    public void setReadOnly(boolean readOnly) {
+        this.readOnly = readOnly;
+    }
+
+    /**
+     * Returns true if the database is configured in read-only mode.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database is configured in read-only mode.
+     */
+    public boolean getReadOnly() {
+        return readOnly;
+    }
+
+    /**
+     * Configures the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method to have a B+Tree fanout of
+     * nodeMaxEntries.
+     *
+     * <p>The nodeMaxEntries parameter is only meaningful if specified with the
+     * allowCreate mode.</p>
+     *
+     * @param nodeMaxEntries The maximum children per B+Tree node.
+     */
+    public void setNodeMaxEntries(int nodeMaxEntries) {
+        this.nodeMax = nodeMaxEntries;
+    }
+
+    /**
+     * Configures the {@link com.sleepycat.je.Environment#openDatabase
+     * Environment.openDatabase} method to have a B+Tree duplicate tree fanout
+     * of nodeMaxDupTreeEntries.
+     *
+     * <p>The nodeMaxDupTreeEntries parameter is only meaningful if specified
+     * with the allowCreate mode.</p>
+     *
+     * @param nodeMaxDupTreeEntries The maximum children per duplicate B+Tree
+     * node.
+     */
+    public void setNodeMaxDupTreeEntries(int nodeMaxDupTreeEntries) {
+        this.nodeMaxDupTree = nodeMaxDupTreeEntries;
+    }
+
+    /**
+     * Returns the maximum number of children a B+Tree node can have.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The maximum number of children a B+Tree node can have.
+     */
+    public int getNodeMaxEntries() {
+        return nodeMax;
+    }
+
+    /**
+     * Returns the maximum number of children a B+Tree duplicate tree node can
+     * have.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The maximum number of children a B+Tree duplicate tree node can
+     * have.
+     */
+    public int getNodeMaxDupTreeEntries() {
+        return nodeMaxDupTree;
+    }
+
+    /**
+     * By default, a byte by byte lexicographic comparison is used for btree
+     * keys. To customize the comparison, supply a different Comparator.
+     *
+     * <p>Note that there are two ways to set the comparator: by specifying the
+     * class or by specifying a serializable object.  This method is used to
+     * specify a serializable object.  The comparator class must implement
+     * java.util.Comparator and must be serializable.  JE will serialize the
+     * Comparator and deserialize it when subsequently opening the
+     * database.</p>
+     *
+     * <p>The Comparator.compare() method is passed the byte arrays that are
+     * stored in the database. If you know how your data is organized in the
+     * byte array, then you can write a comparison routine that directly
+     * examines the contents of the arrays. Otherwise, you have to reconstruct
+     * your original objects, and then perform the comparison.  See the <a
+     * href="{@docRoot}/../GettingStartedGuide/comparator.html"
+     * target="_top">Getting Started Guide</a> for examples.</p>
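+     *
+     * <p>For illustration only, a sketch of a serializable comparator that
+     * orders keys as UTF-8 strings rather than as unsigned bytes (the class
+     * name is hypothetical and assumes keys were stored as UTF-8 text):</p>
+     * <pre>{@code
+     *     public class StringKeyComparator
+     *             implements Comparator<byte[]>, Serializable {
+     *
+     *         public int compare(byte[] key1, byte[] key2) {
+     *             // Reconstruct the original strings and compare them.
+     *             try {
+     *                 return new String(key1, "UTF-8").compareTo(
+     *                        new String(key2, "UTF-8"));
+     *             } catch (UnsupportedEncodingException e) {
+     *                 throw new IllegalStateException(e);
+     *             }
+     *         }
+     *     }
+     * }</pre>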
+     *
+     * <p><em>WARNING:</em> There are several special considerations that must
+     * be taken into account when implementing a comparator.</p>
+     * <ul>
+     *   <li>Comparator instances are shared by multiple threads and comparator
+     *   methods are called without any special synchronization. Therefore,
+     *   comparators must be thread safe.  In general no shared state should be
+     *   used and any caching of computed values must be done with proper
+     *   synchronization.</li>
+     *
+     *   <li>Because records are stored in the order determined by the
+     *   Comparator, the Comparator's behavior must not change over time and
+     *   therefore should not be dependent on any state that may change over
+     *   time.  In addition, although it is possible to change the comparator
+     *   for an existing database, care must be taken that the new comparator
+     *   provides compatible results with the previous comparator, or database
+     *   corruption will occur.</li>
+     *
+     *   <li>JE uses comparators internally in a wide variety of circumstances,
+     *   so custom comparators must be sure to return valid values for any two
+     *   arbitrary keys.  The user must not make any assumptions about the
+     *   range of key values that might be compared. For example, it's
+     *   possible that the comparator will be used against previously deleted
+     *   values.</li>
+     * </ul>
+     *
+     * <p>A special type of comparator is a <em>partial comparator</em>, which
+     * compares a proper subset (not all bytes) of the key.  A partial
+     * comparator allows uniquely identifying a record by a partial key value.
+     * For example, the key could contain multiple fields but could uniquely
+     * identify the record with a single field.  The partial comparator could
+     * then compare only the single identifying field.  A query ({@link
+     * Cursor#getSearchKey Cursor.getSearchKey}, for example) could then be
+     * performed by passing a partial key that contains only the identifying
+     * field.</p>
+     *
+     * <p>A partial comparator has limited value when used as a Btree
+     * comparator. Instead of using a partial comparator, the non-identifying
+     * fields of the key could be placed in the data portion of the key/data
+     * pair.  This makes the key smaller, which normally provides better
+     * performance.  A partial comparator is much more useful when used as a
+     * duplicate comparator (see {@link #setDuplicateComparator
+     * setDuplicateComparator}).</p>
+     *
+     * <p>However, if you do use a partial comparator as a Btree comparator, be
+     * aware that you may not configure the database for duplicates (true may
+     * not be passed to {@link #setSortedDuplicates setSortedDuplicates}).  In
+     * a duplicate set, each record must have the same (identical bytes) key.
+     * The internal structure of JE's Btree cannot support duplicates with
+     * non-identical keys, and cannot support specifying non-identical keys for
+     * addressing the records in a duplicate set.</p>
+     *
+     * The comparator for an existing database will not be overridden unless
+     * setOverrideBtreeComparator() is set to true.
+     */
+    public void setBtreeComparator(Comparator<byte[]> btreeComparator) {
+        /* Note: comparator may be null */
+        this.btreeComparator = validateComparator(btreeComparator, "Btree");
+        this.btreeComparatorByClassName = false;
+    }
+
+    /**
+     * By default, a byte by byte lexicographic comparison is used for btree
+     * keys. To customize the comparison, supply a different Comparator.
+     *
+     * <p>Note that there are two ways to set the comparator: by specifying the
+     * class or by specifying a serializable object.  This method is used to
+     * specify a Comparator class.  The comparator class must implement
+     * java.util.Comparator and must have a public zero-parameter constructor.
+     * JE will store the class name and instantiate the Comparator by class
+     * name (using <code>Class.forName</code> and <code>newInstance</code>)
+     * when subsequently opening the database.  Because the Comparator is
+     * instantiated using its default constructor, it should not be dependent
+     * on other constructor parameters.</p>
+     *
+     * <p>The Comparator.compare() method is passed the byte arrays that are
+     * stored in the database. If you know how your data is organized in the
+     * byte array, then you can write a comparison routine that directly
+     * examines the contents of the arrays. Otherwise, you have to reconstruct
+     * your original objects, and then perform the comparison.  See the <a
+     * href="{@docRoot}/../GettingStartedGuide/comparator.html"
+     * target="_top">Getting Started Guide</a> for examples.</p>
+     *
+     * <p><em>WARNING:</em> There are several special considerations that must
+     * be taken into account when implementing a comparator.</p>
+     * <ul>
+     *   <li>Comparator instances are shared by multiple threads and comparator
+     *   methods are called without any special synchronization. Therefore,
+     *   comparators must be thread safe.  In general no shared state should be
+     *   used and any caching of computed values must be done with proper
+     *   synchronization.</li>
+     *
+     *   <li>Because records are stored in the order determined by the
+     *   Comparator, the Comparator's behavior must not change over time and
+     *   therefore should not be dependent on any state that may change over
+     *   time.  In addition, although it is possible to change the comparator
+     *   for an existing database, care must be taken that the new comparator
+     *   provides compatible results with the previous comparator, or database
+     *   corruption will occur.</li>
+     *
+     *   <li>JE uses comparators internally in a wide variety of circumstances,
+     *   so custom comparators must be sure to return valid values for any two
+     *   arbitrary keys.  The user must not make any assumptions about the
+     *   range of key values that might be compared. For example, it's
+     *   possible that the comparator will be used against previously deleted
+     *   values.</li>
+     * </ul>
+     *
+     * <p>A special type of comparator is a <em>partial comparator</em>, which
+     * compares a proper subset (not all bytes) of the key.  A partial
+     * comparator allows uniquely identifying a record by a partial key value.
+     * For example, the key could contain multiple fields but could uniquely
+     * identify the record with a single field.  The partial comparator could
+     * then compare only the single identifying field.  A query ({@link
+     * Cursor#getSearchKey Cursor.getSearchKey}, for example) could then be
+     * performed by passing a partial key that contains only the identifying
+     * field.</p>
+     *
+     * <p>A partial comparator has limited value when used as a Btree
+     * comparator. Instead of using a partial comparator, the non-identifying
+     * fields of the key could be placed in the data portion of the key/data
+     * pair.  This makes the key smaller, which normally provides better
+     * performance.  A partial comparator is much more useful when used as a
+     * duplicate comparator (see {@link #setDuplicateComparator
+     * setDuplicateComparator}).</p>
+     *
+     * <p>However, if you do use a partial comparator as a Btree comparator,
+     * please be aware that you may not configure the database for duplicates
+     * (true may not be passed to {@link #setSortedDuplicates
+     * setSortedDuplicates}).  In a duplicate set, each record must have the
+     * same (identical bytes) key.  The internal structure of JE's Btree cannot
+     * support duplicates with non-identical keys, and cannot support
+     * specifying non-identical keys for addressing the records in a duplicate
+     * set.</p>
+     *
+     * The comparator for an existing database will not be overridden unless
+     * setOverrideBtreeComparator() is set to true.
+     */
+    public void setBtreeComparator(Class<? extends Comparator<byte[]>> 
+                                   btreeComparatorClass) {
+
+        /* Note: comparator may be null */
+        this.btreeComparator = validateComparator(btreeComparatorClass, 
+                                                  "Btree");
+        this.btreeComparatorByClassName = true;
+    }
+
+    /**
+     * Returns the Comparator used for key comparison on this database.
+     */
+    public Comparator<byte[]> getBtreeComparator() {
+        return btreeComparator;
+    }
+
+    /**
+     * Returns true if the btree comparator is set by class name, not by
+     * serializable Comparator object
+     * @return true if the comparator is set by class name, not by serializable
+     * Comparator object.
+     */
+    public boolean getBtreeComparatorByClassName() {
+        return btreeComparatorByClassName;
+    }
+
+    /**
+     * Specifies whether, when the database already exists, the btree
+     * comparator specified in this configuration object should override the
+     * current comparator.
+     *
+     * @param override Set to true to override the existing comparator.
+     */
+    public void setOverrideBtreeComparator(boolean override) {
+        overrideBtreeComparator = override;
+    }
+
+    /**
+     * Returns the override setting for the btree comparator.
+     */
+    public boolean getOverrideBtreeComparator() {
+        return overrideBtreeComparator;
+    }
+
+    /**
+     * By default, a byte by byte lexicographic comparison is used for
+     * duplicate data items in a duplicate set.  To customize the comparison,
+     * supply a different Comparator.
+     *
+     * <p>Note that there are two ways to set the comparator: by specifying the
+     * class or by specifying a serializable object.  This method is used to
+     * specify a serializable object.  The comparator class must implement
+     * java.util.Comparator and must be serializable.  JE will serialize the
+     * Comparator and deserialize it when subsequently opening the
+     * database.</p>
+     *
+     * <p>The Comparator.compare() method is passed the byte arrays that are
+     * stored in the database. If you know how your data is organized in the
+     * byte array, then you can write a comparison routine that directly
+     * examines the contents of the arrays. Otherwise, you have to reconstruct
+     * your original objects, and then perform the comparison.  See the <a
+     * href="{@docRoot}/../GettingStartedGuide/comparator.html"
+     * target="_top">Getting Started Guide</a> for examples.</p>
+     *
+     * <p><em>WARNING:</em> There are several special considerations that must
+     * be taken into account when implementing a comparator.</p>
+     * <ul>
+     *   <li>Comparator instances are shared by multiple threads and comparator
+     *   methods are called without any special synchronization. Therefore,
+     *   comparators must be thread safe.  In general no shared state should be
+     *   used and any caching of computed values must be done with proper
+     *   synchronization.</li>
+     *
+     *   <li>Because records are stored in the order determined by the
+     *   Comparator, the Comparator's behavior must not change over time and
+     *   therefore should not be dependent on any state that may change over
+     *   time.  In addition, although it is possible to change the comparator
+     *   for an existing database, care must be taken that the new comparator
+     *   provides compatible results with the previous comparator, or database
+     *   corruption will occur.</li>
+     *
+     *   <li>JE uses comparators internally in a wide variety of circumstances,
+     *   so custom comparators must be sure to return valid values for any two
+     *   arbitrary keys.  The user must not make any assumptions about the
+     *   range of key values that might be compared. For example, it's
+     *   possible that the comparator will be used against previously deleted
+     *   values.</li>
+     * </ul>
+     *
+     * <p>A special type of comparator is a <em>partial comparator</em>, which
+     * is a comparator that compares a proper subset (not all bytes) of the
+     * data.  A partial comparator allows uniquely identifying a record within
+     * a duplicate set by a partial data value.  For example, the data could
+     * contain multiple fields but could uniquely identify the record with a
+     * single field.  The partial comparator could then compare only the single
+     * identifying field.  A query ({@link Cursor#getSearchBoth
+     * Cursor.getSearchBoth}, for example) could then be performed by passing a
+     * partial data value that contains only the identifying field.</p>
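+     *
+     * <p>For illustration only, a sketch of a partial comparator that treats
+     * the leading four bytes of the data as the identifying field (the class
+     * name and data layout are hypothetical):</p>
+     * <pre>{@code
+     *     public class IdFieldComparator
+     *             implements Comparator<byte[]>, Serializable {
+     *
+     *         public int compare(byte[] data1, byte[] data2) {
+     *             // Compare only the leading 4-byte identifier; the remaining
+     *             // bytes are ignored.  Assumes every data item is at least
+     *             // four bytes long.
+     *             for (int i = 0; i < 4; i += 1) {
+     *                 int b1 = data1[i] & 0xff;
+     *                 int b2 = data2[i] & 0xff;
+     *                 if (b1 != b2) {
+     *                     return b1 - b2;
+     *                 }
+     *             }
+     *             return 0;
+     *         }
+     *     }
+     * }</pre>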
+     *
+     * <p>When using a partial comparator, it is possible to update the data
+     * for a duplicate record, as long as only the non-identifying fields in
+     * the data are changed.  See {@link Cursor#putCurrent Cursor.putCurrent}
+     * for more information.</p>
+     *
+     * The comparator for an existing database will not be overridden unless
+     * setOverrideDuplicateComparator() is set to true.
+     */
+    public void 
+        setDuplicateComparator(Comparator<byte[]> duplicateComparator) {
+
+        /* Note: comparator may be null */
+        this.duplicateComparator =
+            validateComparator(duplicateComparator, "Duplicate");
+        this.duplicateComparatorByClassName = false;
+    }
+
+    /**
+     * By default, a byte by byte lexicographic comparison is used for
+     * duplicate data items in a duplicate set.  To customize the comparison,
+     * supply a different Comparator.
+     *
+     * <p>Note that there are two ways to set the comparator: by specifying the
+     * class or by specifying a serializable object.  This method is used to
+     * specify a Comparator class.  The comparator class must implement
+     * java.util.Comparator and must have a public zero-parameter constructor.
+     * JE will store the class name and instantiate the Comparator by class
+     * name (using <code>Class.forName</code> and <code>newInstance</code>)
+     * when subsequently opening the database.  Because the Comparator is
+     * instantiated using its default constructor, it should not be dependent
+     * on other constructor parameters.</p>
+     *
+     * <p>The Comparator.compare() method is passed the byte arrays that are
+     * stored in the database. If you know how your data is organized in the
+     * byte array, then you can write a comparison routine that directly
+     * examines the contents of the arrays. Otherwise, you have to reconstruct
+     * your original objects, and then perform the comparison.  See the <a
+     * href="{@docRoot}/../GettingStartedGuide/comparator.html"
+     * target="_top">Getting Started Guide</a> for examples.</p>
+     *
+     * <p><em>WARNING:</em> There are several special considerations that must
+     * be taken into account when implementing a comparator.</p>
+     * <ul>
+     *   <li>Comparator instances are shared by multiple threads and comparator
+     *   methods are called without any special synchronization. Therefore,
+     *   comparators must be thread safe.  In general no shared state should be
+     *   used and any caching of computed values must be done with proper
+     *   synchronization.</li>
+     *
+     *   <li>Because records are stored in the order determined by the
+     *   Comparator, the Comparator's behavior must not change over time and
+     *   therefore should not be dependent on any state that may change over
+     *   time.  In addition, although it is possible to change the comparator
+     *   for an existing database, care must be taken that the new comparator
+     *   provides compatible results with the previous comparator, or database
+     *   corruption will occur.</li>
+     *
+     *   <li>JE uses comparators internally in a wide variety of circumstances,
+     *   so custom comparators must be sure to return valid values for any two
+     *   arbitrary keys.  The user must not make any assumptions about the
+     *   range of key values that might be compared. For example, it's
+     *   possible that the comparator will be used against previously deleted
+     *   values.</li>
+     * </ul>
+     *
+     * <p>A special type of comparator is a <em>partial comparator</em>, which
+     * is a comparator that compares a proper subset (not all bytes) of the
+     * data.  A partial comparator allows uniquely identifying a record within
+     * a duplicate set by a partial data value.  For example, the data could
+     * contain multiple fields but could uniquely identify the record with a
+     * single field.  The partial comparator could then compare only the single
+     * identifying field.  A query ({@link Cursor#getSearchBoth
+     * Cursor.getSearchBoth}, for example) could then be performed by passing a
+     * partial data value that contains only the identifying field.</p>
+     *
+     * <p>When using a partial comparator, it is possible to update the data
+     * for a duplicate record, as long as only the non-identifying fields in
+     * the data are changed.  See {@link Cursor#putCurrent Cursor.putCurrent}
+     * for more information.</p>
+     *
+     * The comparator for an existing database will not be overridden unless
+     * setOverrideDuplicateComparator() is set to true.
+     */
+    public void setDuplicateComparator(Class<? extends Comparator<byte[]>> 
+                                       duplicateComparatorClass) {
+
+        /* Note: comparator may be null */
+        this.duplicateComparator = validateComparator(duplicateComparatorClass,
+                                                      "Duplicate");
+        this.duplicateComparatorByClassName = true;
+    }
+
+    /**
+     * Returns the Comparator used for duplicate record comparison on this
+     * database.
+     */
+    public Comparator<byte[]> getDuplicateComparator() {
+        return duplicateComparator;
+    }
+
+    /**
+     * Returns true if the duplicate comparator is set by class name, not by
+     * serializable Comparator object.
+     *
+     * @return true if the duplicate comparator is set by class name, not by
+     * serializable Comparator object.
+     */
+    public boolean getDuplicateComparatorByClassName() {
+        return duplicateComparatorByClassName;
+    }
+
+    /**
+     * Specifies whether, when the database already exists, the duplicate
+     * comparator specified in this configuration object should override the
+     * current comparator.
+     *
+     * @param override Set to true to override the existing comparator.
+     */
+    public void setOverrideDuplicateComparator(boolean override) {
+        overrideDupComparator = override;
+    }
+
+    /**
+     * Returns the override setting for the duplicate comparator.
+     */
+    public boolean getOverrideDuplicateComparator() {
+        return overrideDupComparator;
+    }
+
+    /**
+     * Sets the temporary database option.
+     *
+     * <p> Temporary databases operate internally in deferred-write mode to
+     * provide reduced disk I/O and increased concurrency.  But unlike an
+     * ordinary deferred-write database, the information in a temporary
+     * database is not durable or persistent.
+     *
+     * <p> A temporary database is not flushed to disk when the database is
+     * closed or when a checkpoint is performed, and the Database.sync method
+     * may not be called.  When all handles for a temporary database are
+     * closed, the database is automatically removed.  If a crash occurs before
+     * closing a temporary database, the database will be automatically removed
+     * when the environment is re-opened.
+     *
+     * <p> Note that although temporary databases can page to disk if the cache
+     * is not large enough to hold the databases, they are much more efficient
+     * if the database remains in memory. See the JE FAQ on the Oracle
+     * Technology Network site for information on how to estimate the cache
+     * size needed by a given database.
+     *
+     * <p>
+     * See the Getting Started Guide, Database chapter for a full description
+     * of temporary databases.
+     * <p>
+     * @param temporary if true, the database will be opened as a temporary
+     * database.
+     */
+    public void setTemporary(boolean temporary) {
+        this.temporary = temporary;
+    }
+
+    /**
+     * Returns the temporary database option.
+     * @return boolean if true, the database is temporary.
+     */
+    public boolean getTemporary() {
+        return temporary;
+    }
+
+    /**
+     * Sets the deferred-write option.
+     *
+     * <p> Deferred-write databases have reduced disk I/O and improved
+     * concurrency.  Disk I/O is reduced when data records are frequently
+     * modified or deleted.  The information in a deferred-write database is
+     * not guaranteed to be durable or persistent until Database.close() or
+     * Database.sync() is called, or a checkpoint is performed.
+     *
+     * <p> After a deferred-write database is closed it may be re-opened as an
+     * ordinary transactional or non-transactional database.  For example, this
+     * can be used to initially load a large data set in deferred-write mode
+     * and then switch to transactional mode for subsequent operations.
+     *
+     * <p> Note that although deferred-write databases can page to disk if the
+     * cache is not large enough to hold them, they are much more efficient
+     * when they remain in memory. See the JE FAQ on the Oracle Technology
+     * Network site for information on how to estimate the cache size needed
+     * by a given database.
+     *
+     * <p> See the Getting Started Guide, Database chapter for a full
+     * description of deferred-write databases.
+     *
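+     * <p>A minimal sketch of a bulk load using deferred write; the open
+     * environment handle and the database name are assumed:</p>
+     *
+     * <blockquote><pre>
+     *     DatabaseConfig dbConfig = new DatabaseConfig();
+     *     dbConfig.setAllowCreate(true);
+     *     dbConfig.setDeferredWrite(true);
+     *     Database db = env.openDatabase(null, "bulk", dbConfig);
+     *     // ... load the initial data set ...
+     *     db.sync();   // flush the loaded records to disk
+     *     db.close();
+     * </pre></blockquote>
+     *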
+     * @param deferredWrite if true, the database will be opened as a
+     * deferred-write database.
+     */
+    public void setDeferredWrite(boolean deferredWrite) {
+        this.deferredWrite = deferredWrite;
+    }
+
+    /**
+     * Returns the deferred-write option.
+     *
+     * @return true if deferred-write is enabled.
+     */
+    public boolean getDeferredWrite() {
+        return deferredWrite;
+    }
+
+    /**
+     * Used to set the comparator when filling in a configuration from an
+     * existing database.
+     */
+    void setBtreeComparatorInternal(Comparator<byte[]> comparator,
+                                    boolean byClassName) {
+        btreeComparator = comparator;
+        btreeComparatorByClassName = byClassName;
+    }
+
+    /**
+     * Used to set the comparator when filling in a configuration from an
+     * existing database.
+     */
+    void setDuplicateComparatorInternal(Comparator<byte[]> comparator,
+                                        boolean byClassName) {
+        duplicateComparator = comparator;
+        duplicateComparatorByClassName = byClassName;
+    }
+
+    /**
+     * For utilities, to avoid having to know the configuration of a database.
+     */
+    void setUseExistingConfig(boolean useExistingConfig) {
+        this.useExistingConfig = useExistingConfig;
+    }
+
+    /**
+     * For utilities, to avoid having to know the configuration of a database.
+     */
+    boolean getUseExistingConfig() {
+        return useExistingConfig;
+    }
+
+    /** Not for public use yet. */
+    void setReplicated(boolean replicated) {
+        this.replicated = replicated;
+    }
+
+    /** Not for public use yet. */
+    boolean getReplicated() {
+        return replicated;
+    }
+
+    /**
+     * Returns a copy of this configuration object.
+     */
+    public DatabaseConfig cloneConfig() {
+        try {
+            return (DatabaseConfig) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /*
+     * For JCA Database handle caching.
+     */
+    void validate(DatabaseConfig config)
+        throws DatabaseException {
+
+        if (config == null) {
+            config = DatabaseConfig.DEFAULT;
+        }
+
+        boolean txnMatch = (config.transactional == transactional);
+        boolean roMatch = (config.readOnly == readOnly);
+        boolean sdMatch = (config.duplicatesAllowed == duplicatesAllowed);
+        boolean dwMatch = (config.getDeferredWrite() == deferredWrite);
+        boolean btCmpMatch = true;
+        if (config.overrideBtreeComparator) {
+            if (btreeComparator == null) {
+                btCmpMatch = (config.btreeComparator == null);
+            } else if (config.btreeComparatorByClassName !=
+                       btreeComparatorByClassName) {
+                btCmpMatch = false;
+            } else if (btreeComparatorByClassName) {
+                btCmpMatch = btreeComparator.getClass() ==
+                             config.btreeComparator.getClass();
+            } else {
+                btCmpMatch = Arrays.equals
+                    (DatabaseImpl.objectToBytes
+                        (btreeComparator, "Btree"),
+                     DatabaseImpl.objectToBytes
+                        (config.btreeComparator, "Btree"));
+            }
+        }
+        boolean dtCmpMatch = true;
+        if (config.overrideDupComparator) {
+            if (duplicateComparator == null) {
+                dtCmpMatch = (config.duplicateComparator == null);
+            } else if (config.duplicateComparatorByClassName !=
+                       duplicateComparatorByClassName) {
+                dtCmpMatch = false;
+            } else if (duplicateComparatorByClassName) {
+                dtCmpMatch = duplicateComparator.getClass() ==
+                             config.duplicateComparator.getClass();
+            } else {
+                dtCmpMatch = Arrays.equals
+                    (DatabaseImpl.objectToBytes
+                        (duplicateComparator, "Duplicate"),
+                     DatabaseImpl.objectToBytes
+                        (config.duplicateComparator, "Duplicate"));
+            }
+        }
+
+        if (txnMatch &&
+            roMatch &&
+            sdMatch &&
+            dwMatch &&
+            btCmpMatch &&
+            dtCmpMatch) {
+            return;
+        } else {
+            String message =
+                genDatabaseConfigMismatchMessage
+                (config, txnMatch, roMatch, sdMatch, dwMatch,
+                 btCmpMatch, dtCmpMatch);
+            throw new DatabaseException(message);
+        }
+    }
+
+    private String genDatabaseConfigMismatchMessage(DatabaseConfig config,
+                                                    boolean txnMatch,
+                                                    boolean roMatch,
+                                                    boolean sdMatch,
+                                                    boolean dwMatch,
+                                                    boolean btCmpMatch,
+                                                    boolean dtCmpMatch) {
+        StringBuilder ret = new StringBuilder
+            ("The following DatabaseConfig parameters for the\n" +
+             "cached Database do not match the parameters for the\n" +
+             "requested Database:\n");
+        if (!txnMatch) {
+            ret.append(" Transactional\n");
+        }
+        
+        if (!roMatch) {
+            ret.append(" Read-Only\n");
+        }
+        
+        if (!sdMatch) {
+            ret.append(" Sorted Duplicates\n");
+        }
+        
+        if (!dwMatch) {
+            ret.append(" Deferred Write");
+        }
+
+        if (!btCmpMatch) {
+            ret.append(" Btree Comparator\n");
+        }
+        
+        if (!dtCmpMatch) {
+            ret.append(" Duplicate Comparator\n");
+        }
+
+        return ret.toString();
+    }
+
+    /**
+     * Checks that this comparator can be serialized by JE.
+     */
+    private Comparator<byte[]>
+        validateComparator(Comparator<byte[]> comparator, String type)
+        throws IllegalArgumentException {
+
+        if (comparator == null) {
+            return null;
+        }
+
+        try {
+            return DatabaseImpl.instantiateComparator(comparator, type);
+        } catch (DatabaseException e) {
+            throw new IllegalArgumentException
+                (type +
+                 " comparator is not valid: " +
+                 e.getMessage() +
+                 "\nThe comparator object must be serializable.");
+        }
+    }
+
+    /**
+     * Checks that this comparator class can be instantiated by JE.
+     */
+    private Comparator<byte[]> 
+        validateComparator(Class<? extends Comparator<byte[]>> comparatorClass,
+                           String type)
+        throws IllegalArgumentException {
+
+        if (comparatorClass == null) {
+            return null;
+        }
+
+        if (!Comparator.class.isAssignableFrom(comparatorClass)) {
+            throw new IllegalArgumentException
+                (comparatorClass.getName() +
+                 " is is not valid as a " + type +
+                 " comparator because it does not " +
+                 " implement java.util.Comparator.");
+        }
+
+        try {
+            return DatabaseImpl.instantiateComparator(comparatorClass, type);
+        } catch (DatabaseException e) {
+            throw new IllegalArgumentException
+                (type +
+                 " comparator is not valid: " +
+                 e.getMessage() +
+                 "\nPerhaps you have not implemented a zero-parameter " +
+                 "constructor for the comparator or the comparator class " +
+                 "cannot be found.");
+        }
+    }
+
+    /**
+     * Checks that this database configuration is valid for a new, non-existent
+     * database.
+     */
+    void validateForNewDb()
+        throws DatabaseException {
+
+        if (readOnly) {
+            throw new DatabaseException
+                ("DatabaseConfig.setReadOnly() must be set to false " +
+                 "when creating a Database");
+        }
+
+        if (transactional && deferredWrite) {
+            throw new DatabaseException("deferredWrite mode is not yet " +
+                                        "supported for transactional " +
+                                        "databases");
+        }
+    }
+
+    /**
+     * For unit tests, checks that the database configuration attributes that
+     * are saved persistently are equal.
+     */
+    boolean persistentEquals(DatabaseConfig other) {
+        if (duplicatesAllowed != other.duplicatesAllowed)
+            return false;
+
+        if (temporary != other.temporary)
+            return false;
+
+        if (replicated != other.replicated)
+            return false;
+
+        if (nodeMax != other.nodeMax)
+            return false;
+
+        if (nodeMaxDupTree != other.nodeMaxDupTree)
+            return false;
+
+        if (((btreeComparator == null) && (other.btreeComparator != null)) ||
+            ((btreeComparator != null) && (other.btreeComparator == null))) {
+            return false;
+        }
+
+        if (btreeComparator != null) {
+            if (btreeComparator.getClass() !=
+                other.btreeComparator.getClass()) {
+                return false;
+            }
+        }
+
+        if (((duplicateComparator == null) &&
+             (other.duplicateComparator != null)) ||
+            ((duplicateComparator != null) &&
+             (other.duplicateComparator == null))) {
+            return false;
+        }
+
+        if (duplicateComparator != null) {
+            if (duplicateComparator.getClass() !=
+                other.duplicateComparator.getClass())
+                return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "allowCreate=" + allowCreate +
+            "\nexclusiveCreate=" + exclusiveCreate +
+            "\ntransactional=" + transactional +
+            "\nreadOnly=" + readOnly +
+            "\nduplicatesAllowed=" + duplicatesAllowed +
+            "\ndeferredWrite=" + deferredWrite +
+            "\ntemporary=" + temporary +
+            "\nkeyPrefixingEnabled=" + keyPrefixingEnabled +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/DatabaseEntry.java b/src/com/sleepycat/je/DatabaseEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..c7a75d7c727b7d5be24cf8f355fc5b760c9fd57e
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseEntry.java
@@ -0,0 +1,476 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseEntry.java,v 1.48.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.util.keyrange.KeyRange;
+
+/**
+ * Encodes database key and data items as a byte array.
+ *
+ * <p>Storage and retrieval for the {@link com.sleepycat.je.Database Database}
+ * and {@link com.sleepycat.je.Cursor Cursor} methods are based on key/data
+ * pairs. Both key and data items are represented by DatabaseEntry objects.
+ * Key and data byte arrays may refer to arrays of zero length up to arrays of
+ * essentially unlimited length.</p>
+ *
+ * <p>The DatabaseEntry class provides simple access to an underlying object
+ * whose elements can be examined or changed.  DatabaseEntry objects can be
+ * subclassed, providing a way to associate with it additional data or
+ * references to other structures.</p>
+ *
+ * <p>Access to DatabaseEntry objects is not re-entrant. In particular, if
+ * multiple threads simultaneously access the same DatabaseEntry object using
+ * {@link com.sleepycat.je.Database Database} or {@link com.sleepycat.je.Cursor
+ * Cursor} methods, the results are undefined.</p>
+ *
+ * <p>DatabaseEntry objects may be used in conjunction with the object mapping
+ * support provided in the {@link com.sleepycat.bind} package.</p>
+ *
+ * <h3>Input and Output Parameters</h3>
+ *
+ * <p>DatabaseEntry objects are used for both input data (when writing to a
+ * database or specifying a search parameter) and output data (when reading
+ * from a database).  For certain methods, one parameter may be an input
+ * parameter and another may be an output parameter.  For example, the {@link
+ * Database#get} method has an input key parameter and an output data
+ * parameter.  The documentation for each method describes whether its
+ * parameters are input or output parameters.</p>
+ *
+ * <p>For DatabaseEntry input parameters, the caller is responsible for
+ * initializing the data array of the DatabaseEntry.  For DatabaseEntry output
+ * parameters, the method called will initialize the data array.</p>
+ *
+ * <p>Also note that for DatabaseEntry output parameters, the method called
+ * will always allocate a new byte array.  The byte array specified by the
+ * caller will not be used.  Therefore, after calling a method that returns
+ * output parameters, the application can safely keep a reference to the byte
+ * array returned by {@link #getData} without danger that the array will be
+ * overwritten in a subsequent call.</p>
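+ *
+ * <p>For illustration, a hedged read sketch; the database handle and the key
+ * bytes are assumed to exist:</p>
+ *
+ * <blockquote><pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);  // input parameter
+ *     DatabaseEntry data = new DatabaseEntry();         // output parameter
+ *     OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
+ *     if (status == OperationStatus.SUCCESS) {
+ *         byte[] result = data.getData();  // newly allocated by the call
+ *     }
+ * </pre></blockquote>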
+ *
+ * <h3>Offset and Size Properties</h3>
+ *
+ * <p>By default the Offset property is zero and the Size property is the
+ * length of the byte array.  However, to allow for optimizations involving the
+ * partial use of a byte array, the Offset and Size may be set to non-default
+ * values.</p>
+ *
+ * <p>For DatabaseEntry output parameters, the Size will always be set to the
+ * length of the byte array and the Offset will always be set to zero.</p>
+ *
+ * <p>However, for DatabaseEntry input parameters the Offset and Size are set
+ * to non-default values by the built-in tuple and serial bindings.  For
+ * example, with a tuple or serial binding the byte array is grown dynamically
+ * as data is output, and the Size is set to the number of bytes actually used.
+ * For a serial binding, the Offset is set to a non-zero value in order to
+ * implement an optimization having to do with the serialization stream
+ * header.</p>
+ *
+ * <p>Therefore, for output DatabaseEntry parameters the application can assume
+ * that the Offset is zero and the Size is the length of the byte
+ * array. However, for input DatabaseEntry parameters the application should
+ * not make this assumption.  In general, it is safest for the application to
+ * always honor the Size and Offset properties, rather than assuming they have
+ * default values.</p>
+ *
+ * <h3>Partial Offset and Length Properties</h3>
+ *
+ * <p>By default the specified data (byte array, offset and size) corresponds
+ * to the full stored key or data item.  Optionally, the Partial property can
+ * be set to true, and the PartialOffset and PartialLength properties are used
+ * to specify the portion of the key or data item to be read or written.  For
+ * details, see the {@link #setPartial(int,int,boolean)} method.</p>
+ *
+ * <p>Note that the Partial properties are set only by the caller.  They will
+ * never be set by a Database or Cursor method, nor will they ever be set by
+ * bindings.  Therefore, the application can assume that the Partial properties
+ * are not set, unless the application itself sets them explicitly.</p>
+ */
+public class DatabaseEntry {
+
+    /* Currently, JE stores all data records as byte arrays. */
+    private byte[] data;
+    private int dlen = 0;
+    private int doff = 0;
+    private int offset = 0;
+    private int size = 0;
+    private boolean partial = false;
+
+    /* FindBugs - ignore not "final" since a user can set this. */
+    /** @hidden
+     * The maximum number of bytes to show when toString() is called.
+     */
+    public static int MAX_DUMP_BYTES = 100;
+
+    /**
+     * Returns all the attributes of the database entry in text form, including
+     * the underlying data.  The maximum number of bytes that will be formatted
+     * is taken from the static variable DatabaseEntry.MAX_DUMP_BYTES, which
+     * defaults to 100.  MAX_DUMP_BYTES may be changed by an application if it
+     * wishes to cause more bytes to be formatted.
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer("<DatabaseEntry");
+        if (partial) {
+            sb.append(" partial=\"true");
+            sb.append("\" doff=\"").append(doff);
+            sb.append("\" dlen=\"").append(dlen);
+            sb.append("\"");
+        }
+        sb.append(" offset=\"").append(offset);
+        sb.append("\" size=\"").append(size);
+        sb.append("\" data=\"").append(dumpData());
+        /* Note when the data is longer than the MAX_DUMP_BYTES shown. */
+        if (size > MAX_DUMP_BYTES) {
+            sb.append(" ... ").append((size - MAX_DUMP_BYTES) +
+                                      " bytes not shown ");
+        }
+        sb.append("\"/>");
+        return sb.toString();
+    }
+
+    /*
+     * Constructors
+     */
+
+    /**
+     * Constructs a DatabaseEntry with null data. The offset and size are set
+     * to zero.
+     */
+    public DatabaseEntry() {
+    }
+
+    /**
+     * Constructs a DatabaseEntry with a given byte array.  The offset is set
+     * to zero; the size is set to the length of the array, or to zero if null
+     * is passed.
+     *
+     * @param data Byte array wrapped by the DatabaseEntry.
+     */
+    public DatabaseEntry(byte[] data) {
+        this.data = data;
+        if (data != null) {
+            this.size = data.length;
+        }
+    }
+
+    /**
+     * Constructs a DatabaseEntry with a given byte array, offset and size.
+     *
+     * @param data Byte array wrapped by the DatabaseEntry.
+     *
+     * @param offset Offset of the first byte in the byte array to be included.
+     *
+     * @param size Number of bytes in the byte array to be included.
+     */
+    public DatabaseEntry(byte[] data, int offset, int size) {
+        this.data = data;
+        this.offset = offset;
+        this.size = size;
+    }
+
+    /*
+     * Accessors
+     */
+
+    /**
+     * Returns the byte array.
+     *
+     * <p>For a DatabaseEntry that is used as an output parameter, the byte
+     * array will always be a newly allocated array.  The byte array specified
+     * by the caller will not be used and may be null.</p>
+     *
+     * @return The byte array.
+     */
+    public byte[] getData() {
+        return data;
+    }
+
+    /**
+     * Sets the byte array.  The offset is set to zero; the size is set to the
+     * length of the array, or to zero if null is passed.
+     *
+     * @param data Byte array wrapped by the DatabaseEntry.
+     */
+    public void setData(byte[] data) {
+	this.data = data;
+        offset = 0;
+	size = (data == null) ? 0 : data.length;
+    }
+
+    /**
+     * Sets the byte array, offset and size.
+     *
+     * @param data Byte array wrapped by the DatabaseEntry.
+     *
+     * @param offset Offset of the first byte in the byte array to be included.
+     *
+     * @param size Number of bytes in the byte array to be included.
+     */
+    public void setData(byte[] data, int offset, int size) {
+	this.data = data;
+        this.offset = offset;
+        this.size = size;
+    }
+
+    /**
+     * Configures this DatabaseEntry to read or write partial records.
+     *
+     * <p>Do partial retrieval or storage of an item.  If the calling
+     * application is doing a retrieval, length bytes specified by
+     * <tt>dlen</tt>, starting at the offset set by <tt>doff</tt> bytes from
+     * the beginning of the retrieved data record are returned as if they
+     * comprised the entire record.  If any or all of the specified bytes do
+     * not exist in the record, the get is successful, and any existing bytes
+     * are returned.</p>
+     *
+     * <p>For example, if the data portion of a retrieved record was 100 bytes,
+     * and a partial retrieval was done using a DatabaseEntry having a partial
+     * length of 20 and a partial offset of 85, the retrieval would succeed and
+     * the retrieved data would be the last 15 bytes of the record.</p>
+     *
+     * <p>If the calling application is storing an item, length bytes specified
+     * by <tt>dlen</tt>, starting at the offset set by <tt>doff</tt> bytes from
+     * the beginning of the specified key's data item are replaced by the data
+     * specified by the DatabaseEntry.  If the partial length is smaller than
+     * the data, the record will grow; if the partial length is larger than the
+     * data, the record will shrink.  If the specified bytes do not exist, the
+     * record will be extended using nul bytes as necessary, and the store will
+     * succeed.</p>
+     *
+     * <p>It is an error to specify a partial key when performing a put
+     * operation of any kind.</p>
+     *
+     * <p>It is an error to attempt a partial store using the {@link
+     * com.sleepycat.je.Database#put Database.put} method in a database that
+     * supports duplicate records. Partial stores in databases supporting
+     * duplicate records must be done using a cursor method.</p>
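+     *
+     * <p>For example, to read 20 bytes starting at byte 85 of a stored
+     * record (a sketch; the database handle and key are assumed):</p>
+     *
+     * <blockquote><pre>
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     data.setPartial(85, 20, true);
+     *     OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
+     *     // If the record is 100 bytes, the last 15 bytes are returned.
+     * </pre></blockquote>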
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @param doff The offset of the partial record being read or written by
+     * the application, in bytes.
+     *
+     * @param dlen The byte length of the partial record being read or written
+     * by the application, in bytes.
+     *
+     * @param partial Whether this DatabaseEntry is configured to read or write
+     * partial records.
+     */
+    public void setPartial(int doff, int dlen, boolean partial) {
+        setPartialOffset(doff);
+        setPartialLength(dlen);
+        setPartial(partial);
+    }
+
+    /**
+     * Returns the byte length of the partial record being read or written by
+     * the application, in bytes.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @return The byte length of the partial record being read or written by
+     * the application, in bytes.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public int getPartialLength() {
+        return dlen;
+    }
+
+    /**
+     * Sets the byte length of the partial record being read or written by the
+     * application, in bytes.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @param dlen The byte length of the partial record being read or written
+     * by the application, in bytes.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public void setPartialLength(int dlen) {
+        this.dlen = dlen;
+    }
+
+    /**
+     * Returns the offset of the partial record being read or written by the
+     * application, in bytes.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @return The offset of the partial record being read or written by the
+     * application, in bytes.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public int getPartialOffset() {
+        return doff;
+    }
+
+    /**
+     * Sets the offset of the partial record being read or written by the
+     * application, in bytes.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @param doff The offset of the partial record being read or written by
+     * the application, in bytes.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public void setPartialOffset(int doff) {
+        this.doff = doff;
+    }
+
+    /**
+     * Returns whether this DatabaseEntry is configured to read or write
+     * partial records.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @return Whether this DatabaseEntry is configured to read or write
+     * partial records.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public boolean getPartial() {
+        return partial;
+    }
+
+    /**
+     * Configures this DatabaseEntry to read or write partial records.
+     *
+     * <p>Note that the Partial properties are set only by the caller.  They
+     * will never be set by a Database or Cursor method.</p>
+     *
+     * @param partial Whether this DatabaseEntry is configured to read or write
+     * partial records.
+     *
+     * @see #setPartial(int,int,boolean)
+     */
+    public void setPartial(boolean partial) {
+        this.partial = partial;
+    }
+
+    /**
+     * Returns the byte offset into the data array.
+     *
+     * <p>For a DatabaseEntry that is used as an output parameter, the offset
+     * will always be zero.</p>
+     *
+     * @return Offset of the first byte in the byte array to be included.
+     */
+    public int getOffset() {
+        return offset;
+    }
+
+    /**
+     * Sets the byte offset into the data array.
+     *
+     * @param offset Offset of the first byte in the byte array to be included.
+     */
+    public void setOffset(int offset) {
+        this.offset = offset;
+    }
+
+    /**
+     * Returns the byte size of the data array.
+     *
+     * <p>For a DatabaseEntry that is used as an output parameter, the size
+     * will always be the length of the data array.</p>
+     *
+     * @return Number of bytes in the byte array to be included.
+     */
+    public int getSize() {
+        return size;
+    }
+
+    /**
+     * Sets the byte size of the data array.
+     *
+     * @param size Number of bytes in the byte array to be included.
+     */
+    public void setSize(int size) {
+        this.size = size;
+    }
+
+    /**
+     * Dumps the data as a byte array, for tracing purposes
+     */
+    String dumpData() {
+	return Key.DUMP_TYPE.dumpByteArray
+	    (KeyRange.getByteArray(this, MAX_DUMP_BYTES));
+    }
+
+    /**
+     * Compares the data of two entries for byte-by-byte equality.
+     *
+     * <p>In either entry, if the offset is non-zero or the size is not equal
+     * to the data array length, then only the data bounded by these values is
+     * compared.  The data array length and offset need not be the same in both
+     * entries for them to be considered equal.</p>
+     *
+     * <p>If the data array is null in one entry, then to be considered equal
+     * both entries must have a null data array.</p>
+     *
+     * <p>If the partial property is set in either entry, then to be considered
+     * equal both entries must have the same partial properties: partial,
+     * partialOffset and partialLength.</p>
+     */
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DatabaseEntry)) {
+            return false;
+        }
+        DatabaseEntry e = (DatabaseEntry) o;
+        if (partial || e.partial) {
+            if (partial != e.partial ||
+                dlen != e.dlen ||
+                doff != e.doff) {
+                return false;
+            }
+        }
+        if (data == null && e.data == null) {
+            return true;
+        }
+        if (data == null || e.data == null) {
+            return false;
+        }
+        if (size != e.size) {
+            return false;
+        }
+        for (int i = 0; i < size; i += 1) {
+            if (data[offset + i] != e.data[e.offset + i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns a hash code based on the data value.
+     */
+    @Override
+    public int hashCode() {
+        int hash = 0;
+        if (data != null) {
+            for (int i = 0; i < size; i += 1) {
+                hash += data[offset + i];
+            }
+        }
+        return hash;
+    }
+}
diff --git a/src/com/sleepycat/je/DatabaseException.java b/src/com/sleepycat/je/DatabaseException.java
new file mode 100644
index 0000000000000000000000000000000000000000..47c3814125be8a39022c87f6a74d3bee6c10a1de
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseException.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseException.java,v 1.26.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The root of all database exceptions.
+ *
+ * Note that in some cases, certain methods return status values without
+ * issuing an exception. This occurs in situations that are not normally
+ * considered errors, but in which some informational status is returned.  For
+ * example, {@link com.sleepycat.je.Database#get Database.get} returns {@link
+ * com.sleepycat.je.OperationStatus#NOTFOUND OperationStatus.NOTFOUND} when a
+ * requested key does not appear in the database.
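+ *
+ * <p>A hedged usage sketch (the database handle and entries are assumed):</p>
+ *
+ * <blockquote><pre>
+ *     try {
+ *         OperationStatus status = db.get(null, key, data, null);
+ *         if (status == OperationStatus.NOTFOUND) {
+ *             // not an error: the key is simply absent
+ *         }
+ *     } catch (DatabaseException e) {
+ *         // a genuine failure
+ *     }
+ * </pre></blockquote>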
+ */
+public class DatabaseException extends Exception {
+
+    public DatabaseException() {
+        super();
+    }
+
+    public DatabaseException(Throwable t) {
+        super(t);
+    }
+
+    public DatabaseException(String message) {
+        super(getVersionHeader() + message);
+    }
+
+    public DatabaseException(String message, Throwable t) {
+        super((getVersionHeader() + message), t);
+    }
+
+    /* 
+     * @hidden 
+     * Utility for generating the version at the start of the exception 
+     * message. Public for unit tests. 
+     */
+    public static String getVersionHeader() {
+        return "(JE " + JEVersion.CURRENT_VERSION + ") ";
+    }
+}
diff --git a/src/com/sleepycat/je/DatabaseNotFoundException.java b/src/com/sleepycat/je/DatabaseNotFoundException.java
new file mode 100644
index 0000000000000000000000000000000000000000..98a3e173cb688057c59b0249c6e3a1bae933a1b4
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseNotFoundException.java
@@ -0,0 +1,32 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseNotFoundException.java,v 1.9.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Thrown when an operation requires a database and that database does not
+ * exist.
+ */
+public class DatabaseNotFoundException extends DatabaseException {
+
+    public DatabaseNotFoundException() {
+	super();
+    }
+
+    public DatabaseNotFoundException(Throwable t) {
+        super(t);
+    }
+
+    public DatabaseNotFoundException(String message) {
+	super(message);
+    }
+
+    public DatabaseNotFoundException(String message, Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/src/com/sleepycat/je/DatabaseStats.java b/src/com/sleepycat/je/DatabaseStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..7aabee2d206471f0df20a07fd73ea961508b7eb1
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseStats.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseStats.java,v 1.24.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+
+/**
+ * Statistics for a single database.
+ */
+public abstract class DatabaseStats implements Serializable {
+    // no public constructor
+    protected DatabaseStats() {}
+}
diff --git a/src/com/sleepycat/je/DatabaseTrigger.java b/src/com/sleepycat/je/DatabaseTrigger.java
new file mode 100644
index 0000000000000000000000000000000000000000..ce60d936b271e9a8bb269111775a7dfd89366d81
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseTrigger.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseTrigger.java,v 1.11.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.txn.Locker;
+
+/**
+ * Implemented to receive database update notifications.
+ *
+ * <p>The databaseUpdated() method may perform additional database operations
+ * using the transaction passed to it, or by starting a new transaction.
+ * The transaction passed may not be explicitly committed or aborted.</p>
+ */
+interface DatabaseTrigger {
+
+    /**
+     * Notifies the trigger that it has been added and will start receiving
+     * update notifications.
+     *
+     * @param db the database to which the trigger was added.
+     */
+    void triggerAdded(Database db);
+
+    /**
+     * Notifies the trigger that it has been removed and will stop receiving
+     * update notifications.
+     *
+     * @param db the database from which the trigger was removed.
+     */
+    void triggerRemoved(Database db);
+
+    /**
+     * Notifies the trigger that a put or delete operation has been performed
+     * on the database.
+     *
+     * <p>When a new entry is inserted, oldData will be null and newData will
+     * be non-null.</p>
+     *
+     * <p>When an existing entry is updated, oldData and newData will be
+     * non-null.</p>
+     *
+     * <p>When an existing entry is deleted, oldData will be non-null and
+     * newData will be null.</p>
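+     *
+     * <p>A minimal implementation sketch (illustrative only):</p>
+     * <pre>
+     *    public void databaseUpdated(Database db, Locker locker,
+     *                                DatabaseEntry priKey,
+     *                                DatabaseEntry oldData,
+     *                                DatabaseEntry newData) {
+     *        if (oldData == null) {
+     *            // a new record was inserted
+     *        } else if (newData == null) {
+     *            // an existing record was deleted
+     *        } else {
+     *            // an existing record was updated
+     *        }
+     *    }
+     * </pre>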
+     *
+     * @param db the database that was modified.
+     *
+     * @param locker the internal locker.
+     *
+     * @param priKey the primary key, which is never null.
+     *
+     * @param oldData the primary data before the change, or null if the record
+     * did not previously exist.
+     *
+     * @param newData the primary data after the change, or null if the record
+     * has been deleted.
+     */
+    void databaseUpdated(Database db,
+                         Locker locker,
+                         DatabaseEntry priKey,
+                         DatabaseEntry oldData,
+                         DatabaseEntry newData)
+        throws DatabaseException;
+}
+
diff --git a/src/com/sleepycat/je/DatabaseUtil.java b/src/com/sleepycat/je/DatabaseUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..bf67002a3a59314164838b4c0fc77601665836f5
--- /dev/null
+++ b/src/com/sleepycat/je/DatabaseUtil.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseUtil.java,v 1.36.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Utils for use in the db package.
+ */
+class DatabaseUtil {
+
+    /**
+     * Throws an exception if the parameter is null.
+     */
+    static void checkForNullParam(Object param, String name) {
+        if (param == null) {
+            throw new NullPointerException(name + " cannot be null");
+        }
+    }
+
+    /**
+     * Throws an exception if the dbt is null or the data field is not set.
+     */
+    static void checkForNullDbt(DatabaseEntry dbt,
+				String name,
+				boolean checkData) {
+        if (dbt == null) {
+            throw new NullPointerException
+		("DatabaseEntry " + name + " cannot be null");
+        }
+
+        if (checkData) {
+            if (dbt.getData() == null) {
+                throw new NullPointerException
+		    ("Data field for DatabaseEntry " +
+		     name + " cannot be null");
+            }
+        }
+    }
+
+    /**
+     * Throws an exception if the key dbt has the partial flag set.  This
+     * method should be called for all put() operations.
+     */
+    static void checkForPartialKey(DatabaseEntry dbt) {
+        if (dbt.getPartial()) {
+            throw new IllegalArgumentException
+		("A partial key DatabaseEntry is not allowed");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/DbInternal.java b/src/com/sleepycat/je/DbInternal.java
new file mode 100644
index 0000000000000000000000000000000000000000..04797153fcaca20355ac0572d3b08664acd4c0f1
--- /dev/null
+++ b/src/com/sleepycat/je/DbInternal.java
@@ -0,0 +1,319 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbInternal.java,v 1.57.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.util.Properties;
+
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+
+/**
+ * @hidden
+ * For internal use only. It serves to shelter methods that must be public to
+ * be used by other BDB JE packages but that are not part of the public API
+ * available to applications.
+ */
+public class DbInternal {
+
+    /**
+     * Proxy to Database.invalidate()
+     */
+    public static void dbInvalidate(Database db) {
+        db.invalidate();
+    }
+
+    /**
+     * Proxy to Database.setHandleLocker()
+     */
+    public static void dbSetHandleLocker(Database db, Locker locker) {
+        db.setHandleLocker(locker);
+    }
+
+    /**
+     * Proxy to Environment.getDbEnvironment
+     */
+    public static EnvironmentImpl envGetEnvironmentImpl(Environment env) {
+        return env.getEnvironmentImpl();
+    }
+
+    /**
+     * Proxy to Cursor.retrieveNext().
+     */
+    public static OperationStatus retrieveNext(Cursor cursor,
+                                               DatabaseEntry key,
+                                               DatabaseEntry data,
+                                               LockMode lockMode,
+                                               GetMode getMode)
+        throws DatabaseException {
+
+        return cursor.retrieveNext(key, data, lockMode, getMode);
+    }
+
+    /**
+     * Proxy to Cursor.advanceCursor()
+     */
+    public static boolean advanceCursor(Cursor cursor,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data) {
+	return cursor.advanceCursor(key, data);
+    }
+
+    /**
+     * Proxy to Cursor.getCursorImpl()
+     */
+    public static CursorImpl getCursorImpl(Cursor cursor) {
+        return cursor.getCursorImpl();
+    }
+
+    /**
+     * Proxy to Database.getDatabase()
+     */
+    public static DatabaseImpl dbGetDatabaseImpl(Database db) {
+        return db.getDatabaseImpl();
+    }
+
+    /**
+     * Proxy to JoinCursor.getSortedCursors()
+     */
+    public static Cursor[] getSortedCursors(JoinCursor cursor) {
+	return cursor.getSortedCursors();
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.setLoadPropertyFile()
+     */
+    public static void setLoadPropertyFile(EnvironmentConfig config,
+                                           boolean loadProperties) {
+        config.setLoadPropertyFile(loadProperties);
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.setCreateUP()
+     */
+    public static void setCreateUP(EnvironmentConfig config,
+                                   boolean checkpointUP) {
+        config.setCreateUP(checkpointUP);
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.getCreateUP()
+     */
+    public static boolean getCreateUP(EnvironmentConfig config) {
+        return config.getCreateUP();
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.setCheckpointUP()
+     */
+    public static void setCheckpointUP(EnvironmentConfig config,
+                                       boolean checkpointUP) {
+        config.setCheckpointUP(checkpointUP);
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.getCheckpointUP()
+     */
+    public static boolean getCheckpointUP(EnvironmentConfig config) {
+        return config.getCheckpointUP();
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.setTxnReadCommitted()
+     */
+    public static void setTxnReadCommitted(EnvironmentConfig config,
+                                           boolean txnReadCommitted) {
+        config.setTxnReadCommitted(txnReadCommitted);
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.getTxnReadCommitted()
+     */
+    public static boolean getTxnReadCommitted(EnvironmentConfig config) {
+        return config.getTxnReadCommitted();
+    }
+
+    /**
+     * Proxy to EnvironmentConfig.cloneConfig()
+     */
+    public static EnvironmentConfig cloneConfig(EnvironmentConfig config) {
+        return config.cloneConfig();
+    }
+
+    /**
+     * Proxy to EnvironmentMutableConfig.cloneMutableConfig()
+     */
+    public static
+        EnvironmentMutableConfig cloneMutableConfig(EnvironmentMutableConfig
+                                                    config) {
+        return config.cloneMutableConfig();
+    }
+
+    /**
+     * Proxy to EnvironmentMutableConfig.checkImmutablePropsForEquality()
+     */
+    public static void
+        checkImmutablePropsForEquality(EnvironmentMutableConfig config,
+                                       EnvironmentMutableConfig passedConfig)
+        throws IllegalArgumentException {
+
+        config.checkImmutablePropsForEquality(passedConfig);
+    }
+
+    /**
+     * Proxy to EnvironmentMutableConfig.copyMutablePropsTo()
+     */
+    public static void copyMutablePropsTo(EnvironmentMutableConfig config,
+                                          EnvironmentMutableConfig toConfig) {
+        config.copyMutablePropsTo(toConfig);
+    }
+
+    /**
+     * Proxy to EnvironmentMutableConfig.validateParams.
+     */
+    public static void disableParameterValidation
+	(EnvironmentMutableConfig config) {
+	config.setValidateParams(false);
+    }
+
+    /**
+     * Proxy to EnvironmentMutableConfig.getProps
+     */
+    public static Properties getProps(EnvironmentMutableConfig config) {
+        return config.getProps();
+    }
+
+    /**
+     * Proxy to DatabaseConfig.setUseExistingConfig()
+     */
+    public static void setUseExistingConfig(DatabaseConfig config,
+                                            boolean useExistingConfig) {
+        config.setUseExistingConfig(useExistingConfig);
+    }
+
+    /**
+     * Proxy to DatabaseConfig.validate(DatabaseConfig)
+     */
+    public static void databaseConfigValidate(DatabaseConfig config1,
+                                              DatabaseConfig config2)
+        throws DatabaseException {
+
+        config1.validate(config2);
+    }
+
+    /**
+     * Proxy to Transaction.getLocker()
+     */
+    public static Locker getLocker(Transaction txn)
+        throws DatabaseException {
+
+        return txn.getLocker();
+    }
+
+    /**
+     * Proxy to Environment.getDefaultTxnConfig()
+     */
+    public static TransactionConfig getDefaultTxnConfig(Environment env) {
+        return env.getDefaultTxnConfig();
+    }
+
+    /**
+     * Get an Environment only if the environment is already open. This
+     * will register this Environment in the EnvironmentImpl's reference count,
+     * but will not configure the environment.
+     * @return null if the environment is not already open.
+     */
+    public static Environment getEnvironmentShell(File environmentHome) {
+        Environment env = null;
+        try {
+            env = new Environment(environmentHome);
+
+            /* If the environment is not already open, return a null. */
+            if (env.getEnvironmentImpl() == null) {
+                env = null;
+            }
+        } catch (DatabaseException e) {
+
+            /*
+             * Klocwork - ok: the environment is not valid.
+             */
+        }
+        return env;
+    }
+
+    public static RunRecoveryException makeNoArgsRRE() {
+	return new RunRecoveryException();
+    }
+
+    public static ExceptionEvent makeExceptionEvent(Exception e, String n) {
+	return new ExceptionEvent(e, n);
+    }
+
+    public static Database openLocalInternalDatabase(Environment env,
+						String databaseName,
+						DatabaseConfig dbConfig)
+	throws DatabaseException {
+
+	return env.openLocalInternalDatabase(databaseName, dbConfig);
+    }
+
+    public static void removeInternalDatabase(Environment env,
+                                              Transaction txn,
+                                              String databaseName,
+                                              boolean autoTxnIsReplicated)
+	throws DatabaseException {
+
+	env.removeDatabaseInternal(txn, databaseName,
+                                          autoTxnIsReplicated);
+    }
+
+    public static long truncateInternalDatabase(Environment env,
+                                                Transaction txn,
+                                                String databaseName,
+                                                boolean returnCount,
+                                                boolean autoTxnIsReplicated)
+	throws DatabaseException {
+
+	return env.truncateDatabaseInternal(txn, databaseName, returnCount,
+                                            autoTxnIsReplicated);
+    }
+
+    public static void setDbConfigReplicated(DatabaseConfig dbConfig,
+                                             boolean replicated) {
+        dbConfig.setReplicated(replicated);
+    }
+
+    public static boolean getDbConfigReplicated(DatabaseConfig dbConfig) {
+
+        return dbConfig.getReplicated();
+    }
+
+    public static boolean dbConfigPersistentEquals(DatabaseConfig dbConfig,
+                                                   DatabaseConfig other) {
+
+        return dbConfig.persistentEquals(other);
+    }
+
+    public static Environment makeEnvironment(File envHome,
+                                              EnvironmentConfig config,
+                                              boolean replicationIntended)
+        throws DatabaseException {
+
+        return new Environment(envHome, config, replicationIntended);
+    }
+
+    public static Txn getTxn(Transaction transaction) {
+        return transaction.getTxn();
+    }
+}
diff --git a/src/com/sleepycat/je/DeadlockException.java b/src/com/sleepycat/je/DeadlockException.java
new file mode 100644
index 0000000000000000000000000000000000000000..285ddac3a188d65b93f7c6bb7de39d3247f24f3b
--- /dev/null
+++ b/src/com/sleepycat/je/DeadlockException.java
@@ -0,0 +1,93 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeadlockException.java,v 1.18.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * DeadlockException is thrown to a thread of control when multiple threads
+ * competing for a lock are deadlocked or when a lock request would need to
+ * block and the transaction has been configured to not wait for locks. The
+ * exception carries two arrays of transaction ids, one of the owners and the
+ * other of the waiters, at the time of the timeout.
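+ *
+ * <p>A common response is to abort and retry the transaction. A hedged
+ * sketch; the environment handle, retry limit, and doWork method are
+ * illustrative:</p>
+ *
+ * <blockquote><pre>
+ *     for (int retry = 0; retry &lt; MAX_RETRIES; retry += 1) {
+ *         Transaction txn = env.beginTransaction(null, null);
+ *         try {
+ *             doWork(txn);
+ *             txn.commit();
+ *             break;
+ *         } catch (DeadlockException e) {
+ *             txn.abort();  // release locks before retrying
+ *         }
+ *     }
+ * </pre></blockquote>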
+ */
+public class DeadlockException extends DatabaseException {
+
+    private long[] ownerTxnIds;
+    private long[] waiterTxnIds;
+    private long timeoutMillis;
+
+    public DeadlockException() {
+	super();
+    }
+
+    public DeadlockException(Throwable t) {
+        super(t);
+    }
+
+    public DeadlockException(String message) {
+	super(message);
+    }
+
+    public DeadlockException(String message, Throwable t) {
+        super(message, t);
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setOwnerTxnIds(long[] ownerTxnIds) {
+	this.ownerTxnIds = ownerTxnIds;
+    }
+
+    /**
+     * Returns an array of longs containing transaction ids of owners at the
+     * time of the timeout.
+     *
+     * @return an array of longs containing transaction ids of owners at the
+     * time of the timeout.
+     */
+    public long[] getOwnerTxnIds() {
+	return ownerTxnIds;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setWaiterTxnIds(long[] waiterTxnIds) {
+	this.waiterTxnIds = waiterTxnIds;
+    }
+
+    /**
+     * Returns an array of longs containing transaction ids of waiters at the
+     * time of the timeout.
+     *
+     * @return an array of longs containing transaction ids of waiters at the
+     * time of the timeout.
+     */
+    public long[] getWaiterTxnIds() {
+	return waiterTxnIds;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setTimeoutMillis(long timeoutMillis) {
+	this.timeoutMillis = timeoutMillis;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public long getTimeoutMillis() {
+	return timeoutMillis;
+    }
+}
diff --git a/src/com/sleepycat/je/Durability.java b/src/com/sleepycat/je/Durability.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1d16434b25adcebfbe7118b63c8b73db8fca329
--- /dev/null
+++ b/src/com/sleepycat/je/Durability.java
@@ -0,0 +1,177 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Durability.java,v 1.5.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * @hidden
+ *
+ * Durability defines the overall durability characteristics associated with a
+ * transaction. When operating on a local environment the durability of a
+ * transaction is completely determined by the local SyncPolicy that is in
+ * effect. In a replicated environment, the overall durability is additionally
+ * a function of the ReplicaAckPolicy used by the master and the SyncPolicy in
+ * effect at each Replica.
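+ *
+ * <p>A hedged construction sketch, pairing a local write-no-sync commit with
+ * no-sync replicas and a quorum acknowledgment policy:</p>
+ *
+ * <blockquote><pre>
+ *     Durability durability =
+ *         new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+ *                        Durability.SyncPolicy.NO_SYNC,
+ *                        Durability.ReplicaAckPolicy.QUORUM);
+ * </pre></blockquote>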
+ */
+public class Durability {
+
+    /**
+     * Defines the synchronization policy to be used when committing a
+     * transaction.
+     */
+    public enum SyncPolicy {
+
+        /**
+         *  Write and synchronously flush the log on transaction commit.
+         *  Transactions exhibit all the ACID (atomicity, consistency,
+         *  isolation, and durability) properties.
+         *
+         *  This is the default.
+         */
+        SYNC,
+
+        /**
+         * Do not write or synchronously flush the log on transaction commit.
+         * Transactions exhibit the ACI (atomicity, consistency, and isolation)
+         * properties, but not D (durability); that is, database integrity will
+         * be maintained, but if the application or system fails, it is
+         * possible some number of the most recently committed transactions may
+         * be undone during recovery. The number of transactions at risk is
+         * governed by how many log updates can fit into the log buffer, how
+         * often the operating system flushes dirty buffers to disk, and how
+         * often the log is checkpointed.
+         */
+        NO_SYNC,
+
+        /**
+         * Write but do not synchronously flush the log on transaction commit.
+         * Transactions exhibit the ACI (atomicity, consistency, and isolation)
+         * properties, but not D (durability); that is, database integrity will
+         * be maintained, but if the operating system fails, it is possible
+         * some number of the most recently committed transactions may be
+         * undone during recovery. The number of transactions at risk is
+         * governed by how often the operating system flushes dirty buffers to
+         * disk, and how often the log is checkpointed.
+         */
+        WRITE_NO_SYNC
+    };
+
+    /**
+     * A replicated environment makes it possible to increase an application's
+     * transaction commit guarantees by committing changes to its replicas on
+     * the network. ReplicaAckPolicy defines the policy for how such network
+     * commits are handled.
+     *
+     * The choice of a ReplicaAckPolicy must be consistent across all the
+     * replicas in a replication group, to ensure that the policy is
+     * consistently enforced in the event of an election.
+     */
+    public enum ReplicaAckPolicy {
+
+        /**
+         * All replicas must acknowledge that they have committed the
+         * transaction. This policy should be selected only if your replication
+         * group has a small number of replicas, and those replicas are on
+         * extremely reliable networks and servers.
+         */
+        ALL,
+
+        /**
+         * No transaction commit acknowledgments are required and the master
+         * will never wait for replica acknowledgments. In this case,
+         * transaction durability is determined entirely by the type of commit
+         * that is being performed on the master.
+         */
+        NONE,
+
+        /**
+         * A quorum of replicas must acknowledge that they have committed the
+         * transaction. A quorum is reached when acknowledgments are received
+         * from the minimum number of environments needed to ensure that the
+         * transaction remains durable if an election is held. That is, the
+         * master wants to hear from enough replicas that they have committed
+         * the transaction so that if an election is held, the modifications
+         * will exist even if a new master is selected.
+         *
+         * This is the default.
+         */
+        QUORUM;
+
+        /**
+         * Returns the minimum number of replication nodes required to
+         * implement the ReplicaAckPolicy for a given group size.
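+         *
+         * For example, with QUORUM a group of five nodes requires
+         * 5/2 + 1 = 3 acknowledging nodes; ALL requires all five, and
+         * NONE requires only the master itself.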
+         *
+         * @param groupSize the size of the replication group.
+         *
+         * @return the number of nodes that are needed
+         */
+        public int requiredNodes(int groupSize) {
+            switch (this) {
+            case ALL:
+                return groupSize;
+            case NONE:
+                return 1;
+            case QUORUM:
+                return (groupSize <= 2) ? 1 : (groupSize / 2 + 1);
+            }
+            assert false : "unreachable";
+            return Integer.MAX_VALUE;
+        }
+    }
+
+    /* The sync policy in effect on the local node. */
+    final private SyncPolicy localSync;
+
+    /* The sync policy in effect on a replica. */
+    final private SyncPolicy replicaSync;
+
+    /* The replica acknowledgment policy to be used. */
+    final private ReplicaAckPolicy replicaAck;
+
+    /**
+     * Creates an instance of a Durability specification.
+     *
+     * @param localSync the SyncPolicy to be used when committing the
+     * transaction locally.
+     * @param replicaSync the SyncPolicy to be used remotely, as part of a
+     * transaction acknowledgment, at a Replica node.
+     * @param replicaAck the acknowledgment policy used when obtaining
+     * transaction acknowledgments from Replicas.
+     */
+    public Durability(SyncPolicy localSync,
+                      SyncPolicy replicaSync,
+                      ReplicaAckPolicy replicaAck) {
+        this.localSync = localSync;
+        this.replicaSync = replicaSync;
+        this.replicaAck = replicaAck;
+    }
+
+    /**
+     * Returns the transaction synchronization policy to be used locally when
+     * committing a transaction.
+     */
+    public SyncPolicy getLocalSync() {
+        return localSync;
+    }
+
+    /**
+     * Returns the transaction synchronization policy to be used by the replica
+     * as it replays a transaction that needs an acknowledgment.
+     */
+    public SyncPolicy getReplicaSync() {
+        return replicaSync;
+    }
+
+    /**
+     * Returns the replica acknowledgment policy used by the master when
+     * committing changes to a replicated environment.
+     */
+    public ReplicaAckPolicy getReplicaAck() {
+        return replicaAck;
+    }
+}
diff --git a/src/com/sleepycat/je/Environment.java b/src/com/sleepycat/je/Environment.java
new file mode 100644
index 0000000000000000000000000000000000000000..dbd93d0d559131eb6777b996245609332905c712
--- /dev/null
+++ b/src/com/sleepycat/je/Environment.java
@@ -0,0 +1,1794 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Environment.java,v 1.217.2.4 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Level;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbEnvPool;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.LockerFactory;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DatabaseUtil;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * A database environment.  Environments include support for some or all of
+ * caching, locking, logging and transactions.
+ *
+ * <p>To open an existing environment with default attributes the application
+ * may use a default environment configuration object or null:</p>
+ *
+ * <blockquote><pre>
+ *   // Open an environment handle with default attributes.
+ *   Environment env = new Environment(home, new EnvironmentConfig());
+ * </pre></blockquote>
+ *
+ * <p>or</p>
+ *
+ * <blockquote><pre>
+ *     Environment env = new Environment(home, null);
+ * </pre></blockquote>
+ *
+ * <p>Note that many Environment objects may access a single environment.</p>
+ *
+ * <p>To create an environment or customize attributes, the application should
+ * customize the configuration class. For example:</p>
+ *
+ * <blockquote><pre>
+ *     EnvironmentConfig envConfig = new EnvironmentConfig();
+ *     envConfig.setTransactional(true);
+ *     envConfig.setAllowCreate(true);
+ *     envConfig.setCacheSize(1000000);
+ *
+ *     Environment newlyCreatedEnv = new Environment(home, envConfig);
+ * </pre></blockquote>
+ *
+ * <p>Note that environment configuration parameters can also be set through
+ * the &lt;environment home&gt;/je.properties file. This file takes precedence
+ * over any programmatically specified configuration parameters so that
+ * configuration changes can be made without recompiling. Environment
+ * configuration follows this order of precedence:</p>
+ *
+ * <ol>
+ * <li>Configuration parameters specified in
+ * &lt;environment home&gt;/je.properties take first precedence.
+ * <li> Configuration parameters set in the EnvironmentConfig object used at
+ * Environment construction are next.
+ * <li>Any configuration parameters not set by the application are set to
+ * system defaults, described along with the parameter name String constants
+ * in the EnvironmentConfig class.
+ * </ol>
+ *
+ * <p>An <em>environment handle</em> is an Environment instance.  More than one
+ * Environment instance may be created for the same physical directory, which
+ * is the same as saying that more than one Environment handle may be open at
+ * one time for a given environment.</p>
+ *
+ * The Environment handle should not be closed while any other handle remains
+ * open that is using it as a reference (for example, {@link
+ * com.sleepycat.je.Database Database} or {@link com.sleepycat.je.Transaction
+ * Transaction}).  Once {@link com.sleepycat.je.Environment#close
+ * Environment.close} is called, this object may not be accessed again,
+ * regardless of whether or not the close throws an exception.
+ */
+public class Environment {
+
+    /**
+     * @hidden
+     *  envImpl is a reference to the shared underlying environment.
+     */
+    protected EnvironmentImpl envImpl;
+    private TransactionConfig defaultTxnConfig;
+    private EnvironmentMutableConfig handleConfig;
+
+    private Set<Database> referringDbs;
+    private Set<Transaction> referringDbTxns;
+
+    private boolean valid;
+
+    /**
+     * @hidden
+     * The name of the cleaner daemon thread.  This constant is passed to an
+     * ExceptionEvent's threadName argument when an exception is thrown in the
+     * cleaner daemon thread.
+     */
+    public static final String CLEANER_NAME = "Cleaner";
+
+    /**
+     * @hidden
+     * The name of the IN Compressor daemon thread.  This constant is passed to
+     * an ExceptionEvent's threadName argument when an exception is thrown in
+     * the IN Compressor daemon thread.
+     */
+    public static final String INCOMP_NAME = "INCompressor";
+
+    /**
+     * @hidden
+     * The name of the Checkpointer daemon thread.  This constant is passed to
+     * an ExceptionEvent's threadName argument when an exception is thrown in
+     * the Checkpointer daemon thread.
+     */
+    public static final String CHECKPOINTER_NAME = "Checkpointer";
+
+    /**
+    * Creates a database environment handle.
+    *
+    * @param envHome The database environment's home directory.
+    *
+    * @param configuration The database environment attributes.  If null,
+    * default attributes are used.
+    *
+    * @throws IllegalArgumentException if an invalid parameter was specified.
+    *
+    * @throws DatabaseException if a failure occurs.
+    *
+    * @throws EnvironmentLockedException when an environment cannot be opened
+    * for write access because another process has the same environment open
+    * for write access.
+     */
+    public Environment(File envHome, EnvironmentConfig configuration)
+        throws DatabaseException, EnvironmentLockedException {
+
+        this(envHome, configuration, true /*openIfNeeded*/,
+             false /*replicationIntended*/);
+    }
+
+    /**
+     * Replication support. Environments are created before the replicator, but
+     * we must check at recovery time whether the environment will be used for
+     * replication, so we can error check the persistent replication bit.
+     */
+    Environment(File envHome,
+                EnvironmentConfig configuration,
+                boolean replicationIntended)
+        throws DatabaseException {
+
+        this(envHome, configuration, true /*openIfNeeded*/,
+             replicationIntended);
+    }
+
+    /**
+     * Gets an Environment for an existing EnvironmentImpl. Used by utilities
+     * such as the JMX MBean which don't want to open the environment or be
+     * reference counted. The calling application must take care not to retain
+     * the handle after the underlying environment has been closed.
+     */
+    Environment(File envHome)
+        throws DatabaseException {
+
+        this(envHome, null /*configuration*/, false /*openIfNeeded*/,
+             false /*replicationIntended*/);
+    }
+
+    /**
+     * Internal common constructor.
+     */
+    private Environment(File envHome,
+                        EnvironmentConfig configuration,
+                        boolean openIfNeeded,
+                        boolean replicationIntended)
+        throws DatabaseException {
+
+        /* If openIfNeeded is false, then configuration must be null. */
+        assert openIfNeeded || configuration == null;
+
+        envImpl = null;
+        referringDbs = Collections.synchronizedSet(new HashSet<Database>());
+        referringDbTxns =
+            Collections.synchronizedSet(new HashSet<Transaction>());
+        valid = false;
+
+        DatabaseUtil.checkForNullParam(envHome, "envHome");
+
+        /* If the user specified a null object, use the default */
+        EnvironmentConfig baseConfig = (configuration == null) ?
+            EnvironmentConfig.DEFAULT : configuration;
+
+        /* Make a copy, apply je.properties, and init the handle config. */
+        EnvironmentConfig useConfig = baseConfig.cloneConfig();
+        applyFileConfig(envHome, useConfig);
+        copyToHandleConfig(useConfig, useConfig);
+
+        /* Open a new or existing environment in the shared pool. */
+        envImpl = DbEnvPool.getInstance().getEnvironment
+            (envHome, useConfig,
+             configuration != null /*checkImmutableParams*/,
+             openIfNeeded, replicationIntended);
+
+        valid = true;
+    }
+
+    /**
+     * Applies the configurations specified in the je.properties file to
+     * override any programmatically set configurations.
+     */
+    private void applyFileConfig(File envHome,
+                                 EnvironmentMutableConfig useConfig)
+        throws IllegalArgumentException {
+
+        /* Apply the je.properties file. */
+        if (useConfig.getLoadPropertyFile()) {
+            DbConfigManager.applyFileConfig(envHome,
+                                            DbInternal.getProps(useConfig),
+                                            false,       // forReplication
+                                            useConfig.getClass().getName());
+        }
+    }
+
+    /**
+     * The Environment.close method closes the Berkeley DB environment.
+     *
+     * <p>When the last environment handle is closed, allocated resources are
+     * freed, and daemon threads are stopped, even if they are performing work.
+     * For example, if the cleaner is still cleaning the log, it will be
+     * stopped at the next reasonable opportunity and perform no more cleaning
+     * operations.</p>
+     *
+     * <p>The Environment handle should not be closed while any other handle
+     * that refers to it is not yet closed; for example, database environment
+     * handles must not be closed while database handles remain open, or
+     * transactions in the environment have not yet committed or aborted.
+     * Specifically, this includes {@link com.sleepycat.je.Database Database},
+     * {@link com.sleepycat.je.Cursor Cursor} and {@link
+     * com.sleepycat.je.Transaction Transaction} handles.</p>
+     *
+     * <p>In multithreaded applications, only a single thread should call
+     * Environment.close. Other callers will see a DatabaseException
+     * complaining that the handle is already closed.</p>
+     *
+     * <p>After Environment.close has been called, regardless of its return,
+     * the Berkeley DB environment handle may not be accessed again.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public synchronized void close()
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        try {
+            checkEnv();
+        } catch (RunRecoveryException e) {
+
+            /*
+             * We're trying to close on an environment that has seen a fatal
+             * exception. Try to do the minimum, such as closing file
+             * descriptors, to support re-opening the environment in the same
+             * JVM.
+             */
+            if (envImpl != null) {
+                envImpl.closeAfterRunRecovery();
+            }
+            return;
+        }
+
+        StringBuffer errors = new StringBuffer();
+        try {
+            if (referringDbs != null) {
+                int nDbs = referringDbs.size();
+                if (nDbs != 0) {
+                    errors.append("There ");
+                    if (nDbs == 1) {
+                        errors.append
+                            ("is 1 open Database in the Environment.\n");
+                    } else {
+                        errors.append("are ");
+                        errors.append(nDbs);
+                        errors.append
+                            (" open Databases in the Environment.\n");
+                    }
+                    errors.append("Closing the following databases:\n");
+
+                    /*
+                     * Copy the referringDbs Set because db.close() below
+                     * modifies this Set, potentially causing a
+                     * ConcurrentModificationException.
+                     */
+                    Iterator<Database> iter =
+                        new HashSet<Database>(referringDbs).iterator();
+                    while (iter.hasNext()) {
+                        String dbName = "";
+                        try {
+                            Database db = iter.next();
+
+                            /*
+                             * Save the db name before we attempt the close,
+                             * it's unavailable after the close.
+                             */
+                            dbName = db.getDebugName();
+                            errors.append(dbName).append(" ");
+                            db.close();
+                        } catch (RunRecoveryException e) {
+                            throw e;
+                        } catch (DatabaseException E) {
+                            errors.append("\nWhile closing Database ");
+                            errors.append(dbName);
+                            errors.append(" encountered exception: ");
+                            errors.append(E).append("\n");
+                        } catch (Exception E) {
+                            errors = new StringBuffer();
+                            throw new DatabaseException(E);
+                        }
+                    }
+                }
+            }
+
+            if (referringDbTxns != null) {
+                int nTxns = referringDbTxns.size();
+                if (nTxns != 0) {
+                    Iterator<Transaction> iter = referringDbTxns.iterator();
+                    errors.append("There ");
+                    if (nTxns == 1) {
+                        errors.append("is 1 existing transaction opened");
+                        errors.append(" against the Environment.\n");
+                    } else {
+                        errors.append("are ");
+                        errors.append(nTxns);
+                        errors.append(" existing transactions opened against");
+                        errors.append(" the Environment.\n");
+                    }
+                    errors.append("Aborting open transactions ...\n");
+
+                    while (iter.hasNext()) {
+                        Transaction txn = iter.next();
+                        try {
+                            errors.append("aborting " + txn);
+                            txn.abort();
+                        } catch (RunRecoveryException e) {
+                            throw e;
+                        } catch (DatabaseException DBE) {
+                            errors.append("\nWhile aborting transaction ");
+                            errors.append(txn.getId());
+                            errors.append(" encountered exception: ");
+                            errors.append(DBE).append("\n");
+                        }
+                    }
+                }
+            }
+
+            try {
+                envImpl.close();
+            } catch (RunRecoveryException e) {
+                throw e;
+            } catch (DatabaseException DBE) {
+                errors.append
+                    ("\nWhile closing Environment encountered exception: ");
+                errors.append(DBE).append("\n");
+            }
+        } finally {
+            envImpl = null;
+            valid = false;
+            if (errors.length() > 0) {
+                throw new DatabaseException(errors.toString());
+            }
+        }
+    }
+
+    /**
+     * Opens, and optionally creates, a <code>Database</code>.
+     *
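+     * <p>For example, to open a transactional database, creating it if it
+     * does not exist (an illustrative sketch; the database name is
+     * arbitrary):</p>
+     * <blockquote><pre>
+     *     DatabaseConfig dbConfig = new DatabaseConfig();
+     *     dbConfig.setTransactional(true);
+     *     dbConfig.setAllowCreate(true);
+     *     Database db = env.openDatabase(null, "myDatabase", dbConfig);
+     * </pre></blockquote>
+     *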
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param databaseName The name of the database.
+     *
+     * @param dbConfig The database attributes.  If null, default attributes
+     * are used.
+     *
+     * @return Database handle.
+     *
+     * @throws DatabaseNotFoundException if the database file does not exist.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public synchronized Database openDatabase(Transaction txn,
+                                              String databaseName,
+                                              DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+
+        /*
+         * Currently all user-created databases are replicated in a
+         * replicated environment.
+         */
+        try {
+            if (dbConfig == null) {
+                dbConfig = DatabaseConfig.DEFAULT;
+            }
+
+            Database db = new Database(this);
+            setupDatabase(txn, db, databaseName, dbConfig,
+                          false,                  // needWritableLockerForInit,
+                          false,                   // allowReservedName,
+                          envImpl.isReplicated()); // autoTxnIsReplicated
+            return db;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Creates a database for internal JE use. Used in situations when the user
+     * needs a Database handle; some internal uses go directly to the
+     * DatabaseImpl. DbConfig should not be null.
+     *  - permits use of reserved names.
+     *  - the Locker used is non-transactional or an auto commit txn
+     *  - the database is not replicated.
+     */
+    synchronized Database openLocalInternalDatabase(String databaseName,
+                                                    DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        /* Should only be used for non-replicated cases. */
+        assert !DbInternal.getDbConfigReplicated(dbConfig) :
+            databaseName + " shouldn't be replicated";
+
+        try {
+            Database db = new Database(this);
+            setupDatabase(null, // txn
+                          db, databaseName, dbConfig,
+                          false,  // needWritableLockerForInit,
+                          true,   // allowReservedName
+                          false); // autoTxnIsReplicated
+            return db;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Opens and optionally creates a <code>SecondaryDatabase</code>.
+     *
+     * <p>Note that the associations between primary and secondary databases
+     * are not stored persistently.  Whenever a primary database is opened for
+     * write access by the application, the appropriate associated secondary
+     * databases should also be opened by the application.  This is necessary
+     * to ensure data integrity when changes are made to the primary
+     * database.</p>
+     *
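+     * <p>For example (an illustrative sketch; {@code MyKeyCreator} is a
+     * hypothetical application class that implements the secondary key
+     * creator interface):</p>
+     * <blockquote><pre>
+     *     SecondaryConfig secConfig = new SecondaryConfig();
+     *     secConfig.setAllowCreate(true);
+     *     secConfig.setKeyCreator(new MyKeyCreator()); // hypothetical
+     *     SecondaryDatabase secDb =
+     *         env.openSecondaryDatabase(null, "mySecDb", primaryDb, secConfig);
+     * </pre></blockquote>
+     *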
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param databaseName The name of the database.
+     *
+     * @param primaryDatabase the primary database with which the secondary
+     * database will be associated.  The primary database must not be
+     * configured for duplicates.
+     *
+     * @param dbConfig The secondary database attributes.  If null, default
+     * attributes are used.
+     *
+     * @return Database handle.
+     *
+     * @throws DatabaseNotFoundException if the database file does not exist.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public synchronized
+        SecondaryDatabase openSecondaryDatabase(Transaction txn,
+                                                String databaseName,
+                                                Database primaryDatabase,
+                                                SecondaryConfig dbConfig)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+        try {
+            if (dbConfig == null) {
+                dbConfig = SecondaryConfig.DEFAULT;
+            }
+            SecondaryDatabase db =
+                new SecondaryDatabase(this, dbConfig, primaryDatabase);
+
+            /*
+             * If we're populating the secondary, we need a writable
+             * Locker from the start.
+             */
+            boolean needWritableLockerForInit = dbConfig.getAllowPopulate();
+            setupDatabase(txn, db, databaseName, dbConfig,
+                          needWritableLockerForInit,
+                          false,                   // allowReservedName
+                          envImpl.isReplicated()); // autoTxnIsReplicated
+            return db;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * @param txn may be null
+     * @param newDb is the Database handle which houses this database
+     * @param allowReservedName true if this database may use one of the
+     * names reserved for JE internal databases.
+     * @param autoTxnIsReplicated true if this setupDatabase is going to set
+     * up a replicated database.
+     */
+    private void setupDatabase(Transaction txn,
+                               Database newDb,
+                               String databaseName,
+                               DatabaseConfig dbConfig,
+                               boolean needWritableLockerForInit,
+                               boolean allowReservedName,
+                               boolean autoTxnIsReplicated)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullParam(databaseName, "databaseName");
+
+        Tracer.trace(Level.FINEST, envImpl, "Environment.open: " +
+                     " name=" + databaseName +
+                     " dbConfig=" + dbConfig);
+
+        /*
+         * Check that the open configuration is valid and doesn't conflict with
+         * the envImpl configuration.
+         */
+        validateDbConfig(dbConfig, databaseName);
+        validateDbConfigAgainstEnv(dbConfig, databaseName);
+
+        /* Perform eviction before each operation that allocates memory. */
+        envImpl.getEvictor().doCriticalEviction(false); // backgroundIO
+
+        Locker locker = null;
+        DatabaseImpl database = null;
+        boolean operationOk = false;
+        boolean dbIsClosing = false;
+        try {
+
+            /*
+             * Does this database exist? Get a transaction to use. If the
+             * database exists already, we really only need a readable locker.
+             * If the database must be created, we need a writable one.
+             * Unfortunately, we have to get the readable one first before we
+             * know whether we have to create.  However, if we need to write
+             * during initialization (to populate a secondary for example),
+             * then just create a writable locker now.
+             */
+            boolean isWritableLocker;
+            if (needWritableLockerForInit) {
+                locker = LockerFactory.getWritableLocker
+                    (this,
+                     txn,
+                     dbConfig.getTransactional(),
+                     true,  // retainNonTxnLocks
+                     autoTxnIsReplicated,
+                     null);
+                isWritableLocker = true;
+            } else {
+                locker = LockerFactory.getReadableLocker
+                    (this, txn,
+                     dbConfig.getTransactional(),
+                     true,   // retainNonTxnLocks
+                     false); // readCommittedIsolation
+                isWritableLocker = !dbConfig.getTransactional() ||
+                    locker.isTransactional();
+            }
+
+            database = envImpl.getDbTree().getDb(locker, databaseName, newDb);
+            boolean databaseExists =
+                (database != null) && !database.isDeleted();
+
+            if (databaseExists) {
+                if (dbConfig.getAllowCreate() &&
+                    dbConfig.getExclusiveCreate()) {
+                    /* We intended to create this, but it already exists. */
+                    dbIsClosing = true;
+                    throw new DatabaseException
+                        ("Database " + databaseName + " already exists");
+                }
+
+                newDb.initExisting(this, locker, database, dbConfig);
+            } else {
+                /* Release deleted DB. [#13415] */
+                envImpl.getDbTree().releaseDb(database);
+                database = null;
+
+                if (!allowReservedName &&
+                    DbTree.isReservedDbName(databaseName)) {
+                    throw new IllegalArgumentException
+                        (databaseName + " is a reserved database name.");
+                }
+
+                /* No database. Create if we're allowed to. */
+                if (dbConfig.getAllowCreate()) {
+
+                    /*
+                     * We're going to have to do some writing. Switch to a
+                     * writable locker if we don't already have one.  Note
+                     * that the existing locker does not hold the handle lock
+                     * we need, since the database was not found; therefore it
+                     * is OK to call operationEnd on the existing locker.
+                     */
+                    if (!isWritableLocker) {
+                        locker.operationEnd(OperationStatus.SUCCESS);
+                        locker = LockerFactory.getWritableLocker
+                            (this,
+                             txn,
+                             dbConfig.getTransactional(),
+                             true,  // retainNonTxnLocks
+                             autoTxnIsReplicated,
+                             null);
+                        isWritableLocker = true;
+                    }
+
+                    newDb.initNew(this, locker, databaseName, dbConfig);
+                } else {
+                    /* We aren't allowed to create this database. */
+                    throw new DatabaseNotFoundException("Database " +
+                                                        databaseName +
+                                                        " not found.");
+                }
+            }
+
+            operationOk = true;
+            addReferringHandle(newDb);
+        } finally {
+
+            /*
+             * Tell the transaction that this operation is over. Some types of
+             * transactions (BasicLocker and auto Txn) will actually finish. The
+             * transaction can decide if it is finishing and if it needs to
+             * transfer the db handle lock it owns to someone else.
+             */
+            if (locker != null) {
+                locker.setHandleLockOwner(operationOk, newDb, dbIsClosing);
+                locker.operationEnd(operationOk);
+            }
+
+            /*
+             * Normally releaseDb will be called when the DB is closed, or by
+             * abort if a transaction is used, or by setHandleLockOwner if a
+             * non-transactional locker is used.  But when the open operation
+             * fails and the Database.databaseImpl field was not initialized,
+             * we must call releaseDb here. [#13415]
+             */
+            if ((!operationOk || dbIsClosing) &&
+                newDb.getDatabaseImpl() == null) {
+                envImpl.getDbTree().releaseDb(database);
+            }
+        }
+    }
+
+    private void validateDbConfig(DatabaseConfig dbConfig, String databaseName)
+        throws IllegalArgumentException {
+
+        if ((dbConfig.getDeferredWrite() && dbConfig.getTemporary()) ||
+            (dbConfig.getDeferredWrite() && dbConfig.getTransactional()) ||
+            (dbConfig.getTemporary() && dbConfig.getTransactional())) {
+            throw new IllegalArgumentException
+                ("Attempted to open Database " + databaseName +
+                 " and two or more of the following exclusive properties" +
+                 " are true: deferredWrite, temporary, transactional");
+        }
+
+        /*
+         * R/W database handles on a replicated database must be transactional,
+         * for now. In the future we may support non-transactional database
+         * handles.
+         */
+        if (envImpl.isReplicated() &&
+            dbConfig.getReplicated() &&
+            !dbConfig.getReadOnly()) {
+            if (!dbConfig.getTransactional()) {
+                throw new IllegalArgumentException
+                    ("Read/Write Database instances for replicated " +
+                     "database " + databaseName + " must be transactional.");
+            }
+        }
+    }
+
+    private void validateDbConfigAgainstEnv(DatabaseConfig dbConfig,
+                                            String databaseName)
+        throws IllegalArgumentException {
+
+        /* Check operation's transactional status against the Environment */
+        if (dbConfig.getTransactional() &&
+            !(envImpl.isTransactional())) {
+            throw new IllegalArgumentException
+                ("Attempted to open Database " + databaseName +
+                 " transactionally, but parent Environment is" +
+                 " not transactional");
+        }
+
+        /* Check read/write status */
+        if (envImpl.isReadOnly() && (!dbConfig.getReadOnly())) {
+            throw new IllegalArgumentException
+                ("Attempted to open Database " + databaseName +
+                 " as writable but parent Environment is read only ");
+        }
+    }
+
+    /**
+     * Removes a database.
+     *
+     * <p>Applications should never remove databases with open {@link
+     * com.sleepycat.je.Database Database} handles.</p>
+     *
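+     * <p>For example, with auto-commit (illustrative; the database name is
+     * arbitrary):</p>
+     * <blockquote><pre>
+     *     env.removeDatabase(null, "myDatabase");
+     * </pre></blockquote>
+     *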
+     * @param txn For a transactional environment, an explicit transaction
+     * may be specified or null may be specified to use auto-commit.  For a
+     * non-transactional environment, null must be specified.
+     *
+     * @param databaseName The database to be removed.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseNotFoundException if the database file does not exist.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void removeDatabase(Transaction txn,
+                               String databaseName)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+
+        removeDatabaseInternal(txn,
+                               databaseName,
+                               envImpl.isReplicated()); // autoTxnIsReplicated
+    }
+
+    void removeDatabaseInternal(Transaction txn,
+                                String databaseName,
+                                boolean autoTxnIsReplicated)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+        DatabaseUtil.checkForNullParam(databaseName, "databaseName");
+
+        Locker locker = null;
+        boolean operationOk = false;
+        try {
+
+            /*
+             * Note: use env level isTransactional as proxy for the db
+             * isTransactional.
+             */
+            locker = LockerFactory.getWritableLocker
+                (this,
+                 txn,
+                 envImpl.isTransactional(),
+                 true,  // retainNonTxnLocks,
+                 autoTxnIsReplicated,
+                 null);
+            envImpl.getDbTree().dbRemove(locker,
+                                         databaseName,
+                                         null /*checkId*/);
+            operationOk = true;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        } finally {
+            if (locker != null) {
+                locker.operationEnd(operationOk);
+            }
+        }
+    }
+
+    /**
+     * Renames a database.
+     *
+     * <p>Applications should never rename databases with open {@link
+     * com.sleepycat.je.Database Database} handles.</p>
+     *
+     * @param txn For a transactional environment, an explicit transaction
+     * may be specified or null may be specified to use auto-commit.  For a
+     * non-transactional environment, null must be specified.
+     *
+     * @param databaseName The current name of the database.
+     *
+     * @param newName The new name of the database.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseNotFoundException if the database file does not exist.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void renameDatabase(Transaction txn,
+                               String databaseName,
+                               String newName)
+        throws DatabaseException {
+
+        DatabaseUtil.checkForNullParam(databaseName, "databaseName");
+        DatabaseUtil.checkForNullParam(newName, "newName");
+
+        checkHandleIsValid();
+        checkEnv();
+
+        Locker locker = null;
+        boolean operationOk = false;
+        try {
+
+            /*
+             * Note: use env level isTransactional as proxy for the db
+             * isTransactional.
+             */
+            locker = LockerFactory.getWritableLocker
+                (this, txn,
+                 envImpl.isTransactional(),
+                 true /*retainNonTxnLocks*/,
+                 envImpl.isReplicated(),  // autoTxnIsReplicated
+                 null);
+            envImpl.getDbTree().dbRename(locker, databaseName, newName);
+            operationOk = true;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        } finally {
+            if (locker != null) {
+                locker.operationEnd(operationOk);
+            }
+        }
+    }
+
+    /**
+     * Empties the database, discarding all records it contains.
+     *
+     * <p>When called on a database configured with secondary indices, the
+     * application is responsible for also truncating all associated secondary
+     * indices.</p>
+     *
+     * <p>Applications should never truncate databases with open {@link
+     * com.sleepycat.je.Database Database} handles.</p>
+     *
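+     * <p>For example, with auto-commit, counting the discarded records
+     * (illustrative; the database name is arbitrary):</p>
+     * <blockquote><pre>
+     *     long count = env.truncateDatabase(null, "myDatabase", true);
+     * </pre></blockquote>
+     *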
+     * @param txn For a transactional environment, an explicit transaction may
+     * be specified or null may be specified to use auto-commit.  For a
+     * non-transactional environment, null must be specified.
+     *
+     * @param databaseName The database to be truncated.
+     *
+     * @param returnCount If true, count and return the number of records
+     * discarded.
+     *
+     * @return The number of records discarded, or -1 if returnCount is false.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws DatabaseNotFoundException if the database file does not exist.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public long truncateDatabase(Transaction txn,
+                                 String databaseName,
+                                 boolean returnCount)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+
+        return truncateDatabaseInternal
+            (txn, databaseName, returnCount,
+             envImpl.isReplicated()); // autoTxnIsReplicated
+    }
+
+    long truncateDatabaseInternal(Transaction txn,
+                                  String databaseName,
+                                  boolean returnCount,
+                                  boolean autoTxnIsReplicated)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+        DatabaseUtil.checkForNullParam(databaseName, "databaseName");
+
+        Locker locker = null;
+        boolean operationOk = false;
+        long count = 0;
+        try {
+
+            /*
+             * Note: use env level isTransactional as proxy for the db
+             * isTransactional.
+             */
+            locker = LockerFactory.getWritableLocker
+                (this, txn,
+                 envImpl.isTransactional(),
+                 true /*retainNonTxnLocks*/,
+                 autoTxnIsReplicated,
+                 null);
+
+            count = envImpl.getDbTree().truncate(locker,
+                                                 databaseName,
+                                                 returnCount);
+
+            operationOk = true;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        } finally {
+            if (locker != null) {
+                locker.operationEnd(operationOk);
+            }
+        }
+        return count;
+    }
+
+    /**
+     * For unit testing.  Returns the current memory usage in bytes for all
+     * btrees in the envImpl.
+     */
+    long getMemoryUsage()
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+
+        return envImpl.getMemoryBudget().getCacheMemoryUsage();
+    }
+
+    /**
+     * Returns the database environment's home directory.
+     *
+     * @return The database environment's home directory.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public File getHome()
+        throws DatabaseException {
+
+        checkHandleIsValid();
+
+        return envImpl.getEnvironmentHome();
+    }
+
+    /*
+     * Transaction management
+     */
+
+    /**
+     * Returns the default txn config for this environment handle.
+     */
+    TransactionConfig getDefaultTxnConfig() {
+        return defaultTxnConfig;
+    }
+
+    /**
+     * Copies the handle properties out of the config properties, and
+     * initializes the default transaction config.
+     */
+    private void copyToHandleConfig(EnvironmentMutableConfig useConfig,
+                                    EnvironmentConfig initStaticConfig)
+        throws DatabaseException {
+
+        /*
+         * Create the new objects, initialize them, then change the instance
+         * fields.  This avoids synchronization issues.
+         */
+        EnvironmentMutableConfig newHandleConfig =
+            new EnvironmentMutableConfig();
+        useConfig.copyHandlePropsTo(newHandleConfig);
+        this.handleConfig = newHandleConfig;
+
+        TransactionConfig newTxnConfig =
+            TransactionConfig.DEFAULT.cloneConfig();
+        newTxnConfig.setNoSync(handleConfig.getTxnNoSync());
+        newTxnConfig.setWriteNoSync(handleConfig.getTxnWriteNoSync());
+        newTxnConfig.setDurability(handleConfig.getDurability());
+        newTxnConfig.setConsistencyPolicy
+            (handleConfig.getConsistencyPolicy());
+        if (initStaticConfig != null) {
+            newTxnConfig.setSerializableIsolation
+                (initStaticConfig.getTxnSerializableIsolation());
+            newTxnConfig.setReadCommitted
+                (initStaticConfig.getTxnReadCommitted());
+        } else {
+            newTxnConfig.setSerializableIsolation
+                (defaultTxnConfig.getSerializableIsolation());
+            newTxnConfig.setReadCommitted
+                (defaultTxnConfig.getReadCommitted());
+        }
+        this.defaultTxnConfig = newTxnConfig;
+    }
+
+    /**
+     * Creates a new transaction in the database environment.
+     *
+     * <p>Transaction handles are free-threaded; a single handle may be
+     * used concurrently by multiple threads.</p>
+     *
+     * <p>Cursors may not span transactions; that is, each cursor must be
+     * opened and closed within a single transaction. The parent parameter is a
+     * placeholder for nested transactions, and must currently be null.</p>
+     *
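+     * <p>A typical usage pattern (an illustrative sketch):</p>
+     * <blockquote><pre>
+     *     Transaction txn = env.beginTransaction(null, null);
+     *     try {
+     *         // reads and writes performed under txn ...
+     *         txn.commit();
+     *         txn = null;
+     *     } finally {
+     *         if (txn != null) {
+     *             txn.abort(); // commit did not complete
+     *         }
+     *     }
+     * </pre></blockquote>
+     *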
+     * @param txnConfig The transaction attributes.  If null, default
+     * attributes are used.
+     *
+     * @return The newly created transaction's handle.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public Transaction beginTransaction(Transaction parent,
+                                        TransactionConfig txnConfig)
+        throws DatabaseException {
+
+        try {
+            return beginTransactionInternal(parent, txnConfig);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    private Transaction beginTransactionInternal(Transaction parent,
+                                                 TransactionConfig txnConfig)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+
+        if (!envImpl.isTransactional()) {
+            throw new UnsupportedOperationException
+                ("Transactions can not be used in a non-transactional " +
+                 "environment");
+        }
+
+        checkTxnConfig(txnConfig);
+
+        /*
+         * Apply txn config defaults.  We don't need to clone unless we have to
+         * apply the env default, since we don't hold onto a txn config
+         * reference.
+         */
+        TransactionConfig useConfig = null;
+        if (txnConfig == null) {
+            useConfig = defaultTxnConfig;
+        } else {
+            if (defaultTxnConfig.getNoSync() ||
+                defaultTxnConfig.getWriteNoSync()) {
+
+                /*
+                 * The environment sync settings have been set, check if any
+                 * were set in the user's txn config. If none were set in the
+                 * user's config, apply the environment defaults
+                 */
+                if (!txnConfig.getNoSync() &&
+                    !txnConfig.getSync() &&
+                    !txnConfig.getWriteNoSync()) {
+                    useConfig = txnConfig.cloneConfig();
+                    if (defaultTxnConfig.getWriteNoSync()) {
+                        useConfig.setWriteNoSync(true);
+                    } else {
+                        useConfig.setNoSync(true);
+                    }
+                }
+            }
+
+            if ((defaultTxnConfig.getDurability() != null) &&
+                 (txnConfig.getDurability() == null)) {
+                /*
+                 * Inherit transaction durability from the environment in the
+                 * absence of an explicit transaction config durability.
+                 */
+                if (useConfig == null) {
+                    useConfig = txnConfig.cloneConfig();
+                }
+                useConfig.setDurability(defaultTxnConfig.getDurability());
+            }
+
+            /* Apply isolation level default. */
+            if (!txnConfig.getSerializableIsolation() &&
+                !txnConfig.getReadCommitted() &&
+                !txnConfig.getReadUncommitted()) {
+                if (defaultTxnConfig.getSerializableIsolation()) {
+                    if (useConfig == null) {
+                        useConfig = txnConfig.cloneConfig();
+                    }
+                    useConfig.setSerializableIsolation(true);
+                } else if (defaultTxnConfig.getReadCommitted()) {
+                    if (useConfig == null) {
+                        useConfig = txnConfig.cloneConfig();
+                    }
+                    useConfig.setReadCommitted(true);
+                }
+            }
+
+            /* No environment level defaults applied. */
+            if (useConfig == null) {
+                useConfig = txnConfig;
+            }
+        }
+        Txn internalTxn = envImpl.txnBegin(parent, useConfig);
+
+        /*
+         * Currently all user transactions in a replicated environment are
+         * replicated.
+         */
+        internalTxn.setRepContext(envImpl.isReplicated() ?
+                                  ReplicationContext.MASTER :
+                                  ReplicationContext.NO_REPLICATE);
+
+        Transaction txn = new Transaction(this, internalTxn);
+        addReferringHandle(txn);
+        return txn;
+    }
+
+    /**
+     * Checks the txnConfig object to ensure that it is correctly configured
+     * and is compatible with the configuration of the Environment.
+     * is compatible with the configuration of the Environment.
+     *
+     * @param txnConfig the configuration being checked.
+     *
+     * @throws IllegalArgumentException if any of the checks fail.
+     */
+    private void checkTxnConfig(TransactionConfig txnConfig)
+        throws IllegalArgumentException {
+
+        if (txnConfig == null) {
+            return;
+        }
+        if ((txnConfig.getSerializableIsolation() &&
+             txnConfig.getReadUncommitted()) ||
+            (txnConfig.getSerializableIsolation() &&
+             txnConfig.getReadCommitted()) ||
+            (txnConfig.getReadUncommitted() &&
+             txnConfig.getReadCommitted())) {
+            throw new IllegalArgumentException
+                ("Only one may be specified: SerializableIsolation, " +
+                 "ReadCommitted or ReadUncommitted");
+        }
+        if ((txnConfig.getDurability() != null) &&
+            ((defaultTxnConfig.getSync() ||
+              defaultTxnConfig.getNoSync() ||
+              defaultTxnConfig.getWriteNoSync()))) {
+            throw new IllegalArgumentException
+                ("Mixed use of deprecated durability API for the " +
+                 "Environment with the new durability API for " +
+                 "TransactionConfig.setDurability()");
+        }
+        if ((defaultTxnConfig.getDurability() != null) &&
+            ((txnConfig.getSync() ||
+              txnConfig.getNoSync() ||
+              txnConfig.getWriteNoSync()))) {
+            throw new IllegalArgumentException
+                ("Mixed use of new durability API for the " +
+                 "Environment with the deprecated durability API for " +
+                 "TransactionConfig.");
+        }
+    }
+
+    /**
+     * Synchronously checkpoint the database environment.
+     *
+     * <p>This is an optional action for the application since this activity
+     * is, by default, handled by a database environment owned background
+     * thread.</p>
+     *
+     * @param ckptConfig The checkpoint attributes.  If null, default
+     * attributes are used.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void checkpoint(CheckpointConfig ckptConfig)
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            CheckpointConfig useConfig =
+                (ckptConfig == null) ? CheckpointConfig.DEFAULT : ckptConfig;
+
+            envImpl.invokeCheckpoint(useConfig,
+                                     false, // flushAll
+                                     "api");
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Synchronously flushes database environment databases to stable storage.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void sync()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            CheckpointConfig config = new CheckpointConfig();
+            config.setForce(true);
+            envImpl.invokeCheckpoint(config,
+                                     true,  // flushAll
+                                     "sync");
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Synchronously invokes database environment log cleaning.  This method is
+     * called periodically by the cleaner daemon thread.
+     *
+     * <p>Zero or more log files will be cleaned as necessary to bring the disk
+     * space utilization of the environment above the configured minimum
+     * utilization threshold.  The threshold is determined by the
+     * <code>je.cleaner.minUtilization</code> configuration setting.</p>
+     *
+     * <p>Note that <code>cleanLog</code> does not perform the complete task of
+     * cleaning a log file.  Eviction and checkpointing migrate records that
+     * are marked by the cleaner, and a full checkpoint is necessary following
+     * cleaning before cleaned files will be deleted (or renamed).  Checkpoints
+     * normally occur periodically and when the environment is closed.</p>
+     *
+     * <p>This is an optional action for the application since this activity
+     * is, by default, handled by a database environment owned background
+     * thread.</p>
+     *
+     * <p>There are two intended use cases for the <code>cleanLog</code>
+     * method.  The first case is where the application wishes to disable the
+     * built-in cleaner thread.  To replace the functionality of the cleaner
+     * thread, the application should call <code>cleanLog</code>
+     * periodically.</p>
+     *
+     * <p>In the second use case, "batch cleaning", the application disables
+     * the cleaner thread for maximum performance during active periods, and
+     * calls <code>cleanLog</code> during periods when the application is
+     * quiescent or less active than usual.  If the cleaner has a large number
+     * of files to clean, <code>cleanLog</code> may stop without reaching the
+     * target utilization; to ensure that the target utilization is reached,
+     * <code>cleanLog</code> should be called in a loop until it returns
+     * zero.  To complete the work of cleaning, a checkpoint is then
+     * necessary.  An example of performing batch cleaning follows.</p>
+     *
+     * <pre>
+     *       Environment env;
+     *       boolean anyCleaned = false;
+     *       while (env.cleanLog() &gt; 0) {
+     *           anyCleaned = true;
+     *       }
+     *       if (anyCleaned) {
+     *           CheckpointConfig force = new CheckpointConfig();
+     *           force.setForce(true);
+     *           env.checkpoint(force);
+     *       }
+     * </pre>
+     *
+     * @return The number of log files that were cleaned, and that will be
+     * deleted (or renamed) when a qualifying checkpoint occurs.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public int cleanLog()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            return envImpl.invokeCleaner();
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Synchronously invokes the mechanism for keeping memory usage within the
+     * cache size boundaries.
+     *
+     * <p>This is an optional action for the application since this activity
+     * is, by default, handled by a database environment owned background
+     * thread.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void evictMemory()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            envImpl.invokeEvictor();
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Synchronously invokes the compressor mechanism which compacts in memory
+     * data structures after delete operations.
+     *
+     * <p>This is an optional action for the application since this activity
+     * is, by default, handled by a database environment owned background
+     * thread.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void compress()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            envImpl.invokeCompressor();
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns this object's configuration.
+     *
+     * @return This object's configuration.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public EnvironmentConfig getConfig()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            EnvironmentConfig config = envImpl.cloneConfig();
+            handleConfig.copyHandlePropsTo(config);
+            config.fillInEnvironmentGeneratedProps(envImpl);
+            return config;
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Sets database environment attributes.
+     *
+     * <p>Attributes only apply to a specific Environment object and are not
+     * necessarily shared by other Environment objects accessing this
+     * database environment.</p>
+     *
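+     * <p>For example, to change a mutable attribute (illustrative; the
+     * cache size value is arbitrary):</p>
+     * <blockquote><pre>
+     *     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+     *     mutableConfig.setCacheSize(20 * 1024 * 1024);
+     *     env.setMutableConfig(mutableConfig);
+     * </pre></blockquote>
+     *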
+     * @param mutableConfig The database environment attributes.  If null,
+     * default attributes are used.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public synchronized void setMutableConfig(EnvironmentMutableConfig
+                                              mutableConfig)
+        throws DatabaseException {
+
+        /*
+         * This method is synchronized so that we atomically call both
+         * EnvironmentImpl.setMutableConfig and copyToHandleConfig. This ensures
+         * that the handle and the EnvironmentImpl properties match.
+         */
+        try {
+            checkHandleIsValid();
+            DatabaseUtil.checkForNullParam(mutableConfig, "mutableConfig");
+
+            /*
+             * Change the mutable properties specified in the given
+             * configuration.
+             */
+            envImpl.setMutableConfig(mutableConfig);
+
+            /* Reset the handle config properties. */
+            copyToHandleConfig(mutableConfig, null);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns database environment attributes.
+     *
+     * @return Environment attributes.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public EnvironmentMutableConfig getMutableConfig()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            EnvironmentMutableConfig config =
+                envImpl.cloneMutableConfig();
+            handleConfig.copyHandlePropsTo(config);
+            config.fillInEnvironmentGeneratedProps(envImpl);
+            return config;
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Not public yet, since there's nothing to upgrade.
+     */
+    void upgrade()
+        throws DatabaseException {
+
+        /* Do nothing.  Nothing to upgrade yet. */
+    }
+
+    /**
+     * Returns the general database environment statistics.
+     *
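+     * <p>For example (illustrative):</p>
+     * <blockquote><pre>
+     *     StatsConfig statsConfig = new StatsConfig();
+     *     statsConfig.setClear(true); // reset counters after reading
+     *     EnvironmentStats stats = env.getStats(statsConfig);
+     *     System.out.println(stats);
+     * </pre></blockquote>
+     *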
+     * @param config The general statistics attributes.  If null, default
+     * attributes are used.
+     *
+     * @return The general database environment statistics.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public EnvironmentStats getStats(StatsConfig config)
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+        try {
+            StatsConfig useConfig =
+                (config == null) ? StatsConfig.DEFAULT : config;
+
+            if (envImpl != null) {
+                return envImpl.loadStats(useConfig);
+            } else {
+                return new EnvironmentStats();
+            }
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns the database environment's locking statistics.
+     *
+     * @param config The locking statistics attributes.  If null, default
+     * attributes are used.
+     *
+     * @return The database environment's locking statistics.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public LockStats getLockStats(StatsConfig config)
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            StatsConfig useConfig =
+                (config == null) ? StatsConfig.DEFAULT : config;
+
+            return envImpl.lockStat(useConfig);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns the database environment's transactional statistics.
+     *
+     * @param config The transactional statistics attributes.  If null,
+     * default attributes are used.
+     *
+     * @return The database environment's transactional statistics.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public TransactionStats getTransactionStats(StatsConfig config)
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            StatsConfig useConfig =
+                (config == null) ? StatsConfig.DEFAULT : config;
+            return envImpl.txnStat(useConfig);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns a List of database names for the database environment.
+     *
+     * <p>Each element in the list is a String.</p>
+     *
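+     * <p>For example, to print every database name (a sketch, assuming an
+     * already-open handle named <code>env</code>):</p>
+     *
+     * <blockquote><pre>
+     *     for (String name : env.getDatabaseNames()) {
+     *         System.out.println(name);
+     *     }
+     * </pre></blockquote>
+     *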
+     * @return A List of database names for the database environment.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public List<String> getDatabaseNames()
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            return envImpl.getDbTree().getDbNames();
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * For internal use only.
+     * @hidden
+     * Scans raw log entries in the JE log between two given points, passing
+     * all records for a given set of databases to the scanRecord method of the
+     * given LogScanner object.
+     *
+     * <p>EnvironmentStats.getEndOfLog should be used to get the end-of-log at
+     * a particular point in time.  Values returned by that method can be
+     * passed for the startPosition and endPosition parameters.</p>
+     *
+     * <p><em>WARNING:</em> This interface is meant for low level processing of
+     * log records, not for application level queries. See LogScanner for
+     * further restrictions!</p>
+     *
+     * @param startPosition the log position at which to start scanning. If no
+     * such log position exists, the first existing position greater than it
+     * (on a forward scan) or less than it (on a backward scan) is used.
+     *
+     * @param endPosition the log position at which to stop scanning. If no
+     * such log position exists, the first existing position less than it (on
+     * a forward scan) or greater than it (on a backward scan) is used.
+     *
+     * @param config the parameters for this scanLog invocation.
+     *
+     * @param scanner is an object of a class that implements the LogScanner
+     * interface, to process scanned records.
+     *
+     * @return true if the scan was completed, or false if the scan was
+     * canceled because LogScanner.scanRecord returned false.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public boolean scanLog(long startPosition,
+                           long endPosition,
+                           LogScanConfig config,
+                           LogScanner scanner)
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+
+            if (startPosition < 0 ||
+                endPosition < 0) {
+                throw new IllegalArgumentException
+                    ("The start or end position argument is negative.");
+            }
+
+            if (config.getForwards()) {
+                if (startPosition >= endPosition) {
+                    throw new IllegalArgumentException
+                        ("The startPosition (" + startPosition +
+                        ") is not before the endPosition (" +
+                        endPosition + ") on a forward scan.");
+                }
+            } else {
+                if (startPosition < endPosition) {
+                    throw new IllegalArgumentException
+                        ("The startPosition (" +
+                         startPosition +
+                         ") is not after the endPosition (" +
+                         endPosition + ") on a backward scan.");
+                }
+            }
+
+            return envImpl.scanLog(startPosition, endPosition,
+                                   config, scanner);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns whether the database environment is consistent and correct.
+     *
+     * <p>Verification is an expensive operation that should normally only be
+     * used for troubleshooting and debugging.</p>
+     *
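+     * <p>A minimal sketch, assuming an already-open handle named
+     * <code>env</code>:</p>
+     *
+     * <blockquote><pre>
+     *     VerifyConfig verifyConfig = new VerifyConfig();
+     *     verifyConfig.setPrintInfo(true);   // print verbose progress output
+     *     boolean ok = env.verify(verifyConfig, System.out);
+     * </pre></blockquote>
+     *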
+     * @param config The verification attributes.  If null, default
+     * attributes are used.
+     *
+     * @param out The stream to which verification debugging information is
+     * written.
+     *
+     * @return true if the database environment is consistent and correct.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public boolean verify(VerifyConfig config, PrintStream out)
+        throws DatabaseException {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+            VerifyConfig useConfig =
+                (config == null) ? VerifyConfig.DEFAULT : config;
+            return envImpl.verify(useConfig, out);
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Returns the transaction associated with this thread if implied
+     * transactions are being used.  Implied transactions are used in an XA or
+     * JCA "Local Transaction" environment.  In an XA environment the
+     * XAEnvironment.start() entrypoint causes a transaction to be created and
+     * become associated with the calling thread.  Subsequent API calls
+     * implicitly use that transaction.  XAEnvironment.end() causes the
+     * transaction to be disassociated from the thread.  In a JCA Local
+     * Transaction environment, the call to JEConnectionFactory.getConnection()
+     * causes a new transaction to be created and associated with the calling
+     * thread.
+     */
+    public Transaction getThreadTransaction()
+        throws DatabaseException {
+
+        checkHandleIsValid();
+        checkEnv();
+        try {
+            return envImpl.getTxnManager().getTxnForThread();
+        } catch (Error E) {
+            if (envImpl != null) {
+                envImpl.invalidate(E);
+            }
+            throw E;
+        }
+    }
+
+    /**
+     * Sets the transaction associated with this thread if implied transactions
+     * are being used.  Implied transactions are used in an XA or JCA "Local
+     * Transaction" environment.  In an XA environment the
+     * XAEnvironment.start() entrypoint causes a transaction to be created and
+     * become associated with the calling thread.  Subsequent API calls
+     * implicitly use that transaction.  XAEnvironment.end() causes the
+     * transaction to be disassociated from the thread.  In a JCA Local
+     * Transaction environment, the call to JEConnectionFactory.getConnection()
+     * causes a new transaction to be created and associated with the calling
+     * thread.
+     */
+    public void setThreadTransaction(Transaction txn) {
+
+        try {
+            checkHandleIsValid();
+            checkEnv();
+        } catch (DatabaseException databaseException) {
+            /* API compatibility hack. See SR 15861 for details. */
+            throw new IllegalStateException(databaseException.getMessage());
+        }
+        try {
+            envImpl.getTxnManager().setTxnForThread(txn);
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * Non-public API -- helpers.
+     */
+
+    /**
+     * Lets the Environment remember what's opened against it.
+     */
+    void addReferringHandle(Database db) {
+        referringDbs.add(db);
+    }
+
+    /**
+     * Lets the Environment remember what's opened against it.
+     */
+    void addReferringHandle(Transaction txn) {
+        referringDbTxns.add(txn);
+    }
+
+    /**
+     * The referring db has been closed.
+     */
+    void removeReferringHandle(Database db) {
+        referringDbs.remove(db);
+    }
+
+    /**
+     * The referring Transaction has been closed.
+     */
+    void removeReferringHandle(Transaction txn) {
+        referringDbTxns.remove(txn);
+    }
+
+    /**
+     * For internal use only.
+     * @hidden
+     * @throws DatabaseException if the environment is not open.
+     */
+    public void checkHandleIsValid()
+        throws DatabaseException {
+
+        if (!valid) {
+            throw new DatabaseException
+                ("Attempt to use a non-open Environment object.");
+        }
+    }
+
+    /*
+     * Debugging aids.
+     */
+
+    /**
+     * Internal entrypoint.
+     */
+    EnvironmentImpl getEnvironmentImpl() {
+        return envImpl;
+    }
+
+    /**
+     * For internal use only.
+     * @hidden
+     * Throws if the envImpl is invalid.
+     */
+    protected void checkEnv()
+        throws DatabaseException, RunRecoveryException {
+
+        if (envImpl == null) {
+            return;
+        }
+        envImpl.checkIfInvalid();
+        envImpl.checkNotClosed();
+    }
+}
+
diff --git a/src/com/sleepycat/je/EnvironmentConfig.java b/src/com/sleepycat/je/EnvironmentConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..bea70fc108c9e22a04672733eb0c51e07de82ff6
--- /dev/null
+++ b/src/com/sleepycat/je/EnvironmentConfig.java
@@ -0,0 +1,2100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentConfig.java,v 1.52.2.4 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Properties;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+
+/**
+ * Specifies the attributes of an environment.
+ *
+ * <p>To change the default settings for a database environment, an application
+ * creates a configuration object, customizes settings and uses it for
+ * environment construction. The set methods of this class validate the
+ * configuration values when the method is invoked.  An
+ * IllegalArgumentException is thrown if the value is not valid for that
+ * attribute.</p>
+ *
+ * <p>All commonly used environment attributes have convenience setter/getter
+ * methods defined in this class.  For example, to change the default
+ * transaction timeout setting for an environment, the application should do
+ * the following:</p>
+ *
+ * <blockquote><pre>
+ *     // customize an environment configuration
+ *     EnvironmentConfig envConfig = new EnvironmentConfig();
+ *     // Will throw IllegalArgumentException if the timeout value is invalid.
+ *     envConfig.setTxnTimeout(10000);
+ *     // Open the environment.
+ *     Environment myEnvironment = new Environment(home, envConfig);
+ * </pre></blockquote>
+ *
+ * <p>Additional parameters are described by the parameter name String
+ * constants in this class. These additional parameters will not be needed by
+ * most applications. This category of properties can be specified for the
+ * EnvironmentConfig object through a Properties object read by
+ * EnvironmentConfig(Properties), or individually through
+ * EnvironmentConfig.setConfigParam().</p>
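+ *
+ * <p>A sketch of the Properties-based approach (the cache size shown is
+ * arbitrary):</p>
+ *
+ * <blockquote><pre>
+ *     Properties props = new Properties();
+ *     props.setProperty("je.maxMemory", "104857600");  // 100 MB cache
+ *     EnvironmentConfig envConfig = new EnvironmentConfig(props);
+ * </pre></blockquote>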
+ *
+ * <p>For example, an application can change the default btree node size
+ * with:</p>
+ *
+ * <blockquote><pre>
+ *     envConfig.setConfigParam("je.nodeMaxEntries", "256");
+ * </pre></blockquote>
+ *
+ * <p>Environment configuration follows this order of precedence:</p>
+ * <ol>
+ * <li>Configuration parameters specified in
+ * &lt;environment home&gt;/je.properties take first precedence.
+ * <li>Configuration parameters set in the EnvironmentConfig object used at
+ * Environment construction are next.
+ * <li>Any configuration parameters not set by the application are set to
+ * system defaults, described along with the parameter name String constants
+ * in this class.</li>
+ * </ol>
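+ *
+ * <p>For example, a <code>je.properties</code> file in the environment home
+ * directory might contain lines such as the following (a sketch; the values
+ * are arbitrary), and these settings would take precedence over the same
+ * parameters set through an EnvironmentConfig object:</p>
+ *
+ * <blockquote><pre>
+ *     je.maxMemoryPercent=75
+ *     je.cleaner.minUtilization=60
+ * </pre></blockquote>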
+ *
+ * <p>An EnvironmentConfig can be used to specify both mutable and immutable
+ * environment properties.  Immutable properties may be specified when the
+ * first Environment handle (instance) is opened for a given physical
+ * environment.  When more handles are opened for the same environment, the
+ * following rules apply:</p>
+ *
+ * <ol> <li>Immutable properties must equal the original values specified when
+ * constructing an Environment handle for an already open environment.  When a
+ * mismatch occurs, an exception is thrown.
+ *
+ * <li>Mutable properties are ignored when constructing an Environment handle
+ * for an already open environment.  </ol>
+ *
+ * <p>After an Environment has been constructed, its mutable properties may be
+ * changed using {@link Environment#setMutableConfig}.  See {@link
+ * EnvironmentMutableConfig} for a list of mutable properties; all other
+ * properties are immutable.  Whether a property is mutable or immutable is
+ * also described along with the parameter name String constants in this
+ * class.</p>
+ *
+ * <h4>Getting the Current Environment Properties</h4>
+ *
+ * To get the current "live" properties of an environment after constructing it
+ * or changing its properties, you must call {@link Environment#getConfig} or
+ * {@link Environment#getMutableConfig}.  The original EnvironmentConfig or
+ * EnvironmentMutableConfig object used to set the properties is not kept up to
+ * date as properties are changed, and does not reflect property validation or
+ * properties that are computed.
+ */
+public class EnvironmentConfig extends EnvironmentMutableConfig {
+
+    /**
+     * @hidden
+     * For internal use, to allow null as a valid value for the config
+     * parameter.
+     */
+    public static final EnvironmentConfig DEFAULT = new EnvironmentConfig();
+
+    /**
+     * The {@link #setCacheSize CacheSize} property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #setCacheSize
+     */
+    public static final String MAX_MEMORY = "je.maxMemory";
+
+    /**
+     * The {@link #setCachePercent CachePercent} property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>60</td>
+     * <td>1</td>
+     * <td>90</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #setCachePercent
+     */
+    public static final String MAX_MEMORY_PERCENT = "je.maxMemoryPercent";
+
+    /**
+     * The {@link #setSharedCache SharedCache} property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String SHARED_CACHE = "je.sharedCache";
+
+    /**
+     * If true, a checkpoint is forced following recovery, even if the
+     * log ends with a checkpoint.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_RECOVERY_FORCE_CHECKPOINT =
+        "je.env.recoveryForceCheckpoint";
+
+    /**
+     * If true, starts up the INCompressor thread.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_RUN_IN_COMPRESSOR =
+        "je.env.runINCompressor";
+
+    /**
+     * If true, starts up the checkpointer thread.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_RUN_CHECKPOINTER = "je.env.runCheckpointer";
+
+    /**
+     * If true, starts up the cleaner thread.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_RUN_CLEANER = "je.env.runCleaner";
+
+    /**
+     * The maximum number of read operations performed by JE background
+     * activities (e.g., cleaning) before sleeping to ensure that application
+     * threads can perform I/O.  If zero (the default) then no limitation on
+     * I/O is enforced.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #ENV_BACKGROUND_SLEEP_INTERVAL
+     */
+    public static final String ENV_BACKGROUND_READ_LIMIT =
+        "je.env.backgroundReadLimit";
+
+    /**
+     * The maximum number of write operations performed by JE background
+     * activities (e.g., checkpointing and eviction) before sleeping to ensure
+     * that application threads can perform I/O.  If zero (the default) then no
+     * limitation on I/O is enforced.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #ENV_BACKGROUND_SLEEP_INTERVAL
+     */
+    public static final String ENV_BACKGROUND_WRITE_LIMIT =
+        "je.env.backgroundWriteLimit";
+
+    /**
+     * The maximum time in milliseconds to wait for an API call to start
+     * executing when the API is locked.  The default timeout is indefinite.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>{@value java.lang.Integer#MAX_VALUE}</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_LOCKOUT_TIMEOUT = "je.env.lockoutTimeout";
+
+    /**
+     * The number of microseconds that JE background activities will sleep when
+     * the {@link #ENV_BACKGROUND_READ_LIMIT} or {@link
+     * #ENV_BACKGROUND_WRITE_LIMIT} is reached.  If {@link
+     * #ENV_BACKGROUND_READ_LIMIT} and {@link #ENV_BACKGROUND_WRITE_LIMIT} are
+     * zero, this setting is not used.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>Yes</td>
+     * <td>1000</td>
+     * <td>1000</td>
+     * <td>{@value java.lang.Long#MAX_VALUE}</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_BACKGROUND_SLEEP_INTERVAL =
+        "je.env.backgroundSleepInterval";
+
+    /**
+     * Debugging support: check leaked locks and txns at env close.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_CHECK_LEAKS = "je.env.checkLeaks";
+
+    /**
+     * Debugging support: call Thread.yield() at strategic points.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_FORCED_YIELD = "je.env.forcedYield";
+
+    /**
+     * If true, create an environment that is capable of performing
+     * transactions.  If true is not passed, transactions may not be used.  For
+     * licensing purposes, the use of this method distinguishes the use of the
+     * Transactional product.  Note that if transactions are not used,
+     * specifying true does not create additional overhead in the environment.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_IS_TRANSACTIONAL = "je.env.isTransactional";
+
+    /**
+     * If true, create the environment with record locking.  This property
+     * should be set to false only in special circumstances when it is safe to
+     * run without record locking.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_IS_LOCKING = "je.env.isLocking";
+
+    /**
+     * If true, open the environment read-only.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_READ_ONLY = "je.env.isReadOnly";
+
+    /**
+     * If true, use latches instead of synchronized blocks to implement the
+     * lock table and log write mutexes. Latches require that threads queue to
+     * obtain the mutex in question and therefore guarantee that there will be
+     * no mutex starvation, but do incur a performance penalty. Latches should
+     * not be necessary in most cases, so synchronized blocks are the default.
+     * An application that puts a heavy load on JE from threads with different
+     * thread priorities might find it useful to use latches.  In a Java 5 JVM,
+     * where java.util.concurrent.locks.ReentrantLock is used for the latch
+     * implementation, this parameter will determine whether they are 'fair' or
+     * not.  This parameter is 'static' across all environments.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_FAIR_LATCHES = "je.env.fairLatches";
+
+    /**
+     * If true, enable eviction of metadata for closed databases.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ENV_DB_EVICTION = "je.env.dbEviction";
+
+    /**
+     * By default, JE passes an entire log record to the Adler32 class for
+     * checksumming.  This can cause problems with the GC in some cases if the
+     * records are large and there is concurrency.  Setting this parameter will
+     * cause JE to pass chunks of the log record to the checksumming class so
+     * that the GC does not block.  0 means do not chunk.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>1048576 (1M)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String ADLER32_CHUNK_SIZE = "je.adler32.chunkSize";
+
+    /**
+     * The total memory taken by log buffers, in bytes. If 0, use 7% of
+     * je.maxMemory. If 0 and je.sharedCache=true, use 7% divided by N where N
+     * is the number of environments sharing the global cache.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>0</td>
+     * <td>{@value
+     * com.sleepycat.je.config.EnvironmentParams#LOG_MEM_SIZE_MIN}</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_TOTAL_BUFFER_BYTES =
+        "je.log.totalBufferBytes";
+
+    /**
+     * The number of JE log buffers.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>{@value
+     * com.sleepycat.je.config.EnvironmentParams#NUM_LOG_BUFFERS_DEFAULT}</td>
+     * <td>2</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_NUM_BUFFERS = "je.log.numBuffers";
+
+    /**
+     * The maximum starting size of a JE log buffer.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>1048576 (1M)</td>
+     * <td>1024 (1K)</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_BUFFER_SIZE = "je.log.bufferSize";
+
+    /**
+     * The buffer size for faulting in objects from disk, in bytes.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>2048 (2K)</td>
+     * <td>32</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_FAULT_READ_SIZE = "je.log.faultReadSize";
+
+    /**
+     * The read buffer size for log iterators, which are used when scanning the
+     * log during activities like log cleaning and environment open, in bytes.
+     * This may grow as the system encounters larger log entries.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>8192 (8K)</td>
+     * <td>128</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_ITERATOR_READ_SIZE =
+        "je.log.iteratorReadSize";
+
+    /**
+     * The maximum read buffer size for log iterators, which are used when
+     * scanning the log during activities like log cleaning and environment
+     * open, in bytes.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>16777216 (16M)</td>
+     * <td>128</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_ITERATOR_MAX_SIZE =
+        "je.log.iteratorMaxSize";
+
+    /**
+     * The maximum size of each individual JE log file, in bytes.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td><td>JVM</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>10000000 (10M)</td>
+     * <td>1000000 (1M)</td>
+     * <td>4294967296 (4G)</td>
+     * <td>Conventional JVM</td>
+     * </tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>100000 (100K)</td>
+     * <td>10000 (10K)</td>
+     * <td>4294967296 (4G)</td>
+     * <td>Dalvik JVM</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_FILE_MAX = "je.log.fileMax";
+
+    /**
+     * If true, perform a checksum check when reading entries from log.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_CHECKSUM_READ = "je.log.checksumRead";
+
+    /**
+     * If true, perform a checksum verification just before and after writing
+     * to the log.  This is primarily used for debugging.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_VERIFY_CHECKSUMS = "je.log.verifyChecksums";
+
+    /**
+     * If true, operates in an in-memory test mode without flushing the log to
+     * disk. An environment directory must be specified, but it need not exist
+     * and no files are written.  The system operates until it runs out of
+     * memory, at which time an OutOfMemoryError is thrown.  Because the entire
+     * log is kept in memory, this mode is normally useful only for testing.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_MEM_ONLY = "je.log.memOnly";
+
+    /**
+     * The size of the file handle cache.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>100</td>
+     * <td>3</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_FILE_CACHE_SIZE = "je.log.fileCacheSize";
+
+    /**
+     * The timeout limit for group file sync, in microseconds.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>500000 (0.5 sec)</td>
+     * <td>10000 (0.01 sec)</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_FSYNC_TIMEOUT = "je.log.fsyncTimeout";
+
+    /**
+     * If true (default is false) O_DSYNC is used to open JE log files.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOG_USE_ODSYNC = "je.log.useODSYNC";
+
+    /**
+     * If true (default is false) NIO is used for all file I/O.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     * @deprecated NIO is no longer used by JE and this parameter has no
+     * effect.
+     */
+    public static final String LOG_USE_NIO = "je.log.useNIO";
+
+    /**
+     * If true (default is false) direct NIO buffers are used.  This setting is
+     * only used if {@link #LOG_USE_NIO} is true.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     * @deprecated NIO is no longer used by JE and this parameter has no
+     * effect.
+     */
+    public static final String LOG_DIRECT_NIO = "je.log.directNIO";
+
+    /**
+     * If non-0 (default is 0) break all IO into chunks of this size.  This
+     * setting is only used if {@link #LOG_USE_NIO} is true.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>67108864 (64M)</td>
+     * </tr>
+     * </table></p>
+     * @deprecated NIO is no longer used by JE and this parameter has no
+     * effect.
+     */
+    public static final String LOG_CHUNKED_NIO = "je.log.chunkedNIO";
+
+    /**
+     * The maximum number of entries in an internal btree node.  This can be
+     * set per-database using the DatabaseConfig object.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>128</td>
+     * <td>4</td>
+     * <td>32767 (32K)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String NODE_MAX_ENTRIES = "je.nodeMaxEntries";
+
+    /**
+     * The maximum number of entries in an internal dup btree node.  This can
+     * be set per-database using the DatabaseConfig object.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>128</td>
+     * <td>4</td>
+     * <td>32767 (32K)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String NODE_DUP_TREE_MAX_ENTRIES =
+        "je.nodeDupTreeMaxEntries";
+
+    /**
+     * After this many deltas, log a full version.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>10</td>
+     * <td>0</td>
+     * <td>100</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TREE_MAX_DELTA = "je.tree.maxDelta";
+
+    /**
+     * If less than this percentage of entries are changed on a BIN, log a
+     * delta instead of a full version.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>25</td>
+     * <td>0</td>
+     * <td>75</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TREE_BIN_DELTA = "je.tree.binDelta";
+
+    /**
+     * The minimum bytes allocated out of the memory cache to hold Btree data
+     * including internal nodes and record keys and data.  If the specified
+     * value is larger than the size initially available in the cache, it will
+     * be truncated to the amount available.
+     *
+     * <p>{@link #TREE_MIN_MEMORY} is the minimum for a single environment.  By
+     * default, 500 KB or the size initially available in the cache is used,
+     * whichever is smaller.</p>
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>Yes</td>
+     * <td>512000 (500K)</td>
+     * <td>51200 (50K)</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TREE_MIN_MEMORY = "je.tree.minMemory";
+
+    /**
+     * The compressor thread wakeup interval in microseconds.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>5000000 (5 sec)</td>
+     * <td>1000000 (1 sec)</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String COMPRESSOR_WAKEUP_INTERVAL =
+        "je.compressor.wakeupInterval";
+
+    /**
+     * The number of times to retry a compression run if a deadlock occurs.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>3</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String COMPRESSOR_DEADLOCK_RETRY =
+        "je.compressor.deadlockRetry";
+
+    /**
+     * The lock timeout for compressor transactions in microseconds.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>500000 (0.5 sec)</td>
+     * <td>0</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String COMPRESSOR_LOCK_TIMEOUT =
+        "je.compressor.lockTimeout";
+
+    /**
+     * If true, when the compressor encounters an empty database, the root node
+     * of the Btree is deleted.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     *
+     * @deprecated as of 3.3.87.  Compression of the root node no longer has
+     * any benefit and this feature has been removed.  This parameter has no
+     * effect.
+     */
+    public static final String COMPRESSOR_PURGE_ROOT =
+        "je.compressor.purgeRoot";
+
+    /**
+     * When eviction occurs, the evictor will push memory usage to this number
+     * of bytes below {@link #MAX_MEMORY}.  No more than 50% of je.maxMemory
+     * will be evicted per eviction cycle, regardless of this setting.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>524288 (512K)</td>
+     * <td>1024 (1K)</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String EVICTOR_EVICT_BYTES = "je.evictor.evictBytes";
+
+    /**
+     * The number of nodes in one evictor scan.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>10</td>
+     * <td>1</td>
+     * <td>1000</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String EVICTOR_NODES_PER_SCAN =
+        "je.evictor.nodesPerScan";
+
+    /**
+     * The number of times to retry the evictor if it runs into a deadlock.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>3</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String EVICTOR_DEADLOCK_RETRY =
+        "je.evictor.deadlockRetry";
+
+    /**
+     * If true (the default), use an LRU-only policy to select nodes for
+     * eviction.  If false, select by Btree level first, and then by LRU.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String EVICTOR_LRU_ONLY = "je.evictor.lruOnly";
+
+    /**
+     * Call Thread.yield() at each check for cache overflow. This improves GC
+     * performance on some systems.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String EVICTOR_FORCED_YIELD = "je.evictor.forcedYield";
+
+    /**
+     * Ask the checkpointer to run every time we write this many bytes to the
+     * log. If set, supersedes {@link #CHECKPOINTER_WAKEUP_INTERVAL}. To use
+     * time-based checkpointing, set this to 0.
+     *
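+     * <p>For example, to switch to purely time-based checkpointing (a sketch,
+     * using an EnvironmentConfig named <code>envConfig</code>; the one-second
+     * wakeup interval is arbitrary):</p>
+     *
+     * <blockquote><pre>
+     *     envConfig.setConfigParam
+     *         (EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, "0");
+     *     envConfig.setConfigParam
+     *         (EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL, "1000000");
+     * </pre></blockquote>
+     *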
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td><td>JVM</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>20000000 (20M)</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * <td>Conventional JVM</td>
+     * </tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>200000 (200K)</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * <td>Dalvik JVM</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CHECKPOINTER_BYTES_INTERVAL =
+        "je.checkpointer.bytesInterval";
+
+    /**
+     * The checkpointer wakeup interval in microseconds. By default, this
+     * is inactive and the checkpointer wakes up as a function of the
+     * number of bytes written to the log ({@link
+     * #CHECKPOINTER_BYTES_INTERVAL}).
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>0</td>
+     * <td>1000000 (1 sec)</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CHECKPOINTER_WAKEUP_INTERVAL =
+        "je.checkpointer.wakeupInterval";
+
+    /**
+     * The number of times to retry a checkpoint if it runs into a deadlock.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>3</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CHECKPOINTER_DEADLOCK_RETRY =
+        "je.checkpointer.deadlockRetry";
+
+    /**
+     * If true, the checkpointer uses more resources in order to complete the
+     * checkpoint in a shorter time interval.  Btree latches are held and other
+     * threads are blocked for a longer period.  Log cleaner record migration
+     * is performed by cleaner threads instead of during checkpoints.  When set
+     * to true, application response time may be longer during a checkpoint,
+     * and more cleaner threads may be required to maintain the configured log
+     * utilization.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CHECKPOINTER_HIGH_PRIORITY =
+        "je.checkpointer.highPriority";
+
+    /**
+     * The cleaner will keep the total disk space utilization percentage above
+     * this value.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>50</td>
+     * <td>0</td>
+     * <td>90</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_MIN_UTILIZATION =
+        "je.cleaner.minUtilization";
+
+    /**
+     * A log file will be cleaned if its utilization percentage is below this
+     * value, irrespective of total utilization.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>5</td>
+     * <td>0</td>
+     * <td>50</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_MIN_FILE_UTILIZATION =
+        "je.cleaner.minFileUtilization";
+
+    /**
+     * The cleaner checks disk utilization every time we write this many bytes
+     * to the log.  If zero (the default), it is set to the {@link
+     * #LOG_FILE_MAX} value divided by four.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_BYTES_INTERVAL =
+        "je.cleaner.bytesInterval";
+
+    /**
+     * If true, the cleaner will fetch records to determine their size to more
+     * accurately calculate log utilization.  This setting is used during DB
+     * truncation/removal and during recovery, and will cause more I/O during
+     * those operations when set to true.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_FETCH_OBSOLETE_SIZE =
+        "je.cleaner.fetchObsoleteSize";
+
+    /**
+     * The number of times to retry cleaning if a deadlock occurs.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>3</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_DEADLOCK_RETRY =
+        "je.cleaner.deadlockRetry";
+
+    /**
+     * The lock timeout for cleaner transactions in microseconds.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>Yes</td>
+     * <td>500000 (0.5 sec)</td>
+     * <td>0</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_LOCK_TIMEOUT = "je.cleaner.lockTimeout";
+
+    /**
+     * If true, the cleaner deletes log files after successful cleaning.  If
+     * false, the cleaner changes log file extensions to .DEL instead of
+     * deleting them.  The latter is useful for diagnosing log cleaning
+     * problems.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_EXPUNGE = "je.cleaner.expunge";
+
+    /**
+     * The minimum age of a file (number of files between it and the active
+     * file) to qualify it for cleaning under any conditions.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>2</td>
+     * <td>1</td>
+     * <td>1000</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_MIN_AGE = "je.cleaner.minAge";
+
+    /**
+     * The maximum number of log files in the cleaner's backlog, or zero if
+     * there is no limit.  Changing this property can impact the performance of
+     * some out-of-memory applications.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>100000</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_MAX_BATCH_FILES =
+        "je.cleaner.maxBatchFiles";
+
+    /**
+     * The read buffer size for cleaning.  If zero (the default), then {@link
+     * #LOG_ITERATOR_READ_SIZE} value is used.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>0</td>
+     * <td>128</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_READ_SIZE = "je.cleaner.readSize";
+
+    /**
+     * Tracking of detailed cleaning information will use no more than this
+     * percentage of the cache.  The default value is 2% of {@link
+     * #MAX_MEMORY}. If 0 and {@link #SHARED_CACHE} is true, use 2% divided by
+     * N where N is the number of environments sharing the global cache.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>2</td>
+     * <td>1</td>
+     * <td>90</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE =
+        "je.cleaner.detailMaxMemoryPercentage";
+
+    /**
+     * Specifies a list of files or file ranges to be cleaned at a time when no
+     * other log cleaning is necessary.  This parameter is intended for use in
+     * forcing the cleaning of a large number of log files.  File numbers are
+     * in hex and are comma separated or hyphen separated to specify ranges,
+     * e.g.: '9,a,b-d' will clean 5 files.
+     *
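+     * <p>For example, using the value from the description above (a sketch,
+     * using an EnvironmentConfig named <code>envConfig</code>):</p>
+     *
+     * <blockquote><pre>
+     *     envConfig.setConfigParam
+     *         (EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "9,a,b-d");
+     * </pre></blockquote>
+     *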
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>No</td>
+     * <td>""</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_FORCE_CLEAN_FILES =
+        "je.cleaner.forceCleanFiles";
+
+    /**
+     * All log files having a log version prior to the specified version will
+     * be cleaned at a time when no other log cleaning is necessary.  Intended
+     * for use in upgrading old format log files forward to the current log
+     * format version, e.g., to take advantage of format improvements; note
+     * that log upgrading is optional.  The default value zero (0) specifies
+     * that no upgrading will occur.  The value negative one (-1) specifies
+     * upgrading to the current log version.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>0</td>
+     * <td>-1</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_UPGRADE_TO_LOG_VERSION =
+        "je.cleaner.upgradeToLogVersion";
+
+    /**
+     * The number of threads allocated by the cleaner for log file processing.
+     * If the cleaner backlog becomes large, try increasing this value.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>1</td>
+     * <td>1</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_THREADS = "je.cleaner.threads";
+
+    /**
+     * The look ahead cache size for cleaning in bytes.  Increasing this value
+     * can reduce the number of Btree lookups.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>Yes</td>
+     * <td>8192 (8K)</td>
+     * <td>0</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String CLEANER_LOOK_AHEAD_CACHE_SIZE =
+        "je.cleaner.lookAheadCacheSize";
+
+    /**
+     * Number of Lock Tables.  Set this to a value other than 1 when an
+     * application has multiple threads performing concurrent JE operations.
+     * It should be set to a prime number, and in general not higher than the
+     * number of application threads performing JE operations.
+     *
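+     * <p>For example, an application with roughly ten JE threads might choose
+     * a prime value such as 7 (a sketch, using an EnvironmentConfig named
+     * <code>envConfig</code>):</p>
+     *
+     * <blockquote><pre>
+     *     envConfig.setConfigParam
+     *         (EnvironmentConfig.LOCK_N_LOCK_TABLES, "7");
+     * </pre></blockquote>
+     *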
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>1</td>
+     * <td>1</td>
+     * <td>32767 (32K)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String LOCK_N_LOCK_TABLES = "je.lock.nLockTables";
+
+    /**
+     * The {@link #setLockTimeout LockTimeout} property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>500000 (0.5 sec)</td>
+     * <td>0</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #setLockTimeout
+     */
+    public static final String LOCK_TIMEOUT = "je.lock.timeout";
+
+    /**
+     * The {@link #setTxnTimeout TxnTimeout} property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Long</td>
+     * <td>No</td>
+     * <td>0</td>
+     * <td>0</td>
+     * <td>4294967296 (71.6 min)</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #setTxnTimeout
+     */
+    public static final String TXN_TIMEOUT = "je.txn.timeout";
+
+    /**
+     * The {@link #setTxnSerializableIsolation TxnSerializableIsolation}
+     * property.
+     *
+     * <p><table border"1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     *
+     * @see #setTxnSerializableIsolation
+     */
+    public static final String TXN_SERIALIZABLE_ISOLATION =
+        "je.txn.serializableIsolation";
+
+    /**
+     * Set this parameter to true to add stacktrace information to deadlock
+     * (lock timeout) exception messages.  The stack trace will show where each
+     * lock was taken.  The default is false, and true should only be used
+     * during debugging because of the added memory/processing cost.  This
+     * parameter is 'static' across all environments.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TXN_DEADLOCK_STACK_TRACE =
+        "je.txn.deadlockStackTrace";
+
+    /**
+     * Dump the lock table when a lock timeout is encountered, for debugging
+     * assistance.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>Yes</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TXN_DUMP_LOCKS = "je.txn.dumpLocks";
+
+    /**
+     * Use FileHandler in logging system.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_FILE = "java.util.logging.FileHandler.on";
+
+    /**
+     * Use ConsoleHandler in logging system.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>false</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_CONSOLE =
+        "java.util.logging.ConsoleHandler.on";
+
+    /**
+     * Use DbLogHandler in logging system.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Boolean</td>
+     * <td>No</td>
+     * <td>true</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_DB = "java.util.logging.DbLogHandler.on";
+
+    /**
+     * Log file limit for FileHandler.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>10000000 (10M)</td>
+     * <td>1000</td>
+     * <td>1000000000 (1G)</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_FILE_LIMIT =
+        "java.util.logging.FileHandler.limit";
+
+    /**
+     * Log file count for FileHandler.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>Integer</td>
+     * <td>No</td>
+     * <td>10</td>
+     * <td>1</td>
+     * <td>-none-</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_FILE_COUNT =
+        "java.util.logging.FileHandler.count";
+
+    /**
+     * Trace messages at or above this level will be logged.  Value should
+     * be one of the predefined java.util.logging.Level values.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>No</td>
+     * <td>"INFO"</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_LEVEL = "java.util.logging.level";
+
+    /**
+     * Lock manager specific trace messages will be issued at this level.
+     * Value should be one of the predefined java.util.logging.Level values.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>No</td>
+     * <td>"FINE"</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_LEVEL_LOCK_MANAGER =
+        "java.util.logging.level.lockMgr";
+
+    /**
+     * Recovery specific trace messages will be issued at this level.  Value
+     * should be one of the predefined java.util.logging.Level values.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>No</td>
+     * <td>"FINE"</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_LEVEL_RECOVERY =
+        "java.util.logging.level.recovery";
+
+    /**
+     * Evictor specific trace messages will be issued at this level.  Value
+     * should be one of the predefined java.util.logging.Level values.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>No</td>
+     * <td>"FINE"</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_LEVEL_EVICTOR =
+        "java.util.logging.level.evictor";
+
+    /**
+     * Cleaner specific detailed trace messages will be issued at this level.
+     * Value should be one of the predefined java.util.logging.Level values.
+     *
+     * <p><table border="1">
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+     * <tr>
+     * <td>{@value}</td>
+     * <td>String</td>
+     * <td>Yes</td>
+     * <td>"FINE"</td>
+     * </tr>
+     * </table></p>
+     */
+    public static final String TRACE_LEVEL_CLEANER =
+        "java.util.logging.level.cleaner";
+
+    /**
+     * For unit testing, to prevent creating the utilization profile DB.
+     */
+    private boolean createUP = true;
+
+    /**
+     * For unit testing, to prevent writing utilization data during checkpoint.
+     */
+    private boolean checkpointUP = true;
+
+    private boolean allowCreate = false;
+
+    /**
+     * For unit testing, to set readCommitted as the default.
+     */
+    private boolean txnReadCommitted = false;
+
+    /**
+     * Creates an EnvironmentConfig initialized with the system default
+     * settings.
+     */
+    public EnvironmentConfig() {
+        super();
+    }
+
+    /**
+     * Creates an EnvironmentConfig which includes the properties specified in
+     * the properties parameter.
+     *
+     * @param properties Supported properties are described in the sample
+     * property file.
+     *
+     * @throws IllegalArgumentException If any properties read from the
+     * properties param are invalid.
+     */
+    public EnvironmentConfig(Properties properties)
+        throws IllegalArgumentException {
+
+        super(properties);
+    }
+
+    /**
+     * If true, creates the database environment if it doesn't already exist.
+     *
+     * @param allowCreate If true, the database environment is created if it
+     * doesn't already exist.
+     */
+    public void setAllowCreate(boolean allowCreate) {
+
+        this.allowCreate = allowCreate;
+    }
+
+    /**
+     * Returns a flag that specifies if we may create this environment.
+     *
+     * @return true if we may create this environment.
+     */
+    public boolean getAllowCreate() {
+
+        return allowCreate;
+    }
+
+    /**
+     * Configures the lock timeout, in microseconds.
+     *
+     * <p>Equivalent to setting the je.lock.timeout parameter in the
+     * je.properties file.</p>
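+     *
+     * <p>For instance, to use a one second lock timeout (sketch only; the
+     * value is illustrative):</p>
+     *
+     * <blockquote><pre>
+     *     EnvironmentConfig envConfig = new EnvironmentConfig();
+     *     envConfig.setLockTimeout(1000000L);  // 1,000,000 microseconds
+     * </pre></blockquote>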
+     *
+     * @param timeout The lock timeout, in microseconds. A value of 0 turns off
+     * lock timeouts.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative
+     * @see Transaction#setLockTimeout
+     */
+    public void setLockTimeout(long timeout)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.LOCK_TIMEOUT,
+                               Long.toString(timeout),
+                               validateParams);
+    }
+
+    /**
+     * Returns the lock timeout setting, in microseconds.
+     *
+     * <p>A value of 0 means no lock timeout is set.</p>
+     *
+     * @return The lock timeout setting, in microseconds.
+     */
+    public long getLockTimeout() {
+
+        String val = DbConfigManager.getVal(props,
+                                            EnvironmentParams.LOCK_TIMEOUT);
+        long timeout = 0;
+        try {
+            timeout = Long.parseLong(val);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		("Bad value for timeout:" + e.getMessage());
+        }
+        return timeout;
+    }
+
+    /**
+     * Configures the database environment to be read only, and any attempt to
+     * modify a database will fail.
+     *
+     * @param readOnly If true, configure the database environment to be read
+     * only, and any attempt to modify a database will fail.
+     */
+    public void setReadOnly(boolean readOnly) {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.ENV_RDONLY,
+                               Boolean.toString(readOnly),
+                               validateParams);
+    }
+
+    /**
+     * Returns true if the database environment is configured to be read only.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database environment is configured to be read only.
+     */
+    public boolean getReadOnly() {
+
+        String val = DbConfigManager.getVal(props,
+                                            EnvironmentParams.ENV_RDONLY);
+        return (Boolean.valueOf(val)).booleanValue();
+    }
+
+    /**
+     * Configures the database environment for transactions.
+     *
+     * <p>This configuration option should be used when transactional
+     * guarantees such as atomicity of multiple operations and durability are
+     * important.</p>
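+     *
+     * <p>For example, a transactional environment might be opened as follows
+     * (sketch only; envHome is assumed to be an existing directory):</p>
+     *
+     * <blockquote><pre>
+     *     EnvironmentConfig envConfig = new EnvironmentConfig();
+     *     envConfig.setAllowCreate(true);
+     *     envConfig.setTransactional(true);
+     *     Environment env = new Environment(envHome, envConfig);
+     * </pre></blockquote>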
+     *
+     * @param transactional If true, configure the database environment for
+     * transactions.
+     */
+    public void setTransactional(boolean transactional) {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.ENV_INIT_TXN,
+                               Boolean.toString(transactional),
+                               validateParams);
+    }
+
+    /**
+     * Returns true if the database environment is configured for transactions.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database environment is configured for transactions.
+     */
+    public boolean getTransactional() {
+
+        String val = DbConfigManager.getVal(props,
+                                            EnvironmentParams.ENV_INIT_TXN);
+        return (Boolean.valueOf(val)).booleanValue();
+    }
+
+    /**
+     * Configures the database environment for no locking.
+     *
+     * <p>This configuration option should be used when locking guarantees such
+     * as consistency and isolation are not important.  If locking mode is
+     * disabled (it is enabled by default), the cleaner is automatically
+     * disabled.  The user is responsible for invoking the cleaner and ensuring
+     * that there are no concurrent operations while the cleaner is
+     * running.</p>
+     *
+     * @param locking If false, configure the database environment for no
+     * locking.  The default is true.
+     */
+    public void setLocking(boolean locking) {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.ENV_INIT_LOCKING,
+                               Boolean.toString(locking),
+                               validateParams);
+    }
+
+    /**
+     * Returns true if the database environment is configured for locking.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the database environment is configured for locking.
+     */
+    public boolean getLocking() {
+
+        String val =
+            DbConfigManager.getVal(props, EnvironmentParams.ENV_INIT_LOCKING);
+        return (Boolean.valueOf(val)).booleanValue();
+    }
+
+    /**
+     * Configures the transaction timeout, in microseconds.
+     *
+     * <p>Equivalent to setting the je.txn.timeout parameter in the
+     * je.properties file.</p>
+     *
+     * @param timeout The transaction timeout, in microseconds. A value of 0
+     * turns off transaction timeouts.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative
+     *
+     * @see Transaction#setTxnTimeout
+     */
+    public void setTxnTimeout(long timeout)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.TXN_TIMEOUT,
+                               Long.toString(timeout),
+                               validateParams);
+    }
+
+    /**
+     * Returns the transaction timeout, in microseconds.
+     *
+     * <p>A value of 0 means transaction timeouts are not configured.</p>
+     *
+     * @return The transaction timeout, in microseconds.
+     */
+    public long getTxnTimeout() {
+
+        String val = DbConfigManager.getVal(props,
+                                            EnvironmentParams.TXN_TIMEOUT);
+        long timeout = 0;
+        try {
+            timeout = Long.parseLong(val);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		("Bad value for timeout:" + e.getMessage());
+        }
+        return timeout;
+    }
+
+    /**
+     * Configures all transactions for this environment to have Serializable
+     * (Degree 3) isolation.  By setting Serializable isolation, phantoms will
+     * be prevented.  By default transactions provide Repeatable Read
+     * isolation.
+     *
+     * The default is false for the database environment.
+     *
+     * @see LockMode
+     */
+    public void setTxnSerializableIsolation(boolean txnSerializableIsolation) {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.TXN_SERIALIZABLE_ISOLATION,
+                               Boolean.toString(txnSerializableIsolation),
+                               validateParams);
+    }
+
+    /**
+     * Returns true if all transactions for this environment have been
+     * configured to have Serializable (Degree 3) isolation.
+     *
+     * @return true if the environment has been configured for Serializable
+     * (Degree 3) isolation.
+     *
+     * @see LockMode
+     */
+    public boolean getTxnSerializableIsolation() {
+
+        String val = DbConfigManager.getVal
+	    (props, EnvironmentParams.TXN_SERIALIZABLE_ISOLATION);
+        return (Boolean.valueOf(val)).booleanValue();
+    }
+
+    /**
+     * For unit testing, sets readCommitted as the default.
+     */
+    void setTxnReadCommitted(boolean txnReadCommitted) {
+
+        this.txnReadCommitted = txnReadCommitted;
+    }
+
+    /**
+     * For unit testing, to set readCommitted as the default.
+     */
+    boolean getTxnReadCommitted() {
+
+        return txnReadCommitted;
+    }
+
+    /**
+     * If true, the shared cache is used by this environment.
+     *
+     * <p>By default this parameter is false and this environment uses a
+     * private cache.  If this parameter is set to true, this environment will
+     * use a cache that is shared with all other open environments in this
+     * process that also set this parameter to true.  There is a single shared
+     * cache per process.</p>
+     *
+     * <p>By using the shared cache, multiple open environments will make
+     * better use of memory because the cache LRU algorithm is applied across
+     * all information in all environments sharing the cache.  For example, if
+     * one environment is open but not recently used, then it will only use a
+     * small portion of the cache, leaving the rest of the cache for
+     * environments that have been recently used.</p>
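+     *
+     * <p>For example (sketch only; homeDir1 and homeDir2 are assumed to be
+     * distinct, existing environment directories), two environments opened in
+     * the same process can share one cache:</p>
+     *
+     * <blockquote><pre>
+     *     EnvironmentConfig envConfig = new EnvironmentConfig();
+     *     envConfig.setAllowCreate(true);
+     *     envConfig.setSharedCache(true);
+     *     Environment env1 = new Environment(homeDir1, envConfig);
+     *     Environment env2 = new Environment(homeDir2, envConfig);
+     * </pre></blockquote>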
+     *
+     * @param sharedCache If true, the shared cache is used by this
+     * environment.
+     */
+    public void setSharedCache(boolean sharedCache) {
+
+        DbConfigManager.setVal(props,
+                               EnvironmentParams.ENV_SHARED_CACHE,
+                               Boolean.toString(sharedCache),
+                               validateParams);
+    }
+
+    /**
+     * Returns true if the shared cache is used by this environment.
+     *
+     * @return true if the shared cache is used by this environment.
+     *
+     * @see #setSharedCache
+     */
+    public boolean getSharedCache() {
+
+        String val = DbConfigManager.getVal
+            (props, EnvironmentParams.ENV_SHARED_CACHE);
+        return (Boolean.valueOf(val)).booleanValue();
+    }
+
+    /* Documentation inherited from EnvironmentMutableConfig.setConfigParam. */
+    @Override
+    public void setConfigParam(String paramName,
+			       String value)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setConfigParam(props,
+                                       paramName,
+                                       value,
+                                       false, /* requireMutability */
+                                       validateParams,
+                                       false  /* forReplication */,
+				       true   /* verifyForReplication */);
+    }
+
+    /**
+     * For unit testing, to prevent creating the utilization profile DB.
+     */
+    void setCreateUP(boolean createUP) {
+        this.createUP = createUP;
+    }
+
+    /**
+     * For unit testing, to prevent creating the utilization profile DB.
+     */
+    boolean getCreateUP() {
+        return createUP;
+    }
+
+    /**
+     * For unit testing, to prevent writing utilization data during checkpoint.
+     */
+    void setCheckpointUP(boolean checkpointUP) {
+        this.checkpointUP = checkpointUP;
+    }
+
+    /**
+     * For unit testing, to prevent writing utilization data during checkpoint.
+     */
+    boolean getCheckpointUP() {
+        return checkpointUP;
+    }
+
+    /**
+     * Used by Environment to create a copy of the application
+     * supplied configuration.
+     */
+    EnvironmentConfig cloneConfig() {
+        try {
+            return (EnvironmentConfig) clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Display configuration values.
+     */
+    @Override
+    public String toString() {
+        return ("allowCreate=" + allowCreate + "\n" + super.toString());
+    }
+}
diff --git a/src/com/sleepycat/je/EnvironmentLockedException.java b/src/com/sleepycat/je/EnvironmentLockedException.java
new file mode 100644
index 0000000000000000000000000000000000000000..05dea3e197b52252b7298b4fab42cc16b3e28062
--- /dev/null
+++ b/src/com/sleepycat/je/EnvironmentLockedException.java
@@ -0,0 +1,35 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentLockedException.java,v 1.1.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Thrown by the Environment constructor when an environment cannot be
+ * opened for write access because another process has the same environment
+ * open for write access.
+ */
+public class EnvironmentLockedException extends DatabaseException {
+
+    private static final long serialVersionUID = 629594964L;
+
+    public EnvironmentLockedException() {
+        super();
+    }
+
+    public EnvironmentLockedException(Throwable t) {
+        super(t);
+    }
+
+    public EnvironmentLockedException(String message) {
+        super(message);
+    }
+
+    public EnvironmentLockedException(String message, Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/src/com/sleepycat/je/EnvironmentMutableConfig.java b/src/com/sleepycat/je/EnvironmentMutableConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..de5dc086ce6864a427b0db48b917353140ca39a4
--- /dev/null
+++ b/src/com/sleepycat/je/EnvironmentMutableConfig.java
@@ -0,0 +1,618 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentMutableConfig.java,v 1.44.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.Properties;
+
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Specifies the environment attributes that may be changed after the
+ * environment has been opened.  EnvironmentMutableConfig is a parameter to
+ * {@link Environment#setMutableConfig} and is returned by {@link
+ * Environment#getMutableConfig}.
+ *
+ * <p>There are two types of mutable environment properties: per-environment
+ * handle properties, and environment wide properties.</p>
+ *
+ * <h4>Per-Environment Handle Properties</h4>
+ *
+ * <p>Per-environment handle properties apply only to a single Environment
+ * instance.  For example, to change the default transaction commit behavior
+ * for a single environment handle, do this:</p>
+ *
+ * <blockquote><pre>
+ *     // Specify no-sync behavior for a given handle.
+ *     EnvironmentMutableConfig mutableConfig = myEnvHandle.getMutableConfig();
+ *     mutableConfig.setTxnNoSync(true);
+ *     myEnvHandle.setMutableConfig(mutableConfig);
+ * </pre></blockquote>
+ *
+ * <p>The per-environment handle properties are listed below.  These properties
+ * are accessed using the setter and getter methods listed, as shown in the
+ * example above.</p>
+ *
+ * <ul>
+ * <li>{@link #setTxnNoSync}, {@link #getTxnNoSync}</li>
+ * <li>{@link #setTxnWriteNoSync}, {@link #getTxnWriteNoSync}</li>
+ * </ul>
+ *
+ * <h4>Environment-Wide Mutable Properties</h4>
+ *
+ * <p>Environment-wide mutable properties are those that can be changed for an
+ * environment as a whole, irrespective of which environment instance (for the
+ * same physical environment) is used.  For example, to stop the cleaner daemon
+ * thread, do this:</p>
+ *
+ * <blockquote><pre>
+ *     // Stop the cleaner daemon thread for the environment.
+ *     EnvironmentMutableConfig mutableConfig = myEnvHandle.getMutableConfig();
+ *     mutableConfig.setConfigParam("je.env.runCleaner", "false");
+ *     myEnvHandle.setMutableConfig(mutableConfig);
+ * </pre></blockquote>
+ *
+ * <p>The environment-wide mutable properties are listed below.  These
+ * properties are accessed using the {@link #setConfigParam} and {@link
+ * #getConfigParam} methods, as shown in the example above, using the property
+ * names listed below.  In some cases setter and getter methods are also
+ * available.</p>
+ *
+ * <ul>
+ * <li>je.maxMemory ({@link #setCacheSize}, {@link #getCacheSize})</li>
+ * <li>je.maxMemoryPercent ({@link #setCachePercent},
+ * {@link #getCachePercent})</li>
+ * <li>je.env.runINCompressor</li>
+ * <li>je.env.runEvictor</li>
+ * <li>je.env.runCheckpointer</li>
+ * <li>je.env.runCleaner</li>
+ * </ul>
+ *
+ * <h4>Getting the Current Environment Properties</h4>
+ *
+ * To get the current "live" properties of an environment after constructing it
+ * or changing its properties, you must call {@link Environment#getConfig} or
+ * {@link Environment#getMutableConfig}.  The original EnvironmentConfig or
+ * EnvironmentMutableConfig object used to set the properties is not kept up to
+ * date as properties are changed, and does not reflect property validation or
+ * properties that are computed. @see EnvironmentConfig
+ */
+public class EnvironmentMutableConfig implements Cloneable {
+
+    /*
+     * Change copyHandlePropsTo and Environment.copyToHandleConfig when adding
+     * fields here.
+     */
+    private boolean txnNoSync = false;
+    private boolean txnWriteNoSync = false;
+    private Durability durability = null;
+    private ReplicaConsistencyPolicy consistencyPolicy = null;
+
+    /**
+     * @hidden
+     * Cache size is a category of property that is calculated within the
+     * environment.  It is only supplied when returning the cache size to the
+     * application and never used internally; internal code directly checks
+     * with the MemoryBudget class;
+     */
+    protected long cacheSize;
+
+    /**
+     * @hidden
+     * Note that in the implementation we choose not to extend Properties in
+     * order to keep the configuration type safe.
+     */
+    protected Properties props;
+
+    /**
+     * For unit testing, to prevent loading of je.properties.
+     */
+    private boolean loadPropertyFile = true;
+
+    /**
+     * Internal boolean that says whether or not to validate params.  Setting
+     * it to false means that parameter value validation won't be performed
+     * during setVal() calls.  Should only be set to false by unit tests using
+     * DbInternal.
+     */
+    boolean validateParams = true;
+
+    private ExceptionListener exceptionListener = null;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public EnvironmentMutableConfig() {
+        props = new Properties();
+    }
+
+    /**
+     * Used by EnvironmentConfig to construct from properties.
+     */
+    EnvironmentMutableConfig(Properties properties)
+        throws IllegalArgumentException {
+
+        DbConfigManager.validateProperties(properties,
+                                           false,  // forReplication
+                                           this.getClass().getName(),
+					   true);  // verifyForReplication
+        /* For safety, copy the passed in properties. */
+        props = new Properties();
+        props.putAll(properties);
+    }
+
+    /**
+     * Configures the database environment for asynchronous transactions.
+     *
+     * @param noSync If true, do not write or synchronously flush the log on
+     * transaction commit. This means that transactions exhibit the ACI
+     * (Atomicity, Consistency, and Isolation) properties, but not D
+     * (Durability); that is, database integrity is maintained, but if the JVM
+     * or operating system fails, it is possible some number of the most
+     * recently committed transactions may be undone during recovery. The
+     * number of transactions at risk is governed by how many updates fit into
+     * a log buffer, how often the operating system flushes dirty buffers to
+     * disk, and how often the database environment is checkpointed.
+     *
+     * <p>This attribute is false by default for this class and for the
+     * database environment.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setTxnNoSync(boolean noSync) {
+        TransactionConfig.checkMixedMode
+            (false, noSync, txnWriteNoSync, durability);
+        txnNoSync = noSync;
+    }
+
+    /**
+     * Returns true if the database environment is configured for asynchronous
+     * transactions.
+     *
+     * @return true if the database environment is configured for asynchronous
+     * transactions.
+     */
+    public boolean getTxnNoSync() {
+        return txnNoSync;
+    }
+
+    /**
+     * Configures the database environment for transactions which write but do
+     * not flush the log.
+     *
+     * @param writeNoSync If true, write but do not synchronously flush the log
+     * on transaction commit. This means that transactions exhibit the ACI
+     * (Atomicity, Consistency, and Isolation) properties, but not D
+     * (Durability); that is, database integrity is maintained, but if the
+     * operating system fails, it is possible some number of the most recently
+     * committed transactions may be undone during recovery. The number of
+     * transactions at risk is governed by how often the operating system
+     * flushes dirty buffers to disk, and how often the database environment is
+     * checkpointed.
+     *
+     * <p>The motivation for this attribute is to provide a transaction that
+     * has more durability than asynchronous (nosync) transactions, but has
+     * higher performance than synchronous transactions.</p>
+     *
+     * <p>This attribute is false by default for this class and for the
+     * database environment.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setTxnWriteNoSync(boolean writeNoSync) {
+        TransactionConfig.checkMixedMode
+            (false, txnNoSync, writeNoSync, durability);
+        txnWriteNoSync = writeNoSync;
+    }
+
+    /**
+     * Returns true if the database environment is configured for transactions
+     * which write but do not flush the log.
+     *
+     * @return true if the database environment is configured for transactions
+     * which write but do not flush the log.
+     */
+    public boolean getTxnWriteNoSync() {
+        return txnWriteNoSync;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Configures the durability associated with transactions.
+     *
+     * @param durability the durability definition
+     */
+    public void setDurability(Durability durability) {
+        TransactionConfig.checkMixedMode
+            (false, txnNoSync, txnWriteNoSync, durability);
+        this.durability = durability;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Returns the durability associated with the configuration.
+     *
+     * @return the durability setting currently associated with this config.
+     */
+    public Durability getDurability() {
+        return durability;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Associates a consistency policy with this configuration.
+     *
+     * @param consistencyPolicy the consistency definition
+     */
+    public void setConsistencyPolicy
+        (ReplicaConsistencyPolicy consistencyPolicy) {
+        this.consistencyPolicy = consistencyPolicy;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Returns the consistency policy associated with the configuration.
+     *
+     * @return the consistency policy currently associated with this config.
+     */
+    public ReplicaConsistencyPolicy getConsistencyPolicy() {
+        return consistencyPolicy;
+    }
+
+    /**
+     * Configures the memory available to the database system, in bytes.
+     *
+     * <p>Equivalent to setting the je.maxMemory property in the je.properties
+     * file. The system will evict database objects when it comes within a
+     * prescribed margin of the limit.</p>
+     *
+     * <p>By default, JE sets the cache size to:</p>
+     *
+     * <blockquote><pre>
+     *         je.maxMemoryPercent *  JVM maximum memory
+     * </pre></blockquote>
+     *
+     * <p>where JVM maximum memory is specified by the JVM -Xmx flag. However,
+     * calling setCacheSize() with a non-zero value overrides the percentage
+     * based calculation and sets the cache size explicitly.</p>
+     *
+     * <p>Note that the log buffer cache may be cleared if the cache size is
+     * changed after the environment has been opened.</p>
+     *
+     * <p>If setSharedCache(true) is called, setCacheSize and setCachePercent
+     * specify the total size of the shared cache, and changing these
+     * parameters will change the size of the shared cache.</p>
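+     *
+     * <p>For example (sketch only; env is an open Environment handle and the
+     * size is illustrative), the cache can be resized on a live environment
+     * because je.maxMemory is mutable:</p>
+     *
+     * <blockquote><pre>
+     *     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+     *     mutableConfig.setCacheSize(64L * 1024 * 1024);  // 64 MB
+     *     env.setMutableConfig(mutableConfig);
+     * </pre></blockquote>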
+     *
+     * @param totalBytes The memory available to the database system, in bytes.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setCacheSize(long totalBytes)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setVal(props, EnvironmentParams.MAX_MEMORY,
+                               Long.toString(totalBytes), validateParams);
+    }
+
+    /**
+     * Returns the memory available to the database system, in bytes. A valid
+     * value is only available if this EnvironmentConfig object has been
+     * returned from Environment.getConfig().
+     *
+     * @return The memory available to the database system, in bytes.
+     */
+    public long getCacheSize() {
+
+        /*
+         * CacheSize is filled in from the EnvironmentImpl by way of
+         * fillInEnvironmentGeneratedProps.
+         */
+        return cacheSize;
+    }
+
+    /**
+     * <p>By default, JE sets its cache size proportionally to the JVM
+     * memory. This formula is used:</p>
+     *
+     * <blockquote><pre>
+     *         je.maxMemoryPercent *  JVM maximum memory
+     * </pre></blockquote>
+     *
+     * <p>where JVM maximum memory is specified by the JVM -Xmx flag.
+     * setCachePercent() specifies the percentage used and is equivalent to
+     * setting the je.maxMemoryPercent property in the je.properties file.</p>
+     *
+     * <p>Calling setCacheSize() with a non-zero value overrides the percentage
+     * based calculation and sets the cache size explicitly.</p>
+     *
+     * <p>Note that the log buffer cache may be cleared if the cache size is
+     * changed after the environment has been opened.</p>
+     *
+     * <p>If setSharedCache(true) is called, setCacheSize and setCachePercent
+     * specify the total size of the shared cache, and changing these
+     * parameters will change the size of the shared cache.</p>
+     *
+     * @param percent The percent of JVM memory to allocate to the JE cache.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setCachePercent(int percent)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setVal(props, EnvironmentParams.MAX_MEMORY_PERCENT,
+                               Integer.toString(percent), validateParams);
+    }
+
+    /**
+     * Returns the percentage value used in the JE cache size calculation.
+     *
+     * @return the percentage value used in the JE cache size calculation.
+     */
+    public int getCachePercent() {
+
+        String val =
+            DbConfigManager.getVal(props,
+                                   EnvironmentParams.MAX_MEMORY_PERCENT);
+        try {
+            return Integer.parseInt(val);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		("Cache percent is not a valid integer: " + e.getMessage());
+        }
+    }
+
+    /**
+     * Sets the exception listener for an Environment.  The listener is called
+     * when a daemon thread throws an exception, in order to provide a
+     * notification mechanism for these otherwise asynchronous exceptions.
+     * Daemon thread exceptions are also printed through stderr.
+     * <p>
+     * Not all daemon exceptions are fatal, and the application bears
+     * responsibility for choosing how to respond to the notification. Since
+     * exceptions may repeat, the application should also choose how to handle
+     * a spate of exceptions. For example, the application may choose to act
+     * upon each notification, or it may choose to batch up its responses
+     * by implementing the listener so it stores exceptions, and only acts
+     * when a certain number have been received.
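+     * <p>
+     * A minimal listener might simply report the event (sketch only;
+     * mutableConfig is an EnvironmentMutableConfig instance, and the
+     * ExceptionEvent accessors getException and getThreadName are assumed):
+     * <blockquote><pre>
+     *     mutableConfig.setExceptionListener(new ExceptionListener() {
+     *         public void exceptionThrown(ExceptionEvent event) {
+     *             System.err.println("JE daemon exception in thread " +
+     *                                event.getThreadName() + ": " +
+     *                                event.getException());
+     *         }
+     *     });
+     * </pre></blockquote>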
+     * @param exceptionListener the callback to be executed when an exception
+     * occurs.
+     */
+    public void setExceptionListener(ExceptionListener exceptionListener) {
+	this.exceptionListener = exceptionListener;
+    }
+
+    /**
+     * Returns the exception listener, if set.
+     */
+    public ExceptionListener getExceptionListener() {
+	return exceptionListener;
+    }
+
+    /**
+     * Validates the value prescribed for the configuration parameter; if it is
+     * valid, the value is set in the configuration.
+     *
+     * @param paramName The name of the configuration parameter. See
+     * the sample je.properties file for descriptions of all parameters.
+     *
+     * @param value The value for this configuration parameter.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setConfigParam(String paramName, String value)
+        throws IllegalArgumentException {
+
+        DbConfigManager.setConfigParam(props,
+                                       paramName,
+                                       value,
+                                       true, /* require mutability. */
+                                       validateParams,
+                                       false /* forReplication */,
+				       true  /* verifyForReplication */);
+    }
+
+    /**
+     * Returns the value for this configuration parameter.
+     *
+     * @param paramName Name of the requested parameter.
+     *
+     * @throws IllegalArgumentException if the configParamName is invalid.
+     */
+    public String getConfigParam(String paramName)
+        throws IllegalArgumentException {
+
+       return DbConfigManager.getConfigParam(props, paramName);
+    }
+
+    /*
+     * Helpers
+     */
+    void setValidateParams(boolean validateParams) {
+	this.validateParams = validateParams;
+    }
+
+    /**
+     * Checks that the immutable values in the environment config used to open
+     * an environment match those in the config object saved by the underlying
+     * shared EnvironmentImpl.
+     */
+    void checkImmutablePropsForEquality(EnvironmentMutableConfig passedConfig)
+        throws IllegalArgumentException {
+
+        Properties passedProps = passedConfig.props;
+        Iterator<String> iter = EnvironmentParams.SUPPORTED_PARAMS.keySet().iterator();
+        while (iter.hasNext()) {
+            String paramName = iter.next();
+            ConfigParam param = EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+            assert param != null;
+            if (!param.isMutable()) {
+                String paramVal = props.getProperty(paramName);
+                String useParamVal = passedProps.getProperty(paramName);
+                if ((paramVal != null) ? (!paramVal.equals(useParamVal))
+                                       : (useParamVal != null)) {
+                    throw new IllegalArgumentException
+                        (paramName + " is set to " +
+                         useParamVal +
+                         " in the config parameter" +
+                         " which is incompatible" +
+                         " with the value of " +
+                         paramVal + " in the" +
+                         " underlying environment");
+                }
+            }
+        }
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     * Overrides Object.clone() to clone all properties, used by this class and
+     * EnvironmentConfig.
+     */
+    @Override
+    protected Object clone()
+        throws CloneNotSupportedException {
+
+        EnvironmentMutableConfig copy =
+            (EnvironmentMutableConfig) super.clone();
+        copy.props = (Properties) props.clone();
+        return copy;
+    }
+
+    /**
+     * Used by Environment to create a copy of the application supplied
+     * configuration. Done this way to provide non-public cloning.
+     */
+    EnvironmentMutableConfig cloneMutableConfig() {
+        try {
+            EnvironmentMutableConfig copy = (EnvironmentMutableConfig) clone();
+            /* Remove all immutable properties. */
+            copy.clearImmutableProps();
+            return copy;
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Copies the per-handle properties of this object to the given config
+     * object.
+     */
+    void copyHandlePropsTo(EnvironmentMutableConfig other) {
+        other.txnNoSync = txnNoSync;
+        other.txnWriteNoSync = txnWriteNoSync;
+        other.durability = durability;
+        other.consistencyPolicy = consistencyPolicy;
+    }
+
+    /**
+     * Copies all mutable props to the given config object.
+     * Unchecked suppress here because Properties don't play well with 
+     * generics in Java 1.5 
+     */
+    @SuppressWarnings("unchecked")
+    void copyMutablePropsTo(EnvironmentMutableConfig toConfig) {
+
+        Properties toProps = toConfig.props;
+        Enumeration propNames = props.propertyNames();
+        while (propNames.hasMoreElements()) {
+            String paramName = (String) propNames.nextElement();
+            ConfigParam param = (ConfigParam)
+                EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+            assert param != null;
+            if (param.isMutable()) {
+                String newVal = props.getProperty(paramName);
+                toProps.setProperty(paramName, newVal);
+            }
+        }
+	toConfig.exceptionListener = this.exceptionListener;
+    }
+
+    /**
+     * Fills in the properties calculated by the environment to the given
+     * config object.
+     */
+    void fillInEnvironmentGeneratedProps(EnvironmentImpl envImpl) {
+        cacheSize = envImpl.getMemoryBudget().getMaxMemory();
+    }
+
+    /**
+     * Removes all immutable props.
+     * Unchecked suppress here because Properties don't play well with
+     * generics in Java 1.5.
+     */
+    @SuppressWarnings("unchecked")
+    private void clearImmutableProps() {
+        Enumeration propNames = props.propertyNames();
+        while (propNames.hasMoreElements()) {
+            String paramName = (String) propNames.nextElement();
+            ConfigParam param = (ConfigParam)
+                EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+            assert param != null;
+            if (!param.isMutable()) {
+                props.remove(paramName);
+            }
+        }
+    }
+
+    Properties getProps() {
+        return props;
+    }
+
+    /**
+     * For unit testing, to prevent loading of je.properties.
+     */
+    void setLoadPropertyFile(boolean loadPropertyFile) {
+        this.loadPropertyFile = loadPropertyFile;
+    }
+
+    /**
+     * For unit testing, to prevent loading of je.properties.
+     */
+    boolean getLoadPropertyFile() {
+        return loadPropertyFile;
+    }
+
+    /**
+     * Testing support
+     */
+    public int getNumExplicitlySetParams() {
+        return props.size();
+    }
+
+    /**
+     * Display configuration values.
+     */
+    @Override
+    public String toString() {
+        return ("cacheSize=" + cacheSize + "\n" +
+                "txnNoSync=" + txnNoSync + "\n" +
+                "txnWriteNoSync=" + txnWriteNoSync + "\n" +
+                props.toString() + "\n");
+    }
+}
diff --git a/src/com/sleepycat/je/EnvironmentStats.java b/src/com/sleepycat/je/EnvironmentStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..848b59581efc4ade37cdb2b6cad0a4b952257dae
--- /dev/null
+++ b/src/com/sleepycat/je/EnvironmentStats.java
@@ -0,0 +1,1682 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentStats.java,v 1.58.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+import java.text.DecimalFormat;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * System wide statistics for a single environment.
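+ *
+ * <p>Statistics are typically obtained through {@link Environment#getStats}
+ * (sketch only; passing a null StatsConfig requests the default
+ * attributes):</p>
+ *
+ * <blockquote><pre>
+ *     EnvironmentStats stats = env.getStats(null);
+ *     System.out.println("cache misses = " + stats.getNCacheMiss());
+ * </pre></blockquote>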
+ */
+public class EnvironmentStats implements Serializable {
+
+    /* INCompressor */
+
+    /**
+     * The number of bins encountered by the INCompressor that were split
+     * between the time they were put on the compressor queue and when the
+     * compressor ran.
+     */
+    private long splitBins;
+
+    /**
+     * The number of bins encountered by the INCompressor that had their
+     * database closed between the time they were put on the compressor queue
+     * and when the compressor ran.
+     */
+    private long dbClosedBins;
+
+    /**
+     * The number of bins encountered by the INCompressor that had cursors
+     * referring to them when the compressor ran.
+     */
+    private long cursorsBins;
+
+    /**
+     * The number of bins encountered by the INCompressor that were not
+     * actually empty when the compressor ran.
+     */
+    private long nonEmptyBins;
+
+    /**
+     * The number of bins that were successfully processed by the IN
+     * Compressor.
+     */
+    private long processedBins;
+
+    /**
+     * The number of entries in the INCompressor queue when the getStats() call
+     * was made.
+     */
+    private long inCompQueueSize;
+
+    /* Evictor */
+
+    /**
+     * The number of passes made to the evictor.
+     */
+    private long nEvictPasses;
+
+    /**
+     * The accumulated number of nodes selected to evict.
+     */
+    private long nNodesSelected;
+
+    /**
+     * The accumulated number of nodes scanned in order to select the eviction
+     * set.
+     */
+    private long nNodesScanned;
+
+    /**
+     * The accumulated number of nodes evicted.
+     */
+    private long nNodesExplicitlyEvicted;
+
+    /**
+     * The accumulated number of database root nodes evicted.
+     */
+    private long nRootNodesEvicted;
+
+    /**
+     * The number of BINs stripped by the evictor.
+     */
+    private long nBINsStripped;
+
+    /**
+     * The number of bytes we need to evict in order to get under budget.
+     */
+    private long requiredEvictBytes;
+
+    /* Checkpointer */
+
+    /**
+     * The total number of checkpoints run so far.
+     */
+    private long nCheckpoints;
+
+    /**
+     * The Id of the last checkpoint.
+     */
+    private long lastCheckpointId;
+
+    /**
+     * The accumulated number of full INs flushed to the log.
+     */
+    private long nFullINFlush;
+
+    /**
+     * The accumulated number of full BINs flushed to the log.
+     */
+    private long nFullBINFlush;
+
+    /**
+     * The accumulated number of Delta INs flushed to the log.
+     */
+    private long nDeltaINFlush;
+
+    /**
+     * The location in the log of the last checkpoint start.
+     */
+    private long lastCheckpointStart;
+
+    /**
+     * The location in the log of the last checkpoint end.
+     */
+    private long lastCheckpointEnd;
+
+    /**
+     * The location of the next entry to be written to the log.
+     */
+    private long endOfLog;
+
+    /* Cleaner */
+
+    /** The number of files to be cleaned to reach the target utilization. */
+    private int cleanerBacklog;
+
+    /** The number of cleaner runs this session. */
+    private long nCleanerRuns;
+
+    /** The number of cleaner file deletions this session. */
+    private long nCleanerDeletions;
+
+    /**
+     * The accumulated number of INs obsolete.
+     */
+    private long nINsObsolete;
+
+    /**
+     * The accumulated number of INs cleaned.
+     */
+    private long nINsCleaned;
+
+    /**
+     * The accumulated number of INs that were not found in the tree anymore
+     * (deleted).
+     */
+    private long nINsDead;
+
+    /**
+     * The accumulated number of INs migrated.
+     */
+    private long nINsMigrated;
+
+    /**
+     * The accumulated number of LNs obsolete.
+     */
+    private long nLNsObsolete;
+
+    /**
+     * The accumulated number of LNs cleaned.
+     */
+    private long nLNsCleaned;
+
+    /**
+     * The accumulated number of LNs that were not found in the tree anymore
+     * (deleted).
+     */
+    private long nLNsDead;
+
+    /**
+     * The accumulated number of LNs encountered that were locked.
+     */
+    private long nLNsLocked;
+
+    /**
+     * The accumulated number of LNs encountered that were migrated forward
+     * in the log.
+     */
+    private long nLNsMigrated;
+
+    /**
+     * The accumulated number of LNs that were marked for migration during
+     * cleaning.
+     */
+    private long nLNsMarked;
+
+    /**
+     * The accumulated number of LNs processed without a tree lookup.
+     */
+    private long nLNQueueHits;
+
+    /**
+     * The accumulated number of LNs processed because they were previously
+     * locked.
+     */
+    private long nPendingLNsProcessed;
+
+    /**
+     * The accumulated number of LNs processed because they were previously
+     * marked for migration.
+     */
+    private long nMarkedLNsProcessed;
+
+    /**
+     * The accumulated number of LNs processed because they are soon to be
+     * cleaned.
+     */
+    private long nToBeCleanedLNsProcessed;
+
+    /**
+     * The accumulated number of LNs processed because they qualify for
+     * clustering.
+     */
+    private long nClusterLNsProcessed;
+
+    /**
+     * The accumulated number of pending LNs that could not be locked for
+     * migration because of a long duration application lock.
+     */
+    private long nPendingLNsLocked;
+
+    /**
+     * The accumulated number of log entries read by the cleaner.
+     */
+    private long nCleanerEntriesRead;
+
+    /*
+     * Cache
+     */
+    private int nSharedCacheEnvironments; // num of envs sharing the cache
+    private long sharedCacheTotalBytes;   // shared cache consumed, in bytes
+    private long cacheTotalBytes; // local cache consumed, in bytes
+    private long bufferBytes;  // cache consumed by the log buffers, in bytes
+    private long dataBytes;    // cache consumed by the Btree, in bytes
+    private long adminBytes;   // part of cache used by log cleaner metadata,
+                               // and other administrative structures
+    private long lockBytes;    // part of cache used by locks and txns
+    private long nNotResident; // had to be instantiated from an LSN
+    private long nCacheMiss;   // had to retrieve from disk
+    private int  nLogBuffers;  // number of existing log buffers
+
+    /*
+     * Random vs Sequential IO and byte counts.
+     */
+    private long nRandomReads;
+    private long nRandomWrites;
+    private long nSequentialReads;
+    private long nSequentialWrites;
+    private long nRandomReadBytes;
+    private long nRandomWriteBytes;
+    private long nSequentialReadBytes;
+    private long nSequentialWriteBytes;
+
+    /*
+     * Log activity
+     */
+    private long nFSyncs;        // Number of fsyncs issued. May be less than
+                                 // nFSyncRequests because of group commit.
+    private long nFSyncRequests; // Number of fsyncs requested.
+    private long nFSyncTimeouts; // Number of group fsync requests that
+                                 // turned into singleton fsyncs.
+    /*
+     * Number of reads which had to be repeated when faulting in an object from
+     * disk because the read chunk size controlled by je.log.faultReadSize is
+     * too small.
+     */
+    private long nRepeatFaultReads;
+
+    /*
+     * Number of times we have to use the temporary marshalling buffer to write
+     * to the log.
+     */
+    private long nTempBufferWrites;
+
+    /*
+     * Number of times we try to read a log entry larger than the read buffer
+     * size and can't grow the log buffer to accommodate the large object. This
+     * happens during scans of the log during activities like environment open
+     * or log cleaning. This implies that the read chunk size controlled by
+     * je.log.iteratorReadSize is too small.
+     */
+    private long nRepeatIteratorReads;
+
+    /* FileManager open file cache stats. */
+    private int nFileOpens;
+    private int nOpenFiles;
+
+    /*
+     * Approximation of the total log size in bytes.
+     */
+    private long totalLogSize;
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public EnvironmentStats() {
+        reset();
+    }
+
+    /**
+     * Resets all stats.
+     */
+    private void reset() {
+        // InCompressor
+        splitBins = 0;
+        dbClosedBins = 0;
+        cursorsBins = 0;
+        nonEmptyBins = 0;
+        processedBins = 0;
+        inCompQueueSize = 0;
+
+        // Evictor
+        nEvictPasses = 0;
+        nNodesSelected = 0;
+        nNodesScanned = 0;
+        nNodesExplicitlyEvicted = 0;
+        nRootNodesEvicted = 0;
+        nBINsStripped = 0;
+        requiredEvictBytes = 0;
+
+        // Checkpointer
+        nCheckpoints = 0;
+        lastCheckpointId = 0;
+        nFullINFlush = 0;
+        nFullBINFlush = 0;
+        nDeltaINFlush = 0;
+        lastCheckpointStart = DbLsn.NULL_LSN;
+        lastCheckpointEnd = DbLsn.NULL_LSN;
+	endOfLog = DbLsn.NULL_LSN;
+
+        // Cleaner
+        cleanerBacklog = 0;
+        nCleanerRuns = 0;
+        nCleanerDeletions = 0;
+        nINsObsolete = 0;
+        nINsCleaned = 0;
+        nINsDead = 0;
+        nINsMigrated = 0;
+        nLNsObsolete = 0;
+        nLNsCleaned = 0;
+        nLNsDead = 0;
+        nLNsLocked = 0;
+        nLNsMigrated = 0;
+        nLNsMarked = 0;
+        nLNQueueHits = 0;
+        nPendingLNsProcessed = 0;
+        nMarkedLNsProcessed = 0;
+        nToBeCleanedLNsProcessed = 0;
+        nClusterLNsProcessed = 0;
+        nPendingLNsLocked = 0;
+        nCleanerEntriesRead = 0;
+
+        // Cache
+        nSharedCacheEnvironments = 0;
+        sharedCacheTotalBytes = 0;
+        cacheTotalBytes = 0;
+        nNotResident = 0;
+        nCacheMiss = 0;
+        nLogBuffers = 0;
+        bufferBytes = 0;
+
+        // IO
+        nRandomReads = 0;
+        nRandomWrites = 0;
+        nSequentialReads = 0;
+        nSequentialWrites = 0;
+        nRandomReadBytes = 0;
+        nRandomWriteBytes = 0;
+        nSequentialReadBytes = 0;
+        nSequentialWriteBytes = 0;
+
+        // Log
+        nFSyncs = 0;
+        nFSyncRequests = 0;
+        nFSyncTimeouts = 0;
+        nRepeatFaultReads = 0;
+	nTempBufferWrites = 0;
+        nRepeatIteratorReads = 0;
+        nFileOpens = 0;
+        nOpenFiles = 0;
+        totalLogSize = 0;
+    }
+
+    /**
+     * The number of bins encountered by the INCompressor that had cursors
+     * referring to them when the compressor ran.
+     */
+    public long getCursorsBins() {
+        return cursorsBins;
+    }
+
+    /**
+     * The number of bins encountered by the INCompressor that had their
+     * database closed between the time they were put on the compressor queue
+     * and when the compressor ran.
+     */
+    public long getDbClosedBins() {
+        return dbClosedBins;
+    }
+
+    /**
+     * The number of entries in the INCompressor queue when the getStats()
+     * call was made.
+     */
+    public long getInCompQueueSize() {
+        return inCompQueueSize;
+    }
+
+    /**
+     * The Id of the last checkpoint.
+     */
+    public long getLastCheckpointId() {
+        return lastCheckpointId;
+    }
+
+    /**
+     * The total number of requests for database objects which were not in
+     * memory.
+     */
+    public long getNCacheMiss() {
+        return nCacheMiss;
+    }
+
+    /**
+     * The total number of checkpoints run so far.
+     */
+    public long getNCheckpoints() {
+        return nCheckpoints;
+    }
+
+    /**
+     * The number of files to be cleaned to reach the target utilization.
+     */
+    public int getCleanerBacklog() {
+        return cleanerBacklog;
+    }
+
+    /**
+     * The number of cleaner runs this session.
+     */
+    public long getNCleanerRuns() {
+        return nCleanerRuns;
+    }
+
+    /**
+     * The number of cleaner file deletions this session.
+     */
+    public long getNCleanerDeletions() {
+        return nCleanerDeletions;
+    }
+
+    /**
+     * The accumulated number of Delta INs flushed to the log.
+     */
+    public long getNDeltaINFlush() {
+        return nDeltaINFlush;
+    }
+
+    /**
+     * The location in the log of the last checkpoint end.
+     */
+    public long getLastCheckpointEnd() {
+        return lastCheckpointEnd;
+    }
+
+    /**
+     * The location of the next entry to be written to the log.
+     *
+     * <p>Note that the log entries prior to this position may not yet have
+     * been flushed to disk.  Flushing can be forced using a Sync or
+     * WriteNoSync commit, or a checkpoint.</p>
+     */
+    public long getEndOfLog() {
+        return endOfLog;
+    }
+
+    /**
+     * The location in the log of the last checkpoint start.
+     */
+    public long getLastCheckpointStart() {
+        return lastCheckpointStart;
+    }
+
+    /**
+     * The accumulated number of log entries read by the cleaner.
+     */
+    public long getNCleanerEntriesRead() {
+        return nCleanerEntriesRead;
+    }
+
+    /**
+     * The number of passes made to the evictor.
+     */
+    public long getNEvictPasses() {
+        return nEvictPasses;
+    }
+
+    /**
+     * The number of fsyncs issued through the group commit manager.
+     */
+    public long getNFSyncs() {
+        return nFSyncs;
+    }
+
+    /**
+     * The number of fsyncs requested through the group commit manager.
+     */
+    public long getNFSyncRequests() {
+        return nFSyncRequests;
+    }
+
+    /**
+     * The number of fsync requests submitted to the group commit manager which
+     * timed out.
+     */
+    public long getNFSyncTimeouts() {
+        return nFSyncTimeouts;
+    }
+
+    /**
+     * The accumulated number of full INs flushed to the log.
+     */
+    public long getNFullINFlush() {
+        return nFullINFlush;
+    }
+
+    /**
+     * The accumulated number of full BINs flushed to the log.
+     */
+    public long getNFullBINFlush() {
+        return nFullBINFlush;
+    }
+
+    /**
+     * The accumulated number of INs obsolete.
+     */
+    public long getNINsObsolete() {
+        return nINsObsolete;
+    }
+
+    /**
+     * The accumulated number of INs cleaned.
+     */
+    public long getNINsCleaned() {
+        return nINsCleaned;
+    }
+
+    /**
+     * The accumulated number of INs that were not found in the tree anymore
+     * (deleted).
+     */
+    public long getNINsDead() {
+        return nINsDead;
+    }
+
+    /**
+     * The accumulated number of INs migrated.
+     */
+    public long getNINsMigrated() {
+        return nINsMigrated;
+    }
+
+    /**
+     * The accumulated number of LNs obsolete.
+     */
+    public long getNLNsObsolete() {
+        return nLNsObsolete;
+    }
+
+    /**
+     * The accumulated number of LNs cleaned.
+     */
+    public long getNLNsCleaned() {
+        return nLNsCleaned;
+    }
+
+    /**
+     * The accumulated number of LNs that were not found in the tree anymore
+     * (deleted).
+     */
+    public long getNLNsDead() {
+        return nLNsDead;
+    }
+
+    /**
+     * The accumulated number of LNs encountered that were locked.
+     */
+    public long getNLNsLocked() {
+        return nLNsLocked;
+    }
+
+    /**
+     * The accumulated number of LNs encountered that were migrated forward in
+     * the log.
+     */
+    public long getNLNsMigrated() {
+        return nLNsMigrated;
+    }
+
+    /**
+     * The accumulated number of LNs that were marked for migration during
+     * cleaning.
+     */
+    public long getNLNsMarked() {
+        return nLNsMarked;
+    }
+
+    /**
+     * The accumulated number of LNs processed without a tree lookup.
+     */
+    public long getNLNQueueHits() {
+        return nLNQueueHits;
+    }
+
+    /**
+     * The accumulated number of LNs processed because they were previously
+     * locked.
+     */
+    public long getNPendingLNsProcessed() {
+        return nPendingLNsProcessed;
+    }
+
+    /**
+     * The accumulated number of LNs processed because they were previously
+     * marked for migration.
+     */
+    public long getNMarkedLNsProcessed() {
+        return nMarkedLNsProcessed;
+    }
+
+    /**
+     * The accumulated number of LNs processed because they are soon to be
+     * cleaned.
+     */
+    public long getNToBeCleanedLNsProcessed() {
+        return nToBeCleanedLNsProcessed;
+    }
+
+    /**
+     * The accumulated number of LNs processed because they qualify for
+     * clustering.
+     */
+    public long getNClusterLNsProcessed() {
+        return nClusterLNsProcessed;
+    }
+
+    /**
+     * The accumulated number of pending LNs that could not be locked for
+     * migration because of a long duration application lock.
+     */
+    public long getNPendingLNsLocked() {
+        return nPendingLNsLocked;
+    }
+
+    /**
+     * The number of log buffers currently instantiated.
+     */
+    public int getNLogBuffers() {
+        return nLogBuffers;
+    }
+
+    /**
+     * The number of disk reads which required repositioning the disk head
+     * more than 1MB from the previous file position.  Reads in a different
+     * *.jdb log file than the last IO constitute a random read.
+     * <p>
+     * This number is approximate and may differ from the actual number of
+     * random disk reads depending on the type of disks and file system, disk
+     * geometry, and file system cache size.
+     */
+    public long getNRandomReads() {
+        return nRandomReads;
+    }
+
+    /**
+     * The number of bytes read which required repositioning the disk head
+     * more than 1MB from the previous file position.  Reads in a different
+     * *.jdb log file than the last IO constitute a random read.
+     * <p>
+     * This number is approximate and may vary depending on the type of disks
+     * and file system, disk geometry, and file system cache size.
+     */
+    public long getNRandomReadBytes() {
+        return nRandomReadBytes;
+    }
+
+    /**
+     * The number of disk writes which required repositioning the disk head by
+     * more than 1MB from the previous file position.  Writes to a different
+     * *.jdb log file (i.e. a file "flip") than the last IO constitute a random
+     * write.
+     * <p>
+     * This number is approximate and may differ from the actual number of
+     * random disk writes depending on the type of disks and file system, disk
+     * geometry, and file system cache size.
+     */
+    public long getNRandomWrites() {
+        return nRandomWrites;
+    }
+
+    /**
+     * The number of bytes written which required repositioning the disk head
+     * more than 1MB from the previous file position.  Writes in a different
+     * *.jdb log file than the last IO constitute a random write.
+     * <p>
+     * This number is approximate and may vary depending on the type of disks
+     * and file system, disk geometry, and file system cache size.
+     */
+    public long getNRandomWriteBytes() {
+        return nRandomWriteBytes;
+    }
+
+    /**
+     * The number of disk reads which did not require repositioning the disk
+     * head more than 1MB from the previous file position.  Reads in a
+     * different *.jdb log file than the last IO constitute a random read.
+     * <p>
+     * This number is approximate and may differ from the actual number of
+     * sequential disk reads depending on the type of disks and file system,
+     * disk geometry, and file system cache size.
+     */
+    public long getNSequentialReads() {
+        return nSequentialReads;
+    }
+
+    /**
+     * The number of bytes read which did not require repositioning the disk
+     * head more than 1MB from the previous file position.  Reads in a
+     * different *.jdb log file than the last IO constitute a random read.
+     * <p>
+     * This number is approximate and may vary depending on the type of disks
+     * and file system, disk geometry, and file system cache size.
+     */
+    public long getNSequentialReadBytes() {
+        return nSequentialReadBytes;
+    }
+
+    /**
+     * The number of disk writes which did not require repositioning the disk
+     * head by more than 1MB from the previous file position.  Writes to a
+     * different *.jdb log file (i.e. a file "flip") than the last IO
+     * constitute a random write.
+     * <p>
+     * This number is approximate and may differ from the actual number of
+     * sequential disk writes depending on the type of disks and file system,
+     * disk geometry, and file system cache size.
+     */
+    public long getNSequentialWrites() {
+        return nSequentialWrites;
+    }
+
+    /**
+     * The number of bytes written which did not require repositioning the
+     * disk head more than 1MB from the previous file position.  Writes in a
+     * different *.jdb log file than the last IO constitute a random write.
+     * <p>
+     * This number is approximate and may vary depending on the type of disks
+     * and file system, disk geometry, and file system cache size.
+     */
+    public long getNSequentialWriteBytes() {
+        return nSequentialWriteBytes;
+    }
+
+    /**
+     * The accumulated number of nodes evicted.
+     */
+    public long getNNodesExplicitlyEvicted() {
+        return nNodesExplicitlyEvicted;
+    }
+
+    /**
+     * The accumulated number of database root nodes evicted.
+     */
+    public long getNRootNodesEvicted() {
+        return nRootNodesEvicted;
+    }
+
+    /**
+     * The number of BINs stripped by the evictor.
+     */
+    public long getNBINsStripped() {
+        return nBINsStripped;
+    }
+
+    /**
+     * The number of bytes that must be evicted in order to get within the
+     * memory budget.
+     */
+    public long getRequiredEvictBytes() {
+        return requiredEvictBytes;
+    }
+
+    /**
+     * The accumulated number of nodes scanned in order to select the
+     * eviction set.
+     */
+    public long getNNodesScanned() {
+        return nNodesScanned;
+    }
+
+    /**
+     * The accumulated number of nodes selected to evict.
+     */
+    public long getNNodesSelected() {
+        return nNodesSelected;
+    }
+
+    /**
+     * The number of environments using the shared cache.  This method says
+     * nothing about whether this environment is using the shared cache or not.
+     */
+    public int getNSharedCacheEnvironments() {
+        return nSharedCacheEnvironments;
+    }
+
+    /**
+     * The total amount of the shared JE cache in use, in bytes.  If this
+     * environment uses the shared cache, this method returns the total amount
+     * used by all environments that are sharing the cache.  If this
+     * environment does not use the shared cache, this method returns zero.
+     *
+     * <p>To get the configured maximum cache size, see {@link
+     * EnvironmentMutableConfig#getCacheSize}.</p>
+     */
+    public long getSharedCacheTotalBytes() {
+        return sharedCacheTotalBytes;
+    }
+
+    /**
+     * The total amount of JE cache in use, in bytes.  If this environment uses
+     * the shared cache, this method returns only the amount used by this
+     * environment.
+     *
+     * <p>This method returns the sum of {@link #getDataBytes}, {@link
+     * #getAdminBytes}, {@link #getLockBytes} and {@link #getBufferBytes}.</p>
+     *
+     * <p>To get the configured maximum cache size, see {@link
+     * EnvironmentMutableConfig#getCacheSize}.</p>
+     */
+    public long getCacheTotalBytes() {
+        return cacheTotalBytes;
+    }
+
+    /**
+     * The total memory currently consumed by log buffers, in bytes.  If this
+     * environment uses the shared cache, this method returns only the amount
+     * used by this environment.
+     */
+    public long getBufferBytes() {
+        return bufferBytes;
+    }
+
+    /**
+     * The amount of JE cache used for holding data, keys and internal Btree
+     * nodes, in bytes.  If this environment uses the shared cache, this method
+     * returns only the amount used by this environment.
+     */
+    public long getDataBytes() {
+        return dataBytes;
+    }
+
+    /**
+     * The number of bytes of JE cache used for log cleaning metadata and other
+     * administrative structures.  If this environment uses the shared cache,
+     * this method returns only the amount used by this environment.
+     */
+    public long getAdminBytes() {
+        return adminBytes;
+    }
+
+    /**
+     * The number of bytes of JE cache used for holding locks and transactions.
+     * If this environment uses the shared cache, this method returns only the
+     * amount used by this environment.
+     */
+    public long getLockBytes() {
+        return lockBytes;
+    }
+
+    /**
+     * The amount of JE cache used for all items except for the log buffers, in
+     * bytes.  If this environment uses the shared cache, this method returns
+     * only the amount used by this environment.
+     *
+     * @deprecated Please use {@link #getDataBytes} to get the amount of cache
+     * used for data and use {@link #getAdminBytes}, {@link #getLockBytes} and
+     * {@link #getBufferBytes} to get other components of the total cache usage
+     * ({@link #getCacheTotalBytes}).
+     */
+    public long getCacheDataBytes() {
+        return cacheTotalBytes - bufferBytes;
+    }
+
+    /**
+     * The number of requests for database objects not contained within the
+     * in-memory data structures.
+     */
+    public long getNNotResident() {
+        return nNotResident;
+    }
+
+    /**
+     * The number of bins encountered by the INCompressor that were not
+     * actually empty when the compressor ran.
+     */
+    public long getNonEmptyBins() {
+        return nonEmptyBins;
+    }
+
+    /**
+     * The number of bins that were successfully processed by the IN
+     * Compressor.
+     */
+    public long getProcessedBins() {
+        return processedBins;
+    }
+
+    /**
+     * The number of reads which had to be repeated when faulting in an object
+     * from disk because the read chunk size controlled by je.log.faultReadSize
+     * is too small.
+     */
+    public long getNRepeatFaultReads() {
+        return nRepeatFaultReads;
+    }
+
+    /**
+     * The number of writes which had to be completed using the temporary
+     * marshalling buffer because the fixed size log buffers specified by
+     * je.log.totalBufferBytes and je.log.numBuffers were not large enough.
+     */
+    public long getNTempBufferWrites() {
+        return nTempBufferWrites;
+    }
+
+    /**
+     * The number of times we try to read a log entry larger than the read
+     * buffer size and can't grow the log buffer to accommodate the large
+     * object. This happens during scans of the log during activities like
+     * environment open or log cleaning. Implies that the read chunk size
+     * controlled by je.log.iteratorReadSize is too small.
+     */
+    public long getNRepeatIteratorReads() {
+        return nRepeatIteratorReads;
+    }
+
+    /**
+     * The number of times a log file has been opened.
+     */
+    public int getNFileOpens() {
+        return nFileOpens;
+    }
+
+    /**
+     * The number of files currently open in the file cache.
+     */
+    public int getNOpenFiles() {
+        return nOpenFiles;
+    }
+
+    /**
+     * An approximation of the current total log size in bytes.
+     */
+    public long getTotalLogSize() {
+        return totalLogSize;
+    }
+
+    /**
+     * The number of bins encountered by the INCompressor that were split
+     * between the time they were put on the compressor queue and when the
+     * compressor ran.
+     */
+    public long getSplitBins() {
+        return splitBins;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNSharedCacheEnvironments(int nSharedCacheEnvironments) {
+        this.nSharedCacheEnvironments = nSharedCacheEnvironments;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setSharedCacheTotalBytes(long sharedCacheTotalBytes) {
+        this.sharedCacheTotalBytes = sharedCacheTotalBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setCacheTotalBytes(long cacheTotalBytes) {
+        this.cacheTotalBytes = cacheTotalBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDataBytes(long dataBytes) {
+        this.dataBytes = dataBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setAdminBytes(long adminBytes) {
+        this.adminBytes = adminBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLockBytes(long lockBytes) {
+        this.lockBytes = lockBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNNotResident(long nNotResident) {
+        this.nNotResident = nNotResident;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCacheMiss(long nCacheMiss) {
+        this.nCacheMiss = nCacheMiss;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLogBuffers(int nLogBuffers) {
+        this.nLogBuffers = nLogBuffers;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setBufferBytes(long bufferBytes) {
+        this.bufferBytes = bufferBytes;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setCursorsBins(long val) {
+        cursorsBins = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setDbClosedBins(long val) {
+        dbClosedBins = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setInCompQueueSize(long val) {
+        inCompQueueSize = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLastCheckpointId(long l) {
+        lastCheckpointId = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCheckpoints(long val) {
+        nCheckpoints = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setCleanerBacklog(int val) {
+        cleanerBacklog = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCleanerRuns(long val) {
+        nCleanerRuns = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCleanerDeletions(long val) {
+        nCleanerDeletions = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNDeltaINFlush(long val) {
+        nDeltaINFlush = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLastCheckpointEnd(long lsn) {
+        lastCheckpointEnd = lsn;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setEndOfLog(long lsn) {
+        endOfLog = lsn;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLastCheckpointStart(long lsn) {
+        lastCheckpointStart = lsn;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCleanerEntriesRead(long val) {
+        nCleanerEntriesRead = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNEvictPasses(long val) {
+        nEvictPasses = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFSyncs(long val) {
+        nFSyncs = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFSyncRequests(long val) {
+        nFSyncRequests = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFSyncTimeouts(long val) {
+        nFSyncTimeouts = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFullINFlush(long val) {
+        nFullINFlush = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFullBINFlush(long val) {
+        nFullBINFlush = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNINsObsolete(long val) {
+        nINsObsolete = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNINsCleaned(long val) {
+        nINsCleaned = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNINsDead(long val) {
+        nINsDead = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNINsMigrated(long val) {
+        nINsMigrated = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsObsolete(long val) {
+        nLNsObsolete = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsCleaned(long val) {
+        nLNsCleaned = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsDead(long val) {
+        nLNsDead = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsLocked(long val) {
+        nLNsLocked = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsMigrated(long val) {
+        nLNsMigrated = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNsMarked(long val) {
+        nLNsMarked = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNLNQueueHits(long val) {
+        nLNQueueHits = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNPendingLNsProcessed(long val) {
+        nPendingLNsProcessed = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNMarkedLNsProcessed(long val) {
+        nMarkedLNsProcessed = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNToBeCleanedLNsProcessed(long val) {
+        nToBeCleanedLNsProcessed = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRandomReads(long val) {
+        nRandomReads = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRandomWrites(long val) {
+        nRandomWrites = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNSequentialReads(long val) {
+        nSequentialReads = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNSequentialWrites(long val) {
+        nSequentialWrites = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRandomReadBytes(long val) {
+        nRandomReadBytes = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRandomWriteBytes(long val) {
+        nRandomWriteBytes = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNSequentialReadBytes(long val) {
+        nSequentialReadBytes = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNSequentialWriteBytes(long val) {
+        nSequentialWriteBytes = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNClusterLNsProcessed(long val) {
+        nClusterLNsProcessed = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNPendingLNsLocked(long val) {
+        nPendingLNsLocked = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNNodesExplicitlyEvicted(long l) {
+        nNodesExplicitlyEvicted = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRootNodesEvicted(long l) {
+        nRootNodesEvicted = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setRequiredEvictBytes(long l) {
+        requiredEvictBytes = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNBINsStripped(long l) {
+        nBINsStripped = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNNodesScanned(long l) {
+        nNodesScanned = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNNodesSelected(long l) {
+        nNodesSelected = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNonEmptyBins(long val) {
+        nonEmptyBins = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setProcessedBins(long val) {
+        processedBins = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRepeatFaultReads(long val) {
+        nRepeatFaultReads = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNTempBufferWrites(long val) {
+        nTempBufferWrites = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRepeatIteratorReads(long val) {
+        nRepeatIteratorReads = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNFileOpens(int val) {
+        nFileOpens = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNOpenFiles(int val) {
+        nOpenFiles = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setTotalLogSize(long val) {
+        totalLogSize = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setSplitBins(long val) {
+        splitBins = val;
+    }
+
+    /**
+     * Returns a String representation of the stats in the form of
+     * &lt;stat&gt;=&lt;value&gt;
+     */
+    @Override
+    public String toString() {
+        DecimalFormat f = new DecimalFormat("###,###,###,###,###,###,###");
+
+        StringBuffer sb = new StringBuffer();
+        sb.append("\nCompression stats\n");
+        sb.append("splitBins=").append(f.format(splitBins)).append('\n');
+        sb.append("dbClosedBins=").append(f.format(dbClosedBins)).append('\n');
+        sb.append("cursorsBins=").append(f.format(cursorsBins)).append('\n');
+        sb.append("nonEmptyBins=").append(f.format(nonEmptyBins)).append('\n');
+        sb.append("processedBins=").
+            append(f.format(processedBins)).append('\n');
+        sb.append("inCompQueueSize=").
+            append(f.format(inCompQueueSize)).append('\n');
+
+        // Evictor
+        sb.append("\nEviction stats\n");
+        sb.append("nEvictPasses=").append(f.format(nEvictPasses)).append('\n');
+        sb.append("nNodesSelected=").
+            append(f.format(nNodesSelected)).append('\n');
+        sb.append("nNodesScanned=").
+            append(f.format(nNodesScanned)).append('\n');
+        sb.append("nNodesExplicitlyEvicted=").
+           append(f.format(nNodesExplicitlyEvicted)).append('\n');
+        sb.append("nRootNodesEvicted=").
+           append(f.format(nRootNodesEvicted)).append('\n');
+        sb.append("nBINsStripped=").
+            append(f.format(nBINsStripped)).append('\n');
+        sb.append("requiredEvictBytes=").
+            append(f.format(requiredEvictBytes)).append('\n');
+
+        // Checkpointer
+        sb.append("\nCheckpoint stats\n");
+        sb.append("nCheckpoints=").append(f.format(nCheckpoints)).append('\n');
+        sb.append("lastCheckpointId=").
+            append(f.format(lastCheckpointId)).append('\n');
+        sb.append("nFullINFlush=").append(f.format(nFullINFlush)).append('\n');
+        sb.append("nFullBINFlush=").
+            append(f.format(nFullBINFlush)).append('\n');
+        sb.append("nDeltaINFlush=").
+            append(f.format(nDeltaINFlush)).append('\n');
+        sb.append("lastCheckpointStart=").
+           append(DbLsn.getNoFormatString(lastCheckpointStart)).append('\n');
+        sb.append("lastCheckpointEnd=").
+           append(DbLsn.getNoFormatString(lastCheckpointEnd)).append('\n');
+        sb.append("endOfLog=").
+           append(DbLsn.getNoFormatString(endOfLog)).append('\n');
+
+        // Cleaner
+        sb.append("\nCleaner stats\n");
+        sb.append("cleanerBacklog=").
+            append(f.format(cleanerBacklog)).append('\n');
+        sb.append("nCleanerRuns=").
+            append(f.format(nCleanerRuns)).append('\n');
+        sb.append("nCleanerDeletions=").
+            append(f.format(nCleanerDeletions)).append('\n');
+        sb.append("nINsObsolete=").append(f.format(nINsObsolete)).append('\n');
+        sb.append("nINsCleaned=").append(f.format(nINsCleaned)).append('\n');
+        sb.append("nINsDead=").append(f.format(nINsDead)).append('\n');
+        sb.append("nINsMigrated=").append(f.format(nINsMigrated)).append('\n');
+        sb.append("nLNsObsolete=").append(f.format(nLNsObsolete)).append('\n');
+        sb.append("nLNsCleaned=").append(f.format(nLNsCleaned)).append('\n');
+        sb.append("nLNsDead=").append(f.format(nLNsDead)).append('\n');
+        sb.append("nLNsLocked=").append(f.format(nLNsLocked)).append('\n');
+        sb.append("nLNsMigrated=").append(f.format(nLNsMigrated)).append('\n');
+        sb.append("nLNsMarked=").append(f.format(nLNsMarked)).append('\n');
+        sb.append("nLNQueueHits=").
+            append(f.format(nLNQueueHits)).append('\n');
+        sb.append("nPendingLNsProcessed=").
+            append(f.format(nPendingLNsProcessed)).append('\n');
+        sb.append("nMarkedLNsProcessed=").
+            append(f.format(nMarkedLNsProcessed)).append('\n');
+        sb.append("nToBeCleanedLNsProcessed=").
+            append(f.format(nToBeCleanedLNsProcessed)).append('\n');
+        sb.append("nClusterLNsProcessed=").
+            append(f.format(nClusterLNsProcessed)).append('\n');
+        sb.append("nPendingLNsLocked=").
+            append(f.format(nPendingLNsLocked)).append('\n');
+        sb.append("nCleanerEntriesRead=").
+            append(f.format(nCleanerEntriesRead)).append('\n');
+
+        // Cache
+        sb.append("\nCache stats\n");
+        sb.append("nNotResident=").append(f.format(nNotResident)).append('\n');
+        sb.append("nCacheMiss=").append(f.format(nCacheMiss)).append('\n');
+        sb.append("nLogBuffers=").append(f.format(nLogBuffers)).append('\n');
+        sb.append("bufferBytes=").append(f.format(bufferBytes)).append('\n');
+        sb.append("dataBytes=").append(f.format(dataBytes)).append('\n');
+        sb.append("adminBytes=").append(f.format(adminBytes)).append('\n');
+        sb.append("lockBytes=").append(f.format(lockBytes)).append('\n');
+        sb.append("cacheTotalBytes=").
+            append(f.format(cacheTotalBytes)).append('\n');
+        sb.append("sharedCacheTotalBytes=").
+            append(f.format(sharedCacheTotalBytes)).append('\n');
+        sb.append("nSharedCacheEnvironments=").
+            append(f.format(nSharedCacheEnvironments)).append('\n');
+
+        // IO
+        sb.append("\nIO Stats\n");
+        sb.append("nRandomReads=").append(f.format(nRandomReads)).append('\n');
+        sb.append("nRandomWrites=").append(f.format(nRandomWrites)).
+            append('\n');
+        sb.append("nSequentialReads=").append(f.format(nSequentialReads)).
+            append('\n');
+        sb.append("nSequentialWrites=").append(f.format(nSequentialWrites)).
+            append('\n');
+        sb.append("nRandomReadBytes=").append(f.format(nRandomReadBytes)).
+            append('\n');
+        sb.append("nRandomWriteBytes=").append(f.format(nRandomWriteBytes)).
+            append('\n');
+        sb.append("nSequentialReadBytes=").
+            append(f.format(nSequentialReadBytes)).append('\n');
+        sb.append("nSequentialWriteBytes=").
+            append(f.format(nSequentialWriteBytes)).append('\n');
+
+        // Logging
+        sb.append("\nLogging stats\n");
+        sb.append("nFSyncs=").append(f.format(nFSyncs)).append('\n');
+        sb.append("nFSyncRequests=").
+            append(f.format(nFSyncRequests)).append('\n');
+        sb.append("nFSyncTimeouts=").
+            append(f.format(nFSyncTimeouts)).append('\n');
+        sb.append("nRepeatFaultReads=").
+            append(f.format(nRepeatFaultReads)).append('\n');
+        sb.append("nTempBufferWrites=").
+            append(f.format(nTempBufferWrites)).append('\n');
+        sb.append("nRepeatIteratorReads=").
+            append(f.format(nRepeatIteratorReads)).append('\n');
+        sb.append("nFileOpens=").
+            append(f.format(nFileOpens)).append('\n');
+        sb.append("nOpenFiles=").
+            append(f.format(nOpenFiles)).append('\n');
+        sb.append("totalLogSize=").
+            append(f.format(totalLogSize)).append('\n');
+
+        return sb.toString();
+    }
+}
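+
+/*
+ * A minimal usage sketch, not part of the original source: obtaining these
+ * statistics from an open Environment.  It assumes Environment.getStats and
+ * StatsConfig.setClear behave as documented for this release; the dumpStats
+ * method and its output format are purely illustrative.
+ */
+class EnvironmentStatsUsageExample {
+
+    static void dumpStats(Environment env)
+        throws DatabaseException {
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);      /* reset counters after reading */
+        EnvironmentStats stats = env.getStats(statsConfig);
+
+        /* The total cache usage is the sum of its four components. */
+        long componentSum = stats.getDataBytes() + stats.getAdminBytes() +
+                            stats.getLockBytes() + stats.getBufferBytes();
+        System.out.println("cacheTotalBytes=" + stats.getCacheTotalBytes() +
+                           " (component sum=" + componentSum + ")");
+        System.out.println("nCacheMiss=" + stats.getNCacheMiss());
+
+        /* toString prints the full <stat>=<value> listing. */
+        System.out.println(stats);
+    }
+}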
diff --git a/src/com/sleepycat/je/ExceptionEvent.java b/src/com/sleepycat/je/ExceptionEvent.java
new file mode 100644
index 0000000000000000000000000000000000000000..903f2da34a6b155acc075f8dff2d7c3516a6a7e0
--- /dev/null
+++ b/src/com/sleepycat/je/ExceptionEvent.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionEvent.java,v 1.10.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * A class representing an exception event.  Contains an exception and the name
+ * of the daemon thread that it was thrown from.
+ */
+public class ExceptionEvent {
+
+    private Exception exception;
+    private String threadName;
+
+    public ExceptionEvent(Exception exception, String threadName) {
+	this.exception = exception;
+	this.threadName = threadName;
+    }
+
+    public ExceptionEvent(Exception exception) {
+	this.exception = exception;
+	this.threadName = Thread.currentThread().toString();
+    }
+
+    /**
+     * Returns the exception in the event.
+     */
+    public Exception getException() {
+	return exception;
+    }
+
+    /**
+     * Returns the name of the daemon thread that threw the exception.
+     */
+    public String getThreadName() {
+	return threadName;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("<ExceptionEvent exception=\"");
+        sb.append(exception);
+        sb.append("\" threadName=\"");
+        sb.append(threadName);
+        sb.append("\">");
+        sb.append(Tracer.getStackTrace(exception));
+        return sb.toString();
+    }
+}
+
diff --git a/src/com/sleepycat/je/ExceptionListener.java b/src/com/sleepycat/je/ExceptionListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..50bd4c951c5f1c1c2464c7b8d90ddee0c549f0bb
--- /dev/null
+++ b/src/com/sleepycat/je/ExceptionListener.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionListener.java,v 1.6.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * A callback to notify the application program when an exception occurs in a
+ * JE Daemon thread.
+ */
+public interface ExceptionListener {
+
+    /**
+     * This method is called if an exception is seen in a JE Daemon thread.
+     *
+     * @param event the ExceptionEvent representing the exception that was
+     * thrown.
+     */
+    void exceptionThrown(ExceptionEvent event);
+}
+
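+/*
+ * A minimal sketch, not part of the original source, of an application
+ * supplied listener.  Registration through
+ * EnvironmentConfig.setExceptionListener is assumed; the class name and the
+ * System.err reporting are purely illustrative.
+ */
+class LoggingExceptionListener implements ExceptionListener {
+
+    public void exceptionThrown(ExceptionEvent event) {
+        /*
+         * Report the daemon thread name and the exception.  A real
+         * application might raise an alert or begin an orderly shutdown.
+         */
+        System.err.println("JE daemon thread " + event.getThreadName() +
+                           " threw " + event.getException());
+    }
+}
+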
diff --git a/src/com/sleepycat/je/ForeignKeyDeleteAction.java b/src/com/sleepycat/je/ForeignKeyDeleteAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..2117ce502d3b1ede138e77616c8bd6e7849536b0
--- /dev/null
+++ b/src/com/sleepycat/je/ForeignKeyDeleteAction.java
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyDeleteAction.java,v 1.11.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The action taken when a referenced record in the foreign key database is
+ * deleted.
+ *
+ * <p>The delete action applies to a secondary database that is configured to
+ * have a foreign key integrity constraint.  The delete action is specified by
+ * calling {@link SecondaryConfig#setForeignKeyDeleteAction}.</p>
+ *
+ * <p>When a record in the foreign key database is deleted, it is checked to
+ * see if it is referenced by any record in the associated secondary database.
+ * If the key is referenced, the delete action is applied.  By default, the
+ * delete action is {@link #ABORT}.</p>
+ *
+ * @see SecondaryConfig
+ */
+public class ForeignKeyDeleteAction {
+
+    private String name;
+
+    private ForeignKeyDeleteAction(String name) {
+	this.name = name;
+    }
+
+    /**
+     * When a referenced record in the foreign key database is deleted, abort
+     * the transaction by throwing a <code>DatabaseException</code>.
+     */
+    public final static ForeignKeyDeleteAction ABORT =
+	new ForeignKeyDeleteAction("ABORT");
+
+    /**
+     * When a referenced record in the foreign key database is deleted, delete
+     * the primary database record that references it.
+     */
+    public final static ForeignKeyDeleteAction CASCADE =
+	new ForeignKeyDeleteAction("CASCADE");
+
+    /**
+     * When a referenced record in the foreign key database is deleted, set the
+     * reference to null in the primary database record that references it,
+     * thereby deleting the secondary key.
+     *
+     * @see ForeignKeyNullifier
+     * @see ForeignMultiKeyNullifier
+     */
+    public final static ForeignKeyDeleteAction NULLIFY =
+	new ForeignKeyDeleteAction("NULLIFY");
+
+    @Override
+    public String toString() {
+	return "ForeignKeyDeleteAction." + name;
+    }
+}
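+
+/*
+ * A minimal sketch, not part of the original source, of configuring a
+ * secondary database with a foreign key constraint and the CASCADE delete
+ * action.  setForeignKeyDeleteAction is documented above; the
+ * setForeignKeyDatabase call, the keyCreator parameter and the helper class
+ * name are assumptions made for illustration.
+ */
+class ForeignKeyDeleteActionExample {
+
+    static SecondaryConfig cascadeConfig(Database foreignDb,
+                                         SecondaryKeyCreator keyCreator) {
+
+        SecondaryConfig config = new SecondaryConfig();
+        config.setAllowCreate(true);
+        config.setKeyCreator(keyCreator);
+
+        /*
+         * With CASCADE, deleting a record in foreignDb deletes the primary
+         * records that reference it instead of aborting the transaction.
+         */
+        config.setForeignKeyDatabase(foreignDb);
+        config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+        return config;
+    }
+}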
diff --git a/src/com/sleepycat/je/ForeignKeyNullifier.java b/src/com/sleepycat/je/ForeignKeyNullifier.java
new file mode 100644
index 0000000000000000000000000000000000000000..2112e2114342926e0d9677d102c8530a42528686
--- /dev/null
+++ b/src/com/sleepycat/je/ForeignKeyNullifier.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyNullifier.java,v 1.9.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The interface implemented for setting single-valued foreign keys to null.
+ *
+ * <p>A key nullifier is used with a secondary database that is configured to
+ * have a foreign key integrity constraint and a delete action of {@link
+ * ForeignKeyDeleteAction#NULLIFY}.  The key nullifier is specified by calling
+ * {@link SecondaryConfig#setForeignKeyNullifier}.</p>
+ *
+ * <p>When a referenced record in the foreign key database is deleted and the
+ * foreign key delete action is <code>NULLIFY</code>, the {@link
+ * ForeignKeyNullifier#nullifyForeignKey} method is called.  This method sets
+ * the foreign key reference to null in the datum of the primary database.  The
+ * primary database is then updated to contain the modified datum.  The result
+ * is that the secondary key is deleted.</p>
+ *
+ * This interface may be used along with {@link SecondaryKeyCreator} for
+ * many-to-one and one-to-one relationships.  It may <em>not</em> be used with
+ * {@link SecondaryMultiKeyCreator} because the secondary key is not passed as
+ * a parameter to the nullifyForeignKey method and this method would not know
+ * which key to nullify.  When using {@link SecondaryMultiKeyCreator}, use
+ * {@link ForeignMultiKeyNullifier} instead.
+ */
+public interface ForeignKeyNullifier {
+
+    /**
+     * Sets the foreign key reference to null in the datum of the primary
+     * database.
+     *
+     * @param secondary the database in which the foreign key integrity
+     * constraint is defined. This parameter is passed for informational
+     * purposes but is not commonly used.
+     *
+     * @param data the existing primary datum in which the foreign key
+     * reference should be set to null.  This parameter should be updated by
+     * this method if it returns true.
+     *
+     * @return true if the datum was modified, or false to indicate that the
+     * key is not present.
+     *
+     * @throws DatabaseException if an error occurs attempting to clear the key
+     * reference.
+     */
+    public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                     DatabaseEntry data)
+	throws DatabaseException;
+}
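+
+/*
+ * A minimal sketch, not part of the original source, of a nullifier for a
+ * record format in which the primary datum holds nothing but the foreign key
+ * reference.  The record format and the class name are assumptions made for
+ * illustration; applications must rewrite the datum using their own bindings.
+ */
+class SingleKeyNullifier implements ForeignKeyNullifier {
+
+    public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                     DatabaseEntry data)
+        throws DatabaseException {
+
+        if (data.getSize() == 0) {
+            /* No reference is present, so there is no key to nullify. */
+            return false;
+        }
+
+        /*
+         * Clearing the datum removes the reference.  Returning true tells JE
+         * to write the modified datum back to the primary database, which
+         * deletes the secondary key.
+         */
+        data.setData(new byte[0]);
+        return true;
+    }
+}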
diff --git a/src/com/sleepycat/je/ForeignKeyTrigger.java b/src/com/sleepycat/je/ForeignKeyTrigger.java
new file mode 100644
index 0000000000000000000000000000000000000000..0dad77c2356757b76b37abed4b9d7b97cc1ce4bb
--- /dev/null
+++ b/src/com/sleepycat/je/ForeignKeyTrigger.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyTrigger.java,v 1.8.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.txn.Locker;
+
+class ForeignKeyTrigger implements DatabaseTrigger {
+
+    private SecondaryDatabase secDb;
+
+    ForeignKeyTrigger(SecondaryDatabase secDb) {
+
+        this.secDb = secDb;
+    }
+
+    public void triggerAdded(Database db) {
+    }
+
+    public void triggerRemoved(Database db) {
+
+        secDb.clearForeignKeyTrigger();
+    }
+
+    public void databaseUpdated(Database db,
+                                Locker locker,
+                                DatabaseEntry priKey,
+                                DatabaseEntry oldData,
+                                DatabaseEntry newData)
+        throws DatabaseException {
+
+        if (newData == null) {
+            secDb.onForeignKeyDelete(locker, priKey);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/ForeignMultiKeyNullifier.java b/src/com/sleepycat/je/ForeignMultiKeyNullifier.java
new file mode 100644
index 0000000000000000000000000000000000000000..ccddefa7b9a546eca058b155022dcdf2eae7bd42
--- /dev/null
+++ b/src/com/sleepycat/je/ForeignMultiKeyNullifier.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignMultiKeyNullifier.java,v 1.8.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The interface implemented for setting multi-valued foreign keys to null.
+ *
+ * <p>A key nullifier is used with a secondary database that is configured to
+ * have a foreign key integrity constraint and a delete action of {@link
+ * ForeignKeyDeleteAction#NULLIFY}.  The key nullifier is specified by calling
+ * {@link SecondaryConfig#setForeignMultiKeyNullifier}.</p>
+ *
+ * <p>When a referenced record in the foreign key database is deleted and the
+ * foreign key delete action is <code>NULLIFY</code>, the {@link
+ * ForeignMultiKeyNullifier#nullifyForeignKey} method is called.  This method
+ * sets the foreign key reference to null in the datum of the primary
+ * database. The primary database is then updated to contain the modified
+ * datum.  The result is that the secondary key is deleted.</p>
+ *
+ * This interface may be used along with {@link SecondaryKeyCreator} or {@link
+ * SecondaryMultiKeyCreator} for many-to-many, one-to-many, many-to-one and
+ * one-to-one relationships.
+ */
+public interface ForeignMultiKeyNullifier {
+
+    /**
+     * Sets the foreign key reference to null in the datum of the primary
+     * database.
+     *
+     * @param secondary the database in which the foreign key integrity
+     * constraint is defined. This parameter is passed for informational
+     * purposes but is not commonly used.
+     *
+     * @param key the existing primary key.  This parameter is passed for
+     * informational purposes but is not commonly used.
+     *
+     * @param data the existing primary datum in which the foreign key
+     * reference should be set to null.  This parameter should be updated by
+     * this method if it returns true.
+     *
+     * @param secKey the secondary key to be nullified.  This parameter is
+     * needed for knowing which key to nullify when multiple keys are present,
+     * as when {@link SecondaryMultiKeyCreator} is used.
+     *
+     * @return true if the datum was modified, or false to indicate that the
+     * key is not present.
+     *
+     * @throws DatabaseException if an error occurs attempting to clear the key
+     * reference.
+     */
+    public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                     DatabaseEntry key,
+                                     DatabaseEntry data,
+                                     DatabaseEntry secKey)
+	throws DatabaseException;
+}
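+
+/*
+ * A minimal sketch, not part of the original source, of a multi-key
+ * nullifier for a record format in which the primary datum is a
+ * comma-separated list of foreign key strings.  The record format, the use
+ * of the platform default charset and the class name are assumptions made
+ * for illustration.
+ */
+class MultiKeyStringNullifier implements ForeignMultiKeyNullifier {
+
+    public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                     DatabaseEntry key,
+                                     DatabaseEntry data,
+                                     DatabaseEntry secKey)
+        throws DatabaseException {
+
+        String datum = new String(data.getData(), data.getOffset(),
+                                  data.getSize());
+        String deleted = new String(secKey.getData(), secKey.getOffset(),
+                                    secKey.getSize());
+
+        /* Rebuild the list, dropping only the key being nullified. */
+        StringBuffer remaining = new StringBuffer();
+        boolean found = false;
+        String[] refs = datum.split(",");
+        for (int i = 0; i < refs.length; i += 1) {
+            if (refs[i].equals(deleted)) {
+                found = true;
+            } else {
+                if (remaining.length() > 0) {
+                    remaining.append(',');
+                }
+                remaining.append(refs[i]);
+            }
+        }
+        if (!found) {
+            return false;    /* the secondary key was not present */
+        }
+        data.setData(remaining.toString().getBytes());
+        return true;
+    }
+}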
diff --git a/src/com/sleepycat/je/JEVersion.java b/src/com/sleepycat/je/JEVersion.java
new file mode 100644
index 0000000000000000000000000000000000000000..8da8256ebdc5e937b976876e992ff2127a942f3f
--- /dev/null
+++ b/src/com/sleepycat/je/JEVersion.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEVersion.java,v 1.111.2.33 2010/03/23 15:01:50 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Berkeley DB Java Edition version information.  Versions consist of major,
+ * minor and patch numbers.
+ *
+ * There is one JEVersion object per running JVM and it may be accessed using
+ * the static field JEVersion.CURRENT_VERSION.
+ */
+public class JEVersion {
+
+    /**
+     * Release version.
+     */
+    public static final JEVersion CURRENT_VERSION =
+        new JEVersion(3, 3, 98, null);
+
+    private int majorNum;
+    private int minorNum;
+    private int patchNum;
+    private String name;
+
+    private JEVersion(int majorNum, int minorNum, int patchNum, String name) {
+        this.majorNum = majorNum;
+        this.minorNum = minorNum;
+        this.patchNum = patchNum;
+        this.name = name;
+    }
+
+    @Override
+    public String toString() {
+        return getVersionString();
+    }
+
+    /**
+     * Major number of the release version.
+     *
+     * @return The major number of the release version.
+     */
+    public int getMajor() {
+        return majorNum;
+    }
+
+    /**
+     * Minor number of the release version.
+     *
+     * @return The minor number of the release version.
+     */
+    public int getMinor() {
+        return minorNum;
+    }
+
+    /**
+     * Patch number of the release version.
+     *
+     * @return The patch number of the release version.
+     */
+    public int getPatch() {
+        return patchNum;
+    }
+
+    /**
+     * The numeric version string, without the patch tag.
+     *
+     * @return The release version
+     */
+    public String getNumericVersionString() {
+        StringBuffer version = new StringBuffer();
+        version.append(majorNum).append(".");
+        version.append(minorNum).append(".");
+        version.append(patchNum);
+        return version.toString();
+    }
+
+    /**
+     * Release version, suitable for display.
+     *
+     * @return The release version, suitable for display.
+     */
+    public String getVersionString() {
+        StringBuffer version = new StringBuffer();
+        version.append(majorNum).append(".");
+        version.append(minorNum).append(".");
+        version.append(patchNum);
+	if (name != null) {
+	    version.append(" (");
+	    version.append(name).append(")");
+	}
+        return version.toString();
+    }
+}
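+
+/*
+ * A minimal sketch, not part of the original source: reading the version of
+ * the running library.  The class name and the major-version check are
+ * purely illustrative.
+ */
+class JEVersionExample {
+
+    public static void main(String[] args) {
+        JEVersion v = JEVersion.CURRENT_VERSION;
+        System.out.println("JE " + v.getVersionString());
+
+        /* getNumericVersionString omits the optional name suffix. */
+        System.out.println("numeric: " + v.getNumericVersionString());
+
+        if (v.getMajor() < 3) {
+            System.out.println("older than the 3.x release line");
+        }
+    }
+}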
diff --git a/src/com/sleepycat/je/JoinConfig.java b/src/com/sleepycat/je/JoinConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..c7978613d0ff88b1822ee18fc924a2e748d898ff
--- /dev/null
+++ b/src/com/sleepycat/je/JoinConfig.java
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JoinConfig.java,v 1.12.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The configuration properties of a <code>JoinCursor</code>. The join cursor
+ * configuration is specified when calling {@link Database#join Database.join}.
+ *
+ * <p>To create a configuration object with default attributes:</p>
+ *
+ * <pre>
+ *     JoinConfig config = new JoinConfig();
+ * </pre>
+ *
+ * <p>To set custom attributes:</p>
+ *
+ * <pre>
+ *     JoinConfig config = new JoinConfig();
+ *     config.setNoSort(true);
+ * </pre>
+ *
+ * @see Database#join Database.join
+ * @see JoinCursor
+ */
+public class JoinConfig implements Cloneable {
+
+    /**
+     * Default configuration used if null is passed to {@link
+     * com.sleepycat.je.Database#join Database.join}.
+     */
+    public static final JoinConfig DEFAULT = new JoinConfig();
+
+    private boolean noSort;
+
+    /**
+     * Creates an instance with the system's default settings.
+     */
+    public JoinConfig() {
+    }
+
+    /**
+     * Specifies whether automatic sorting of the input cursors is disabled.
+     *
+     * <p>Joined values are retrieved by doing a sequential iteration over the
+     * first cursor in the cursor array, and a nested iteration over each
+     * following cursor in the order they are specified in the array. This
+     * requires database traversals to search for the current datum in all the
+     * cursors after the first. For this reason, the best join performance
+     * normally results from sorting the cursors from the one that refers to
+     * the least number of data items to the one that refers to the
+     * most. Unless this method is called with true, <code>Database.join</code>
+     * does this sort on behalf of its caller.</p>
+     *
+     * <p>If the data are structured so that cursors with many data items also
+     * share many common elements, higher performance will result from listing
+     * those cursors before cursors with fewer data items; that is, a sort
+     * order other than the default. Calling this method permits applications
+     * to perform join optimization prior to calling
+     * <code>Database.join</code>.</p>
+     *
+     * @param noSort whether automatic sorting of the input cursors is
+     * disabled.
+     *
+     * @see Database#join Database.join
+     */
+    public void setNoSort(boolean noSort) {
+        this.noSort = noSort;
+    }
+
+    /**
+     * Returns whether automatic sorting of the input cursors is disabled.
+     *
+     * @return whether automatic sorting of the input cursors is disabled.
+     *
+     * @see #setNoSort
+     */
+    public boolean getNoSort() {
+        return noSort;
+    }
+
+    /**
+     * Used by JoinCursor to create a copy of the application
+     * supplied configuration. Done this way to provide non-public cloning.
+     */
+    JoinConfig cloneConfig() {
+        try {
+            return (JoinConfig) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "noSort=" + noSort +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/JoinCursor.java b/src/com/sleepycat/je/JoinCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ea13eb819051221702ae40ba7be4649e3d9aa4d
--- /dev/null
+++ b/src/com/sleepycat/je/JoinCursor.java
@@ -0,0 +1,404 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JoinCursor.java,v 1.20.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.logging.Level;
+
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.DatabaseUtil;
+
+/**
+ * A specialized join cursor for use in performing equality or natural joins on
+ * secondary indices.
+ *
+ * <p>A join cursor is returned when calling {@link Database#join
+ * Database.join}.</p>
+ *
+ * <p>To open a join cursor using two secondary cursors:</p>
+ *
+ * <pre>
+ *     Transaction txn = ...
+ *     Database primaryDb = ...
+ *     SecondaryDatabase secondaryDb1 = ...
+ *     SecondaryDatabase secondaryDb2 = ...
+ *     <p>
+ *     SecondaryCursor cursor1 = null;
+ *     SecondaryCursor cursor2 = null;
+ *     JoinCursor joinCursor = null;
+ *     try {
+ *         DatabaseEntry key = new DatabaseEntry();
+ *         DatabaseEntry data = new DatabaseEntry();
+ *         <p>
+ *         cursor1 = secondaryDb1.openSecondaryCursor(txn, null);
+ *         cursor2 = secondaryDb2.openSecondaryCursor(txn, null);
+ *         <p>
+ *         key.setData(...); // initialize key for secondary index 1
+ *         OperationStatus status1 =
+ *         cursor1.getSearchKey(key, data, LockMode.DEFAULT);
+ *         key.setData(...); // initialize key for secondary index 2
+ *         OperationStatus status2 =
+ *         cursor2.getSearchKey(key, data, LockMode.DEFAULT);
+ *         <p>
+ *         if (status1 == OperationStatus.SUCCESS &amp;&amp;
+ *                 status2 == OperationStatus.SUCCESS) {
+ *             <p>
+ *             SecondaryCursor[] cursors = {cursor1, cursor2};
+ *             joinCursor = primaryDb.join(cursors, null);
+ *             <p>
+ *             while (true) {
+ *                 OperationStatus joinStatus = joinCursor.getNext(key, data,
+ *                     LockMode.DEFAULT);
+ *                 if (joinStatus == OperationStatus.SUCCESS) {
+ *                      // Do something with the key and data.
+ *                 } else {
+ *                     break;
+ *                 }
+ *             }
+ *         }
+ *     } finally {
+ *         if (cursor1 != null) {
+ *             cursor1.close();
+ *         }
+ *         if (cursor2 != null) {
+ *             cursor2.close();
+ *         }
+ *         if (joinCursor != null) {
+ *             joinCursor.close();
+ *         }
+ *     }
+ * </pre>
+ */
+public class JoinCursor {
+
+    private JoinConfig config;
+    private Database priDb;
+    private Cursor priCursor;
+    private Cursor[] secCursors;
+    private DatabaseEntry[] cursorScratchEntries;
+    private DatabaseEntry scratchEntry;
+
+    /**
+     * Creates a join cursor without parameter checking.
+     */
+    JoinCursor(Locker locker,
+               Database primaryDb,
+               final Cursor[] cursors,
+               JoinConfig configParam)
+        throws DatabaseException {
+
+        priDb = primaryDb;
+        config = (configParam != null) ? configParam.cloneConfig()
+                                       : JoinConfig.DEFAULT;
+        scratchEntry = new DatabaseEntry();
+        cursorScratchEntries = new DatabaseEntry[cursors.length];
+        Cursor[] sortedCursors = new Cursor[cursors.length];
+        System.arraycopy(cursors, 0, sortedCursors, 0, cursors.length);
+
+        if (!config.getNoSort()) {
+
+            /*
+             * Sort ascending by duplicate count.  Collect counts before
+             * sorting so that countInternal() is called only once per cursor.
+             * Use READ_UNCOMMITTED to avoid blocking writers.
+             */
+            final int[] counts = new int[cursors.length];
+            for (int i = 0; i < cursors.length; i += 1) {
+                counts[i] = cursors[i].countInternal
+                    (LockMode.READ_UNCOMMITTED);
+                assert counts[i] >= 0;
+            }
+            Arrays.sort(sortedCursors, new Comparator<Cursor>() {
+                public int compare(Cursor o1, Cursor o2) {
+                    int count1 = -1;
+                    int count2 = -1;
+
+                    /*
+                     * Scan for objects in cursors not sortedCursors since
+                     * sortedCursors is being sorted in place.
+                     */
+                    for (int i = 0; i < cursors.length &&
+                                    (count1 < 0 || count2 < 0); i += 1) {
+                        if (cursors[i] == o1) {
+                            count1 = counts[i];
+                        } else if (cursors[i] == o2) {
+                            count2 = counts[i];
+                        }
+                    }
+                    assert count1 >= 0 && count2 >= 0;
+                    return (count1 - count2);
+                }
+            });
+        }
+
+        /*
+         * Open and dup cursors last.  If an error occurs before the
+         * constructor is complete, close them and ignore exceptions during
+         * close.
+         */
+        try {
+            priCursor = new Cursor(priDb, locker, null);
+            secCursors = new Cursor[cursors.length];
+            for (int i = 0; i < cursors.length; i += 1) {
+                secCursors[i] = new Cursor(sortedCursors[i], true);
+            }
+        } catch (DatabaseException e) {
+            close(e); /* will throw e */
+        }
+    }
+
+    /**
+     * Closes the cursors that have been opened by this join cursor.
+     *
+     * <p>The cursors passed to {@link Database#join Database.join} are not
+     * closed by this method, and should be closed by the caller.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void close()
+        throws DatabaseException {
+
+        if (priCursor == null) {
+            throw new DatabaseException("Already closed");
+        }
+        close(null);
+    }
+
+    /**
+     * Close all cursors we own, throwing only the first exception that occurs.
+     *
+     * @param firstException an exception that has already occurred, or null.
+     */
+    private void close(DatabaseException firstException)
+        throws DatabaseException {
+
+        if (priCursor != null) {
+            try {
+                priCursor.close();
+            } catch (DatabaseException e) {
+                if (firstException == null) {
+                    firstException = e;
+                }
+            }
+            priCursor = null;
+        }
+        for (int i = 0; i < secCursors.length; i += 1) {
+            if (secCursors[i] != null) {
+                try {
+                    secCursors[i].close();
+                } catch (DatabaseException e) {
+                    if (firstException == null) {
+                        firstException = e;
+                    }
+                }
+                secCursors[i] = null;
+            }
+        }
+        if (firstException != null) {
+            throw firstException;
+        }
+    }
+
+    /**
+     * For unit testing.
+     */
+    Cursor[] getSortedCursors() {
+        return secCursors;
+    }
+
+    /**
+     * Returns the primary database handle associated with this cursor.
+     *
+     * @return the primary database handle associated with this cursor.
+     */
+    public Database getDatabase() {
+
+        return priDb;
+    }
+
+    /**
+     * Returns this object's configuration.
+     *
+     * @return this object's configuration.
+     */
+    public JoinConfig getConfig() {
+
+        return config.cloneConfig();
+    }
+
+    /**
+     * Returns the next primary key resulting from the join operation.
+     *
+     * <p>An entry is returned by the join cursor for each primary key/data
+     * pair having all secondary key values that were specified using the array
+     * of secondary cursors passed to {@link Database#join Database.join}.</p>
+     *
+     * @param key the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes
+     * are used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or
+     * does not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus getNext(DatabaseEntry key,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        priCursor.checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", false);
+        priCursor.trace(Level.FINEST, "JoinCursor.getNext(key): ", lockMode);
+
+        return retrieveNext(key, null, lockMode);
+    }
+
+    /**
+     * Returns the next primary key and data resulting from the join operation.
+     *
+     * <p>An entry is returned by the join cursor for each primary key/data
+     * pair having all secondary key values that were specified using the array
+     * of secondary cursors passed to {@link Database#join Database.join}.</p>
+     *
+     * @param key the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes
+     * are used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or
+     * does not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus getNext(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        priCursor.checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        priCursor.trace(Level.FINEST, "JoinCursor.getNext(key,data): ",
+                        lockMode);
+
+        return retrieveNext(key, data, lockMode);
+    }
+
+    /**
+     * Internal version of getNext(), with an optional data param.
+     * <p>
+     * Since duplicates are always sorted and duplicate-duplicates are not
+     * allowed, a natural join can be implemented by simply traversing through
+     * the duplicates of the first cursor to find candidate keys, and then
+     * looking for each candidate key in the duplicate set of the other
+     * cursors, without ever resetting a cursor to the beginning of the
+     * duplicate set.
+     * <p>
+     * This only works when the same duplicate comparison method is used for
+     * all cursors.  We don't check for that; we just assume the user won't
+     * violate that rule.
+     * <p>
+     * A future optimization would be to add a SearchMode.BOTH_DUPS operation
+     * and use it instead of using SearchMode.BOTH.  This would be the
+     * equivalent of the undocumented DB_GET_BOTHC operation used by DB core's
+     * join() implementation.
+     */
+    private OperationStatus retrieveNext(DatabaseEntry keyParam,
+                                         DatabaseEntry dataParam,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        outerLoop: while (true) {
+
+            /* Process the first cursor to get a candidate key. */
+            Cursor secCursor = secCursors[0];
+            DatabaseEntry candidateKey = cursorScratchEntries[0];
+            OperationStatus status;
+            if (candidateKey == null) {
+                /* Get first duplicate at initial cursor position. */
+                candidateKey = new DatabaseEntry();
+                cursorScratchEntries[0] = candidateKey;
+                status = secCursor.getCurrentInternal(scratchEntry,
+                                                      candidateKey,
+                                                      lockMode);
+            } else {
+                /* Already initialized, move to the next candidate key. */
+                status = secCursor.retrieveNext(scratchEntry, candidateKey,
+                                                lockMode,
+                                                GetMode.NEXT_DUP);
+            }
+            if (status != OperationStatus.SUCCESS) {
+                /* No more candidate keys. */
+                return status;
+            }
+
+            /* Process the second and following cursors. */
+            for (int i = 1; i < secCursors.length; i += 1) {
+                secCursor = secCursors[i];
+                DatabaseEntry secKey = cursorScratchEntries[i];
+                if (secKey == null) {
+                    secKey = new DatabaseEntry();
+                    cursorScratchEntries[i] = secKey;
+                    status = secCursor.getCurrentInternal(secKey, scratchEntry,
+                                                          lockMode);
+                    assert status == OperationStatus.SUCCESS;
+                }
+                scratchEntry.setData(secKey.getData(), secKey.getOffset(),
+                                     secKey.getSize());
+                status = secCursor.search(scratchEntry, candidateKey, lockMode,
+                                          SearchMode.BOTH);
+                if (status != OperationStatus.SUCCESS) {
+                    /* No match, get another candidate key. */
+                    continue outerLoop;
+                }
+            }
+
+            /* The candidate key was found for all cursors. */
+            if (dataParam != null) {
+                status = priCursor.search(candidateKey, dataParam,
+                                          lockMode, SearchMode.SET);
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DatabaseException("Secondary corrupt");
+                }
+            }
+            keyParam.setData(candidateKey.getData(), candidateKey.getOffset(),
+                             candidateKey.getSize());
+            return OperationStatus.SUCCESS;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/LockMode.java b/src/com/sleepycat/je/LockMode.java
new file mode 100644
index 0000000000000000000000000000000000000000..9ea4def7fa110ff9b4ec31700aa06d249f0f4d05
--- /dev/null
+++ b/src/com/sleepycat/je/LockMode.java
@@ -0,0 +1,191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockMode.java,v 1.25.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Record lock modes for read operations. Lock mode parameters may be specified
+ * for all operations that retrieve data.
+ *
+ * <p><strong>Locking Rules</strong></p>
+ *
+ * <p>Together with {@link CursorConfig}, {@link TransactionConfig} and {@link
+ * EnvironmentConfig} settings, lock mode parameters determine how records are
+ * locked during read operations.  Record locking is used to enforce the
+ * isolation modes that are configured.  Record locking is summarized below for
+ * read and write operations.  For more information on isolation levels and
+ * transactions, see <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html"
+ * target="_top">Writing Transactional Applications</a>.</p>
+ *
+ * <p>With one exception, a record lock is always acquired when a record is
+ * read or written, and a cursor will always hold the lock as long as it is
+ * positioned on the record.  The exception is when {@link #READ_UNCOMMITTED}
+ * is specified, which allows a record to be read without any locking.</p>
+ *
+ * <p>Both read (shared) and write (exclusive) locks are used.  Read locks are
+ * normally acquired on read ({@code get} method) operations and write locks on
+ * write ({@code put} method) operations.  However, a write lock will be
+ * acquired on a read operation if {@link #RMW} is specified.</p>
+ *
+ * <p>Because read locks are shared, multiple accessors may read the same
+ * record.  Because write locks are exclusive, if a record is written by one
+ * accessor it may not be read or written by another accessor.  An accessor is
+ * either a transaction or a thread (for non-transactional operations).</p>
+ *
+ * <p>Whether additional locking is performed and how locks are released depend
+ * on whether the operation is transactional and other configuration
+ * settings.</p>
+ *
+ * <p><strong>Transactional Locking</strong></p>
+ *
+ * <p>Transactional operations include all write operations for a transactional
+ * database, and read operations when a non-null {@link Transaction} parameter
+ * is passed.  When a null transaction parameter is passed for a write
+ * operation for a transactional database, an auto-commit transaction is
+ * automatically used.</p>
+ *
+ * <p>With transactions, read and write locks are normally held until the end
+ * of the transaction (commit or abort).  Write locks are always held until the
+ * end of the transaction.  However, if {@link #READ_COMMITTED} is configured,
+ * then read locks for cursor operations are only held during the operation and
+ * while the cursor is positioned on the record.  The read lock is released
+ * when the cursor is moved to a different record or closed.  When {@link
+ * #READ_COMMITTED} is used for a database (non-cursor) operation, the read
+ * lock is released before the method returns.</p>
+ *
+ * <p>When neither {@link #READ_UNCOMMITTED} nor {@link #READ_COMMITTED} is
+ * specified, read and write locking as described above provide Repeatable Read
+ * isolation, which is the default transactional isolation level.  If
+ * Serializable isolation is configured, additional "next key" locking is
+ * performed to prevent "phantoms" -- records that are not visible at one point
+ * in a transaction but that become visible at a later point after being
+ * inserted by another transaction.  Serializable isolation is configured via
+ * {@link TransactionConfig#setSerializableIsolation} or {@link
+ * EnvironmentConfig#setTxnSerializableIsolation}.</p>
+ *
+ * <p><strong>Non-Transactional Locking</strong></p>
+ *
+ * <p>Non-transactional operations include all operations for a
+ * non-transactional database (including a Deferred Write database), and read
+ * operations for a transactional database when a null {@link Transaction}
+ * parameter is passed.</p>
+ *
+ * <p>For non-transactional operations, both read and write locks are only held
+ * while a cursor is positioned on the record, and are released when the cursor
+ * is moved to a different record or closed.  For database (non-cursor)
+ * operations, the read or write lock is released before the method
+ * returns.</p>
+ * 
+ * <p>This behavior is similar to {@link #READ_COMMITTED}, except that both
+ * read and write locks are released.  Configuring {@link #READ_COMMITTED} for
+ * a non-transactional database cursor has no effect.</p>
+ *
+ * <p>Because the current thread is the accessor (locker) for non-transactional
+ * operations, a single thread may have multiple cursors open without locking
+ * conflicts.  Two non-transactional cursors in the same thread may access the
+ * same record via write or read operations without conflicts, and the changes
+ * made by one cursor will be visible to the other cursor.</p>
+ *
+ * <p>However, a non-transactional operation will conflict with a transactional
+ * operation for the same record even when performed in the same thread.  When
+ * using a transaction in a particular thread for a particular database, to
+ * avoid conflicts you should use that transaction for all access to that
+ * database in that thread.  In other words, to avoid conflicts always pass the
+ * transaction parameter, not null, for all operations.  If you don't wish to
+ * hold the read lock for the duration of the transaction, specify {@link
+ * #READ_COMMITTED}.</p>
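+ *
+ * <p>As an illustrative sketch (assuming an open {@code Database} handle
+ * {@code db}, a {@code Transaction} {@code txn}, and a key value
+ * {@code keyBytes}), a lock mode is simply passed to a read operation;
+ * passing null is equivalent to passing {@link #DEFAULT}:</p>
+ *
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     // Read without acquiring a lock; see READ_UNCOMMITTED for caveats.
+ *     OperationStatus status =
+ *         db.get(txn, key, data, LockMode.READ_UNCOMMITTED);
+ * </pre>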
+ */
+public class LockMode {
+    private String lockModeName;
+
+    private LockMode(String lockModeName) {
+	this.lockModeName = lockModeName;
+    }
+
+    /**
+     * Uses the default lock mode and is equivalent to passing {@code null} for
+     * the lock mode parameter.
+     *
+     * <p>The default lock mode is {@link #READ_UNCOMMITTED} when this lock
+     * mode is configured via {@link CursorConfig#setReadUncommitted} or {@link
+     * TransactionConfig#setReadUncommitted}.  The Read Uncommitted mode
+     * overrides any other configuration settings.</p>
+     *
+     * <p>Otherwise, the default lock mode is {@link #READ_COMMITTED} when this
+     * lock mode is configured via {@link CursorConfig#setReadCommitted} or
+     * {@link TransactionConfig#setReadCommitted}.  The Read Committed mode
+     * overrides other configuration settings except for {@link
+     * #READ_UNCOMMITTED}.</p>
+     *
+     * <p>Otherwise, the default lock mode is to acquire read locks and release
+     * them according to the {@link LockMode default locking rules} for
+     * transactional and non-transactional operations.</p>
+     */
+    public static final LockMode DEFAULT = new LockMode("DEFAULT");
+
+    /**
+     * Reads modified but not yet committed data.
+     *
+     * <p>The Read Uncommitted mode is used if this lock mode is explicitly
+     * passed for the lock mode parameter, or if null or {@link #DEFAULT} is
+     * passed and Read Uncommitted is the default -- see {@link #DEFAULT} for
+     * details.</p>
+     *
+     * <p>See the {@link LockMode locking rules} for information on how Read
+     * Uncommitted impacts transactional and non-transactional locking.</p>
+     */
+    public static final LockMode READ_UNCOMMITTED =
+        new LockMode("READ_UNCOMMITTED");
+
+    /**
+     * Read modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #READ_UNCOMMITTED} to
+     * conform to ANSI database isolation terminology.
+     */
+    public static final LockMode DIRTY_READ = READ_UNCOMMITTED;
+
+    /**
+     * Read committed isolation provides for cursor stability but not
+     * repeatable reads.  Data items which have been previously read by this
+     * transaction may be deleted or modified by other transactions before the
+     * cursor is closed or the transaction completes.
+     *
+     * <p>Note that this LockMode may only be passed to {@link Database} get
+     * methods, not to {@link Cursor} methods.  To configure a cursor for Read
+     * Committed isolation, use {@link CursorConfig#setReadCommitted}.</p>
+     *
+     * <p>See the {@link LockMode locking rules} for information on how Read
+     * Committed impacts transactional and non-transactional locking.</p>
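+     *
+     * <p>A minimal sketch of configuring a cursor for Read Committed
+     * isolation (assuming an open {@code Database} handle {@code db} and a
+     * {@code Transaction} {@code txn}):</p>
+     *
+     * <pre>
+     *     CursorConfig cursorConfig = new CursorConfig();
+     *     cursorConfig.setReadCommitted(true);
+     *     Cursor cursor = db.openCursor(txn, cursorConfig);
+     * </pre>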
+     */
+    public static final LockMode READ_COMMITTED =
+        new LockMode("READ_COMMITTED");
+
+    /**
+     * Acquire write locks instead of read locks when doing the retrieval.
+     *
+     * <p>Because it causes a write lock to be acquired, specifying this lock
+     * mode as a {@link Cursor} or {@link Database} {@code get} (read) method
+     * parameter will override the Read Committed or Read Uncommitted isolation
+     * mode that is configured using {@link CursorConfig} or {@link
+     * TransactionConfig}.  The write lock will be acquired and held until the
+     * end
+     * of the transaction.  For non-transactional use, the write lock will be
+     * released when the cursor is moved to a new position or closed.</p>
+     *
+     * <p>Setting this flag can eliminate deadlock during a read-modify-write
+     * cycle by acquiring the write lock during the read part of the cycle so
+     * that another thread of control acquiring a read lock for the same item,
+     * in its own read-modify-write cycle, will not result in deadlock.</p>
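+     *
+     * <p>A sketch of a read-modify-write cycle using RMW (assuming an open
+     * {@code Database} handle {@code db}, a {@code Transaction} {@code txn},
+     * a key value {@code keyBytes}, and an application-defined
+     * {@code modify} method):</p>
+     *
+     * <pre>
+     *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     // Take the write lock up front to avoid a later lock upgrade.
+     *     if (db.get(txn, key, data, LockMode.RMW) ==
+     *         OperationStatus.SUCCESS) {
+     *         data.setData(modify(data.getData()));
+     *         db.put(txn, key, data);
+     *     }
+     * </pre>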
+     */
+    public static final LockMode RMW = new LockMode("RMW");
+
+    public String toString() {
+	return "LockMode." + lockModeName;
+    }
+}
diff --git a/src/com/sleepycat/je/LockNotGrantedException.java b/src/com/sleepycat/je/LockNotGrantedException.java
new file mode 100644
index 0000000000000000000000000000000000000000..061347f26245b61e0f5b55b71faff9aa243e9264
--- /dev/null
+++ b/src/com/sleepycat/je/LockNotGrantedException.java
@@ -0,0 +1,43 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockNotGrantedException.java,v 1.10.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * A LockNotGrantedException is thrown when a non-blocking operation fails to
+ * get a lock.
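+ *
+ * <p>Because this class extends {@link DeadlockException}, an application
+ * that wishes to handle the two cases separately must catch
+ * LockNotGrantedException first.  A sketch (assuming an operation performed
+ * under a transaction configured for non-blocking locking):</p>
+ *
+ * <pre>
+ *     try {
+ *         // ... a non-blocking read or write operation ...
+ *     } catch (LockNotGrantedException e) {
+ *         // The lock was not immediately available; retry or give up.
+ *     } catch (DeadlockException e) {
+ *         // Handle deadlocks in the usual way.
+ *     }
+ * </pre>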
+ */
+public class LockNotGrantedException extends DeadlockException {
+
+    /*
+     * LockNotGrantedException extends DeadlockException in order to support
+ * the approach that all applications need only handle
+     * DeadlockException. The idea is that we don't want an application to fail
+     * because a new type of exception is thrown when an operation is changed
+     * to non-blocking.
+     *
+     * Applications that care about LockNotGrantedExceptions can add another
+     * catch block to handle it, but otherwise they can be handled the same way
+     * as deadlocks.  See SR [#10672]
+     */
+    public LockNotGrantedException() {
+	super();
+    }
+
+    public LockNotGrantedException(Throwable t) {
+        super(t);
+    }
+
+    public LockNotGrantedException(String message) {
+	super(message);
+    }
+
+    public LockNotGrantedException(String message, Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/src/com/sleepycat/je/LockStats.java b/src/com/sleepycat/je/LockStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..9994b0b8387c375b39f22cf808ff72af2a139619
--- /dev/null
+++ b/src/com/sleepycat/je/LockStats.java
@@ -0,0 +1,229 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockStats.java,v 1.31.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+
+import com.sleepycat.je.latch.LatchStats;
+
+/**
+ * Lock statistics for a database environment.
+ *
+ * <p> Note that some of the lock statistics may be expensive to obtain because
+ * the lock table is unavailable while the statistics are gathered. These
+ * expensive statistics are only provided if {@link
+ * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+ * called with a StatsConfig parameter that has been configured for "slow"
+ * stats.
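+ *
+ * <p>For example, a sketch of gathering the full set of statistics (assuming
+ * an open {@code Environment} handle {@code env}):</p>
+ *
+ * <pre>
+ *     StatsConfig statsConfig = new StatsConfig();
+ *     statsConfig.setFast(false); // include the "slow" lock table stats
+ *     LockStats stats = env.getLockStats(statsConfig);
+ *     System.out.println("nRequests=" + stats.getNRequests());
+ *     System.out.println("nTotalLocks=" + stats.getNTotalLocks());
+ * </pre>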
+ */
+public class LockStats implements Serializable {
+
+    /**
+     * Total locks currently in lock table.
+     */
+    private int nTotalLocks;
+
+    /**
+     * Total read locks currently held.
+     */
+    private int nReadLocks;
+
+    /**
+     * Total write locks currently held.
+     */
+    private int nWriteLocks;
+
+    /**
+     * Total transactions waiting for locks.
+     */
+    private int nWaiters;
+
+    /**
+     * Total lock owners in lock table.
+     */
+    private int nOwners;
+
+    /**
+     * Number of times a lock request was made.
+     */
+    private long nRequests;
+
+    /**
+     * Number of times a lock request blocked.
+     */
+    private long nWaits;
+
+    /**
+     * LockTable latch stats.
+     */
+    private LatchStats lockTableLatchStats;
+
+    /**
+     * Total lock owners in lock table.  Only provided when {@link
+     * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+     * called in "slow" mode.
+     */
+    public int getNOwners() {
+        return nOwners;
+    }
+
+    /**
+     * Total read locks currently held.  Only provided when {@link
+     * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+     * called in "slow" mode.
+     */
+    public int getNReadLocks() {
+        return nReadLocks;
+    }
+
+    /**
+     * Total locks currently in lock table.  Only provided when {@link
+     * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+     * called in "slow" mode.
+     */
+    public int getNTotalLocks() {
+        return nTotalLocks;
+    }
+
+    /**
+     * Total transactions waiting for locks.  Only provided when {@link
+     * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+     * called in "slow" mode.
+     */
+    public int getNWaiters() {
+        return nWaiters;
+    }
+
+    /**
+     * Total write locks currently held.  Only provided when {@link
+     * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is
+     * called in "slow" mode.
+     */
+    public int getNWriteLocks() {
+        return nWriteLocks;
+    }
+
+    /**
+     * Total number of lock requests to date.
+     */
+    public long getNRequests() {
+        return nRequests;
+    }
+
+    /**
+     * Total number of lock waits to date.
+     */
+    public long getNWaits() {
+        return nWaits;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNOwners(int val) {
+        nOwners = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNReadLocks(int val) {
+        nReadLocks = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void accumulateNTotalLocks(int val) {
+        nTotalLocks += val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNWaiters(int val) {
+        nWaiters = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNWriteLocks(int val) {
+        nWriteLocks = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNRequests(long requests) {
+        this.nRequests = requests;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNWaits(long waits) {
+        this.nWaits = waits;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void accumulateLockTableLatchStats(LatchStats latchStats) {
+	if (lockTableLatchStats == null) {
+	    lockTableLatchStats = latchStats;
+	    return;
+	}
+
+        lockTableLatchStats.nAcquiresNoWaiters +=
+	    latchStats.nAcquiresNoWaiters;
+        lockTableLatchStats.nAcquiresSelfOwned +=
+	    latchStats.nAcquiresSelfOwned;
+        lockTableLatchStats.nAcquiresUpgrade +=
+	    latchStats.nAcquiresUpgrade;
+        lockTableLatchStats.nAcquiresWithContention +=
+	    latchStats.nAcquiresWithContention;
+        lockTableLatchStats.nAcquireNoWaitSuccessful +=
+	    latchStats.nAcquireNoWaitSuccessful;
+        lockTableLatchStats.nAcquireNoWaitUnsuccessful +=
+	    latchStats.nAcquireNoWaitUnsuccessful;
+        lockTableLatchStats.nAcquireSharedSuccessful +=
+	    latchStats.nAcquireSharedSuccessful;
+    }
+
+    /**
+     * For convenience, the LockStats class has a toString method that lists
+     * all the data fields.
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+
+        sb.append("\nFast mode stats (always available)\n");
+        sb.append("nRequests=").append(nRequests).append('\n');
+        sb.append("nWaits=").append(nWaits).append('\n');
+
+        sb.append("\nSlow mode stats (not available in fast mode)\n");
+        sb.append("nTotalLocks=").append(nTotalLocks).append('\n');
+        sb.append("nReadLocks=").append(nReadLocks).append('\n');
+        sb.append("nWriteLocks=").append(nWriteLocks).append('\n');
+        sb.append("nWaiters=").append(nWaiters).append('\n');
+        sb.append("nOwners=").append(nOwners).append('\n');
+        sb.append("lockTableLatch:\n").append(lockTableLatchStats);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/LogScanConfig.java b/src/com/sleepycat/je/LogScanConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..8deb2a4e77f96bc24f074ef00de23853ac5d9cb9
--- /dev/null
+++ b/src/com/sleepycat/je/LogScanConfig.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogScanConfig.java,v 1.7.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * For internal use only.
+ * @hidden
+ * Specify the attributes of a log scan.
+ */
+public class LogScanConfig {
+
+    private boolean forwards = true;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public LogScanConfig() {
+    }
+
+    /**
+     * Configure {@link Environment#scanLog} to scan forwards through the log.
+     *
+     * @param forwards If true, configure {@link Environment#scanLog} to scan
+     * forwards through the log.  The default is true.
+     */
+    public void setForwards(boolean forwards) {
+        this.forwards = forwards;
+    }
+
+    /**
+     * If true is returned, {@link Environment#scanLog} is configured to scan
+     * forwards.
+     */
+    public boolean getForwards() {
+        return forwards;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "forwards=" + forwards +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/LogScanner.java b/src/com/sleepycat/je/LogScanner.java
new file mode 100644
index 0000000000000000000000000000000000000000..a5b7c258c405ce7e2565135ac4c6572428d4edaa
--- /dev/null
+++ b/src/com/sleepycat/je/LogScanner.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogScanner.java,v 1.3.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * For internal use only.
+ * @hidden
+ * Used with Environment.scanLog to scan raw log entries.  An instance of a
+ * class that implements this interface should be passed to
+ * Environment.scanLog.
+ *
+ * <p><em>WARNING:</em> This interface is meant for low level processing of log
+ * records, not for application level queries.  See the following
+ * restrictions!</p>
+ *
+ * <p>Please be aware that raw log entries are passed to the scanRecord method.
+ * It is the responsibility of the caller to ensure that these records
+ * represent valid records in a Database by performing queries such as
+ * Database.get.  Be sure to take into account the following information about
+ * records that are passed to the scanRecord method:</p>
+ * 
+ * <ul> <li>Records may be part of transactions that were aborted or never
+ * committed, as well as part of committed transactions or not part of any
+ * transaction (written non-transactionally).</li>
+ * 
+ * <li>Records may appear within the interval specified when calling scanLog
+ * because the log cleaner migrated them forward, and not because the
+ * application wrote them during the specified interval.</li>
+ * 
+ * <li>For a given record, there may be multiple versions of the record passed
+ * to scanRecord because multiple versions of that record were written to the
+ * log.</li>
+ * 
+ * <li>For a given record, a deleted version of the record may appear before or
+ * after a non-deleted version of the record.</li>
+ *
+ * <li>The cleaner must be disabled while this method is running.</li>
+ *
+ * <li>This method should only be invoked immediately after recovery and prior
+ * to any updates to the Environment.</li> </ul>
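+ *
+ * <p>A minimal sketch of an implementing class (the boolean return value is
+ * assumed here to indicate whether the scan should continue):</p>
+ *
+ * <pre>
+ *     class RecordCounter implements LogScanner {
+ *         private int count = 0;
+ *
+ *         public boolean scanRecord(DatabaseEntry key,
+ *                                   DatabaseEntry data,
+ *                                   boolean deleted,
+ *                                   String databaseName) {
+ *             if (!deleted) {
+ *                 count += 1;
+ *             }
+ *             return true;
+ *         }
+ *     }
+ * </pre>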
+ */
+
+public interface LogScanner {
+
+    /**
+     * Called for each record scanned.
+     *
+     * @param key is the key entry of the record scanned.  This parameter will
+     * not be null.
+     *
+     * @param data is the data entry of the record scanned. This parameter may
+     * be null for deleted records.
+     *
+     * @param deleted is true if the given record is deleted at this point in
+     * the log.  Note that it may also appear earlier or later in the log.
+     *
+     * @param databaseName the name of the database in which the record
+     * appears.  Note that if the database was renamed, this is the last known
+     * name of the database at the time scanLog is called.
+     */
+    public boolean scanRecord(DatabaseEntry key,
+                              DatabaseEntry data,
+                              boolean deleted,
+                              String databaseName);
+}
+
diff --git a/src/com/sleepycat/je/OperationStatus.java b/src/com/sleepycat/je/OperationStatus.java
new file mode 100644
index 0000000000000000000000000000000000000000..5d283c48f53db2550de617d5daa0d7d40533b95f
--- /dev/null
+++ b/src/com/sleepycat/je/OperationStatus.java
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: OperationStatus.java,v 1.15.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Status values from database operations.
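+ *
+ * <p>For example, a status value is typically compared against the constants
+ * below (a sketch, assuming an open {@code Database} handle {@code db} and a
+ * key value {@code keyBytes}):</p>
+ *
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
+ *     if (status == OperationStatus.SUCCESS) {
+ *         // Use the data.
+ *     } else if (status == OperationStatus.NOTFOUND) {
+ *         // No record with this key exists.
+ *     }
+ * </pre>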
+ */
+public class OperationStatus {
+
+    /**
+     * The operation was successful.
+     */
+    public static final OperationStatus SUCCESS =
+	new OperationStatus("SUCCESS");
+
+    /**
+     * The operation to insert data was configured to not allow overwrite and
+     * the key already exists in the database.
+     */
+    public static final OperationStatus KEYEXIST =
+	new OperationStatus("KEYEXIST");
+
+    /**
+     * The cursor operation was unsuccessful because the current record was
+     * deleted.
+     */
+    public static final OperationStatus KEYEMPTY =
+	new OperationStatus("KEYEMPTY");
+
+    /**
+     * The requested key/data pair was not found.
+     */
+    public static final OperationStatus NOTFOUND =
+	new OperationStatus("NOTFOUND");
+
+    /* For toString. */
+    private String statusName;
+
+    private OperationStatus(String statusName) {
+	this.statusName = statusName;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public String toString() {
+	return "OperationStatus." + statusName;
+    }
+}
diff --git a/src/com/sleepycat/je/PreloadConfig.java b/src/com/sleepycat/je/PreloadConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..24f9825dd3f670e903cd833cd1dac2ed25ca74ef
--- /dev/null
+++ b/src/com/sleepycat/je/PreloadConfig.java
@@ -0,0 +1,120 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PreloadConfig.java,v 1.9.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Specifies the attributes of an application-invoked preload operation.
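+ *
+ * <p>A sketch of a bounded preload (assuming an open {@code Database} handle
+ * {@code db}; the limits shown are arbitrary):</p>
+ *
+ * <pre>
+ *     PreloadConfig config = new PreloadConfig();
+ *     config.setMaxBytes(10 * 1024 * 1024); // stop after ~10 MB is cached
+ *     config.setMaxMillisecs(5000);         // or after 5 seconds
+ *     PreloadStats stats = db.preload(config);
+ * </pre>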
+ */
+public class PreloadConfig implements Cloneable {
+
+    private long maxBytes;
+    private long maxMillisecs;
+    private boolean loadLNs;
+
+    /**
+     * Default configuration used if null is passed to {@link
+     * com.sleepycat.je.Database#preload Database.preload}.
+     */
+    public PreloadConfig() {
+    }
+
+    /**
+     * Configure the maximum number of bytes to preload.
+     *
+     * <p>The default is 0 for this class.</p>
+     *
+     * @param maxBytes If the maxBytes parameter is non-zero, a preload will
+     * stop when the cache contains this number of bytes.
+     */
+    public void setMaxBytes(long maxBytes) {
+	this.maxBytes = maxBytes;
+    }
+
+    /**
+     * Return the number of bytes in the cache to stop the preload at.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The number of bytes in the cache to stop the preload at.
+     */
+    public long getMaxBytes() {
+        return maxBytes;
+    }
+
+    /**
+     * Configure the maximum number of milliseconds to execute preload.
+     *
+     * <p>The default is 0 for this class.</p>
+     *
+     * @param maxMillisecs If the maxMillisecs parameter is non-zero, a preload
+     * will stop when this amount of time has passed.
+     */
+    public void setMaxMillisecs(long maxMillisecs) {
+	this.maxMillisecs = maxMillisecs;
+    }
+
+    /**
+     * Return the number of millisecs to stop the preload after.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The number of millisecs to stop the preload after.
+     */
+    public long getMaxMillisecs() {
+        return maxMillisecs;
+    }
+
+    /**
+     * Configure the preload load LNs option.
+     *
+     * <p>The default is false for this class.</p>
+     *
+     * @param loadLNs If set to true, the preload will load Leaf Nodes (LNs)
+     * containing the data values.
+     */
+    public void setLoadLNs(boolean loadLNs) {
+	this.loadLNs = loadLNs;
+    }
+
+    /**
+     * Return the configuration of the preload load LNs option.
+     *
+     * @return The configuration of the preload load LNs option.
+     */
+    public boolean getLoadLNs() {
+        return loadLNs;
+    }
+
+    /**
+     * Used by Database to create a copy of the application supplied
+     * configuration. Done this way to provide non-public cloning.
+     */
+    PreloadConfig cloneConfig() {
+        try {
+            return (PreloadConfig) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "maxBytes=" + maxBytes +
+            "\nmaxMillisecs=" + maxMillisecs +
+            "\nloadLNs=" + loadLNs +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/PreloadStats.java b/src/com/sleepycat/je/PreloadStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..d645474a4b1ef2292ab3468b02a07f2dd246f88b
--- /dev/null
+++ b/src/com/sleepycat/je/PreloadStats.java
@@ -0,0 +1,229 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PreloadStats.java,v 1.10.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+
+/**
+ * Statistics returned from {@link com.sleepycat.je.Database#preload 
+ * Database.preload}
+ */
+public class PreloadStats implements Serializable {
+
+    /**
+     * The number of INs loaded during the preload() operation.
+     */
+    private int nINsLoaded;
+
+    /**
+     * The number of BINs loaded during the preload() operation.
+     */
+    private int nBINsLoaded;
+
+    /**
+     * The number of LNs loaded during the preload() operation.
+     */
+    private int nLNsLoaded;
+
+    /**
+     * The number of DINs loaded during the preload() operation.
+     */
+    private int nDINsLoaded;
+
+    /**
+     * The number of DBINs loaded during the preload() operation.
+     */
+    private int nDBINsLoaded;
+
+    /**
+     * The number of DupCountLNs loaded during the preload() operation.
+     */
+    private int nDupCountLNsLoaded;
+
+    /**
+     * The status of the preload() operation.
+     */
+    private PreloadStatus status;
+
+    PreloadStats(int nINsLoaded,
+                 int nBINsLoaded,
+                 int nLNsLoaded,
+                 int nDINsLoaded,
+                 int nDBINsLoaded,
+                 int nDupCountLNsLoaded,
+                 PreloadStatus status) {
+
+        this.nINsLoaded = nINsLoaded;
+        this.nBINsLoaded = nBINsLoaded;
+        this.nLNsLoaded = nLNsLoaded;
+        this.nDINsLoaded = nDINsLoaded;
+        this.nDBINsLoaded = nDBINsLoaded;
+        this.nDupCountLNsLoaded = nDupCountLNsLoaded;
+        this.status = status;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public PreloadStats() {
+        reset();
+    }
+
+    /**
+     * Resets all stats.
+     */
+    private void reset() {
+	nINsLoaded = 0;
+	nBINsLoaded = 0;
+	nLNsLoaded = 0;
+	nDINsLoaded = 0;
+	nDBINsLoaded = 0;
+	nDupCountLNsLoaded = 0;
+	status = PreloadStatus.SUCCESS;
+    }
+
+    /**
+     * Returns the number of INs that were loaded into the cache during the
+     * preload() operation.
+     */
+    public int getNINsLoaded() {
+        return nINsLoaded;
+    }
+
+    /**
+     * Returns the number of BINs that were loaded into the cache during the
+     * preload() operation.
+     */
+    public int getNBINsLoaded() {
+        return nBINsLoaded;
+    }
+
+    /**
+     * Returns the number of LNs that were loaded into the cache during the
+     * preload() operation.
+     */
+    public int getNLNsLoaded() {
+        return nLNsLoaded;
+    }
+
+    /**
+     * Returns the number of DINs that were loaded into the cache during the
+     * preload() operation.
+     */
+    public int getNDINsLoaded() {
+        return nDINsLoaded;
+    }
+
+    /**
+     * Returns the number of DBINs that were loaded into the cache during the
+     * preload() operation.
+     */
+    public int getNDBINsLoaded() {
+        return nDBINsLoaded;
+    }
+
+    /**
+     * Returns the number of DupCountLNs that were loaded into the cache during
+     * the preload() operation.
+     */
+    public int getNDupCountLNsLoaded() {
+        return nDupCountLNsLoaded;
+    }
+
+    /**
+     * Returns the PreloadStatus value for the preload() operation.
+     */
+    public PreloadStatus getStatus() {
+        return status;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incINsLoaded() {
+        this.nINsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incBINsLoaded() {
+        this.nBINsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incLNsLoaded() {
+        this.nLNsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void addLNsLoaded(int newLNs) {
+        this.nLNsLoaded += newLNs;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incDINsLoaded() {
+        this.nDINsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incDBINsLoaded() {
+        this.nDBINsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void incDupCountLNsLoaded() {
+        this.nDupCountLNsLoaded++;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setStatus(PreloadStatus status) {
+        this.status = status;
+    }
+
+    /**
+     * Returns a String representation of the stats in the form of
+     * &lt;stat&gt;=&lt;value&gt;
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+	sb.append("status=").append(status).append('\n');
+	sb.append("nINsLoaded=").append(nINsLoaded).append('\n');
+	sb.append("nBINsLoaded=").append(nBINsLoaded).append('\n');
+	sb.append("nLNsLoaded=").append(nLNsLoaded).append('\n');
+	sb.append("nDINsLoaded=").append(nDINsLoaded).append('\n');
+	sb.append("nDBINsLoaded=").append(nDBINsLoaded).append('\n');
+	sb.append("nDupCountLNsLoaded=").append(nDupCountLNsLoaded).
+	    append('\n');
+
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/PreloadStatus.java b/src/com/sleepycat/je/PreloadStatus.java
new file mode 100644
index 0000000000000000000000000000000000000000..7f40400ca06067e5fbff374ade78425f323147e8
--- /dev/null
+++ b/src/com/sleepycat/je/PreloadStatus.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PreloadStatus.java,v 1.11.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+
+/**
+ * Describes the result of the {@link com.sleepycat.je.Database#preload
+ * Database.preload} operation.
+ */
+public class PreloadStatus implements Serializable {
+
+	/* For toString. */
+    private String statusName;
+
+    private PreloadStatus(String statusName) {
+	this.statusName = statusName;
+    }
+
+    @Override
+    public String toString() {
+	return "PreloadStatus." + statusName;
+    }
+
+    /**
+     * {@link com.sleepycat.je.Database#preload Database.preload} 
+     * was successful.
+     */
+    public static final PreloadStatus SUCCESS =
+	new PreloadStatus("SUCCESS");
+
+    /**
+     * {@link com.sleepycat.je.Database#preload Database.preload} 
+     * filled maxBytes of the cache.
+     */
+    public static final PreloadStatus FILLED_CACHE =
+	new PreloadStatus("FILLED_CACHE");
+
+    /**
+     * {@link com.sleepycat.je.Database#preload Database.preload} 
+     * took more than maxMillisecs.
+     */
+    public static final PreloadStatus EXCEEDED_TIME =
+	new PreloadStatus("EXCEEDED_TIME");
+}
diff --git a/src/com/sleepycat/je/ReplicaConsistencyPolicy.java b/src/com/sleepycat/je/ReplicaConsistencyPolicy.java
new file mode 100644
index 0000000000000000000000000000000000000000..730d7a9aa181ff1221e7a17fdf59185c37270ce9
--- /dev/null
+++ b/src/com/sleepycat/je/ReplicaConsistencyPolicy.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReplicaConsistencyPolicy.java,v 1.3.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.dbi.ReplicatorInstance;
+
+/**
+ * @hidden
+ * Feature not yet available.
+ *
+ * The interface for consistency policies used to provide consistency
+ * guarantees at a Replica. A transaction initiated at a replica will wait in
+ * the Environment.beginTransaction method until the required consistency
+ * policy is satisfied.
+ */
+public interface ReplicaConsistencyPolicy {
+
+    /**
+     * Ensures that the replica is within the constraints specified by this
+     * policy. If it isn't, the method waits until the constraint is satisfied
+     * by the replica.
+     */
+    public void ensureConsistency(ReplicatorInstance repInstance)
+        throws InterruptedException, DatabaseException;
+}
diff --git a/src/com/sleepycat/je/RunRecoveryException.java b/src/com/sleepycat/je/RunRecoveryException.java
new file mode 100644
index 0000000000000000000000000000000000000000..a40040ae56607ec15497f12e50ad08f663942714
--- /dev/null
+++ b/src/com/sleepycat/je/RunRecoveryException.java
@@ -0,0 +1,74 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RunRecoveryException.java,v 1.25.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Thrown when the JE environment has encountered an exception or a 
+ * resource shortfall and cannot continue safely. The Environment will
+ * no longer permit any operations, and the application must re-instantiate
+ * the Environment.
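+ *
+ * <p>A sketch of recovering from this condition (assuming {@code envHome} and
+ * {@code envConfig} are the home directory and configuration used to open the
+ * original handle):</p>
+ *
+ * <pre>
+ *     try {
+ *         // ... perform JE operations ...
+ *     } catch (RunRecoveryException e) {
+ *         try {
+ *             env.close();
+ *         } catch (DatabaseException ignored) {
+ *             // The handle is already invalid; ignore errors on close.
+ *         }
+ *         env = new Environment(envHome, envConfig);
+ *     }
+ * </pre>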
+ */
+public class RunRecoveryException extends DatabaseException {
+
+    private boolean alreadyThrown = false;
+
+    RunRecoveryException() {
+	super();
+    }
+
+    public RunRecoveryException(EnvironmentImpl env) {
+        super();
+        invalidate(env);
+    }
+
+    public RunRecoveryException(EnvironmentImpl env, Throwable t) {
+        super(t);
+        invalidate(env);
+    }
+
+    public RunRecoveryException(EnvironmentImpl env, String message) {
+        super(message);
+        invalidate(env);
+    }
+
+    public RunRecoveryException(EnvironmentImpl env,
+                                String message,
+				Throwable t) {
+        super(message, t);
+        invalidate(env);
+    }
+
+    private void invalidate(EnvironmentImpl env) {
+	if (env != null) {
+	    env.invalidate(this);
+	}
+    }
+
+    /**
+     * @hidden
+     * Remember that this was already thrown. That way, if we re-throw it
+     * because another handle uses the environment after the fatal throw, the
+     * message is more clear.
+     */
+    public void setAlreadyThrown(boolean alreadyThrown) {
+        this.alreadyThrown = alreadyThrown;
+    }
+
+    @Override
+    public String toString() {
+        if (alreadyThrown) {
+            return "Environment invalid because of previous exception: " +
+		super.toString();
+        } else {
+            return super.toString();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryConfig.java b/src/com/sleepycat/je/SecondaryConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..86f150ef7dca69cd2d1a9dcb9752fea2906da663
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryConfig.java
@@ -0,0 +1,486 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryConfig.java,v 1.25.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.utilint.DatabaseUtil;
+
+/**
+ * The configuration properties of a <code>SecondaryDatabase</code> extend
+ * those of a primary <code>Database</code>. The secondary database
+ * configuration is specified when calling {@link
+ * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}.
+ *
+ * <p>To create a configuration object with default attributes:</p>
+ *
+ * <pre>
+ *     SecondaryConfig config = new SecondaryConfig();
+ * </pre>
+ *
+ * <p>To set custom attributes:</p>
+ *
+ * <pre>
+ *     SecondaryConfig config = new SecondaryConfig();
+ *     config.setAllowCreate(true);
+ *     config.setSortedDuplicates(true);
+ *     config.setKeyCreator(new MyKeyCreator());
+ * </pre>
+ * <p>
+ * <hr>
+ * <p>NOTE: There are two situations where the use of secondary databases
+ * without transactions requires special consideration.  When using a
+ * transactional database or when doing read operations only, this note does
+ * not apply.
+ *
+ * <ul> <li>If the secondary is configured to not allow duplicates, when the
+ * secondary is being updated it is possible that an error will occur when the
+ * secondary key value in a record being added is already present in the
+ * database.  A <code>DatabaseException</code> will be thrown in this
+ * situation.</li>
+ *
+ * <li>If a foreign key constraint is configured with the delete action
+ * <code>ABORT</code> (the default setting), a <code>DatabaseException</code>
+ * will be thrown if an attempt is made to delete a referenced foreign
+ * key.</li> </ul>
+ *
+ * <p>In both cases, the operation will be partially complete because the
+ * primary database record will have already been updated or deleted.  In the
+ * presence of transactions, the exception will cause the transaction to abort.
+ * Without transactions, it is the responsibility of the caller to handle the
+ * results of the incomplete update or to take steps to prevent this situation
+ * from happening in the first place.</p>
+ *
+ * <hr>
+ *
+ * @see Environment#openSecondaryDatabase Environment.openSecondaryDatabase
+ * @see SecondaryDatabase
+ */
+public class SecondaryConfig extends DatabaseConfig {
+
+    /*
+     * For internal use, to allow null as a valid value for the config
+     * parameter.
+     */
+    public static final SecondaryConfig DEFAULT = new SecondaryConfig();
+
+    private boolean allowPopulate;
+    private SecondaryKeyCreator keyCreator;
+    private SecondaryMultiKeyCreator multiKeyCreator;
+    private Database foreignKeyDatabase;
+    private ForeignKeyDeleteAction foreignKeyDeleteAction =
+            ForeignKeyDeleteAction.ABORT;
+    private ForeignKeyNullifier foreignKeyNullifier;
+    private ForeignMultiKeyNullifier foreignMultiKeyNullifier;
+    private boolean immutableSecondaryKey;
+
+    /**
+     * Creates an instance with the system's default settings.
+     */
+    public SecondaryConfig() {
+    }
+
+    /**
+     * Specifies the user-supplied object used for creating single-valued
+     * secondary keys.
+     *
+     * <p>Unless the primary database is read-only, a key creator is required
+     * when opening a secondary database.  Either a KeyCreator or
+     * MultiKeyCreator must be specified, but both may not be specified.</p>
+     *
+     * <p><em>WARNING:</em> Key creator instances are shared by multiple
+     * threads and key creator methods are called without any special
+     * synchronization.  Therefore, key creators must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
+     *
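+     * <p>A sketch of a simple key creator (assuming the secondary key is the
+     * first four bytes of the primary data):</p>
+     *
+     * <pre>
+     *     class FirstFourBytesKeyCreator implements SecondaryKeyCreator {
+     *         public boolean createSecondaryKey(SecondaryDatabase secondary,
+     *                                           DatabaseEntry key,
+     *                                           DatabaseEntry data,
+     *                                           DatabaseEntry result)
+     *             throws DatabaseException {
+     *
+     *             if (data.getSize() &lt; 4) {
+     *                 return false; // this record has no secondary key
+     *             }
+     *             result.setData(data.getData(), data.getOffset(), 4);
+     *             return true;
+     *         }
+     *     }
+     * </pre>
+     *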
+     * @param keyCreator the user-supplied object used for creating
+     * single-valued secondary keys.
+     */
+    public void setKeyCreator(SecondaryKeyCreator keyCreator) {
+        this.keyCreator = keyCreator;
+    }
+
+    /**
+     * Returns the user-supplied object used for creating single-valued
+     * secondary keys.
+     *
+     * @return the user-supplied object used for creating single-valued
+     * secondary keys.
+     *
+     * @see #setKeyCreator
+     */
+    public SecondaryKeyCreator getKeyCreator() {
+        return keyCreator;
+    }
+
+    /**
+     * Specifies the user-supplied object used for creating multi-valued
+     * secondary keys.
+     *
+     * <p>Unless the primary database is read-only, a key creator is required
+     * when opening a secondary database.  Either a KeyCreator or
+     * MultiKeyCreator must be specified, but both may not be specified.</p>
+     *
+     * <p><em>WARNING:</em> Key creator instances are shared by multiple
+     * threads and key creator methods are called without any special
+     * synchronization.  Therefore, key creators must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
+     *
+     * @param multiKeyCreator the user-supplied object used for creating
+     * multi-valued secondary keys.
+     */
+    public void setMultiKeyCreator(SecondaryMultiKeyCreator multiKeyCreator) {
+        this.multiKeyCreator = multiKeyCreator;
+    }
+
+    /**
+     * Returns the user-supplied object used for creating multi-valued
+     * secondary keys.
+     *
+     * @return the user-supplied object used for creating multi-valued
+     * secondary keys.
+     *
+     * @see #setKeyCreator
+     */
+    public SecondaryMultiKeyCreator getMultiKeyCreator() {
+        return multiKeyCreator;
+    }
+
+    /**
+     * Specifies whether automatic population of the secondary is allowed.
+     *
+     * <p>If automatic population is allowed, when the secondary database is
+     * opened it is checked to see if it is empty.  If it is empty, the primary
+     * database is read in its entirety and keys are added to the secondary
+     * database using the information read from the primary.</p>
+     *
+     * <p>If this property is set to true and the database is transactional,
+     * the population of the secondary will be done within the explicit or
+     * auto-commit transaction that is used to open the database.</p>
+     *
+     * @param allowPopulate whether automatic population of the secondary is
+     * allowed.
+     */
+    public void setAllowPopulate(boolean allowPopulate) {
+        this.allowPopulate = allowPopulate;
+    }
+
+    /**
+     * Returns whether automatic population of the secondary is allowed.  If
+     * {@link #setAllowPopulate} has not been called, this method returns
+     * false.
+     *
+     * @return whether automatic population of the secondary is allowed.
+     *
+     * @see #setAllowPopulate
+     */
+    public boolean getAllowPopulate() {
+        return allowPopulate;
+    }
+
+    /**
+     * Defines a foreign key integrity constraint for a given foreign key
+     * database.
+     *
+     * <p>If this property is non-null, a record must be present in the
+     * specified foreign database for every record in the secondary database,
+     * where the secondary key value is equal to the foreign database key
+     * value. Whenever a record is to be added to the secondary database, the
+     * secondary key is used as a lookup key in the foreign database.  If the
+     * key is not found in the foreign database, a
+     * <code>DatabaseException</code> is thrown.</p>
+     *
+     * <p>The foreign database must not have duplicates allowed.  If duplicates
+     * are allowed, an IllegalArgumentException will be thrown when the
+     * secondary database is opened.</p>
+     *
+     * @param foreignKeyDatabase the database used to check the foreign key
+     * integrity constraint, or null if no foreign key constraint should be
+     * checked.
+     */
+    public void setForeignKeyDatabase(Database foreignKeyDatabase) {
+        this.foreignKeyDatabase = foreignKeyDatabase;
+    }
+
+    /**
+     * Returns the database used to check the foreign key integrity constraint,
+     * or null if no foreign key constraint will be checked.
+     *
+     * @return the foreign key database, or null.
+     *
+     * @see #setForeignKeyDatabase
+     */
+    public Database getForeignKeyDatabase() {
+        return foreignKeyDatabase;
+    }
+
+    /**
+     * Specifies the action taken when a referenced record in the foreign key
+     * database is deleted.
+     *
+     * <p>This property is ignored if the foreign key database property is
+     * null.</p>
+     *
+     * @param foreignKeyDeleteAction the action taken when a referenced record
+     * in the foreign key database is deleted.
+     *
+     * @see ForeignKeyDeleteAction
+     * @see #setForeignKeyDatabase
+     */
+    public void setForeignKeyDeleteAction(ForeignKeyDeleteAction
+                                          foreignKeyDeleteAction) {
+        DatabaseUtil.checkForNullParam(foreignKeyDeleteAction,
+                                       "foreignKeyDeleteAction");
+        this.foreignKeyDeleteAction = foreignKeyDeleteAction;
+    }
+
+    /**
+     * Returns the action taken when a referenced record in the foreign key
+     * database is deleted.
+     *
+     * @return the action taken when a referenced record in the foreign key
+     * database is deleted.
+     *
+     * @see #setForeignKeyDeleteAction
+     */
+    public ForeignKeyDeleteAction getForeignKeyDeleteAction() {
+        return foreignKeyDeleteAction;
+    }
+
+    /**
+     * Specifies the user-supplied object used for setting single-valued
+     * foreign keys to null.
+     *
+     * <p>This method may <em>not</em> be used along with {@link
+     * #setMultiKeyCreator}.  When using a multi-key creator, use {@link
+     * #setForeignMultiKeyNullifier} instead.</p>
+     *
+     * <p>If the foreign key database property is non-null and the foreign key
+     * delete action is <code>NULLIFY</code>, this property is required to be
+     * non-null; otherwise, this property is ignored.</p>
+     *
+     * <p><em>WARNING:</em> Key nullifier instances are shared by multiple
+     * threads and key nullifier methods are called without any special
+     * synchronization.  Therefore, key nullifiers must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
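+     *
+     * <p>For example, a sketch of wiring a nullifier for the
+     * <code>NULLIFY</code> delete action (<code>myNullifier</code> is an
+     * application-supplied <code>ForeignKeyNullifier</code> implementation
+     * and is shown only for illustration):</p>
+     *
+     * <blockquote><pre>
+     *     secConfig.setForeignKeyDatabase(foreignDb);
+     *     secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+     *     secConfig.setForeignKeyNullifier(myNullifier);
+     * </pre></blockquote>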
+     *
+     * @param foreignKeyNullifier the user-supplied object used for setting
+     * single-valued foreign keys to null.
+     *
+     * @see ForeignKeyNullifier
+     * @see ForeignKeyDeleteAction#NULLIFY
+     * @see #setForeignKeyDatabase
+     */
+    public void setForeignKeyNullifier(ForeignKeyNullifier
+                                       foreignKeyNullifier) {
+        this.foreignKeyNullifier = foreignKeyNullifier;
+    }
+
+    /**
+     * Returns the user-supplied object used for setting single-valued foreign
+     * keys to null.
+     *
+     * @return the user-supplied object used for setting single-valued foreign
+     * keys to null.
+     *
+     * @see #setForeignKeyNullifier
+     */
+    public ForeignKeyNullifier getForeignKeyNullifier() {
+        return foreignKeyNullifier;
+    }
+
+    /**
+     * Specifies the user-supplied object used for setting multi-valued foreign
+     * keys to null.
+     *
+     * <p>If the foreign key database property is non-null and the foreign key
+     * delete action is <code>NULLIFY</code>, this property is required to be
+     * non-null; otherwise, this property is ignored.</p>
+     *
+     * <p><em>WARNING:</em> Key nullifier instances are shared by multiple
+     * threads and key nullifier methods are called without any special
+     * synchronization.  Therefore, key nullifiers must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
+     *
+     * @param foreignMultiKeyNullifier the user-supplied object used for
+     * setting multi-valued foreign keys to null.
+     *
+     * @see ForeignMultiKeyNullifier
+     * @see ForeignKeyDeleteAction#NULLIFY
+     * @see #setForeignKeyDatabase
+     */
+    public void setForeignMultiKeyNullifier(ForeignMultiKeyNullifier
+                                            foreignMultiKeyNullifier) {
+        this.foreignMultiKeyNullifier = foreignMultiKeyNullifier;
+    }
+
+    /**
+     * Returns the user-supplied object used for setting multi-valued foreign
+     * keys to null.
+     *
+     * @return the user-supplied object used for setting multi-valued foreign
+     * keys to null.
+     *
+     * @see #setForeignMultiKeyNullifier
+     */
+    public ForeignMultiKeyNullifier getForeignMultiKeyNullifier() {
+        return foreignMultiKeyNullifier;
+    }
+
+    /**
+     * Specifies whether the secondary key is immutable.
+     *
+     * <p>Specifying that a secondary key is immutable can be used to optimize
+     * updates when the secondary key in a primary record will never be changed
+     * after that primary record is inserted.  For immutable secondary keys, a
+     * best effort is made to avoid calling
+     * <code>SecondaryKeyCreator.createSecondaryKey</code> when a primary
+     * record is updated.  This optimization may reduce the overhead of an
+     * update operation significantly if the <code>createSecondaryKey</code>
+     * operation is expensive.</p>
+     *
+     * <p>Be sure to set this property to true only if the secondary key in the
+     * primary record is never changed.  If this rule is violated, the
+     * secondary index will become corrupted, that is, it will become out of
+     * sync with the primary database.</p>
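+     *
+     * <p>For example, a sketch for an index whose secondary key is derived
+     * from a field that is never modified after insertion (the key creator
+     * shown is assumed to compute the key from that field):</p>
+     *
+     * <blockquote><pre>
+     *     SecondaryConfig secConfig = new SecondaryConfig();
+     *     secConfig.setKeyCreator(myKeyCreator);
+     *     secConfig.setImmutableSecondaryKey(true);
+     * </pre></blockquote>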
+     *
+     * @param immutableSecondaryKey whether the secondary key is immutable.
+     */
+    public void setImmutableSecondaryKey(boolean immutableSecondaryKey) {
+        this.immutableSecondaryKey = immutableSecondaryKey;
+    }
+
+    /**
+     * Returns whether the secondary key is immutable.  If {@link
+     * #setImmutableSecondaryKey} has not been called, this method returns
+     * false.
+     *
+     * @return whether the secondary key is immutable.
+     *
+     * @see #setImmutableSecondaryKey
+     */
+    public boolean getImmutableSecondaryKey() {
+        return immutableSecondaryKey;
+    }
+
+    /*
+     * For JCA Database handle caching.
+     */
+    @Override
+    void validate(DatabaseConfig configArg)
+	throws DatabaseException {
+
+	super.validate(configArg);
+
+	if (configArg == null ||
+	    !(configArg instanceof SecondaryConfig)) {
+	    throw new DatabaseException
+		("The argument must be a non-null SecondaryConfig.");
+	}
+
+	SecondaryConfig config = (SecondaryConfig) configArg;
+
+	boolean kcMatch = equalOrBothNull
+            (config.getKeyCreator(), keyCreator);
+	boolean mkcMatch = equalOrBothNull
+            (config.getMultiKeyCreator(), multiKeyCreator);
+	boolean fkdMatch =
+	    (config.getForeignKeyDatabase() == foreignKeyDatabase);
+	boolean fkdaMatch =
+	    (config.getForeignKeyDeleteAction() == foreignKeyDeleteAction);
+	boolean fknMatch = equalOrBothNull
+	    (config.getForeignKeyNullifier(), foreignKeyNullifier);
+	boolean fmknMatch = equalOrBothNull
+	    (config.getForeignMultiKeyNullifier(), foreignMultiKeyNullifier);
+	boolean imskMatch =
+	    (config.getImmutableSecondaryKey() == immutableSecondaryKey);
+	if (kcMatch &&
+            mkcMatch &&
+	    fkdMatch &&
+	    fkdaMatch &&
+	    fknMatch &&
+	    fmknMatch &&
+	    imskMatch) {
+	    return;
+	}
+
+	String message =
+	    genSecondaryConfigMismatchMessage(
+                config, kcMatch, mkcMatch, fkdMatch, fkdaMatch,
+                fknMatch, fmknMatch, imskMatch);
+	throw new DatabaseException(message);
+    }
+
+    private boolean equalOrBothNull(Object o1, Object o2) {
+        return (o1 != null) ? o1.equals(o2) : (o2 == null);
+    }
+
+    String genSecondaryConfigMismatchMessage(DatabaseConfig config,
+					     boolean kcMatch,
+					     boolean mkcMatch,
+					     boolean fkdMatch,
+					     boolean fkdaMatch,
+					     boolean fknMatch,
+					     boolean fmknMatch,
+					     boolean imskMatch) {
+	StringBuffer ret = new StringBuffer
+	    ("The following SecondaryConfig parameters for the\n" +
+	     "cached Database do not match the parameters for the\n" +
+	     "requested Database:\n");
+	if (!kcMatch) {
+	    ret.append(" SecondaryKeyCreator\n");
+	}
+
+	if (!mkcMatch) {
+	    ret.append(" SecondaryMultiKeyCreator\n");
+	}
+
+	if (!fkdMatch) {
+	    ret.append(" ForeignKeyDatabase\n");
+	}
+
+	if (!fkdaMatch) {
+	    ret.append(" ForeignKeyDeleteAction\n");
+	}
+
+	if (!fknMatch) {
+	    ret.append(" ForeignKeyNullifier\n");
+	}
+
+	if (!fmknMatch) {
+	    ret.append(" ForeignMultiKeyNullifier\n");
+	}
+
+	if (!imskMatch) {
+	    ret.append(" ImmutableSecondaryKey\n");
+	}
+
+	return ret.toString();
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "keyCreator=" + keyCreator +
+            "\nmultiKeyCreator=" + multiKeyCreator +
+            "\nallowPopulate=" + allowPopulate +
+            "\nforeignKeyDatabase=" + foreignKeyDatabase +
+            "\nforeignKeyDeleteAction=" + foreignKeyDeleteAction +
+            "\nforeignKeyNullifier=" + foreignKeyNullifier +
+            "\nforeignMultiKeyNullifier=" + foreignMultiKeyNullifier +
+            "\nimmutableSecondaryKey=" + immutableSecondaryKey +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryCursor.java b/src/com/sleepycat/je/SecondaryCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..181ee3109de160e2b0d3d6585478d32810c6dd4d
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryCursor.java
@@ -0,0 +1,1655 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryCursor.java,v 1.45.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.logging.Level;
+
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.DatabaseUtil;
+
+/**
+ * A database cursor for a secondary database. Cursors are not thread safe and
+ * the application is responsible for coordinating any multithreaded access to
+ * a single cursor object.
+ *
+ * <p>Secondary cursors are returned by {@link SecondaryDatabase#openCursor
+ * SecondaryDatabase.openCursor} and {@link
+ * SecondaryDatabase#openSecondaryCursor
+ * SecondaryDatabase.openSecondaryCursor}.  The distinguishing characteristics
+ * of a secondary cursor are:</p>
+ *
+ * <ul> <li>Direct calls to <code>put()</code> methods on a secondary cursor
+ * are prohibited.
+ *
+ * <li>The {@link #delete} method of a secondary cursor will delete the primary
+ * record as well as all of its associated secondary records.
+ *
+ * <li>Calls to all get methods will return the data from the associated
+ * primary database.
+ *
+ * <li>Additional get method signatures are provided to return the primary key
+ * in an additional pKey parameter.
+ *
+ * <li>Calls to {@link #dup} will return a {@link SecondaryCursor}.
+ *
+ * <li>The {@link #dupSecondary} method is provided to return a {@link
+ * SecondaryCursor} that doesn't require casting.  </ul>
+ *
+ * <p>To obtain a secondary cursor with default attributes:</p>
+ *
+ * <blockquote><pre>
+ *     SecondaryCursor cursor = myDb.openSecondaryCursor(txn, null);
+ * </pre></blockquote>
+ *
+ * <p>To customize the attributes of a cursor, use a CursorConfig object.</p>
+ *
+ * <blockquote><pre>
+ *     CursorConfig config = new CursorConfig();
+ *     config.setReadUncommitted(true);
+ *     SecondaryCursor cursor = myDb.openSecondaryCursor(txn, config);
+ * </pre></blockquote>
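+ *
+ * <p>For example, a sketch of reading records by secondary key while also
+ * retrieving the corresponding primary key via the additional pKey parameter
+ * (the key bytes shown are illustrative):</p>
+ *
+ * <blockquote><pre>
+ *     DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
+ *     DatabaseEntry pKey = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     SecondaryCursor cursor = myDb.openSecondaryCursor(txn, null);
+ *     try {
+ *         OperationStatus status =
+ *             cursor.getSearchKey(secKey, pKey, data, LockMode.DEFAULT);
+ *         while (status == OperationStatus.SUCCESS) {
+ *             // pKey now holds the primary key and data the primary data.
+ *             status = cursor.getNextDup(secKey, pKey, data, LockMode.DEFAULT);
+ *         }
+ *     } finally {
+ *         cursor.close();
+ *     }
+ * </pre></blockquote>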
+ */
+public class SecondaryCursor extends Cursor {
+
+    private SecondaryDatabase secondaryDb;
+    private Database primaryDb;
+
+    /**
+     * Cursor constructor. Not public. To get a cursor, the user should call
+     * SecondaryDatabase.openSecondaryCursor().
+     */
+    SecondaryCursor(SecondaryDatabase dbHandle,
+                    Transaction txn,
+                    CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        super(dbHandle, txn, cursorConfig);
+        secondaryDb = dbHandle;
+        primaryDb = dbHandle.getPrimaryDatabase();
+    }
+
+    /**
+     * Copy constructor.
+     */
+    private SecondaryCursor(SecondaryCursor cursor, boolean samePosition)
+        throws DatabaseException {
+
+        super(cursor, samePosition);
+        secondaryDb = cursor.secondaryDb;
+        primaryDb = cursor.primaryDb;
+    }
+
+    /**
+     * Returns the primary {@link com.sleepycat.je.Database Database}
+     * associated with this cursor.
+     *
+     * <p>Calling this method is the equivalent of the following
+     * expression:</p>
+     *
+     * <blockquote><pre>
+     *         ((SecondaryDatabase) this.getDatabase()).getPrimaryDatabase()
+     * </pre></blockquote>
+     *
+     * @return The primary {@link com.sleepycat.je.Database Database}
+     * associated with this cursor.
+     */
+    public Database getPrimaryDatabase() {
+	return primaryDb;
+    }
+
+    /**
+     * Returns a new <code>SecondaryCursor</code> for the same transaction as
+     * the original cursor.
+     */
+    @Override
+    public Cursor dup(boolean samePosition)
+        throws DatabaseException {
+
+        checkState(false);
+        return new SecondaryCursor(this, samePosition);
+    }
+
+    /**
+     * Returns a new copy of the cursor as a <code>SecondaryCursor</code>.
+     *
+     * <p>Calling this method is the equivalent of calling {@link #dup} and
+     * casting the result to {@link SecondaryCursor}.</p>
+     *
+     * @see #dup
+     */
+    public SecondaryCursor dupSecondary(boolean samePosition)
+        throws DatabaseException {
+
+        return (SecondaryCursor) dup(samePosition);
+    }
+
+    /**
+     * Delete the key/data pair to which the cursor refers from the primary
+     * database and all secondary indices.
+     *
+     * <p>This method behaves as if {@link Database#delete} were called for the
+     * primary database, using the primary key obtained from the current
+     * secondary record.</p>
+     *
+     * <p>The cursor position is unchanged after a delete, and subsequent calls
+     * to cursor functions expecting the cursor to refer to an existing key
+     * will fail.</p>
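+     *
+     * <p>For example, a sketch of positioning on a secondary key and deleting
+     * the corresponding primary record, and therefore all of its secondary
+     * records (the <code>DatabaseEntry</code> objects shown are assumed to be
+     * declared by the caller):</p>
+     *
+     * <blockquote><pre>
+     *     if (cursor.getSearchKey(secKey, pKey, data, LockMode.RMW) ==
+     *         OperationStatus.SUCCESS) {
+     *         cursor.delete();
+     *     }
+     * </pre></blockquote>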
+     */
+    @Override
+    public OperationStatus delete()
+        throws DatabaseException {
+
+        checkState(true);
+        checkUpdatesAllowed("delete");
+        trace(Level.FINEST, "SecondaryCursor.delete: ", null);
+
+        /* Read the primary key (the data of a secondary). */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pKey = new DatabaseEntry();
+        OperationStatus status = getCurrentInternal(key, pKey,
+                                                    LockMode.RMW);
+
+        /* Delete the primary and all secondaries (including this one). */
+        if (status == OperationStatus.SUCCESS) {
+            status =
+		primaryDb.deleteInternal(cursorImpl.getLocker(), pKey, null);
+            if (status != OperationStatus.SUCCESS) {
+                SecondaryDatabase secDb = (SecondaryDatabase) getDatabase();
+                throw secDb.secondaryCorruptException();
+            }
+        }
+        return status;
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus put(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus putNoOverwrite(DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus putCurrent(DatabaseEntry data)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * Returns the key/data pair to which the cursor refers.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY
+     * OperationStatus.KEYEMPTY} if the key/data pair at the cursor position has been
+     * deleted; otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getCurrent(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        return getCurrent(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Returns the key/data pair to which the cursor refers.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY
+     * OperationStatus.KEYEMPTY} if the key/data pair at the cursor position has been
+     * deleted; otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getCurrent(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(true);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getCurrent: ", lockMode);
+
+        return getCurrentInternal(key, pKey, data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the first key/data pair of the database, and return
+     * that pair.  If the first key has duplicate values, the first data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getFirst(DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+        throws DatabaseException {
+
+        return getFirst(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the first key/data pair of the database, and return
+     * that pair.  If the first key has duplicate values, the first data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getFirst(DatabaseEntry key,
+                                    DatabaseEntry pKey,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getFirst: ", lockMode);
+
+        return position(key, pKey, data, lockMode, true);
+    }
+
+    /**
+     * Move the cursor to the last key/data pair of the database, and return
+     * that pair.  If the last key has duplicate values, the last data item in
+     * the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getLast(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        return getLast(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the last key/data pair of the database, and return
+     * that pair.  If the last key has duplicate values, the last data item in
+     * the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getLast(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getLast: ", lockMode);
+
+        return position(key, pKey, data, lockMode, false);
+    }
+
+    /**
+     * Move the cursor to the next key/data pair and return that pair.  If the
+     * matching key has duplicate values, the first data item in the set of
+     * duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next key/data pair of the database, and that pair
+     * is returned.  In the presence of duplicate key values, the value of the
+     * key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getNext(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        return getNext(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the next key/data pair and return that pair.  If the
+     * matching key has duplicate values, the first data item in the set of
+     * duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next key/data pair of the database, and that pair
+     * is returned.  In the presence of duplicate key values, the value of the
+     * key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNext(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getNext: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, pKey, data, lockMode, true);
+        } else {
+            return retrieveNext(key, pKey, data, lockMode, GetMode.NEXT);
+        }
+    }
+
+    /**
+     * If the next key/data pair of the database is a duplicate data record for
+     * the current key/data pair, move the cursor to the next key/data pair of
+     * the database and return that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getNextDup(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        return getNextDup(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * If the next key/data pair of the database is a duplicate data record for
+     * the current key/data pair, move the cursor to the next key/data pair of
+     * the database and return that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNextDup(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(true);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getNextDup: ", lockMode);
+
+        return retrieveNext(key, pKey, data, lockMode, GetMode.NEXT_DUP);
+    }
+
+    /**
+     * Move the cursor to the next non-duplicate key/data pair and return that
+     * pair.  If the matching key has duplicate values, the first data item in
+     * the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next non-duplicate key of the database, and that
+     * key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getNextNoDup(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        return getNextNoDup(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the next non-duplicate key/data pair and return that
+     * pair.  If the matching key has duplicate values, the first data item in
+     * the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next non-duplicate key of the database, and that
+     * key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getNextNoDup(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getNextNoDup: ", null, null,
+              lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, pKey, data, lockMode, true);
+        } else {
+            return retrieveNext(key, pKey, data, lockMode,
+                                GetMode.NEXT_NODUP);
+        }
+    }
+
+    /**
+     * Move the cursor to the previous key/data pair and return that pair. If
+     * the matching key has duplicate values, the last data item in the set of
+     * duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous key/data pair of the database, and that
+     * pair is returned. In the presence of duplicate key values, the value of
+     * the key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getPrev(DatabaseEntry key,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        return getPrev(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the previous key/data pair and return that pair. If
+     * the matching key has duplicate values, the last data item in the set of
+     * duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous key/data pair of the database, and that
+     * pair is returned. In the presence of duplicate key values, the value of
+     * the key may not change.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrev(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getPrev: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, pKey, data, lockMode, false);
+        } else {
+            return retrieveNext(key, pKey, data, lockMode, GetMode.PREV);
+        }
+    }
+
+    /**
+     * If the previous key/data pair of the database is a duplicate data record
+     * for the current key/data pair, move the cursor to the previous key/data
+     * pair of the database and return that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getPrevDup(DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        return getPrevDup(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * If the previous key/data pair of the database is a duplicate data record
+     * for the current key/data pair, move the cursor to the previous key/data
+     * pair of the database and return that pair.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrevDup(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(true);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getPrevDup: ", lockMode);
+
+        return retrieveNext(key, pKey, data, lockMode, GetMode.PREV_DUP);
+    }
+
+    /**
+     * Move the cursor to the previous non-duplicate key/data pair and return
+     * that pair.  If the matching key has duplicate values, the last data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous non-duplicate key of the database, and
+     * that key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        return getPrevNoDup(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the previous non-duplicate key/data pair and return
+     * that pair.  If the matching key has duplicate values, the last data item
+     * in the set of duplicates is returned.
+     *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous non-duplicate key of the database, and
+     * that key/data pair is returned.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        checkArgsNoValRequired(key, pKey, data);
+        trace(Level.FINEST, "SecondaryCursor.getPrevNoDup: ", lockMode);
+
+        if (cursorImpl.isNotInitialized()) {
+            return position(key, pKey, data, lockMode, false);
+        } else {
+            return retrieveNext(key, pKey, data, lockMode,
+                                GetMode.PREV_NODUP);
+        }
+    }
+
+    /**
+     * Move the cursor to the given key of the database, and return the datum
+     * associated with the given key.  If the matching key has duplicate
+     * values, the first data item in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getSearchKey(DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        return getSearchKey(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the given key of the database, and return the datum
+     * associated with the given key.  If the matching key has duplicate
+     * values, the first data item in the set of duplicates is returned.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchKey(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "SecondaryCursor.getSearchKey: ", key, null,
+              lockMode);
+
+        return search(key, pKey, data, lockMode, SearchMode.SET);
+    }
+
+    /**
+     * Move the cursor to the closest matching key of the database, and return
+     * the data item associated with the matching key.  If the matching key has
+     * duplicate values, the first data item in the set of duplicates is
+     * returned.
+     *
+     * <p>The returned key/data pair is for the smallest key greater than or
+     * equal to the specified key (as determined by the key comparison
+     * function), permitting partial key matches and range searches.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input and returned as output.  It
+     * must be initialized with a non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    @Override
+    public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+        throws DatabaseException {
+
+        return getSearchKeyRange(key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Move the cursor to the closest matching key of the database, and return
+     * the data item associated with the matching key.  If the matching key has
+     * duplicate values, the first data item in the set of duplicates is
+     * returned.
+     *
+     * <p>The returned key/data pair is for the smallest key greater than or
+     * equal to the specified key (as determined by the key comparison
+     * function), permitting partial key matches and range searches.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input and returned as output.  It
+     * must be initialized with a non-null byte array by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                             DatabaseEntry pKey,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "SecondaryCursor.getSearchKeyRange: ", key, data,
+              lockMode);
+
+        return search(key, pKey, data, lockMode, SearchMode.SET_RANGE);
+    }
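+
+    /*
+     * A minimal range-scan sketch (illustrative only; "secCursor" is an
+     * assumed already-open SecondaryCursor):
+     *
+     *     // Position on the smallest secondary key >= the given prefix,
+     *     // then walk forward with getNext().
+     *     DatabaseEntry secKey = new DatabaseEntry("sm".getBytes());
+     *     DatabaseEntry priKey = new DatabaseEntry();
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     OperationStatus status =
+     *         secCursor.getSearchKeyRange(secKey, priKey, data, null);
+     *     while (status == OperationStatus.SUCCESS) {
+     *         // secKey is overwritten with the matching key on each step.
+     *         status = secCursor.getNext(secKey, priKey, data, null);
+     *     }
+     */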
+
+    /**
+     * This operation is not allowed with this method signature. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method with the <code>pKey</code> parameter should be
+     * used instead.
+     */
+    @Override
+    public OperationStatus getSearchBoth(DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * Move the cursor to the specified secondary and primary key, where both
+     * the primary and secondary key items must match.
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param pKey the primary key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchBoth(DatabaseEntry key,
+                                         DatabaseEntry pKey,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", true);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "SecondaryCursor.getSearchBoth: ", key, data,
+              lockMode);
+
+        return search(key, pKey, data, lockMode, SearchMode.BOTH);
+    }
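+
+    /*
+     * A minimal sketch (illustrative only; both entries are inputs here, so
+     * both must be initialized; "secKeyBytes" and "priKeyBytes" are
+     * hypothetical):
+     *
+     *     DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
+     *     DatabaseEntry priKey = new DatabaseEntry(priKeyBytes);
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     OperationStatus status =
+     *         secCursor.getSearchBoth(secKey, priKey, data, LockMode.DEFAULT);
+     *     // SUCCESS only if this exact secondary/primary pairing exists.
+     */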
+
+    /**
+     * This operation is not allowed with this method signature. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method with the <code>pKey</code> parameter should be
+     * used instead.
+     */
+    @Override
+    public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                              DatabaseEntry data,
+                                              LockMode lockMode)
+        throws DatabaseException {
+
+        throw SecondaryDatabase.notAllowedException();
+    }
+
+    /**
+     * Move the cursor to the specified secondary key and closest matching
+     * primary key of the database.
+     *
+     * <p>In the case of any database supporting sorted duplicate sets, the
+     * returned key/data pair is for the smallest primary key greater than or
+     * equal to the specified primary key (as determined by the key comparison
+     * function), permitting partial matches and range searches in duplicate
+     * data sets.</p>
+     *
+     * <p>If this method fails for any reason, the position of the cursor will
+     * be unchanged.</p>
+     *
+     * @throws NullPointerException if a DatabaseEntry parameter is null or does
+     * not contain a required non-null byte array.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     *
+     * @param key the secondary key used as input and returned as output.  It
+     * must be initialized with a non-null byte array by the caller.
+     *
+     * @param pKey the primary key used as input and returned as output.  It
+     * must be initialized with a non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     */
+    public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                              DatabaseEntry pKey,
+                                              DatabaseEntry data,
+                                              LockMode lockMode)
+        throws DatabaseException {
+
+        checkState(false);
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", true);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        trace(Level.FINEST, "SecondaryCursor.getSearchBothRange: ", key, data,
+              lockMode);
+
+        return search(key, pKey, data, lockMode, SearchMode.BOTH_RANGE);
+    }
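+
+    /*
+     * A minimal sketch (illustrative only): the secondary key is an exact
+     * input while the primary key acts as a lower bound, which is useful for
+     * resuming a scan within one duplicate set ("secKeyBytes" and
+     * "priKeyLowerBound" are hypothetical):
+     *
+     *     DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
+     *     DatabaseEntry priKey = new DatabaseEntry(priKeyLowerBound);
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     OperationStatus status =
+     *         secCursor.getSearchBothRange(secKey, priKey, data, null);
+     *     // On SUCCESS, priKey holds the smallest primary key >= the bound
+     *     // within the secKey duplicate set.
+     */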
+
+    /**
+     * Returns the current key and data.
+     */
+    private OperationStatus getCurrentInternal(DatabaseEntry key,
+                                               DatabaseEntry pKey,
+                                               DatabaseEntry data,
+                                               LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status = getCurrentInternal(key, pKey, lockMode);
+        if (status == OperationStatus.SUCCESS) {
+
+            /*
+             * May return KEYEMPTY if read-uncommitted and the primary was
+             * deleted.
+             */
+            status = readPrimaryAfterGet(key, pKey, data, lockMode);
+        }
+        return status;
+    }
+
+    /**
+     * Calls search() and retrieves primary data.
+     */
+    OperationStatus search(DatabaseEntry key,
+                           DatabaseEntry pKey,
+                           DatabaseEntry data,
+                           LockMode lockMode,
+                           SearchMode searchMode)
+        throws DatabaseException {
+
+        /*
+         * Perform retries to account for deletions during a read-uncommitted.
+         */
+        while (true) {
+            OperationStatus status = search(key, pKey, lockMode, searchMode);
+            if (status != OperationStatus.SUCCESS) {
+                return status;
+            }
+            status = readPrimaryAfterGet(key, pKey, data, lockMode);
+            if (status == OperationStatus.SUCCESS) {
+                return status;
+            }
+        }
+    }
+
+    /**
+     * Calls position() and retrieves primary data.
+     */
+    OperationStatus position(DatabaseEntry key,
+                             DatabaseEntry pKey,
+                             DatabaseEntry data,
+                             LockMode lockMode,
+                             boolean first)
+        throws DatabaseException {
+
+        /*
+         * Perform retries to account for deletions during a read-uncommitted.
+         */
+        while (true) {
+            OperationStatus status = position(key, pKey, lockMode, first);
+            if (status != OperationStatus.SUCCESS) {
+                return status;
+            }
+            status = readPrimaryAfterGet(key, pKey, data, lockMode);
+            if (status == OperationStatus.SUCCESS) {
+                return status;
+            }
+        }
+    }
+
+    /**
+     * Calls retrieveNext() and retrieves primary data.
+     */
+    OperationStatus retrieveNext(DatabaseEntry key,
+                                 DatabaseEntry pKey,
+                                 DatabaseEntry data,
+                                 LockMode lockMode,
+                                 GetMode getMode)
+        throws DatabaseException {
+
+        /*
+         * Perform retries to account for deletions during a read-uncommitted.
+         */
+        while (true) {
+            OperationStatus status = retrieveNext(key, pKey, lockMode,
+                                                  getMode);
+            if (status != OperationStatus.SUCCESS) {
+                return status;
+            }
+            status = readPrimaryAfterGet(key, pKey, data, lockMode);
+            if (status == OperationStatus.SUCCESS) {
+                return status;
+            }
+        }
+    }
+
+    /**
+     * Reads the primary data for a primary key that was read via a secondary.
+     * When SUCCESS is returned by this method, the caller should return
+     * SUCCESS.  When KEYEMPTY is returned, the caller should treat this as a
+     * deleted record and either retry the operation (in the case of position,
+     * search, and retrieveNext) or return KEYEMPTY (in the case of
+     * getCurrent).  KEYEMPTY is only returned when read-uncommitted is used.
+     *
+     * @return SUCCESS if the primary was read successfully, or KEYEMPTY if
+     * using read-uncommitted and the primary has been deleted, or KEYEMPTY if
+     * using read-uncommitted and the primary has been updated and no longer
+     * contains the secondary key.
+     *
+     * @throws DatabaseException to indicate a corrupt secondary reference if
+     * the primary record is not found and read-uncommitted is not used (or
+     * read-uncommitted is used, but we cannot verify that a valid deletion has
+     * occurred).
+     */
+    private OperationStatus readPrimaryAfterGet(DatabaseEntry key,
+                                                DatabaseEntry pKey,
+                                                DatabaseEntry data,
+                                                LockMode lockMode)
+        throws DatabaseException {
+
+        /*
+         * There is no need to read the primary if no data and no locking
+         * (read-uncommitted) are requested by the caller.  However, if partial
+         * data is requested along with read-uncommitted, then we must read all
+         * data in order to call the key creator below. [#14966]
+         */
+        DatabaseEntry copyToPartialEntry = null;
+        boolean readUncommitted = isReadUncommittedMode(lockMode);
+        if (readUncommitted && data.getPartial()) {
+            if (data.getPartialLength() == 0) {
+                /* No need to read the primary. */
+                data.setData(LogUtils.ZERO_LENGTH_BYTE_ARRAY);
+                return OperationStatus.SUCCESS;
+            } else {
+                /* Read all data and then copy the requested partial data. */
+                copyToPartialEntry = data;
+                data = new DatabaseEntry();
+            }
+        }
+
+        Locker locker = cursorImpl.getLocker();
+        Cursor cursor = null;
+        try {
+
+            /*
+             * Do not release non-transactional locks when reading the primary
+             * cursor.  They are held until all locks for this operation are
+             * released by the secondary cursor.  [#15573]
+             */
+            cursor = new Cursor(primaryDb, locker, null,
+                                true /*retainNonTxnLocks*/);
+            OperationStatus status =
+                cursor.search(pKey, data, lockMode, SearchMode.SET);
+            if (status != OperationStatus.SUCCESS) {
+
+                /*
+                 * If using read-uncommitted and the primary is not found,
+                 * check to see if the secondary key also has been deleted.  If
+                 * so, the primary was deleted in between reading the secondary
+                 * and the primary.  It is not corrupt, so we return KEYEMPTY.
+                 */
+                if (readUncommitted) {
+                    status = getCurrentInternal(key, pKey, lockMode);
+                    if (status == OperationStatus.KEYEMPTY) {
+                        return status;
+                    }
+                }
+
+                /* Secondary reference is corrupt. */
+                SecondaryDatabase secDb = (SecondaryDatabase) getDatabase();
+                throw secDb.secondaryCorruptException();
+            }
+
+            /*
+             * If using read-uncommitted and the primary was found, check to
+             * see if primary was updated so that it no longer contains the
+             * secondary key.  If it has been, return KEYEMPTY.
+             */
+            if (readUncommitted) {
+                SecondaryConfig conf =
+                    secondaryDb.getPrivateSecondaryConfig();
+
+                /*
+                 * If the secondary key is immutable, or the key creators are
+                 * null (the database is read only), then we can skip this
+                 * check.
+                 */
+                if (conf.getImmutableSecondaryKey()) {
+                    /* Do nothing. */
+                } else if (conf.getKeyCreator() != null) {
+
+                    /*
+                     * Check that the key we're using is equal to the key
+                     * returned by the key creator.
+                     */
+                    DatabaseEntry secKey = new DatabaseEntry();
+                    if (!conf.getKeyCreator().createSecondaryKey
+                            (secondaryDb, pKey, data, secKey) ||
+                        !secKey.equals(key)) {
+                        return OperationStatus.KEYEMPTY;
+                    }
+                } else if (conf.getMultiKeyCreator() != null) {
+
+                    /*
+                     * Check that the key we're using is in the set returned by
+                     * the key creator.
+                     */
+                    Set<DatabaseEntry> results = new HashSet<DatabaseEntry>();
+                    conf.getMultiKeyCreator().createSecondaryKeys
+                        (secondaryDb, pKey, data, results);
+                    if (!results.contains(key)) {
+                        return OperationStatus.KEYEMPTY;
+                    }
+                }
+            }
+
+            /*
+             * When a partial entry was requested but we read all the data,
+             * copy the requested partial data to the caller's entry. [#14966]
+             */
+            if (copyToPartialEntry != null) {
+                CursorImpl.setDbt(copyToPartialEntry, data.getData());
+            }
+            return OperationStatus.SUCCESS;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    /**
+     * Note that this flavor of checkArgs doesn't require that the dbt data is
+     * set.
+     */
+    private void checkArgsNoValRequired(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data) {
+        DatabaseUtil.checkForNullDbt(key, "key", false);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryDatabase.java b/src/com/sleepycat/je/SecondaryDatabase.java
new file mode 100644
index 0000000000000000000000000000000000000000..17243d20e0c376c18755fcf4d90dc89631af419b
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryDatabase.java
@@ -0,0 +1,1041 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryDatabase.java,v 1.62.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.GetMode;
+import com.sleepycat.je.dbi.PutMode;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.LockerFactory;
+import com.sleepycat.je.utilint.DatabaseUtil;
+
+/**
+ * A secondary database handle.
+ *
+ * <p>Secondary databases are opened with {@link
+ * Environment#openSecondaryDatabase Environment.openSecondaryDatabase} and are
+ * always associated with a single primary database.  The distinguishing
+ * characteristics of a secondary database are:</p>
+ *
+ * <ul> <li>Records are automatically added to a secondary database when
+ * records are added, modified and deleted in the primary database.  Direct
+ * calls to <code>put()</code> methods on a secondary database are
+ * prohibited.</li>
+ * <li>The {@link #delete delete} method of a secondary database will delete
+ * the primary record as well as all its associated secondary records.</li>
+ * <li>Calls to all <code>get()</code> methods will return the data from the
+ * associated primary database.</li>
+ * <li>Additional <code>get()</code> method signatures are provided to return
+ * the primary key in an additional <code>pKey</code> parameter.</li>
+ * <li>Calls to {@link #openCursor openCursor} will return a {@link
+ * SecondaryCursor}, which itself has <code>get()</code> methods that return
+ * the data of the primary database and additional <code>get()</code> method
+ * signatures for returning the primary key.</li>
+ * <li>The {@link #openSecondaryCursor openSecondaryCursor} method is provided
+ * to return a {@link SecondaryCursor} that doesn't require casting.</li>
+ * </ul>
+ * <p>Before opening or creating a secondary database you must implement
+ * the {@link SecondaryKeyCreator} or {@link SecondaryMultiKeyCreator}
+ * interface.</p>
+ *
+ * <p>For example, to create a secondary database that supports duplicates:</p>
+ *
+ * <pre>
+ *     Database primaryDb; // The primary database must already be open.
+ *     SecondaryKeyCreator keyCreator; // Your key creator implementation.
+ *     SecondaryConfig secConfig = new SecondaryConfig();
+ *     secConfig.setAllowCreate(true);
+ *     secConfig.setSortedDuplicates(true);
+ *     secConfig.setKeyCreator(keyCreator);
+ *     SecondaryDatabase newDb = env.openSecondaryDatabase(transaction,
+ *                                                         "myDatabaseName",
+ *                                                         primaryDb,
+ *                                                         secConfig);
+ * </pre>
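+ *
+ * <p>As an illustrative sketch only (the record layout below is hypothetical,
+ * not part of this API), a {@link SecondaryKeyCreator} might extract a
+ * fixed-width secondary key from the primary data:</p>
+ *
+ * <pre>
+ *     class FirstFourBytesKeyCreator implements SecondaryKeyCreator {
+ *         public boolean createSecondaryKey(SecondaryDatabase secDb,
+ *                                           DatabaseEntry key,
+ *                                           DatabaseEntry data,
+ *                                           DatabaseEntry result)
+ *             throws DatabaseException {
+ *
+ *             // Assume the first four bytes of the primary data hold the
+ *             // secondary key; returning false omits the record from the
+ *             // secondary index.
+ *             if (data.getSize() &lt; 4) {
+ *                 return false;
+ *             }
+ *             byte[] bytes = new byte[4];
+ *             System.arraycopy(data.getData(), data.getOffset(),
+ *                              bytes, 0, 4);
+ *             result.setData(bytes);
+ *             return true;
+ *         }
+ *     }
+ * </pre>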
+ *
+ * <p>If a primary database is to be associated with one or more secondary
+ * databases, it may not be configured for duplicates.</p>
+ *
+ * <p>Note that the associations between primary and secondary databases are
+ * not stored persistently.  Whenever a primary database is opened for write
+ * access by the application, the appropriate associated secondary databases
+ * should also be opened by the application.  This is necessary to ensure data
+ * integrity when changes are made to the primary database.</p>
+ */
+public class SecondaryDatabase extends Database {
+
+    /* For type-safe check against EMPTY_SET */
+    private static final Set<DatabaseEntry> EMPTY_SET =
+        Collections.emptySet();
+
+    private Database primaryDb;
+    private SecondaryConfig secondaryConfig;
+    private SecondaryTrigger secondaryTrigger;
+    private ForeignKeyTrigger foreignKeyTrigger;
+
+    /**
+     * Creates a secondary database but does not open or fully initialize it.
+     */
+    SecondaryDatabase(Environment env,
+                      SecondaryConfig secConfig,
+                      Database primaryDatabase)
+        throws DatabaseException {
+
+        super(env);
+        DatabaseUtil.checkForNullParam(primaryDatabase, "primaryDatabase");
+        primaryDatabase.checkRequiredDbState(OPEN, "Can't use as primary:");
+        if (primaryDatabase.configuration.getSortedDuplicates()) {
+            throw new IllegalArgumentException
+                ("Duplicates must not be allowed for a primary database: " +
+                 primaryDatabase.getDebugName());
+        }
+        if (env.getEnvironmentImpl() !=
+                primaryDatabase.getEnvironment().getEnvironmentImpl()) {
+            throw new IllegalArgumentException
+                ("Primary and secondary databases must be in the same" +
+                 " environment");
+        }
+        if (secConfig.getKeyCreator() != null &&
+            secConfig.getMultiKeyCreator() != null) {
+            throw new IllegalArgumentException
+                ("secConfig.getKeyCreator() and getMultiKeyCreator() may not" +
+                 " both be non-null");
+        }
+        if (!primaryDatabase.configuration.getReadOnly() &&
+            secConfig.getKeyCreator() == null &&
+            secConfig.getMultiKeyCreator() == null) {
+            throw new NullPointerException
+                ("secConfig and getKeyCreator()/getMultiKeyCreator()" +
+                 " may be null only if the primary database is read-only");
+        }
+        if (secConfig.getForeignKeyNullifier() != null &&
+            secConfig.getForeignMultiKeyNullifier() != null) {
+            throw new IllegalArgumentException
+                ("secConfig.getForeignKeyNullifier() and" +
+                 " getForeignMultiKeyNullifier() may not both be non-null");
+        }
+        if (secConfig.getForeignKeyDeleteAction() ==
+                         ForeignKeyDeleteAction.NULLIFY &&
+            secConfig.getForeignKeyNullifier() == null &&
+            secConfig.getForeignMultiKeyNullifier() == null) {
+            throw new NullPointerException
+                ("ForeignKeyNullifier or ForeignMultiKeyNullifier must be" +
+                 " non-null when ForeignKeyDeleteAction is NULLIFY");
+        }
+        if (secConfig.getForeignKeyNullifier() != null &&
+            secConfig.getMultiKeyCreator() != null) {
+            throw new IllegalArgumentException
+                ("ForeignKeyNullifier may not be used with" +
+                 " SecondaryMultiKeyCreator -- use" +
+                 " ForeignMultiKeyNullifier instead");
+        }
+        if (secConfig.getForeignKeyDatabase() != null) {
+            Database foreignDb = secConfig.getForeignKeyDatabase();
+            if (foreignDb.getDatabaseImpl().getSortedDuplicates()) {
+                throw new IllegalArgumentException
+                    ("Duplicates must not be allowed for a foreign key " +
+                     "database: " + foreignDb.getDebugName());
+            }
+        }
+        primaryDb = primaryDatabase;
+        secondaryTrigger = new SecondaryTrigger(this);
+        if (secConfig.getForeignKeyDatabase() != null) {
+            foreignKeyTrigger = new ForeignKeyTrigger(this);
+        }
+    }
+
+    /**
+     * Create a database, called by Environment
+     */
+    @Override
+    void initNew(Environment env,
+                 Locker locker,
+                 String databaseName,
+                 DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        super.initNew(env, locker, databaseName, dbConfig);
+        init(locker);
+    }
+
+    /**
+     * Open a database, called by Environment
+     */
+    @Override
+    void initExisting(Environment env,
+                      Locker locker,
+                      DatabaseImpl database,
+                      DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        /* Disallow one secondary associated with two different primaries. */
+        Database otherPriDb = database.findPrimaryDatabase();
+        if (otherPriDb != null &&
+            otherPriDb.getDatabaseImpl() != primaryDb.getDatabaseImpl()) {
+            throw new IllegalArgumentException
+                ("Secondary is already associated with a different primary: " +
+                 otherPriDb.getDebugName());
+        }
+
+        super.initExisting(env, locker, database, dbConfig);
+        init(locker);
+    }
+
+    /**
+     * Adds secondary to primary's list, and populates the secondary if needed.
+     *
+     * @param locker should be the locker used to open the database.  If a
+     * transactional locker, the population operations will occur in the same
+     * transaction; this may result in a large number of retained locks.  If a
+     * non-transactional locker, the Cursor will create a ThreadLocker (even if
+     * a BasicLocker used for handle locking is passed), and locks will not be
+     * retained.
+     */
+    private void init(Locker locker)
+        throws DatabaseException {
+
+        trace(Level.FINEST, "SecondaryDatabase open");
+
+        secondaryConfig = (SecondaryConfig) configuration;
+
+        /*
+         * Insert foreign key triggers at the front of the list and append
+         * secondary triggers at the end, so that ForeignKeyDeleteAction.ABORT
+         * is applied before deleting the secondary keys.
+         */
+        primaryDb.addTrigger(secondaryTrigger, false);
+
+        Database foreignDb = secondaryConfig.getForeignKeyDatabase();
+        if (foreignDb != null) {
+            foreignDb.addTrigger(foreignKeyTrigger, true);
+        }
+
+        /* Populate secondary if requested and secondary is empty. */
+        if (secondaryConfig.getAllowPopulate()) {
+            Cursor secCursor = null;
+            Cursor priCursor = null;
+            try {
+                secCursor = new Cursor(this, locker, null);
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                OperationStatus status = secCursor.position(key, data,
+                                                            LockMode.DEFAULT,
+                                                            true);
+                if (status == OperationStatus.NOTFOUND) {
+                    /* Is empty, so populate */
+                    priCursor = new Cursor(primaryDb, locker, null);
+                    status = priCursor.position(key, data, LockMode.DEFAULT,
+                                                true);
+                    while (status == OperationStatus.SUCCESS) {
+                        updateSecondary(locker, secCursor, key, null, data);
+                        status = priCursor.retrieveNext(key, data,
+                                                        LockMode.DEFAULT,
+                                                        GetMode.NEXT);
+                    }
+                }
+            } finally {
+                if (secCursor != null) {
+                    secCursor.close();
+                }
+                if (priCursor != null) {
+                    priCursor.close();
+                }
+            }
+        }
+    }
+
+    /**
+     * Closes a secondary database and disassociates it from its primary
+     * database. A secondary database should be closed before closing its
+     * associated primary database.
+     *
+     * {@inheritDoc}
+     */
+    @Override
+    public synchronized void close()
+        throws DatabaseException {
+
+        if (primaryDb != null && secondaryTrigger != null) {
+            primaryDb.removeTrigger(secondaryTrigger);
+        }
+        Database foreignDb = secondaryConfig.getForeignKeyDatabase();
+        if (foreignDb != null && foreignKeyTrigger != null) {
+            foreignDb.removeTrigger(foreignKeyTrigger);
+        }
+        super.close();
+    }
+
+    /**
+     * Should be called by the secondaryTrigger while holding a write lock on
+     * the trigger list.
+     */
+    void clearPrimary() {
+        primaryDb = null;
+        secondaryTrigger = null;
+    }
+
+    /**
+     * Should be called by the foreignKeyTrigger while holding a write lock on
+     * the trigger list.
+     */
+    void clearForeignKeyTrigger() {
+        foreignKeyTrigger = null;
+    }
+
+    /**
+     * Returns the primary database associated with this secondary database.
+     *
+     * @return the primary database associated with this secondary database.
+     */
+    public Database getPrimaryDatabase()
+        throws DatabaseException {
+
+        return primaryDb;
+    }
+
+    /**
+     * Returns a copy of the secondary configuration of this database.
+     *
+     * @return a copy of the secondary configuration of this database.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public SecondaryConfig getSecondaryConfig()
+        throws DatabaseException {
+
+        return (SecondaryConfig) getConfig();
+    }
+
+    /**
+     * Returns the secondary config without cloning, for internal use.
+     */
+    public SecondaryConfig getPrivateSecondaryConfig() {
+        return secondaryConfig;
+    }
+
+    /**
+     * Obtain a cursor on a database, returning a
+     * <code>SecondaryCursor</code>. Calling this method is the equivalent of
+     * calling {@link #openCursor} and casting the result to {@link
+     * SecondaryCursor}.
+     *
+     * @param txn To use a cursor for writing to a transactional database, an
+     * explicit transaction must be specified.  For read-only access to a
+     * transactional database, the transaction may be null.  For a
+     * non-transactional database, the transaction must be null.
+     *
+     * <p>To transaction-protect cursor operations, cursors must be opened and
+     * closed within the context of a transaction, and the txn parameter
+     * specifies the transaction context in which the cursor will be used.</p>
+     *
+     * @param cursorConfig The cursor attributes.  If null, default attributes
+     * are used.
+     *
+     * @return A secondary database cursor.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public SecondaryCursor openSecondaryCursor(Transaction txn,
+                                               CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        return (SecondaryCursor) openCursor(txn, cursorConfig);
+    }
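+
+    /*
+     * A minimal iteration sketch (illustrative only; "secDb" is an assumed
+     * open handle on a non-transactional secondary database):
+     *
+     *     SecondaryCursor cursor = secDb.openSecondaryCursor(null, null);
+     *     try {
+     *         DatabaseEntry secKey = new DatabaseEntry();
+     *         DatabaseEntry priKey = new DatabaseEntry();
+     *         DatabaseEntry data = new DatabaseEntry();
+     *         while (cursor.getNext(secKey, priKey, data, LockMode.DEFAULT) ==
+     *                OperationStatus.SUCCESS) {
+     *             // Each step yields the secondary key, the primary key and
+     *             // the primary record data.
+     *         }
+     *     } finally {
+     *         cursor.close();
+     *     }
+     */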
+
+    /**
+     * Overrides Database method.
+     */
+    @Override
+    Cursor newDbcInstance(Transaction txn,
+                          CursorConfig cursorConfig)
+        throws DatabaseException {
+
+        return new SecondaryCursor(this, txn, cursorConfig);
+    }
+
+    /**
+     * Deletes the primary key/data pair associated with the specified
+     * secondary key.  In the presence of duplicate key values, all primary
+     * records associated with the designated secondary key will be deleted.
+     *
+     * When the primary records are deleted, their associated secondary records
+     * are deleted as if {@link Database#delete} were called.  This includes,
+     * but is not limited to, the secondary record referenced by the given key.
+     */
+    @Override
+    public OperationStatus delete(Transaction txn, DatabaseEntry key)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        checkRequiredDbState(OPEN, "Can't call SecondaryDatabase.delete:");
+        trace(Level.FINEST, "SecondaryDatabase.delete", txn,
+              key, null, null);
+
+        Locker locker = null;
+        Cursor cursor = null;
+
+        OperationStatus commitStatus = OperationStatus.NOTFOUND;
+        try {
+            locker = LockerFactory.getWritableLocker
+                (envHandle,
+                 txn,
+                 isTransactional(),
+                 getDatabaseImpl().isReplicated()); // autoTxnIsReplicated
+
+            /* Read the primary key (the data of a secondary). */
+            cursor = new Cursor(this, locker, null);
+            DatabaseEntry pKey = new DatabaseEntry();
+            OperationStatus searchStatus =
+                cursor.search(key, pKey, LockMode.RMW, SearchMode.SET);
+
+            /*
+             * For each duplicate secondary key, delete the primary record and
+             * all its associated secondary records, including the one
+             * referenced by this secondary cursor.
+             */
+            while (searchStatus == OperationStatus.SUCCESS) {
+                commitStatus = primaryDb.deleteInternal(locker, pKey, null);
+                if (commitStatus != OperationStatus.SUCCESS) {
+                    throw secondaryCorruptException();
+                }
+                searchStatus = cursor.retrieveNext
+                    (key, pKey, LockMode.RMW, GetMode.NEXT_DUP);
+            }
+            return commitStatus;
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd(commitStatus);
+            }
+        }
+    }
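+
+    /*
+     * A minimal sketch (illustrative only; deletes every primary record whose
+     * secondary key matches, and with it all associated secondary records;
+     * "secDb" and "secKeyBytes" are hypothetical):
+     *
+     *     DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
+     *     OperationStatus status = secDb.delete(null, secKey);
+     *     // NOTFOUND is returned if no record carried that secondary key.
+     */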
+
+    /**
+     * Retrieves the key/data pair with the given key.  If the matching key has
+     * duplicate values, the first data item in the set of duplicates is
+     * returned. Retrieval of duplicates requires the use of {@link Cursor}
+     * operations.
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified to transaction-protect the operation, or null may be specified
+     * to perform the operation without transaction protection.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes
+     * are used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    @Override
+    public OperationStatus get(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+        throws DatabaseException {
+
+        return get(txn, key, new DatabaseEntry(), data, lockMode);
+    }
+
+    /**
+     * Retrieves the key/data pair with the given key.  If the matching key has
+     * duplicate values, the first data item in the set of duplicates is
+     * returned. Retrieval of duplicates requires the use of {@link Cursor}
+     * operations.
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified to transaction-protect the operation, or null may be specified
+     * to perform the operation without transaction protection.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param pKey the primary key returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does
+     * not need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus get(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry pKey,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", false);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        checkRequiredDbState(OPEN, "Can't call SecondaryDatabase.get:");
+        trace(Level.FINEST, "SecondaryDatabase.get", txn, key, null, lockMode);
+
+        CursorConfig cursorConfig = CursorConfig.DEFAULT;
+        if (lockMode == LockMode.READ_COMMITTED) {
+            cursorConfig = CursorConfig.READ_COMMITTED;
+            lockMode = null;
+        }
+
+        SecondaryCursor cursor = null;
+        try {
+            cursor = new SecondaryCursor(this, txn, cursorConfig);
+            return cursor.search(key, pKey, data, lockMode, SearchMode.SET);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
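+
+    /*
+     * A minimal lookup sketch (illustrative only; "secDb" and "secKeyBytes"
+     * are hypothetical):
+     *
+     *     DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
+     *     DatabaseEntry priKey = new DatabaseEntry();
+     *     DatabaseEntry data = new DatabaseEntry();
+     *     if (secDb.get(null, secKey, priKey, data, null) ==
+     *         OperationStatus.SUCCESS) {
+     *         // priKey identifies the primary record returned in data.
+     *     }
+     */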
+
+    /**
+     * This operation is not allowed with this method signature. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method with the <code>pKey</code> parameter should be
+     * used instead.
+     */
+    @Override
+    public OperationStatus getSearchBoth(Transaction txn,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        throw notAllowedException();
+    }
+
+    /**
+     * Retrieves the key/data pair with the specified secondary and primary
+     * key, that is, both the primary and secondary key items must match.
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified to transaction-protect the operation, or null may be specified
+     * to perform the operation without transaction protection.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param key the secondary key used as input.  It must be initialized with
+     * a non-null byte array by the caller.
+     *
+     * @param pKey the primary key used as input.  It must be initialized with a
+     * non-null byte array by the caller.
+     *
+     * @param data the primary data returned as output.  Its byte array does not
+     * need to be initialized by the caller.
+     *
+     * @param lockMode the locking attributes; if null, default attributes are
+     * used.
+     *
+     * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND
+     * OperationStatus.NOTFOUND} if no matching key/data pair is found;
+     * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS
+     * OperationStatus.SUCCESS}.
+     *
+     * @throws DeadlockException if the operation was selected to resolve a
+     * deadlock.
+     *
+     * @throws IllegalArgumentException if an invalid parameter was specified.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public OperationStatus getSearchBoth(Transaction txn,
+                                         DatabaseEntry key,
+                                         DatabaseEntry pKey,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        checkEnv();
+        DatabaseUtil.checkForNullDbt(key, "key", true);
+        DatabaseUtil.checkForNullDbt(pKey, "pKey", true);
+        DatabaseUtil.checkForNullDbt(data, "data", false);
+        checkRequiredDbState(OPEN,
+                             "Can't call SecondaryDatabase.getSearchBoth:");
+        trace(Level.FINEST, "SecondaryDatabase.getSearchBoth", txn, key, data,
+              lockMode);
+
+        CursorConfig cursorConfig = CursorConfig.DEFAULT;
+        if (lockMode == LockMode.READ_COMMITTED) {
+            cursorConfig = CursorConfig.READ_COMMITTED;
+            lockMode = null;
+        }
+
+        SecondaryCursor cursor = null;
+        try {
+            cursor = new SecondaryCursor(this, txn, cursorConfig);
+            return cursor.search(key, pKey, data, lockMode, SearchMode.BOTH);
+        } catch (Error E) {
+            DbInternal.envGetEnvironmentImpl(envHandle).invalidate(E);
+            throw E;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus put(Transaction txn,
+                               DatabaseEntry key,
+                               DatabaseEntry data)
+        throws DatabaseException {
+
+        throw notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus putNoOverwrite(Transaction txn,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        throw notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public OperationStatus putNoDupData(Transaction txn,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data)
+        throws DatabaseException {
+
+        throw notAllowedException();
+    }
+
+    /**
+     * This operation is not allowed on a secondary database. {@link
+     * UnsupportedOperationException} will always be thrown by this method.
+     * The corresponding method on the primary database should be used instead.
+     */
+    @Override
+    public JoinCursor join(Cursor[] cursors, JoinConfig config)
+        throws DatabaseException {
+
+        throw notAllowedException();
+    }
+
+    /**
+     * Updates a single secondary when a put() or delete() is performed on the
+     * primary.
+     *
+     * @param locker the internal locker.
+     *
+     * @param cursor secondary cursor to use, or null if this method should
+     * open and close a cursor if one is needed.
+     *
+     * @param priKey the primary key.
+     *
+     * @param oldData the primary data before the change, or null if the record
+     * did not previously exist.
+     *
+     * @param newData the primary data after the change, or null if the record
+     * has been deleted.
+     */
+    void updateSecondary(Locker locker,
+                         Cursor cursor,
+                         DatabaseEntry priKey,
+                         DatabaseEntry oldData,
+                         DatabaseEntry newData)
+        throws DatabaseException {
+
+        /*
+         * If we're updating the primary and the secondary key cannot be
+         * changed, optimize for that case by doing nothing.
+         */
+        if (secondaryConfig.getImmutableSecondaryKey() &&
+            oldData != null && newData != null) {
+            return;
+        }
+
+        SecondaryKeyCreator keyCreator = secondaryConfig.getKeyCreator();
+        if (keyCreator != null) {
+            /* Each primary record may have a single secondary key. */
+            assert secondaryConfig.getMultiKeyCreator() == null;
+
+            /* Get old and new secondary keys. */
+            DatabaseEntry oldSecKey = null;
+            if (oldData != null) {
+                oldSecKey = new DatabaseEntry();
+                if (!keyCreator.createSecondaryKey(this, priKey, oldData,
+                                                   oldSecKey)) {
+                    oldSecKey = null;
+                }
+            }
+            DatabaseEntry newSecKey = null;
+            if (newData != null) {
+                newSecKey = new DatabaseEntry();
+                if (!keyCreator.createSecondaryKey(this, priKey, newData,
+                                                   newSecKey)) {
+                    newSecKey = null;
+                }
+            }
+
+            /* Update secondary if old and new keys are unequal. */
+            if ((oldSecKey != null && !oldSecKey.equals(newSecKey)) ||
+                (newSecKey != null && !newSecKey.equals(oldSecKey))) {
+
+                boolean localCursor = (cursor == null);
+                if (localCursor) {
+                    cursor = new Cursor(this, locker, null);
+                }
+                try {
+                    /* Delete the old key. */
+                    if (oldSecKey != null) {
+                        deleteKey(cursor, priKey, oldSecKey);
+                    }
+                    /* Insert the new key. */
+                    if (newSecKey != null) {
+                        insertKey(locker, cursor, priKey, newSecKey);
+                    }
+                } finally {
+                    if (localCursor && cursor != null) {
+                        cursor.close();
+                    }
+                }
+            }
+        } else {
+            /* Each primary record may have multiple secondary keys. */
+            SecondaryMultiKeyCreator multiKeyCreator =
+                secondaryConfig.getMultiKeyCreator();
+            assert multiKeyCreator != null;
+
+            /* Get old and new secondary keys. */
+            Set<DatabaseEntry> oldKeys = EMPTY_SET;
+            Set<DatabaseEntry> newKeys = EMPTY_SET;
+            if (oldData != null) {
+                oldKeys = new HashSet<DatabaseEntry>();
+                multiKeyCreator.createSecondaryKeys(this, priKey,
+                                                    oldData, oldKeys);
+            }
+            if (newData != null) {
+                newKeys = new HashSet<DatabaseEntry>();
+                multiKeyCreator.createSecondaryKeys(this, priKey,
+                                                    newData, newKeys);
+            }
+
+            /* Update the secondary if there is a difference. */
+            if (!oldKeys.equals(newKeys)) {
+
+                boolean localCursor = (cursor == null);
+                if (localCursor) {
+                    cursor = new Cursor(this, locker, null);
+                }
+                try {
+                    /* Delete old keys that are no longer present. */
+                    Set<DatabaseEntry> oldKeysCopy = oldKeys;
+                    if (oldKeys != EMPTY_SET) {
+                        oldKeysCopy = new HashSet<DatabaseEntry>(oldKeys);
+                        oldKeys.removeAll(newKeys);
+                        for (Iterator<DatabaseEntry> i = oldKeys.iterator();
+                             i.hasNext();) {
+                            DatabaseEntry oldKey = i.next();
+                            deleteKey(cursor, priKey, oldKey);
+                        }
+                    }
+                    /* Insert new keys that were not present before. */
+                    if (newKeys != EMPTY_SET) {
+                        newKeys.removeAll(oldKeysCopy);
+                        for (Iterator<DatabaseEntry> i = newKeys.iterator();
+                             i.hasNext();) {
+                            DatabaseEntry newKey = i.next();
+                            insertKey(locker, cursor, priKey, newKey);
+                        }
+                    }
+                } finally {
+                    if (localCursor && cursor != null) {
+                        cursor.close();
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Deletes an old secondary key.
+     */
+    private void deleteKey(Cursor cursor,
+                           DatabaseEntry priKey,
+                           DatabaseEntry oldSecKey)
+        throws DatabaseException {
+
+        OperationStatus status =
+            cursor.search(oldSecKey, priKey,
+                          LockMode.RMW,
+                          SearchMode.BOTH);
+        if (status == OperationStatus.SUCCESS) {
+            cursor.deleteInternal();
+        } else {
+            throw new DatabaseException
+                ("Secondary " + getDebugName() +
+                " is corrupt: the primary record contains a key" +
+                " that is not present in the secondary");
+        }
+    }
+
+    /**
+     * Inserts a new secondary key.
+     */
+    private void insertKey(Locker locker,
+                           Cursor cursor,
+                           DatabaseEntry priKey,
+                           DatabaseEntry newSecKey)
+        throws DatabaseException {
+
+        /* Check for the existence of a foreign key. */
+        Database foreignDb =
+            secondaryConfig.getForeignKeyDatabase();
+        if (foreignDb != null) {
+            Cursor foreignCursor = null;
+            try {
+                foreignCursor = new Cursor(foreignDb, locker,
+                                           null);
+                DatabaseEntry tmpData = new DatabaseEntry();
+                OperationStatus status =
+                    foreignCursor.search(newSecKey, tmpData,
+                                         LockMode.DEFAULT,
+                                         SearchMode.SET);
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DatabaseException
+                        ("Secondary " + getDebugName() +
+                         " foreign key not allowed: it is not" +
+                         " present in the foreign database " +
+                         foreignDb.getDebugName());
+                }
+            } finally {
+                if (foreignCursor != null) {
+                    foreignCursor.close();
+                }
+            }
+        }
+
+        /* Insert the new key. */
+        OperationStatus status;
+        if (configuration.getSortedDuplicates()) {
+            status = cursor.putInternal(newSecKey, priKey,
+                                        PutMode.NODUP);
+        } else {
+            status = cursor.putInternal(newSecKey, priKey,
+                                        PutMode.NOOVERWRITE);
+        }
+        if (status != OperationStatus.SUCCESS) {
+            throw new DatabaseException
+                ("Could not insert secondary key in " +
+                 getDebugName() + ' ' + status);
+        }
+    }
+
+    /**
+     * Called by the ForeignKeyTrigger when a record in the foreign database is
+     * deleted.
+     *
+     * @param secKey is the primary key of the foreign database, which is the
+     * secondary key (ordinary key) of this secondary database.
+     */
+    void onForeignKeyDelete(Locker locker, DatabaseEntry secKey)
+        throws DatabaseException {
+
+        ForeignKeyDeleteAction deleteAction =
+            secondaryConfig.getForeignKeyDeleteAction();
+
+        /* Use RMW if we're going to be deleting the secondary records. */
+        LockMode lockMode = (deleteAction == ForeignKeyDeleteAction.ABORT) ?
+            LockMode.DEFAULT :
+            LockMode.RMW;
+
+        /*
+         * Use the deleted foreign primary key to read the data of this
+         * database, which is the associated primary's key.
+         */
+        DatabaseEntry priKey = new DatabaseEntry();
+        Cursor cursor = null;
+        OperationStatus status;
+        try {
+	    cursor = new Cursor(this, locker, null);
+            status = cursor.search(secKey, priKey, lockMode,
+                                   SearchMode.SET);
+            while (status == OperationStatus.SUCCESS) {
+
+                if (deleteAction == ForeignKeyDeleteAction.ABORT) {
+
+                    /*
+                     * ABORT - throw an exception to cause the user to abort
+                     * the transaction.
+                     */
+                    throw new DatabaseException
+                        ("Secondary " + getDebugName() +
+                         " refers to a foreign key that has been deleted" +
+                         " (ForeignKeyDeleteAction.ABORT)");
+
+                } else if (deleteAction == ForeignKeyDeleteAction.CASCADE) {
+
+                    /*
+                     * CASCADE - delete the associated primary record.
+                     */
+                    Cursor priCursor = null;
+                    try {
+                        DatabaseEntry data = new DatabaseEntry();
+                        priCursor = new Cursor(primaryDb, locker, null);
+                        status = priCursor.search(priKey, data, LockMode.RMW,
+                                                  SearchMode.SET);
+                        if (status == OperationStatus.SUCCESS) {
+                            priCursor.delete();
+                        } else {
+                            throw secondaryCorruptException();
+                        }
+                    } finally {
+                        if (priCursor != null) {
+                            priCursor.close();
+                        }
+                    }
+
+                } else if (deleteAction == ForeignKeyDeleteAction.NULLIFY) {
+
+                    /*
+                     * NULLIFY - set the secondary key to null in the
+                     * associated primary record.
+                     */
+                    Cursor priCursor = null;
+                    try {
+                        DatabaseEntry data = new DatabaseEntry();
+                        priCursor = new Cursor(primaryDb, locker, null);
+                        status = priCursor.search(priKey, data, LockMode.RMW,
+                                                  SearchMode.SET);
+                        if (status == OperationStatus.SUCCESS) {
+                            ForeignMultiKeyNullifier multiNullifier =
+                                secondaryConfig.getForeignMultiKeyNullifier();
+                            if (multiNullifier != null) {
+                                if (multiNullifier.nullifyForeignKey
+                                        (this, priKey, data, secKey)) {
+                                    priCursor.putCurrent(data);
+                                }
+                            } else {
+                                ForeignKeyNullifier nullifier =
+                                    secondaryConfig.getForeignKeyNullifier();
+                                if (nullifier.nullifyForeignKey
+                                        (this, data)) {
+                                    priCursor.putCurrent(data);
+                                }
+                            }
+                        } else {
+                            throw secondaryCorruptException();
+                        }
+                    } finally {
+                        if (priCursor != null) {
+                            priCursor.close();
+                        }
+                    }
+                } else {
+                    /* Should never occur. */
+                    throw new IllegalStateException();
+                }
+
+                status = cursor.retrieveNext(secKey, priKey, LockMode.DEFAULT,
+                                             GetMode.NEXT_DUP);
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    DatabaseException secondaryCorruptException()
+        throws DatabaseException {
+
+        throw new DatabaseException
+            ("Secondary " + getDebugName() + " is corrupt: it refers" +
+             " to a missing key in the primary database");
+    }
+
+    static UnsupportedOperationException notAllowedException() {
+
+        throw new UnsupportedOperationException
+            ("Operation not allowed on a secondary");
+    }
+
+    /**
+     * Sends trace messages to the java.util.logging logger. Don't rely on
+     * the logger alone to conditionalize whether we send this message; we
+     * don't even want to construct the message if the level is not enabled.
+     */
+    void trace(Level level,
+               String methodName)
+        throws DatabaseException {
+
+        Logger logger = envHandle.getEnvironmentImpl().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(methodName);
+            sb.append(" name=").append(getDebugName());
+            sb.append(" primary=").append(primaryDb.getDebugName());
+
+            logger.log(level, sb.toString());
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryKeyCreator.java b/src/com/sleepycat/je/SecondaryKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..ecedb8d82c21b7d45e5425aff17cb387af446066
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryKeyCreator.java
@@ -0,0 +1,117 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryKeyCreator.java,v 1.13.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * The interface implemented for extracting single-valued secondary keys from
+ * primary records.
+ *
+ * <p>The key creator object is specified by calling {@link
+ * SecondaryConfig#setKeyCreator SecondaryConfig.setKeyCreator}. The secondary
+ * database configuration is specified when calling {@link
+ * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}.</p>
+ *
+ * <p>For example:</p>
+ *
+ * <pre>
+ *     class MyKeyCreator implements SecondaryKeyCreator {
+ *         public boolean createSecondaryKey(SecondaryDatabase secondary,
+ *                                             DatabaseEntry key,
+ *                                             DatabaseEntry data,
+ *                                             DatabaseEntry result)
+ *             throws DatabaseException {
+ *             //
+ *             // DO HERE: Extract the secondary key from the primary key and
+ *             // data, and set the secondary key into the result parameter.
+ *             //
+ *             return true;
+ *         }
+ *     }
+ *     ...
+ *     SecondaryConfig secConfig = new SecondaryConfig();
+ *     secConfig.setKeyCreator(new MyKeyCreator());
+ *     // Now pass secConfig to Environment.openSecondaryDatabase
+ * </pre>
+ *
+ * <p>Use this interface when zero or one secondary key is present in a single
+ * primary record, in other words, for many-to-one and one-to-one
+ * relationships. When more than one secondary key may be present (for
+ * many-to-many and one-to-many relationships), use the {@link
+ * SecondaryMultiKeyCreator} interface instead.  The table below summarizes how
+ * to create all four variations of relationships.</p>
+ * <div>
+ * <table border="1">
+ *     <tr><th>Relationship</th>
+ *         <th>Interface</th>
+ *         <th>Duplicates</th>
+ *         <th>Example</th>
+ *     </tr>
+ *     <tr><td>One-to-one</td>
+ *         <td>{@link SecondaryKeyCreator}</td>
+ *         <td>No</td>
+ *         <td>A person record with a unique social security number key.</td>
+ *     </tr>
+ *     <tr><td>Many-to-one</td>
+ *         <td>{@link SecondaryKeyCreator}</td>
+ *         <td>Yes</td>
+ *         <td>A person record with a non-unique employer key.</td>
+ *     </tr>
+ *     <tr><td>One-to-many</td>
+ *         <td>{@link SecondaryMultiKeyCreator}</td>
+ *         <td>No</td>
+ *         <td>A person record with multiple unique email address keys.</td>
+ *     </tr>
+ *     <tr><td>Many-to-many</td>
+ *         <td>{@link SecondaryMultiKeyCreator}</td>
+ *         <td>Yes</td>
+ *         <td>A person record with multiple non-unique organization keys.</td>
+ *     </tr>
+ * </table>
+ *
+ * </div>
+ *
+ * <p>To configure a database for duplicates, pass true to {@link
+ * DatabaseConfig#setSortedDuplicates}.</p>
+ */
+public interface SecondaryKeyCreator {
+
+    /**
+     * Creates a secondary key entry, given a primary key and data entry.
+     *
+     * <p>A secondary key may be derived from the primary key, primary data, or
+     * a combination of the primary key and data.  For secondary keys that are
+     * optional, the key creator method may return false and the key/data pair
+     * will not be indexed.  To ensure the integrity of a secondary database
+     * the key creator method must always return the same result for a given
+     * set of input parameters.</p>
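+     *
+     * <p>A minimal sketch that indexes the first four bytes of the primary
+     * data (the fixed four-byte layout is an illustrative assumption):</p>
+     * <pre>
+     *     public boolean createSecondaryKey(SecondaryDatabase secondary,
+     *                                       DatabaseEntry key,
+     *                                       DatabaseEntry data,
+     *                                       DatabaseEntry result) {
+     *         if (data.getSize() &lt; 4) {
+     *             return false;  // optional key: record is not indexed
+     *         }
+     *         byte[] secKey = new byte[4];
+     *         System.arraycopy(data.getData(), data.getOffset(),
+     *                          secKey, 0, 4);
+     *         result.setData(secKey);
+     *         return true;
+     *     }
+     * </pre>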
+     *
+     * @param secondary the database to which the secondary key will be
+     * added. This parameter is passed for informational purposes but is not
+     * commonly used.
+     *
+     * @param key the primary key entry.  This parameter must not be modified
+     * by this method.
+     *
+     * @param data the primary data entry.  This parameter must not be modified
+     * by this method.
+     *
+     * @param result the secondary key created by this method.
+     *
+     * @return true if a key was created, or false to indicate that the key is
+     * not present.
+     *
+     * @throws DatabaseException if an error occurs attempting to create the
+     * secondary key.
+     */
+    public boolean createSecondaryKey(SecondaryDatabase secondary,
+				      DatabaseEntry key,
+				      DatabaseEntry data,
+				      DatabaseEntry result)
+	throws DatabaseException;
+}
diff --git a/src/com/sleepycat/je/SecondaryMultiKeyCreator.java b/src/com/sleepycat/je/SecondaryMultiKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..ae5f6f57c8509df7fbc538a73292dfb72813b967
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryMultiKeyCreator.java
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryMultiKeyCreator.java,v 1.9.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.util.Set;
+
+/**
+ * The interface implemented for extracting multi-valued secondary keys from
+ * primary records.
+ *
+ * <p>The key creator object is specified by calling {@link
+ * SecondaryConfig#setMultiKeyCreator SecondaryConfig.setMultiKeyCreator}. The
+ * secondary database configuration is specified when calling {@link
+ * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}.</p>
+ *
+ * <p>For example:</p>
+ *
+ * <pre>
+ *     class MyMultiKeyCreator implements SecondaryMultiKeyCreator {
+ *         public void createSecondaryKeys(SecondaryDatabase secondary,
+ *                                         DatabaseEntry key,
+ *                                         DatabaseEntry data,
+ *                                         Set&lt;DatabaseEntry&gt; results)
+ *             throws DatabaseException {
+ *             //
+ *             // DO HERE: Extract the secondary keys from the primary key
+ *             // and data.  For each key extracted, create a DatabaseEntry
+ *             // and add it to the results set.
+ *             //
+ *         }
+ *     }
+ *     ...
+ *     SecondaryConfig secConfig = new SecondaryConfig();
+ *     secConfig.setMultiKeyCreator(new MyMultiKeyCreator());
+ *     // Now pass secConfig to Environment.openSecondaryDatabase
+ * </pre>
+ *
+ * <p>Use this interface when any number of secondary keys may be present in a
+ * single primary record, in other words, for many-to-many and one-to-many
+ * relationships. When only zero or one secondary key is present (for
+ * many-to-one and one-to-one relationships) you may use the {@link
+ * SecondaryKeyCreator} interface instead. The table below summarizes how to
+ * create all four variations of relationships.</p>
+ * <div>
+ * <table border="1">
+ *     <tr><th>Relationship</th>
+ *         <th>Interface</th>
+ *         <th>Duplicates</th>
+ *         <th>Example</th>
+ *     </tr>
+ *     <tr><td>One-to-one</td>
+ *         <td>{@link SecondaryKeyCreator}</td>
+ *         <td>No</td>
+ *         <td>A person record with a unique social security number key.</td>
+ *     </tr>
+ *     <tr><td>Many-to-one</td>
+ *         <td>{@link SecondaryKeyCreator}</td>
+ *         <td>Yes</td>
+ *         <td>A person record with a non-unique employer key.</td>
+ *     </tr>
+ *     <tr><td>One-to-many</td>
+ *         <td>{@link SecondaryMultiKeyCreator}</td>
+ *         <td>No</td>
+ *         <td>A person record with multiple unique email address keys.</td>
+ *     </tr>
+ *     <tr><td>Many-to-many</td>
+ *         <td>{@link SecondaryMultiKeyCreator}</td>
+ *         <td>Yes</td>
+ *         <td>A person record with multiple non-unique organization keys.</td>
+ *     </tr>
+ * </table>
+ *
+ * </div>
+ *
+ * <p>To configure a database for duplicates, pass true to {@link
+ * DatabaseConfig#setSortedDuplicates}.</p>
+ *
+ * <p>Note that <code>SecondaryMultiKeyCreator</code> may also be used for
+ * single key secondaries (many-to-one and one-to-one); in this case, at most a
+ * single key is added to the results set.
+ * <code>SecondaryMultiKeyCreator</code> is only slightly less efficient than
+ * {@link SecondaryKeyCreator} in that two or three temporary sets must be
+ * created to hold the results.</p>
+ *
+ * @see SecondaryConfig
+ */
+public interface SecondaryMultiKeyCreator {
+
+    /**
+     * Creates secondary key entries, given a primary key and data entry.
+     *
+     * <p>A secondary key may be derived from the primary key, primary data, or
+     * a combination of the primary key and data.  Zero or more secondary keys
+     * may be derived from the primary record and returned in the results
+     * parameter. To ensure the integrity of a secondary database the key
+     * creator method must always return the same results for a given set of
+     * input parameters.</p>
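+     *
+     * <p>A minimal sketch that treats the primary data as a run of
+     * four-byte tokens and indexes each one (the layout is an illustrative
+     * assumption):</p>
+     * <pre>
+     *     public void createSecondaryKeys(SecondaryDatabase secondary,
+     *                                     DatabaseEntry key,
+     *                                     DatabaseEntry data,
+     *                                     Set&lt;DatabaseEntry&gt; results) {
+     *         byte[] bytes = data.getData();
+     *         int end = data.getOffset() + data.getSize();
+     *         for (int off = data.getOffset(); off + 4 &lt;= end;
+     *              off += 4) {
+     *             byte[] secKey = new byte[4];
+     *             System.arraycopy(bytes, off, secKey, 0, 4);
+     *             results.add(new DatabaseEntry(secKey));
+     *         }
+     *     }
+     * </pre>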
+     *
+     * @param secondary the database to which the secondary key will be
+     * added. This parameter is passed for informational purposes but is not
+     * commonly used.
+     *
+     * @param key the primary key entry.  This parameter must not be modified
+     * by this method.
+     *
+     * @param data the primary data entry.  This parameter must not be modified
+     * by this method.
+     *
+     * @param results the set to contain the secondary key DatabaseEntry
+     * objects created by this method.
+     *
+     * @throws DatabaseException if an error occurs attempting to create the
+     * secondary key.
+     */
+    public void createSecondaryKeys(SecondaryDatabase secondary,
+				    DatabaseEntry key,
+				    DatabaseEntry data,
+				    Set<DatabaseEntry> results)
+	throws DatabaseException;
+}
diff --git a/src/com/sleepycat/je/SecondaryTrigger.java b/src/com/sleepycat/je/SecondaryTrigger.java
new file mode 100644
index 0000000000000000000000000000000000000000..3d3ae1b0c01f9d65b7af7ad4df655c85f1b8a7c5
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryTrigger.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryTrigger.java,v 1.14.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.txn.Locker;
+
+class SecondaryTrigger implements DatabaseTrigger {
+
+    private SecondaryDatabase secDb;
+
+    SecondaryTrigger(SecondaryDatabase secDb) {
+
+        this.secDb = secDb;
+    }
+
+    final SecondaryDatabase getDb() {
+
+        return secDb;
+    }
+
+    public void triggerAdded(Database db) {
+    }
+
+    public void triggerRemoved(Database db) {
+
+        secDb.clearPrimary();
+    }
+
+    public void databaseUpdated(Database db,
+                                Locker locker,
+                                DatabaseEntry priKey,
+                                DatabaseEntry oldData,
+                                DatabaseEntry newData)
+        throws DatabaseException {
+
+        secDb.updateSecondary(locker, null, priKey, oldData, newData);
+    }
+}
diff --git a/src/com/sleepycat/je/Sequence.java b/src/com/sleepycat/je/Sequence.java
new file mode 100644
index 0000000000000000000000000000000000000000..f40c44083c1e827cb27cfde47c599218e1c03956
--- /dev/null
+++ b/src/com/sleepycat/je/Sequence.java
@@ -0,0 +1,549 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: Sequence.java,v 1.15 2008/01/17 17:22:10 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.LockerFactory;
+
+/**
+ * A Sequence handle is used to manipulate a sequence record in a
+ * database. Sequence handles are opened using the {@link
+ * com.sleepycat.je.Database#openSequence Database.openSequence} method.
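+ *
+ * <p>A minimal usage sketch (the database myDb and the key name
+ * "mySequence" are illustrative; myDb must not be configured for
+ * duplicates):</p>
+ *
+ * <pre>
+ *     SequenceConfig config = new SequenceConfig();
+ *     config.setAllowCreate(true);
+ *     DatabaseEntry key = new DatabaseEntry("mySequence".getBytes());
+ *     Sequence seq = myDb.openSequence(null, key, config);
+ *     long value = seq.get(null, 1);   // allocate the next value
+ *     seq.close();
+ * </pre>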
+ */
+public class Sequence {
+
+    private static final byte FLAG_INCR = ((byte) 0x1);
+    private static final byte FLAG_WRAP = ((byte) 0x2);
+    private static final byte FLAG_OVER = ((byte) 0x4);
+
+    /* Allocation size for the record data. */
+    private static final int MAX_DATA_SIZE = 50;
+
+    /* Version of the format for fields stored in the sequence record. */
+    private static final byte CURRENT_VERSION = 1;
+
+    /* A sequence is a unique record in a database. */
+    private Database db;
+    private DatabaseEntry key;
+
+    /* Persistent fields. */
+    private boolean wrapAllowed;
+    private boolean increment;
+    private boolean overflow;
+    private long rangeMin;
+    private long rangeMax;
+    private long storedValue;
+
+    /* Handle-specific fields. */
+    private int cacheSize;
+    private long cacheValue;
+    private long cacheLast;
+    private int nGets;
+    private int nCachedGets;
+    private TransactionConfig autoCommitConfig;
+    private Logger logger;
+
+    /*
+     * The cache holds the range of values [cacheValue, cacheLast], which is
+     * the same as [cacheValue, storedValue) at the time the record is written.
+     * At store time, cacheLast is set to one before (after) storedValue.
+     *
+     * storedValue may be used by other Sequence handles with separate caches.
+     * storedValue is always the next value to be returned by any handle that
+     * runs out of cached values.
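+     *
+     * For example (illustrative numbers): with increment=true, cacheSize=5
+     * and storedValue=100, a get() that misses the cache advances
+     * storedValue to 105 and sets cacheValue=100 and cacheLast=104; the
+     * next five values are then returned from the cache without another
+     * database write.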
+     */
+
+    /**
+     * Opens a sequence handle, adding the sequence record if appropriate.
+     */
+    Sequence(Database db,
+             Transaction txn,
+             DatabaseEntry key,
+             SequenceConfig config)
+        throws DatabaseException {
+
+        if (db.getDatabaseImpl().getSortedDuplicates()) {
+            throw new IllegalArgumentException
+                ("Sequences not supported in databases configured for " +
+                 "duplicates");
+        }
+
+        SequenceConfig useConfig = (config != null) ?
+            config : SequenceConfig.DEFAULT;
+
+        if (useConfig.getRangeMin() >= useConfig.getRangeMax()) {
+            throw new IllegalArgumentException
+                ("Minimum sequence value must be less than the maximum");
+        }
+
+        if (useConfig.getInitialValue() > useConfig.getRangeMax() ||
+            useConfig.getInitialValue() < useConfig.getRangeMin()) {
+            throw new IllegalArgumentException
+                ("Initial sequence value is out of range");
+        }
+
+        if (useConfig.getRangeMin() >
+            useConfig.getRangeMax() - useConfig.getCacheSize()) {
+            throw new IllegalArgumentException
+                ("The cache size is larger than the sequence range");
+        }
+
+        if (useConfig.getAutoCommitNoSync()) {
+            autoCommitConfig = new TransactionConfig();
+            autoCommitConfig.setNoSync(true);
+        } else {
+            /* Use the environment's default transaction config. */
+            autoCommitConfig = null;
+        }
+
+        this.db = db;
+        this.key = copyEntry(key);
+        logger = db.getEnvironment().getEnvironmentImpl().getLogger();
+
+        /* Perform an auto-commit transaction to create the sequence. */
+        Locker locker = null;
+        Cursor cursor = null;
+        OperationStatus status = OperationStatus.NOTFOUND;
+        try {
+            locker = LockerFactory.getWritableLocker
+                (db.getEnvironment(),
+                 txn,
+                 db.isTransactional(),
+		 false,
+                 db.getDatabaseImpl().isReplicated(), // autoTxnIsReplicated
+                 autoCommitConfig);
+
+            cursor = new Cursor(db, locker, null);
+
+            if (useConfig.getAllowCreate()) {
+
+                /* Get the persistent fields from the config. */
+                rangeMin = useConfig.getRangeMin();
+                rangeMax = useConfig.getRangeMax();
+                increment = !useConfig.getDecrement();
+                wrapAllowed = useConfig.getWrap();
+                storedValue = useConfig.getInitialValue();
+
+                /*
+                 * To avoid dependence on SerializableIsolation, try
+                 * putNoOverwrite first.  If it fails, then try to get an
+                 * existing record.
+                 */
+                status = cursor.putNoOverwrite(key, makeData());
+
+                if (status == OperationStatus.KEYEXIST) {
+                    if (useConfig.getExclusiveCreate()) {
+                        throw new DatabaseException
+                            ("ExclusiveCreate=true and the sequence record " +
+                             "already exists.");
+                    }
+                    if (!readData(cursor, null)) {
+                        throw new DatabaseException
+                            ("Sequence record removed during openSequence.");
+                    }
+                    status = OperationStatus.SUCCESS;
+                }
+            } else {
+
+                /* Get an existing record. */
+                if (!readData(cursor, null)) {
+                    throw new DatabaseException
+                        ("AllowCreate=false and the sequence record " +
+                         "does not exist.");
+                }
+                status = OperationStatus.SUCCESS;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd(status);
+            }
+        }
+
+        /*
+         * cacheLast is initialized such that the cache will be considered
+         * empty the first time get() is called.
+         */
+        cacheSize = useConfig.getCacheSize();
+        cacheValue = storedValue;
+        cacheLast = increment ? (storedValue - 1) : (storedValue + 1);
+    }
+
+    /**
+     * Closes a sequence.  Any unused cached values are lost.
+     *
+     * <p>The sequence handle may not be used again after this method has
+     * been called, regardless of the method's success or failure.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void close()
+        throws DatabaseException {
+
+        /* Defined only for DB compatibility and possible future use. */
+    }
+
+    /**
+     * Returns the next available element in the sequence and changes the
+     * sequence value by <code>delta</code>.  The value of <code>delta</code>
+     * must be greater than zero.  If there are enough cached values in the
+     * sequence handle then they will be returned.  Otherwise the next value
+     * will be fetched from the database and incremented (decremented) by
+     * enough to cover the <code>delta</code> and the next batch of cached
+     * values.
+     *
+     * <p>This method is synchronized to protect updating of the cached
+     * value, since multiple threads may share a single handle.  Multiple
+     * handles for the same database/key may be used to increase
+     * concurrency.</p>
+     *
+     * <p>The <code>txn</code> handle must be null if the sequence handle was
+     * opened with a non-zero cache size.</p>
+     *
+     * <p>For maximum concurrency, a non-zero cache size should be specified
+     * prior to opening the sequence handle, the <code>txn</code> handle should
+     * be <code>null</code>, and {@link
+     * com.sleepycat.je.SequenceConfig#setAutoCommitNoSync
+     * SequenceConfig.setAutoCommitNoSync} should be called to disable log
+     * flushes.</p>
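+     *
+     * <p>For example (a sketch; the cache size of 100 and the names myDb
+     * and key are illustrative):</p>
+     * <pre>
+     *     SequenceConfig config = new SequenceConfig();
+     *     config.setAllowCreate(true);
+     *     config.setCacheSize(100);
+     *     config.setAutoCommitNoSync(true);
+     *     Sequence seq = myDb.openSequence(null, key, config);
+     *     long next = seq.get(null, 1); // null txn, served from the cache
+     * </pre>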
+     *
+     * @param txn For a transactional database, an explicit transaction may be
+     * specified, or null may be specified to use auto-commit.  For a
+     * non-transactional database, null must be specified.
+     *
+     * @param delta the amount by which to increment or decrement the sequence
+     *
+     * @return the next available element in the sequence
+     */
+    public synchronized long get(Transaction txn, int delta)
+        throws DatabaseException {
+
+        /* Check parameters, being careful of overflow. */
+        if (delta <= 0) {
+            throw new IllegalArgumentException
+                ("Sequence delta must be greater than zero");
+        }
+        if (rangeMin > rangeMax - delta) {
+            throw new IllegalArgumentException
+                ("Sequence delta is larger than the range");
+        }
+
+        /* Status variables for tracing. */
+        boolean cached = true;
+        boolean wrapped = false;
+
+        /*
+         * Determine whether we have exceeded the cache.  The cache size is
+         * always <= Integer.MAX_VALUE, so we don't have to worry about
+         * overflow here as long as we subtract the two long values first.
+         */
+        if ((increment && delta > ((cacheLast - cacheValue) + 1)) ||
+            (!increment && delta > ((cacheValue - cacheLast) + 1))) {
+
+            cached = false;
+
+            /*
+             * We need to allocate delta or cacheSize values, whichever is
+             * larger, by incrementing or decrementing the stored value by
+             * adjust.
+             */
+            int adjust = (delta > cacheSize) ? delta : cacheSize;
+
+            /* Perform an auto-commit transaction to update the sequence. */
+            Locker locker = null;
+            Cursor cursor = null;
+            OperationStatus status = OperationStatus.NOTFOUND;
+            try {
+                locker = LockerFactory.getWritableLocker
+                    (db.getEnvironment(),
+                     txn,
+                     db.isTransactional(),
+                     false,                  // retainNonTxnLocks
+                     db.getDatabaseImpl().isReplicated(),
+                                             // autoTxnIsReplicated
+                     autoCommitConfig);
+
+                cursor = new Cursor(db, locker, null);
+
+                /* Get the existing record. */
+                readDataRequired(cursor, LockMode.RMW);
+
+                /* If we would have wrapped when not allowed, overflow. */
+                if (overflow) {
+                    throw new DatabaseException
+                        ("Sequence overflow " + storedValue);
+                }
+
+                /*
+                 * Handle wrapping.  The range size can be larger than a long
+                 * can hold, so to avoid arithmetic overflow we use BigInteger
+                 * arithmetic.  Since we are going to write, the BigInteger
+                 * overhead is acceptable.
+                 */
+                BigInteger availBig;
+                if (increment) {
+                    /* Available amount: rangeMax - storedValue */
+                    availBig = BigInteger.valueOf(rangeMax).
+                        subtract(BigInteger.valueOf(storedValue));
+                } else {
+                    /* Available amount: storedValue - rangeMin */
+                    availBig = BigInteger.valueOf(storedValue).
+                        subtract(BigInteger.valueOf(rangeMin));
+                }
+
+                if (availBig.compareTo(BigInteger.valueOf(adjust)) < 0) {
+                    /* If availBig < adjust then availBig fits in an int. */
+                    int availInt = (int) availBig.longValue();
+                    if (availInt < delta) {
+                        if (wrapAllowed) {
+                            /* Wrap to the opposite range end point. */
+                            storedValue = increment ? rangeMin : rangeMax;
+                            wrapped = true;
+                        } else {
+                            /* Signal an overflow next time. */
+                            overflow = true;
+                            adjust = 0;
+                        }
+                    } else {
+
+                        /*
+                         * If the delta fits in the cache available, don't wrap
+                         * just to allocate the full cacheSize; instead,
+                         * allocate as much as is available.
+                         */
+                        adjust = availInt;
+                    }
+                }
+
+                /* Negate the adjustment for decrementing. */
+                if (!increment) {
+                    adjust = -adjust;
+                }
+
+                /* Set the stored value one past the cached amount. */
+                storedValue += adjust;
+
+                /* Write the new stored value. */
+                cursor.put(key, makeData());
+                status = OperationStatus.SUCCESS;
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+                if (locker != null) {
+                    locker.operationEnd(status);
+                }
+            }
+
+            /* The cache now contains the range: [cacheValue, storedValue) */
+            cacheValue = storedValue - adjust;
+            cacheLast = storedValue + (increment ? (-1) : 1);
+        }
+
+        /* Return the current value and increment/decrement it by delta. */
+        long retVal = cacheValue;
+        if (increment) {
+            cacheValue += delta;
+        } else {
+            cacheValue -= delta;
+        }
+
+        /* Increment stats. */
+        nGets += 1;
+        if (cached) {
+            nCachedGets += 1;
+        }
+
+        /* Trace this method at the FINEST level. */
+        if (logger.isLoggable(Level.FINEST)) {
+            logger.log
+                (Level.FINEST,
+                 "Sequence.get" +
+                 " value=" + retVal +
+                 " cached=" + cached +
+                 " wrapped=" + wrapped);
+        }
+
+        return retVal;
+    }
+
+    /**
+     * Returns the Database handle associated with this sequence.
+     *
+     * @return The Database handle associated with this sequence.
+     */
+    public Database getDatabase()
+        throws DatabaseException {
+
+        return db;
+    }
+
+    /**
+     * Returns the DatabaseEntry used to open this sequence.
+     *
+     * @return The DatabaseEntry used to open this sequence.
+     */
+    public DatabaseEntry getKey()
+        throws DatabaseException {
+
+        return copyEntry(key);
+    }
+
+    /**
+     * Returns statistical information about the sequence.
+     *
+     * <p>In the presence of multiple threads or processes accessing an active
+     * sequence, the information returned by this method may be
+     * out-of-date.</p>
+     *
+     * <p>The getStats method cannot be transaction-protected. For this reason,
+     * it should be called in a thread of control that has no open cursors or
+     * active transactions.</p>
+     *
+     * @param config The statistics attributes; if null, default attributes
+     * are used.
+     *
+     * @return Sequence statistics.
+     */
+    public SequenceStats getStats(StatsConfig config)
+        throws DatabaseException {
+
+        if (config == null) {
+            config = StatsConfig.DEFAULT;
+        }
+
+        if (!config.getFast()) {
+
+            /*
+             * storedValue may have been updated by another handle since it
+             * was last read by this handle.  Fetch the last written value.
+             * READ_UNCOMMITTED must be used to avoid lock conflicts.
+             */
+            Cursor cursor = db.openCursor(null, null);
+            try {
+                readDataRequired(cursor, LockMode.READ_UNCOMMITTED);
+            } finally {
+                cursor.close();
+            }
+        }
+
+        SequenceStats stats = new SequenceStats
+            (nGets,
+             nCachedGets,
+             storedValue,
+             cacheValue,
+             cacheLast,
+             rangeMin,
+             rangeMax,
+             cacheSize);
+
+        if (config.getClear()) {
+            nGets = 0;
+            nCachedGets = 0;
+        }
+
+        return stats;
+    }
+
+    /**
+     * Reads persistent fields from the sequence record.  Throws an exception
+     * if the key is not present in the database.
+     */
+    private void readDataRequired(Cursor cursor, LockMode lockMode)
+        throws DatabaseException {
+
+        if (!readData(cursor, lockMode)) {
+            throw new DatabaseException
+                ("The sequence record has been deleted while it is open.");
+        }
+    }
+
+    /**
+     * Reads persistent fields from the sequence record.  Returns false if the
+     * key is not present in the database.
+     */
+    private boolean readData(Cursor cursor, LockMode lockMode)
+        throws DatabaseException {
+
+        /* Fetch the sequence record. */
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status = cursor.getSearchKey(key, data, lockMode);
+        if (status != OperationStatus.SUCCESS) {
+            return false;
+        }
+        ByteBuffer buf = ByteBuffer.wrap(data.getData());
+
+        /* Get the persistent fields from the record data. */
+        byte version = buf.get();
+        byte flags = buf.get();
+        boolean unpacked = (version < 1);
+        rangeMin = LogUtils.readLong(buf, unpacked);
+        rangeMax = LogUtils.readLong(buf, unpacked);
+        storedValue = LogUtils.readLong(buf, unpacked);
+
+        increment = (flags & FLAG_INCR) != 0;
+        wrapAllowed = (flags & FLAG_WRAP) != 0;
+        overflow = (flags & FLAG_OVER) != 0;
+
+        return true;
+    }
+
+    /**
+     * Makes a storable database entry from the persistent fields.
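+     *
+     * Record layout: a one-byte version, a one-byte flags field, then
+     * rangeMin, rangeMax and storedValue as packed longs.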
+     */
+    private DatabaseEntry makeData() {
+
+        byte[] data = new byte[MAX_DATA_SIZE];
+        ByteBuffer buf = ByteBuffer.wrap(data);
+
+        byte flags = 0;
+        if (increment) {
+            flags |= FLAG_INCR;
+        }
+        if (wrapAllowed) {
+            flags |= FLAG_WRAP;
+        }
+        if (overflow) {
+            flags |= FLAG_OVER;
+        }
+
+        buf.put(CURRENT_VERSION);
+        buf.put(flags);
+        LogUtils.writePackedLong(buf, rangeMin);
+        LogUtils.writePackedLong(buf, rangeMax);
+        LogUtils.writePackedLong(buf, storedValue);
+
+        return new DatabaseEntry(data, 0, buf.position());
+    }
+
+    /**
+     * Returns a deep copy of the given database entry.
+     */
+    private DatabaseEntry copyEntry(DatabaseEntry entry) {
+
+	int len = entry.getSize();
+        byte[] data;
+	if (len == 0) {
+	    data = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+	} else {
+	    data = new byte[len];
+	    System.arraycopy
+		(entry.getData(), entry.getOffset(), data, 0, data.length);
+	}
+
+        return new DatabaseEntry(data);
+    }
+}
diff --git a/src/com/sleepycat/je/SequenceConfig.java b/src/com/sleepycat/je/SequenceConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..29cee6571b5960aecd3922ff63930f916ddf0d79
--- /dev/null
+++ b/src/com/sleepycat/je/SequenceConfig.java
@@ -0,0 +1,293 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SequenceConfig.java,v 1.11 2008/06/10 00:21:30 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * Specifies the attributes of a sequence.
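+ *
+ * <p>For example, a typical configuration sketch (the values shown are
+ * illustrative):</p>
+ *
+ * <pre>
+ *     SequenceConfig config = new SequenceConfig();
+ *     config.setAllowCreate(true);
+ *     config.setInitialValue(1);
+ *     config.setRange(1, Long.MAX_VALUE);
+ *     config.setCacheSize(20);
+ * </pre>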
+ */
+public class SequenceConfig {
+
+    /**
+     * Default configuration used if null is passed to methods that open a
+     * sequence.
+     */
+    public static final SequenceConfig DEFAULT = new SequenceConfig();
+
+    /* Parameters */
+    private int cacheSize = 0;
+    private long rangeMin = Long.MIN_VALUE;
+    private long rangeMax = Long.MAX_VALUE;
+    private long initialValue = 0L;
+
+    /* Flags */
+    private boolean allowCreate;
+    private boolean decrement;
+    private boolean exclusiveCreate;
+    private boolean autoCommitNoSync;
+    private boolean wrap;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public SequenceConfig() {
+    }
+
+    /**
+     * Configures the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method to create the sequence if it does not
+     * already exist.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param allowCreate If true, configure the {@link
+     * com.sleepycat.je.Database#openSequence Database.openSequence} method to
+     * create the sequence if it does not already exist.
+     */
+    public void setAllowCreate(boolean allowCreate) {
+        this.allowCreate = allowCreate;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method is configured to create the sequence if it
+     * does not already exist.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method is configured to create the sequence if it
+     * does not already exist.
+     */
+    public boolean getAllowCreate() {
+        return allowCreate;
+    }
+
+    /**
+     * Configures the number of elements cached by a sequence handle.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param cacheSize The number of elements cached by a sequence handle.
+     */
+    public void setCacheSize(int cacheSize) {
+        this.cacheSize = cacheSize;
+    }
+
+    /**
+     * Returns the number of elements cached by a sequence handle.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The number of elements cached by a sequence handle.
+     */
+    public int getCacheSize() {
+        return cacheSize;
+    }
+
+    /**
+     * Specifies that the sequence should be decremented.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param decrement If true, specify that the sequence should be
+     * decremented.
+     */
+    public void setDecrement(boolean decrement) {
+        this.decrement = decrement;
+    }
+
+    /**
+     * Returns true if the sequence is configured to decrement.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the sequence is configured to decrement.
+     */
+    public boolean getDecrement() {
+         return decrement;
+    }
+
+    /**
+     * Configures the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method to fail if the database already exists.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param exclusiveCreate If true, configure the {@link
+     * com.sleepycat.je.Database#openSequence Database.openSequence} method to
+     * fail if the database already exists.
+     */
+    public void setExclusiveCreate(boolean exclusiveCreate) {
+        this.exclusiveCreate = exclusiveCreate;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method is configured to fail if the database
+     * already exists.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Database#openSequence
+     * Database.openSequence} method is configured to fail if the database
+     * already exists.
+     */
+    public boolean getExclusiveCreate() {
+        return exclusiveCreate;
+    }
+
+    /**
+     * Sets the initial value for a sequence.
+     *
+     * <p>This call is only effective when the sequence is being created.</p>
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param initialValue The initial value for the sequence.
+     */
+    public void setInitialValue(long initialValue) {
+        this.initialValue = initialValue;
+    }
+
+    /**
+     * Returns the initial value for a sequence.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The initial value for a sequence.
+     */
+    public long getInitialValue() {
+        return initialValue;
+    }
+
+    /**
+     * Configures auto-commit operations on the sequence to not flush the
+     * transaction log.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param autoCommitNoSync If true, configure auto-commit operations on
+     * the sequence to not flush the transaction log.
+     */
+    public void setAutoCommitNoSync(boolean autoCommitNoSync) {
+        this.autoCommitNoSync = autoCommitNoSync;
+    }
+
+    /**
+     * Returns true if the auto-commit operations on the sequence are
+     * configured to not flush the transaction log.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the auto-commit operations on the sequence are
+     * configured to not flush the transaction log.
+     */
+    public boolean getAutoCommitNoSync() {
+        return autoCommitNoSync;
+    }
+
+    /**
+     * Configures a sequence range.  This call is only effective when the
+     * sequence is being created.
+     *
+     * @param min The minimum value for the sequence.
+     *
+     * @param max The maximum value for the sequence.
+     */
+    public void setRange(long min, long max) {
+        this.rangeMin = min;
+        this.rangeMax = max;
+    }
+
+    /**
+     * Returns the minimum value for the sequence.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The minimum value for the sequence.
+     */
+    public long getRangeMin() {
+        return rangeMin;
+    }
+
+    /**
+     * Returns the maximum value for the sequence.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return The maximum value for the sequence.
+     */
+    public long getRangeMax() {
+        return rangeMax;
+    }
+
+    /**
+     * Specifies that the sequence should wrap around when it is incremented
+     * (decremented) past the specified maximum (minimum) value.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param wrap If true, specify that the sequence should wrap around when
+     * it is incremented (decremented) past the specified maximum (minimum)
+     * value.
+     */
+    public void setWrap(boolean wrap) {
+        this.wrap = wrap;
+    }
+
+    /**
+     * Returns true if the sequence will wrap around when it is incremented
+     * (decremented) past the specified maximum (minimum) value.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the sequence will wrap around when it is incremented
+     * (decremented) past the specified maximum (minimum) value.
+     */
+    public boolean getWrap() {
+        return wrap;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "allowCreate=" + allowCreate +
+            "\ncacheSize=" + cacheSize +
+            "\ndecrement=" + decrement +
+            "\nexclusiveCreate=" + exclusiveCreate +
+            "\ninitialValue=" + initialValue +
+            "\nautoCommitNoSync=" + autoCommitNoSync +
+            "\nrangeMin=" + rangeMin +
+            "\nrangeMax=" + rangeMax +
+            "\nwrap=" + wrap +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/SequenceStats.java b/src/com/sleepycat/je/SequenceStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..78e0c64759fcfdebc74e8819fd010f9bcad9f25b
--- /dev/null
+++ b/src/com/sleepycat/je/SequenceStats.java
@@ -0,0 +1,130 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SequenceStats.java,v 1.9 2008/06/10 02:52:08 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+/**
+ * A SequenceStats object is used to return sequence statistics.
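+ *
+ * <p>A usage sketch (assuming an open Sequence handle named seq):</p>
+ *
+ * <pre>
+ *     SequenceStats stats = seq.getStats(null);
+ *     System.out.println("stored=" + stats.getCurrent() +
+ *                        " cachedGets=" + stats.getNCachedGets());
+ * </pre>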
+ */
+public class SequenceStats {
+
+    private int nGets;
+    private int nCachedGets;
+    private long current;
+    private long value;
+    private long lastValue;
+    private long min;
+    private long max;
+    private int cacheSize;
+
+    SequenceStats(int nGets,
+                  int nCachedGets,
+                  long current,
+                  long value,
+                  long lastValue,
+                  long min,
+                  long max,
+                  int cacheSize) {
+
+        this.nGets = nGets;
+        this.nCachedGets = nCachedGets;
+        this.current = current;
+        this.value = value;
+        this.lastValue = lastValue;
+        this.min = min;
+        this.max = max;
+        this.cacheSize = cacheSize;
+    }
+
+    /**
+     * Returns the number of times that Sequence.get was called successfully.
+     *
+     * @return number of times that Sequence.get was called successfully.
+     */
+    public int getNGets() {
+        return nGets;
+    }
+
+    /**
+     * Returns the number of times that Sequence.get was called and a cached
+     * value was returned.
+     *
+     * @return number of times that Sequence.get was called and a cached
+     * value was returned.
+     */
+    public int getNCachedGets() {
+        return nCachedGets;
+    }
+
+    /**
+     * Returns the current value of the sequence in the database.
+     *
+     * @return current value of the sequence in the database.
+     */
+    public long getCurrent() {
+        return current;
+    }
+
+    /**
+     * Returns the current cached value of the sequence.
+     *
+     * @return current cached value of the sequence.
+     */
+    public long getValue() {
+        return value;
+    }
+
+    /**
+     * Returns the last cached value of the sequence.
+     *
+     * @return last cached value of the sequence.
+     */
+    public long getLastValue() {
+        return lastValue;
+    }
+
+    /**
+     * Returns the minimum permitted value of the sequence.
+     *
+     * @return minimum permitted value of the sequence.
+     */
+    public long getMin() {
+        return min;
+    }
+
+    /**
+     * Returns the maximum permitted value of the sequence.
+     *
+     * @return maximum permitted value of the sequence.
+     */
+    public long getMax() {
+        return max;
+    }
+
+    /**
+     * Returns the number of values that will be cached in this handle.
+     *
+     * @return number of values that will be cached in this handle.
+     */
+    public int getCacheSize() {
+        return cacheSize;
+    }
+
+    @Override
+    public String toString() {
+        return "nGets=" + nGets
+            + "\nnCachedGets=" + nCachedGets
+            + "\ncurrent=" + current
+            + "\nvalue=" + value
+            + "\nlastValue=" + lastValue
+            + "\nmin=" + min
+            + "\nmax=" + max
+            + "\ncacheSize=" + cacheSize
+            ;
+    }
+}
diff --git a/src/com/sleepycat/je/StatsConfig.java b/src/com/sleepycat/je/StatsConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..e7ba7163643a50901fbd4cf21738d2fe9b4a5a40
--- /dev/null
+++ b/src/com/sleepycat/je/StatsConfig.java
@@ -0,0 +1,130 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: StatsConfig.java,v 1.19.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.PrintStream;
+
+/**
+ * Specifies the attributes of a statistics retrieval operation.
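+ *
+ * <p>For example, to gather inexpensive statistics and reset counters
+ * afterward (a usage sketch):</p>
+ *
+ * <pre>
+ *     StatsConfig config = new StatsConfig();
+ *     config.setFast(true);
+ *     config.setClear(true);
+ * </pre>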
+ */
+public class StatsConfig {
+
+    /**
+     * A convenience instance embodying the default configuration.
+     */
+    public static final StatsConfig DEFAULT = new StatsConfig();
+
+    private boolean fast = false;
+    private boolean clear = false;
+    private PrintStream showProgressStream = null;
+    private int showProgressInterval = 0;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public StatsConfig() {
+    }
+
+    /**
+     * Configures the statistics operation to return only the values which do
+     * not incur some performance penalty.
+     *
+     * <p>The default value is false.</p>
+     *
+     * <p>For example, skip stats that require a traversal of the database or
+     * in-memory tree, or which lock down the lock table for a period of
+     * time.</p>
+     *
+     * @param fast If set to true, configure the statistics operation to return
+     * only the values which do not incur some performance penalty.
+     */
+    public void setFast(boolean fast) {
+        this.fast = fast;
+    }
+
+    /**
+     * Returns true if the statistics operation is configured to return only
+     * the values which do not require expensive actions.
+     *
+     * @return true if the statistics operation is configured to return only
+     * the values which do not require expensive actions.
+     */
+    public boolean getFast() {
+        return fast;
+    }
+
+    /**
+     * Configures the statistics operation to reset statistics after they are
+     * returned. The default value is false.
+     *
+     * @param clear If set to true, configure the statistics operation to
+     * reset statistics after they are returned.
+     */
+    public void setClear(boolean clear) {
+        this.clear = clear;
+    }
+
+    /**
+     * Returns true if the statistics operation is configured to reset
+     * statistics after they are returned.
+     *
+     * @return true if the statistics operation is configured to reset
+     * statistics after they are returned.
+     */
+    public boolean getClear() {
+        return clear;
+    }
+
+    /**
+     * Configures the statistics operation to display progress to the
+     * PrintStream argument.  The accumulated statistics will be displayed
+     * every N records, where N is the value of showProgressInterval.
+     */
+    public void setShowProgressStream(PrintStream showProgressStream) {
+        this.showProgressStream = showProgressStream;
+    }
+
+    /**
+     * Returns the PrintStream on which the progress messages will be displayed
+     * during long running statistics gathering operations.
+     */
+    public PrintStream getShowProgressStream() {
+        return showProgressStream;
+    }
+
+    /**
+     * When the statistics operation is configured to display progress the
+     * showProgressInterval is the number of LNs between each progress report.
+     */
+    public void setShowProgressInterval(int showProgressInterval) {
+        this.showProgressInterval = showProgressInterval;
+    }
+
+    /**
+     * Returns the showProgressInterval value, if set.
+     */
+    public int getShowProgressInterval() {
+        return showProgressInterval;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "fast=" + fast +
+            "\nclear=" + clear +
+            "\nshowProgressStream=" + showProgressStream +
+            "\nshowProgressInterval=" + showProgressInterval +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/Transaction.java b/src/com/sleepycat/je/Transaction.java
new file mode 100644
index 0000000000000000000000000000000000000000..c9561113f85812ec80ee32078bb08f87b87bdc06
--- /dev/null
+++ b/src/com/sleepycat/je/Transaction.java
@@ -0,0 +1,470 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Transaction.java,v 1.60.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.PropUtil;
+
+/**
+ * The Transaction object is the handle for a transaction.  Methods off the
+ * transaction handle are used to configure, abort and commit the transaction.
+ * Transaction handles are provided to other Berkeley DB methods in order to
+ * transactionally protect those operations.
+ *
+ * <p>Transaction handles are free-threaded; they may be used concurrently by
+ * multiple threads. Once the {@link
+ * com.sleepycat.je.Transaction#abort Transaction.abort} or {@link
+ * com.sleepycat.je.Transaction#commit Transaction.commit} methods are called,
+ * the handle may not be accessed again, regardless of the success or failure
+ * of the method.</p>
+ *
+ * <p>To obtain a transaction with default attributes:</p>
+ *
+ * <blockquote><pre>
+ *     Transaction txn = myEnvironment.beginTransaction(null, null);
+ * </pre></blockquote>
+ *
+ * <p>To customize the attributes of a transaction:</p>
+ *
+ * <blockquote><pre>
+ *     TransactionConfig config = new TransactionConfig();
+ *     config.setReadUncommitted(true);
+ *     Transaction txn = myEnvironment.beginTransaction(null, config);
+ * </pre></blockquote>
+ */
+public class Transaction {
+
+    private Txn txn;
+    private Environment env;
+    private long id;
+    private String name;
+
+    /**
+     * Creates a transaction.
+     */
+    Transaction(Environment env, Txn txn) {
+        this.env = env;
+        this.txn = txn;
+
+        /*
+         * Copy the id to this wrapper object so the id will be available
+         * after the transaction is closed and the txn field is nulled.
+         */
+        this.id = txn.getId();
+    }
+
+    /**
+     * Cause an abnormal termination of the transaction.
+     *
+     * <p>The log is played backward, and any necessary undo operations are
+     * done. Before Transaction.abort returns, any locks held by the
+     * transaction will have been released.</p>
+     *
+     * <p>In the case of nested transactions, aborting a parent transaction
+     * causes all children (unresolved or not) of the parent transaction to be
+     * aborted.</p>
+     *
+     * <p>All cursors opened within the transaction must be closed before the
+     * transaction is aborted.</p>
+     *
+     * <p>After Transaction.abort has been called, regardless of its return,
+     * the {@link com.sleepycat.je.Transaction Transaction} handle may not be
+     * accessed again.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void abort()
+	throws DatabaseException {
+
+	try {
+	    checkEnv();
+	    env.removeReferringHandle(this);
+	    txn.abort(false);      // no sync required
+
+	    /* Remove reference to internal txn, so we can reclaim memory. */
+	    txn = null;
+	} catch (Error E) {
+	    DbInternal.envGetEnvironmentImpl(env).invalidate(E);
+	    throw E;
+	}
+    }
+
+    /**
+     * Return the transaction's unique ID.
+     *
+     * @return The transaction's unique ID.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public long getId()
+        throws DatabaseException {
+
+        return id;
+    }
+
+    /**
+     * End the transaction.  If the environment is configured for synchronous
+     * commit, the transaction will be committed synchronously to stable
+     * storage before the call returns.  This means the transaction will
+     * exhibit all of the ACID (atomicity, consistency, isolation, and
+     * durability) properties.
+     *
+     * <p>If the environment is not configured for synchronous commit, the
+     * commit will not necessarily have been committed to stable storage before
+     * the call returns.  This means the transaction will exhibit the ACI
+     * (atomicity, consistency, and isolation) properties, but not D
+     * (durability); that is, database integrity will be maintained, but it is
+     * possible this transaction may be undone during recovery.</p>
+     *
+     * <p>All cursors opened within the transaction must be closed before the
+     * transaction is committed.</p>
+     *
+     * <p>After this method returns the {@link com.sleepycat.je.Transaction
+     * Transaction} handle may not be accessed again, regardless of the
+     * method's success or failure. If the method encounters an error, the
+     * transaction and all child transactions of the transaction will have been
+     * aborted when the call returns.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void commit()
+	throws DatabaseException {
+
+	try {
+	    checkEnv();
+	    env.removeReferringHandle(this);
+	    txn.commit();
+	    /* Remove reference to internal txn, so we can reclaim memory. */
+	    txn = null;
+	} catch (Error E) {
+	    DbInternal.envGetEnvironmentImpl(env).invalidate(E);
+	    throw E;
+	}
+    }
+
+    /**
+     *
+     * @hidden
+     * Feature not yet available.
+     *
+     * End the transaction using the specified durability requirements. This
+     * requirement overrides any default durability requirements associated
+     * with the environment. If the durability requirements cannot be satisfied,
+     * an exception is thrown to describe the problem. Please see
+     * {@link Durability} for specific exceptions that could result when the
+     * durability requirements cannot be satisfied.
+     *
+     * @param durability the durability requirements for this transaction
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void commit(Durability durability)
+        throws DatabaseException {
+
+        doCommit(durability, false /* explicitSync */);
+    }
+
+    /**
+     * End the transaction, committing synchronously.  This means the
+     * transaction will exhibit all of the ACID (atomicity, consistency,
+     * isolation, and durability) properties.
+     *
+     * <p>This behavior is the default for database environments unless
+     * otherwise configured using the {@link
+     * com.sleepycat.je.EnvironmentConfig#setTxnNoSync
+     * EnvironmentConfig.setTxnNoSync} method.  This behavior may also be set
+     * for a single transaction using the {@link
+     * com.sleepycat.je.Environment#beginTransaction
+     * Environment.beginTransaction} method.  Any value specified to this
+     * method overrides both of those settings.</p>
+     *
+     * <p>All cursors opened within the transaction must be closed before the
+     * transaction is committed.</p>
+     *
+     * <p>After this method returns the {@link com.sleepycat.je.Transaction
+     * Transaction} handle may not be accessed again, regardless of the
+     * method's success or failure. If the method encounters an error, the
+     * transaction and all child transactions of the transaction will have been
+     * aborted when the call returns.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void commitSync()
+	throws DatabaseException {
+
+        doCommit(TransactionConfig.SYNC, true /* explicitSync */);
+    }
+
+    /**
+     * End the transaction, not committing synchronously. This means the
+     * transaction will exhibit the ACI (atomicity, consistency, and isolation)
+     * properties, but not D (durability); that is, database integrity will be
+     * maintained, but it is possible this transaction may be undone during
+     * recovery.
+     *
+     * <p>This behavior may be set for a database environment using the {@link
+     * com.sleepycat.je.EnvironmentConfig#setTxnNoSync
+     * EnvironmentConfig.setTxnNoSync} method or for a single transaction using
+     * the {@link com.sleepycat.je.Environment#beginTransaction
+     * Environment.beginTransaction} method.  Any value specified to this
+     * method overrides both of those settings.</p>
+     *
+     * <p>All cursors opened within the transaction must be closed before the
+     * transaction is committed.</p>
+     *
+     * <p>After this method returns the {@link com.sleepycat.je.Transaction
+     * Transaction} handle may not be accessed again, regardless of the
+     * method's success or failure. If the method encounters an error, the
+     * transaction and all child transactions of the transaction will have been
+     * aborted when the call returns.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void commitNoSync()
+	throws DatabaseException {
+
+        doCommit(TransactionConfig.NO_SYNC, true /* explicitSync */);
+    }
+
+    /**
+     * End the transaction, writing but not synchronously flushing the log.
+     * This means the transaction will exhibit the ACI (atomicity,
+     * consistency, and isolation) properties, but not D (durability); that
+     * is, database integrity will be maintained, but if the operating system
+     * fails, it is possible this transaction may be undone during recovery.
+     *
+     * <p>This behavior may also be set for a single transaction using the
+     * {@link com.sleepycat.je.TransactionConfig#setWriteNoSync
+     * TransactionConfig.setWriteNoSync} method when the transaction is begun
+     * with {@link com.sleepycat.je.Environment#beginTransaction
+     * Environment.beginTransaction}.  Any value specified to this method
+     * overrides that setting.</p>
+     *
+     * <p>All cursors opened within the transaction must be closed before the
+     * transaction is committed.</p>
+     *
+     * <p>After this method returns the {@link com.sleepycat.je.Transaction
+     * Transaction} handle may not be accessed again, regardless of the
+     * method's success or failure. If the method encounters an error, the
+     * transaction and all child transactions of the transaction will have been
+     * aborted when the call returns.</p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void commitWriteNoSync()
+	throws DatabaseException {
+
+        doCommit(TransactionConfig.WRITE_NO_SYNC, true /* explicitSync */);
+    }
+
+    /**
+     * @hidden
+     * For internal use.
+     */
+    public boolean getPrepared() {
+	return txn.getPrepared();
+    }
+
+    /**
+     * Perform error checking and invoke the commit on Txn.
+     *
+     * @param durability the durability to use for the commit
+     * @param explicitSync true if the method was invoked from one of the
+     * sync-specific APIs, false if a Durability was supplied explicitly. This
+     * parameter exists solely to support mixed-mode API usage checks.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    private void doCommit(Durability durability, boolean explicitSync)
+	throws DatabaseException {
+
+	try {
+	    checkEnv();
+	    env.removeReferringHandle(this);
+	    if (explicitSync) {
+	        /* A sync-specific api was invoked. */
+	        if (txn.getExplicitDurabilityConfigured()) {
+	            throw new IllegalArgumentException
+	                ("Mixed use of deprecated durability API for the " +
+	                 "transaction commit with the new durability API for " +
+	                "TransactionConfig or MutableEnvironmentConfig");
+	        }
+	    } else if (txn.getExplicitSyncConfigured()) {
+	        /* Durability was explicitly configured for commit */
+                throw new IllegalArgumentException
+                    ("Mixed use of new durability API for the " +
+                      "transaction commit with deprecated durability API for " +
+                      "TransactionConfig or MutableEnvironmentConfig");
+	    }
+	    txn.commit(durability);
+	    /* Remove reference to internal txn, so we can reclaim memory. */
+	    txn = null;
+	} catch (Error E) {
+	    DbInternal.envGetEnvironmentImpl(env).invalidate(E);
+	    throw E;
+	}
+    }
+
+    /**
+     * Configure the timeout value for the transaction lifetime.
+     *
+     * <p>If the transaction runs longer than this time, the transaction may
+     * throw {@link com.sleepycat.je.DatabaseException DatabaseException}.</p>
+     *
+     * <p>Timeouts are checked whenever a thread of control blocks on a lock or
+     * when deadlock detection is performed.  For this reason, the accuracy of
+     * the timeout depends on how often deadlock detection is performed.</p>
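+     *
+     * <p>For example, to limit the transaction's lifetime to roughly five
+     * seconds (an illustrative value):</p>
+     *
+     * <blockquote><pre>
+     *     txn.setTxnTimeout(5000000L);    // 5,000,000 microseconds = 5 s
+     * </pre></blockquote>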
+     *
+     * @param timeOut The timeout value for the transaction lifetime, in
+     * microseconds. A value of 0 disables timeouts for the transaction.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setTxnTimeout(long timeOut)
+        throws IllegalArgumentException, DatabaseException {
+
+        checkEnv();
+        txn.setTxnTimeout(PropUtil.microsToMillis(timeOut));
+    }
+
+    /**
+     * Configure the lock request timeout value for the transaction.
+     *
+     * <p>If a lock request cannot be granted in this time, the transaction may
+     * throw {@link com.sleepycat.je.DatabaseException DatabaseException}.</p>
+     *
+     * <p>Timeouts are checked whenever a thread of control blocks on a lock or
+     * when deadlock detection is performed.  For this reason, the accuracy of
+     * the timeout depends on how often deadlock detection is performed.</p>
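+     *
+     * <p>For example, to give up on a lock request after roughly half a
+     * second (an illustrative value):</p>
+     *
+     * <blockquote><pre>
+     *     txn.setLockTimeout(500000L);    // 500,000 microseconds = 500 ms
+     * </pre></blockquote>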
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @param timeOut The lock request timeout value for the transaction, in
+     * microseconds.  A value of 0 disables timeouts for the transaction.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void setLockTimeout(long timeOut)
+        throws IllegalArgumentException, DatabaseException {
+
+        checkEnv();
+        txn.setLockTimeout(PropUtil.microsToMillis(timeOut));
+    }
+
+    /**
+     * Set the user visible name for the transaction.
+     *
+     * @param name The user visible name for the transaction.
+     *
+     */
+    public void setName(String name) {
+	this.name = name;
+    }
+
+    /**
+     * Get the user visible name for the transaction.
+     *
+     * @return The user visible name for the transaction.
+     *
+     */
+    public String getName() {
+	return name;
+    }
+
+    /**
+     * @hidden
+     * For internal use.
+     */
+    @Override
+    public int hashCode() {
+	return (int) id;
+    }
+
+    /**
+     * @hidden
+     * For internal use.
+     */
+    @Override
+    public boolean equals(Object o) {
+	if (o == null) {
+	    return false;
+	}
+
+	if (!(o instanceof Transaction)) {
+	    return false;
+	}
+
+	if (((Transaction) o).id == id) {
+	    return true;
+	}
+
+	return false;
+    }
+
+    @Override
+    public String toString() {
+	StringBuffer sb = new StringBuffer();
+	sb.append("<Transaction id=\"");
+	sb.append(id).append("\"");
+	if (name != null) {
+	    sb.append(" name=\"");
+	    sb.append(name).append("\"");
+	}
+	sb.append(">");
+	return sb.toString();
+    }
+
+    /**
+     * This method should only be called by the LockerFactory.getReadableLocker
+     * and getWritableLocker methods.  The locker returned does not enforce the
+     * readCommitted isolation setting.
+     */
+    Locker getLocker()
+        throws DatabaseException {
+
+        if (txn == null) {
+            throw new DatabaseException("Transaction " + id +
+                                        " has been closed and is no longer"+
+                                        " usable.");
+        } else {
+            return txn;
+        }
+    }
+
+    /*
+     * Helpers
+     */
+
+    Txn getTxn() {
+	return txn;
+    }
+
+    /**
+     * @throws RunRecoveryException if the underlying environment is invalid.
+     */
+    private void checkEnv()
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = env.getEnvironmentImpl();
+        if (envImpl == null) {
+            throw new DatabaseException("The environment has been closed. " +
+                                        "This transaction is no longer" +
+                                        " usable.");
+        }
+
+        envImpl.checkIfInvalid();
+    }
+}
diff --git a/src/com/sleepycat/je/TransactionConfig.java b/src/com/sleepycat/je/TransactionConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..f731d20bbe2b31bc61681558701fa04a0917e447
--- /dev/null
+++ b/src/com/sleepycat/je/TransactionConfig.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TransactionConfig.java,v 1.24.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.Durability.ReplicaAckPolicy;
+import com.sleepycat.je.Durability.SyncPolicy;
+
+/**
+ * Specifies the attributes of a database environment transaction.
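+ *
+ * <p>A minimal usage sketch (the environment handle name is illustrative; the
+ * configuration methods are those defined below):</p>
+ *
+ * <blockquote><pre>
+ *     TransactionConfig config = new TransactionConfig();
+ *     config.setReadCommitted(true);
+ *     config.setNoWait(true);
+ *     Transaction txn = myEnvironment.beginTransaction(null, config);
+ * </pre></blockquote>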
+ */
+public class TransactionConfig implements Cloneable {
+
+    /**
+     * Default configuration used if null is passed to methods that create a
+     * transaction.
+     */
+    public static final TransactionConfig DEFAULT = new TransactionConfig();
+
+    private boolean sync = false;
+    private boolean noSync = false;
+    private boolean writeNoSync = false;
+    private Durability durability = null;
+    private ReplicaConsistencyPolicy consistencyPolicy;
+
+    private boolean noWait = false;
+    private boolean readUncommitted = false;
+    private boolean readCommitted = false;
+    private boolean serializableIsolation = false;
+
+    /* Convenience constants for local (non-replicated) use. */
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Defines a durability policy with SYNC for local commit synchronization.
+     *
+     * The replicated environment policies default to SYNC for commits of
+     * replicated transactions that need acknowledgment and QUORUM for the
+     * acknowledgment policy.
+     */
+    public static final Durability SYNC =
+        new Durability(SyncPolicy.SYNC,
+                       SyncPolicy.SYNC,
+                       ReplicaAckPolicy.QUORUM);
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Defines a durability policy with NO_SYNC for local commit
+     * synchronization.
+     *
+     * The replicated environment policies default to SYNC for commits of
+     * replicated transactions that need acknowledgment and QUORUM for the
+     * acknowledgment policy.
+     */
+    public static final Durability NO_SYNC =
+        new Durability(SyncPolicy.NO_SYNC,
+                       SyncPolicy.SYNC,
+                       ReplicaAckPolicy.QUORUM);
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Defines a durability policy with WRITE_NO_SYNC for local commit
+     * synchronization.
+     *
+     * The replicated environment policies default to SYNC for commits of
+     * replicated transactions that need acknowledgment and QUORUM for the
+     * acknowledgment policy.
+     */
+    public static final Durability WRITE_NO_SYNC =
+        new Durability(SyncPolicy.WRITE_NO_SYNC,
+                       SyncPolicy.SYNC,
+                       ReplicaAckPolicy.QUORUM);
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public TransactionConfig() {
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     *
+     * Maps the existing sync settings to the equivalent durability settings.
+     * Figure out what we should do on commit. TransactionConfig could be
+     * set with conflicting values; take the most stringent ones first.
+     * All environment level defaults were applied by the caller.
+     *
+     * ConfigSync  ConfigWriteNoSync ConfigNoSync   default
+     *    0                 0             0         sync
+     *    0                 0             1         nosync
+     *    0                 1             0         write nosync
+     *    0                 1             1         write nosync
+     *    1                 0             0         sync
+     *    1                 0             1         sync
+     *    1                 1             0         sync
+     *    1                 1             1         sync
+     *
+     * @return the equivalent durability
+     */
+    public Durability getDurabilityFromSync() {
+        if (sync) {
+            return SYNC;
+        } else if (writeNoSync) {
+            return WRITE_NO_SYNC;
+        } else if (noSync) {
+            return NO_SYNC;
+        }
+        return SYNC;
+    }
+
+    /**
+     * Configures the transaction to write and synchronously flush the log
+     * when it commits.
+     *
+     * <p>This behavior may be set for a database environment using the
+     * Environment.setMutableConfig method. Any value specified to this method
+     * overrides that setting.</p>
+     *
+     * <p>The default is false for this class and true for the database
+     * environment.</p>
+     *
+     * <p>If true is passed to both setSync and setNoSync, setSync will take
+     * precedence.</p>
+     *
+     * @param sync If true, transactions exhibit all the ACID (atomicity,
+     * consistency, isolation, and durability) properties.
+     */
+    public void setSync(boolean sync) {
+        checkMixedMode(sync, noSync, writeNoSync, durability);
+        this.sync = sync;
+    }
+
+    /**
+     * Returns true if the transaction is configured to write and synchronously
+     * flush the log when it commits.
+     *
+     * @return true if the transaction is configured to write and synchronously
+     * flush the log when it commits.
+     */
+    public boolean getSync() {
+        return sync;
+    }
+
+    /**
+     * Configures the transaction to not write or synchronously flush the log
+     * when it commits.
+     *
+     * <p>This behavior may be set for a database environment using the
+     * Environment.setMutableConfig method. Any value specified to this method
+     * overrides that setting.</p>
+     *
+     * <p>The default is false for this class and the database environment.</p>
+     *
+     * @param noSync If true, transactions exhibit the ACI (atomicity,
+     * consistency, and isolation) properties, but not D (durability); that is,
+     * database integrity will be maintained, but if the application or system
+     * fails, it is possible some number of the most recently committed
+     * transactions may be undone during recovery. The number of transactions
+     * at risk is governed by how many log updates can fit into the log buffer,
+     * how often the operating system flushes dirty buffers to disk, and how
+     * often the log is checkpointed.
+     */
+    public void setNoSync(boolean noSync) {
+        checkMixedMode(sync, noSync, writeNoSync, durability);
+        this.noSync = noSync;
+    }
+
+    /**
+     * Returns true if the transaction is configured to not write or
+     * synchronously flush the log when it commits.
+     *
+     * @return true if the transaction is configured to not write or
+     * synchronously flush the log when it commits.
+     */
+    public boolean getNoSync() {
+        return noSync;
+    }
+
+    /**
+     * Configures the transaction to write but not synchronously flush the log
+     * when it commits.
+     *
+     * <p>This behavior may be set for a database environment using the
+     * Environment.setMutableConfig method. Any value specified to this method
+     * overrides that setting.</p>
+     *
+     * <p>The default is false for this class and the database environment.</p>
+     *
+     * @param writeNoSync If true, transactions exhibit the ACI (atomicity,
+     * consistency, and isolation) properties, but not D (durability); that is,
+     * database integrity will be maintained, but if the operating system
+     * fails, it is possible some number of the most recently committed
+     * transactions may be undone during recovery. The number of transactions
+     * at risk is governed by how often the operating system flushes dirty
+     * buffers to disk, and how often the log is checkpointed.
+     */
+    public void setWriteNoSync(boolean writeNoSync) {
+        checkMixedMode(sync, noSync, writeNoSync, durability);
+        this.writeNoSync = writeNoSync;
+    }
+
+    /**
+     * Returns true if the transaction is configured to write but not
+     * synchronously flush the log when it commits.
+     *
+     * @return true if the transaction is configured to write but not
+     * synchronously flush the log when it commits.
+     */
+    public boolean getWriteNoSync() {
+        return writeNoSync;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Configures the durability associated with a transaction when it commits.
+     * Changes to durability are not reflected back to the "sync" booleans --
+     * there isn't a one to one mapping.
+     *
+     * Note that you should not use both the durability and the XXXSync() APIs
+     * on the same config object.
+     *
+     * @param durability the durability definition
+     */
+    public void setDurability(Durability durability) {
+        checkMixedMode(sync, noSync, writeNoSync, durability);
+        this.durability = durability;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Returns the durability associated with the configuration. As a
+     * compatibility hack, it currently returns the local durability
+     * computed from the current "sync" settings, if the durability has not
+     * been explicitly set by the application.
+     *
+     * @return the durability setting currently associated with this config.
+     */
+    public Durability getDurability() {
+        return durability;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Associates a consistency policy with this configuration.
+     *
+     * @param consistencyPolicy the consistency definition
+     */
+    public void setConsistencyPolicy
+        (ReplicaConsistencyPolicy consistencyPolicy) {
+        this.consistencyPolicy = consistencyPolicy;
+    }
+
+    /**
+     * @hidden
+     * Feature not yet available.
+     *
+     * Returns the consistency policy associated with the configuration.
+     *
+     * @return the consistency policy currently associated with this config.
+     */
+    public ReplicaConsistencyPolicy getConsistencyPolicy() {
+        return consistencyPolicy;
+    }
+
+    /**
+     * Configures the transaction to not wait if a lock request cannot be
+     * immediately granted.
+     *
+     * <p>The default is false for this class and the database environment.</p>
+     *
+     * @param noWait If true, transactions will not wait if a lock request
+     * cannot be immediately granted, instead {@link
+     * com.sleepycat.je.DeadlockException DeadlockException} will be thrown.
+     */
+    public void setNoWait(boolean noWait) {
+        this.noWait = noWait;
+    }
+
+    /**
+     * Returns true if the transaction is configured to not wait if a lock
+     * request cannot be immediately granted.
+     *
+     * @return true if the transaction is configured to not wait if a lock
+     * request cannot be immediately granted.
+     */
+    public boolean getNoWait() {
+        return noWait;
+    }
+
+    /**
+     * Configures read operations performed by the transaction to return
+     * modified but not yet committed data.
+     *
+     * @param readUncommitted If true, configure read operations performed by
+     * the transaction to return modified but not yet committed data.
+     *
+     * @see LockMode#READ_UNCOMMITTED
+     */
+    public void setReadUncommitted(boolean readUncommitted) {
+        this.readUncommitted = readUncommitted;
+    }
+
+    /**
+     * Returns true if read operations performed by the transaction are
+     * configured to return modified but not yet committed data.
+     *
+     * @return true if read operations performed by the transaction are
+     * configured to return modified but not yet committed data.
+     *
+     * @see LockMode#READ_UNCOMMITTED
+     */
+    public boolean getReadUncommitted() {
+        return readUncommitted;
+    }
+
+    /**
+     * Configures read operations performed by the transaction to return
+     * modified but not yet committed data.
+     *
+     * @param dirtyRead If true, configure read operations performed by the
+     * transaction to return modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #setReadUncommitted} to
+     * conform to ANSI database isolation terminology.
+     */
+    public void setDirtyRead(boolean dirtyRead) {
+        setReadUncommitted(dirtyRead);
+    }
+
+    /**
+     * Returns true if read operations performed by the transaction are
+     * configured to return modified but not yet committed data.
+     *
+     * @return true if read operations performed by the transaction are
+     * configured to return modified but not yet committed data.
+     *
+     * @deprecated This has been replaced by {@link #getReadUncommitted} to
+     * conform to ANSI database isolation terminology.
+     */
+    public boolean getDirtyRead() {
+        return getReadUncommitted();
+    }
+
+    /**
+     * Configures the transaction for read committed isolation.
+     *
+     * <p>This ensures the stability of the current data item read by the
+     * cursor but permits data read by this transaction to be modified or
+     * deleted prior to the commit of the transaction.</p>
+     *
+     * @param readCommitted If true, configure the transaction for read
+     * committed isolation.
+     *
+     * @see LockMode#READ_COMMITTED
+     */
+    public void setReadCommitted(boolean readCommitted) {
+        this.readCommitted = readCommitted;
+    }
+
+    /**
+     * Returns true if the transaction is configured for read committed
+     * isolation.
+     *
+     * @return true if the transaction is configured for read committed
+     * isolation.
+     *
+     * @see LockMode#READ_COMMITTED
+     */
+    public boolean getReadCommitted() {
+        return readCommitted;
+    }
+
+    /**
+     * Configures this transaction to have serializable (degree 3) isolation.
+     * When serializable isolation is configured, phantoms are prevented.
+     *
+     * <p>By default a transaction provides Repeatable Read isolation; {@link
+     * EnvironmentConfig#setTxnSerializableIsolation} may be called to override
+     * the default.  If the environment is configured for serializable
+     * isolation, all transactions will be serializable regardless of whether
+     * this method is called; calling {@link #setSerializableIsolation} with a
+     * false parameter will not disable serializable isolation.</p>
+     *
+     * <p>The default is false for this class and the database environment.</p>
+     *
+     * @see LockMode
+     */
+    public void setSerializableIsolation(boolean serializableIsolation) {
+        this.serializableIsolation = serializableIsolation;
+    }
+
+    /**
+     * Returns true if the transaction has been explicitly configured to have
+     * serializable (degree 3) isolation.
+     *
+     * @return true if the transaction has been configured to have serializable
+     * isolation.
+     *
+     * @see LockMode
+     */
+    public boolean getSerializableIsolation() {
+        return serializableIsolation;
+    }
+
+    /**
+     * Used by Environment to create a copy of the application supplied
+     * configuration. Done this way to provide non-public cloning.
+     */
+    TransactionConfig cloneConfig() {
+        try {
+            return (TransactionConfig) super.clone();
+        } catch (CloneNotSupportedException willNeverOccur) {
+            return null;
+        }
+    }
+
+    /**
+     * Checks for mixing of the deprecated and non-deprecated forms of the
+     * durability API. It is invoked before any of the config parameters are
+     * set; the arguments represent the durability configuration as it would
+     * be after the change, before the fields are actually updated.
+     */
+    static void checkMixedMode(boolean sync,
+                               boolean noSync,
+                               boolean writeNoSync,
+                               Durability durability)
+        throws IllegalArgumentException {
+
+        if ((sync || noSync || writeNoSync) && (durability != null)) {
+            throw new IllegalArgumentException
+            ("Mixed use of deprecated and current durability APIs is not " +
+             " supported");
+        }
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "sync=" + sync +
+            "\nnoSync=" + noSync +
+            "\nwriteNoSync=" + writeNoSync +
+            "\ndurability=" + durability +
+            "\nconsistencyPolicy=" + consistencyPolicy +
+            "\nnoWait=" + noWait +
+            "\nreadUncommitted=" + readUncommitted +
+            "\nreadCommitted=" + readCommitted +
+            "\nSerializableIsolation=" + serializableIsolation +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/TransactionStats.java b/src/com/sleepycat/je/TransactionStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..4d6c3d5488be3474bcefc13dd1252ac15c834b7f
--- /dev/null
+++ b/src/com/sleepycat/je/TransactionStats.java
@@ -0,0 +1,315 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TransactionStats.java,v 1.35 2008/06/10 02:52:08 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.Serializable;
+import java.util.Date;
+
+/**
+ * Transaction statistics for a database environment.
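+ *
+ * <p>A sketch of how these statistics are typically obtained (assumes an open
+ * Environment handle named myEnvironment; a null StatsConfig requests the
+ * default statistics behavior):</p>
+ *
+ * <blockquote><pre>
+ *     TransactionStats stats = myEnvironment.getTransactionStats(null);
+ *     System.out.println("active txns = " + stats.getNActive());
+ *     System.out.println(stats);
+ * </pre></blockquote>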
+ */
+public class TransactionStats implements Serializable {
+
+    /**
+     * The time the last completed checkpoint finished (as the number of
+     * seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX)
+     * time interface).
+     */
+    private long lastCheckpointTime;
+
+    /**
+     * The last transaction ID allocated.
+     */
+    private long lastTxnId;
+
+    /**
+     * The number of transactions that are currently active.
+     */
+    private int nActive;
+
+    /**
+     * The number of transactions that have begun.
+     */
+    private long nBegins;
+
+    /**
+     * The number of transactions that have aborted.
+     */
+    private long nAborts;
+
+    /**
+     * The number of transactions that have committed.
+     */
+    private long nCommits;
+
+    /**
+     * The number of XA transactions that have aborted.
+     */
+    private long nXAAborts;
+
+    /**
+     * The number of XA transactions that have been prepared.
+     */
+    private long nXAPrepares;
+
+    /**
+     * The number of XA transactions that have committed.
+     */
+    private long nXACommits;
+
+    /**
+     * The array of active transactions. Each element of the array is an object
+     * of type TransactionStats.Active.
+     */
+    private Active[] activeTxns;
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public TransactionStats() {
+    }
+
+    /**
+     * The Active class represents an active transaction.
+     */
+    public static class Active implements Serializable {
+
+	/**
+	 * The transaction ID of the transaction.
+	 */
+	private long txnId;
+
+	/**
+	 * The transaction ID of the parent transaction (or 0, if no parent).
+	 */
+	private long parentId;
+
+        /**
+         * The transaction name, including the thread name if available.
+         */
+        private String name;
+
+        /**
+         * The transaction ID of the transaction.
+         */
+        public long getId() {
+            return txnId;
+        }
+
+        /**
+         * The transaction ID of the parent transaction (or 0, if no parent).
+         */
+        public long getParentId() {
+            return parentId;
+        }
+
+        /**
+         * The transaction name, including the thread name if available.
+         */
+        public String getName() {
+            return name;
+        }
+
+	/**
+         * @hidden
+	 * Internal use only.
+	 */
+        public Active(String name, long txnId, long parentId) {
+            this.name = name;
+            this.txnId = txnId;
+            this.parentId = parentId;
+        }
+
+        @Override
+	public String toString() {
+	    return "txnId = " + txnId + " txnName = " + name;
+	}
+    }
+
+    /**
+     * Return the array of active transactions.
+     *
+     * @return The array of active transactions.
+     */
+    public Active[] getActiveTxns() {
+        return activeTxns;
+    }
+
+    /**
+     * The time the last completed checkpoint finished (as the number of
+     * seconds since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX)
+     * time interface).
+     */
+    public long getLastCheckpointTime() {
+        return lastCheckpointTime;
+    }
+
+    /**
+     * The last transaction ID allocated.
+     */
+    public long getLastTxnId() {
+        return lastTxnId;
+    }
+
+    /**
+     * The number of transactions that have aborted.
+     */
+    public long getNAborts() {
+        return nAborts;
+    }
+
+    /**
+     * The number of XA transactions that have aborted.
+     */
+    public long getNXAAborts() {
+        return nXAAborts;
+    }
+
+    /**
+     * The number of XA transactions that have been prepared.
+     */
+    public long getNXAPrepares() {
+        return nXAPrepares;
+    }
+
+    /**
+     * The number of transactions that are currently active.
+     */
+    public int getNActive() {
+        return nActive;
+    }
+
+    /**
+     * The number of transactions that have begun.
+     */
+    public long getNBegins() {
+        return nBegins;
+    }
+
+    /**
+     * The number of transactions that have committed.
+     */
+    public long getNCommits() {
+        return nCommits;
+    }
+
+    /**
+     * The number of XA transactions that have committed.
+     */
+    public long getNXACommits() {
+        return nXACommits;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setActiveTxns(Active[] actives) {
+        activeTxns = actives;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLastCheckpointTime(long l) {
+        lastCheckpointTime = l;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setLastTxnId(long val) {
+        lastTxnId = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNAborts(long val) {
+        nAborts = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNXAAborts(long val) {
+        nXAAborts = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNActive(int val) {
+        nActive = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNBegins(long val) {
+        nBegins = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNCommits(long val) {
+        nCommits = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNXACommits(long val) {
+        nXACommits = val;
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     */
+    public void setNXAPrepares(long val) {
+        nXAPrepares = val;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("nBegins=").append(nBegins).append('\n');
+        sb.append("nAborts=").append(nAborts).append('\n');
+        sb.append("nCommits=").append(nCommits).append('\n');
+        sb.append("nXAPrepares=").append(nXAPrepares).append('\n');
+        sb.append("nXAAborts=").append(nXAAborts).append('\n');
+        sb.append("nXACommits=").append(nXACommits).append('\n');
+        sb.append("nActive=").append(nActive).append('\n');
+        sb.append("activeTxns=[");
+        if (activeTxns != null) {
+            for (int i = 0; i < activeTxns.length; i += 1) {
+                sb.append("  ").append(activeTxns[i]).append('\n');
+            }
+        }
+        sb.append("]\n");
+        sb.append("lastTxnId=").append(lastTxnId).append('\n');
+        sb.append("lastCheckpointTime=").
+           append(new Date(lastCheckpointTime)).append('\n');
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/VerifyConfig.java b/src/com/sleepycat/je/VerifyConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..e8e9f3ef6d58559166f04a2f5f263f00e1772b6b
--- /dev/null
+++ b/src/com/sleepycat/je/VerifyConfig.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: VerifyConfig.java,v 1.16.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.PrintStream;
+
+/**
+ * Specifies the attributes of a verification operation.
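+ *
+ * <p>A minimal usage sketch (the environment handle name is illustrative):</p>
+ *
+ * <blockquote><pre>
+ *     VerifyConfig config = new VerifyConfig();
+ *     config.setPrintInfo(true);
+ *     config.setShowProgressStream(System.out);
+ *     config.setShowProgressInterval(10000);
+ *     boolean ok = myEnvironment.verify(config, System.out);
+ * </pre></blockquote>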
+ */
+public class VerifyConfig {
+
+    /*
+     * For internal use, to allow null as a valid value for the config
+     * parameter.
+     */
+    public static final VerifyConfig DEFAULT = new VerifyConfig();
+
+    private boolean propagateExceptions = false;
+    private boolean aggressive = false;
+    private boolean printInfo = false;
+    private PrintStream showProgressStream = null;
+    private int showProgressInterval = 0;
+
+    /**
+     * An instance created using the default constructor is initialized with
+     * the system's default settings.
+     */
+    public VerifyConfig() {
+    }
+
+    /**
+     * Configures {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} to propagate exceptions found during verification.
+     *
+     * <p>By default this is false: exception information is printed to
+     * System.out for notification but does not stop the verification
+     * activity, which continues for as long as possible.</p>
+     *
+     * @param propagate If set to true, configure {@link
+     * com.sleepycat.je.Environment#verify Environment.verify} and {@link
+     * com.sleepycat.je.Database#verify Database.verify} to propagate
+     * exceptions found during verification.
+     */
+    public void setPropagateExceptions(boolean propagate) {
+        propagateExceptions = propagate;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to propagate exceptions found during
+     * verification.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to propagate exceptions found during
+     * verification.
+     */
+    public boolean getPropagateExceptions() {
+        return propagateExceptions;
+    }
+
+    /**
+     * Configures {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} to perform fine granularity consistency checking that
+     * includes verifying in-memory constructs.
+     *
+     * <p>This level of checking should only be performed while the database
+     * environment is quiescent.</p>
+     *
+     * <p>By default this is false.</p>
+     *
+     * @param aggressive If set to true, configure {@link
+     * com.sleepycat.je.Environment#verify Environment.verify} and {@link
+     * com.sleepycat.je.Database#verify Database.verify} to perform fine
+     * granularity consistency checking that includes verifying in-memory
+     * constructs.
+     */
+    public void setAggressive(boolean aggressive) {
+        this.aggressive = aggressive;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to perform fine granularity consistency
+     * checking that includes verifying in-memory constructs.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to perform fine granularity consistency
+     * checking that includes verifying in-memory constructs.
+     */
+    public boolean getAggressive() {
+        return aggressive;
+    }
+
+    /**
+     * Configures {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} to print basic verification information to System.out.
+     *
+     * <p>By default this is false.</p>
+     *
+     * @param printInfo If set to true, configure {@link
+     * com.sleepycat.je.Environment#verify Environment.verify} and {@link
+     * com.sleepycat.je.Database#verify Database.verify} to print basic
+     * verification information to System.out.
+     */
+    public void setPrintInfo(boolean printInfo) {
+        this.printInfo = printInfo;
+    }
+
+    /**
+     * Returns true if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to print basic verification information
+     * to System.out.
+     *
+     * <p>This method may be called at any time during the life of the
+     * application.</p>
+     *
+     * @return True if the {@link com.sleepycat.je.Environment#verify
+     * Environment.verify} and {@link com.sleepycat.je.Database#verify
+     * Database.verify} are configured to print basic verification information
+     * to System.out.
+     */
+    public boolean getPrintInfo() {
+        return printInfo;
+    }
+
+    /**
+     * Configures the verify operation to display progress to the PrintStream
+     * argument.  The accumulated statistics will be displayed every N records,
+     * where N is the value of showProgressInterval.
+     */
+    public void setShowProgressStream(PrintStream showProgressStream) {
+        this.showProgressStream = showProgressStream;
+    }
+
+    /**
+     * Returns the PrintStream on which the progress messages will be displayed
+     * during long running verify operations.
+     */
+    public PrintStream getShowProgressStream() {
+        return showProgressStream;
+    }
+
+    /**
+     * When the verify operation is configured to display progress,
+     * showProgressInterval is the number of LNs between each progress report.
+     */
+    public void setShowProgressInterval(int showProgressInterval) {
+        this.showProgressInterval = showProgressInterval;
+    }
+
+    /**
+     * Returns the showProgressInterval value, if set.
+     */
+    public int getShowProgressInterval() {
+        return showProgressInterval;
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        /* Report each configuration attribute, as documented above. */
+        sb.append("propagateExceptions=").append(propagateExceptions);
+        sb.append("\naggressive=").append(aggressive);
+        sb.append("\nprintInfo=").append(printInfo);
+        sb.append("\nshowProgressInterval=").append(showProgressInterval);
+        sb.append('\n');
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/XAEnvironment.java b/src/com/sleepycat/je/XAEnvironment.java
new file mode 100644
index 0000000000000000000000000000000000000000..27f42c0febd05e644eca7e684e72c6ffec99b510
--- /dev/null
+++ b/src/com/sleepycat/je/XAEnvironment.java
@@ -0,0 +1,384 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: XAEnvironment.java,v 1.14.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.XAResource;
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.txn.TxnManager;
+
+/**
+ * An Environment that implements XAResource.  If JE is used in an XA
+ * environment, this class should be used instead of Environment so that
+ * appropriate XA functions are available.
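+ *
+ * <p>An illustrative sketch of direct XAResource usage (normally a transaction
+ * manager supplies the Xid and drives these calls; envHome, envConfig and xid
+ * are assumed to be defined by the caller):</p>
+ *
+ * <blockquote><pre>
+ *     XAEnvironment xaEnv = new XAEnvironment(envHome, envConfig);
+ *     xaEnv.start(xid, XAResource.TMNOFLAGS);
+ *     // ... perform operations on databases opened in xaEnv ...
+ *     xaEnv.end(xid, XAResource.TMSUCCESS);
+ *     if (xaEnv.prepare(xid) == XAResource.XA_OK) {
+ *         xaEnv.commit(xid, false);
+ *     }
+ * </pre></blockquote>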
+ */
+public class XAEnvironment extends Environment implements XAResource {
+
+    private static final boolean DEBUG = false;
+
+    /**
+     * Create a database environment handle.
+     *
+     * @param envHome The database environment's home directory.
+     *
+     * @param configuration The database environment attributes.  If null,
+     * default attributes are used.
+     */
+    public XAEnvironment(File envHome, EnvironmentConfig configuration)
+        throws DatabaseException {
+
+	super(envHome, configuration);
+    }
+
+    /**
+     * Used to get the Transaction object given an XA Xid.
+     */
+    public Transaction getXATransaction(Xid xid)
+	throws DatabaseException {
+
+	Txn ret = envImpl.getTxnManager().getTxnFromXid(xid);
+	if (ret == null) {
+	    return null;
+	}
+
+	/* Do we guarantee object identity for Transaction objects? */
+	return new Transaction(this, ret);
+    }
+
+    /**
+     * Used to set the Transaction object for an XA Xid.  Public for tests.
+     */
+    public void setXATransaction(Xid xid, Transaction txn)
+	throws DatabaseException {
+
+	envImpl.getTxnManager().
+	    registerXATxn(xid, txn.getTxn(), false);
+    }
+
+    /*
+     * XAResource methods.
+     */
+
+    public void commit(Xid xid, boolean ignore /*onePhase*/)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** commit called " + xid + "/" + ignore);
+	}
+
+	if (xid == null) {
+	    return;
+	}
+
+	try {
+	    checkEnv();
+	    Transaction txn = getXATransaction(xid);
+	    if (txn == null) {
+		throw new XAException
+		    ("No transaction found for " + xid + " during commit.");
+	    }
+	    removeReferringHandle(txn);
+	    if (txn.getTxn().getOnlyAbortable()) {
+		throw new XAException(XAException.XA_RBROLLBACK);
+	    }
+	    txn.getTxn().commit(xid);
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+	if (DEBUG) {
+	    System.out.println("*** commit finished");
+	}
+    }
+
+    public void end(Xid xid, int flags)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** end called " + xid + "/" + flags);
+	}
+
+	/* flags - One of TMSUCCESS, TMFAIL, or TMSUSPEND. */
+
+	boolean tmFail = (flags & XAResource.TMFAIL) != 0;
+	boolean tmSuccess = (flags & XAResource.TMSUCCESS) != 0;
+	boolean tmSuspend = (flags & XAResource.TMSUSPEND) != 0;
+	if ((tmFail && tmSuccess) ||
+	    ((tmFail || tmSuccess) && tmSuspend)) {
+	    throw new XAException(XAException.XAER_INVAL);
+	}
+
+	try {
+	    if (DEBUG) {
+		System.out.println
+		    ("Transaction for " + Thread.currentThread() + " is " +
+		     envImpl.getTxnManager().getTxnForThread());
+	    }
+
+	    Transaction txn =
+		envImpl.getTxnManager().unsetTxnForThread();
+	    if (txn == null) {
+		txn = getXATransaction(xid);
+		boolean isSuspended = (txn != null) &&
+		    txn.getTxn().isSuspended();
+		if (!isSuspended) {
+		    throw new XAException(XAException.XAER_NOTA);
+		}
+	    }
+
+	    if (tmFail) {
+		txn.getTxn().setOnlyAbortable();
+	    }
+
+	    if (tmSuspend) {
+		txn.getTxn().setSuspended(true);
+	    }
+
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+    }
+
+    public void forget(Xid xid)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** forget called");
+	}
+
+	throw new XAException(XAException.XAER_NOTA);
+    }
+
+    public boolean isSameRM(XAResource rm)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** isSameRM called");
+	}
+
+	try {
+	    checkEnv();
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+
+	if (rm == null) {
+	    return false;
+	}
+
+	if (!(rm instanceof XAEnvironment)) {
+	    return false;
+	}
+
+	return envImpl ==
+	    DbInternal.envGetEnvironmentImpl((XAEnvironment) rm);
+    }
+
+    public int prepare(Xid xid)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** prepare called");
+	}
+
+	try {
+	    checkEnv();
+	    Transaction txn = getXATransaction(xid);
+	    if (txn == null) {
+		throw new XAException
+		    ("No transaction found for " + xid + " during prepare.");
+	    }
+	    int ret = txn.getTxn().prepare(xid);
+
+	    if (DEBUG) {
+		System.out.println("*** prepare returning " + ret);
+	    }
+
+	    /*
+	     * If this transaction was R/O, then there were no writes.  We'll
+	     * commit it here because the user doesn't need to (and isn't
+	     * allowed to either).
+	     */
+	    if (ret == XAResource.XA_RDONLY) {
+		commit(xid, true);
+	    }
+
+	    return ret;
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+	return XAResource.XA_OK;        // for compiler
+    }
+
+    public Xid[] recover(int flags)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** recover called");
+	}
+
+	/* flags - One of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS. */
+
+	boolean tmStartRScan = (flags & XAResource.TMSTARTRSCAN) != 0;
+	boolean tmEndRScan = (flags & XAResource.TMENDRSCAN) != 0;
+	if ((tmStartRScan && tmEndRScan) ||
+	    (!tmStartRScan && !tmEndRScan && flags != TMNOFLAGS)) {
+	    throw new XAException(XAException.XAER_INVAL);
+	}
+
+	/*
+	 * We don't have to actually do anything with STARTRSCAN or ENDRSCAN
+	 * since we return the whole set of Xids to be recovered on each call.
+	 */
+	try {
+	    checkHandleIsValid();
+	    checkEnv();
+
+	    if (DEBUG) {
+		System.out.println("*** recover returning1");
+	    }
+
+	    return envImpl.getTxnManager().XARecover();
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+	return null;                // for compiler
+    }
+
+    public void rollback(Xid xid)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** rollback called");
+	}
+
+	try {
+	    checkEnv();
+	    Transaction txn = getXATransaction(xid);
+	    if (txn == null) {
+		throw new XAException
+		    ("No transaction found for " + xid + " during abort.");
+	    }
+	    removeReferringHandle(txn);
+	    txn.getTxn().abort(xid);
+	} catch (DatabaseException DE) {
+	    throwNewXAException(DE);
+	}
+
+	if (DEBUG) {
+	    System.out.println("*** rollback returning");
+	}
+    }
+
+    public int getTransactionTimeout()
+	throws XAException {
+
+	try {
+	    return (int) ((getConfig().getTxnTimeout() + 999999L) / 1000000L);
+	} catch (Exception DE) {
+	    throwNewXAException(DE);
+	}
+	return 0;                // for compiler
+    }
+
+    public boolean setTransactionTimeout(int timeout)
+	throws XAException {
+
+	return false;
+    }
+
+    public void start(Xid xid, int flags)
+	throws XAException {
+
+	if (DEBUG) {
+	    System.out.println("*** start called " + xid + "/" + flags);
+	}
+
+	boolean tmJoin = (flags & XAResource.TMJOIN) != 0;
+	boolean tmResume = (flags & XAResource.TMRESUME) != 0;
+
+	/* Check flags - only one of TMNOFLAGS, TMJOIN, or TMRESUME. */
+	if (xid == null ||
+	    (tmJoin && tmResume) ||
+	    (!tmJoin &&
+	     !tmResume &&
+	     flags != XAResource.TMNOFLAGS)) {
+	    throw new XAException(XAException.XAER_INVAL);
+	}
+
+	try {
+	    Transaction txn = getXATransaction(xid);
+	    TxnManager txnMgr = envImpl.getTxnManager();
+
+	    if (flags == XAResource.TMNOFLAGS) {
+
+		/*
+		 * If neither RESUME nor JOIN was set, make sure xid doesn't
+		 * exist in allXATxns.  Throw XAER_DUPID if it does.
+		 */
+		if (txn == null) {
+		    if (DEBUG) {
+			System.out.println
+			    ("Transaction for XID " + xid + " being created");
+		    }
+
+		    txn = beginTransaction(null, null);
+		    setXATransaction(xid, txn);
+
+		} else {
+		    throw new XAException(XAException.XAER_DUPID);
+		}
+	    } else if (tmJoin) {
+		if (txn == null) {
+		    throw new XAException(XAException.XAER_NOTA);
+		}
+
+		if (txnMgr.getTxnForThread() != null ||
+		    txn.getPrepared()) {
+		    throw new XAException(XAException.XAER_PROTO);
+		}
+	    } else if (tmResume) {
+		if (txn == null) {
+		    throw new XAException(XAException.XAER_NOTA);
+		}
+
+		if (!txn.getTxn().isSuspended()) {
+		    throw new XAException(XAException.XAER_PROTO);
+		}
+		txn.getTxn().setSuspended(false);
+	    }
+
+	    if (DEBUG) {
+		System.out.println
+		    ("Setting Transaction for " + Thread.currentThread());
+	    }
+	    txnMgr.setTxnForThread(txn);
+	} catch (DatabaseException DE) {
+	    if (DEBUG) {
+		System.out.println("*** start exception");
+	    }
+	    throwNewXAException(DE);
+	}
+
+	if (DEBUG) {
+	    System.out.println("*** start finished");
+	}
+    }
+
+    private void throwNewXAException(Exception E)
+	throws XAException {
+
+	XAException ret = new XAException(E.toString());
+	ret.initCause(E);
+	throw ret;
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java b/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..d94174e2f47e866ab15ef2a735ceca932dba57ce
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java
@@ -0,0 +1,165 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BaseLocalUtilizationTracker.java,v 1.8.2.4 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Shared implementation for all local utilization trackers.  Per-database
+ * utilization info is tracked in a local map rather than in the live
+ * DatabaseImpl objects.  The transferToUtilizationTracker method is called to
+ * transfer per-file and per-database info to the (global) UtilizationTracker.
+ */
+abstract class BaseLocalUtilizationTracker extends BaseUtilizationTracker {
+
+    /**
+     * Map of per-database utilization info.
+     *
+     * In LocalUtilizationTracker:
+     *    IdentityHashMap of DatabaseImpl to DbFileSummaryMap
+     *
+     * In RecoveryUtilizationTracker:
+     *    HashMap of DatabaseId to DbFileSummaryMap
+     */
+    private Map<Object, DbFileSummaryMap> dbMap;
+
+    /**
+     * Creates a local tracker with a map keyed by DatabaseId or DatabaseImpl.
+     *
+     * When used by this class dbMap is an IdentityHashMap keyed by
+     * DatabaseImpl. When used by RecoveryUtilizationTracker dbMap is a HashMap
+     * keyed by DatabaseId.
+     */
+    BaseLocalUtilizationTracker(EnvironmentImpl env, 
+                                Map<Object, DbFileSummaryMap> dbMap)
+        throws DatabaseException {
+
+        super(env, env.getCleaner());
+        this.dbMap = dbMap;
+    }
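+
+    /*
+     * Illustrative constructions (a sketch of subclass code, following the
+     * keying described in the comments above):
+     *
+     *   // In LocalUtilizationTracker, keys are DatabaseImpl identities:
+     *   super(env, new IdentityHashMap<Object, DbFileSummaryMap>());
+     *
+     *   // In RecoveryUtilizationTracker, keys are DatabaseId values:
+     *   super(env, new HashMap<Object, DbFileSummaryMap>());
+     */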
+
+    /**
+     * Returns the map of databases; for use by subclasses.
+     */
+    Map<Object, DbFileSummaryMap> getDatabaseMap() {
+        return dbMap;
+    }
+
+    /**
+     * Transfers counts and offsets from this local tracker to the given
+     * (global) UtilizationTracker and to the live DatabaseImpl objects.
+     *
+     * <p>When called after recovery has finished, this method must be invoked
+     * under the log write latch.</p>
+     */
+    public void transferToUtilizationTracker(UtilizationTracker tracker)
+        throws DatabaseException {
+
+        /* Add file summary information, including obsolete offsets. */
+        for (TrackedFileSummary localSummary : getTrackedFiles()) {
+            TrackedFileSummary fileSummary =
+                tracker.getFileSummary(localSummary.getFileNumber());
+            fileSummary.addTrackedSummary(localSummary);
+        }
+
+        /* Add DbFileSummary information. */
+        Iterator<Map.Entry<Object,DbFileSummaryMap>> dbEntries = 
+            dbMap.entrySet().iterator();
+
+        while (dbEntries.hasNext()) {
+            Map.Entry<Object,DbFileSummaryMap> dbEntry = dbEntries.next();
+            DatabaseImpl db = databaseKeyToDatabaseImpl(dbEntry.getKey());
+            /* If db is null, it was deleted. */
+            DbFileSummaryMap fileMap = dbEntry.getValue();
+            if (db != null) {
+                Iterator<Map.Entry<Long,DbFileSummary>> fileEntries = 
+                    fileMap.entrySet().iterator();
+
+                while (fileEntries.hasNext()) {
+                    Map.Entry<Long,DbFileSummary> fileEntry = 
+                        fileEntries.next();
+
+                    Long fileNum = fileEntry.getKey();
+                    DbFileSummary dbFileSummary =
+                        db.getDbFileSummary(fileNum, true /*willModify*/);
+                    if (dbFileSummary != null) {
+                        DbFileSummary localSummary = fileEntry.getValue();
+                        dbFileSummary.add(localSummary);
+                    }
+                }
+            }
+            /* Ensure that DbTree.releaseDb is called. [#16329] */
+            releaseDatabaseImpl(db);
+            /* This object is being discarded, subtract it from the budget. */
+            fileMap.subtractFromMemoryBudget();
+        }
+    }
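+
+    /*
+     * A minimal usage sketch (hypothetical caller code; the latch and
+     * tracker names below are stand-ins, not JE API):
+     *
+     *   LocalUtilizationTracker local = ...; // populated without latching
+     *   logWriteLatch.acquire();
+     *   try {
+     *       local.transferToUtilizationTracker(globalTracker);
+     *   } finally {
+     *       logWriteLatch.release();
+     *   }
+     */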
+
+    /**
+     * Returns the DatabaseImpl from the database key, which is either the
+     * DatabaseId or DatabaseImpl.  The releaseDatabaseImpl method must then
+     * be called with the DatabaseImpl returned by this method.
+     */
+    abstract DatabaseImpl databaseKeyToDatabaseImpl(Object databaseKey)
+        throws DatabaseException;
+
+    /**
+     * Must be called after calling databaseKeyToDatabaseImpl.  The db
+     * parameter may be null, in which case no action is taken.
+     *
+     * If DbTree.getDb is called by the implementation of
+     * databaseKeyToDatabaseImpl, then DbTree.releaseDb must be called by the
+     * implementation of this method.
+     */
+    abstract void releaseDatabaseImpl(DatabaseImpl db);
+
+    /**
+     * Allocates DbFileSummary information locally in this object rather than
+     * in the DatabaseImpl.
+     *
+     * @param databaseKey is either a DatabaseId or DatabaseImpl depending on
+     * whether called from the RecoveryUtilizationTracker or
+     * LocalUtilizationTracker, respectively.
+     *
+     * @return the summary, or null if the databaseKey param is null.
+     */
+    DbFileSummary getDbFileSummary(Object databaseKey, long fileNum) {
+        if (databaseKey != null) {
+            DbFileSummaryMap fileMap = dbMap.get(databaseKey);
+            if (fileMap == null) {
+                fileMap = new DbFileSummaryMap(true /* countParentMapEntry */);
+                fileMap.init(env);
+                dbMap.put(databaseKey, fileMap);
+            }
+            return fileMap.get
+                (Long.valueOf(fileNum), true /*adjustMemBudget*/,
+                 true /*checkResurrected*/, env.getFileManager());
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Deallocates all DbFileSummary objects for the given database key.
+     * For use by subclasses.
+     */
+    void removeDbFileSummaries(Object databaseKey) {
+        /* The dbMap entry is budgeted by the DbFileSummaryMap. */
+        DbFileSummaryMap fileMap =
+            dbMap.remove(databaseKey);
+        if (fileMap != null) {
+            fileMap.subtractFromMemoryBudget();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java b/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..06b6ef81e301eb3db78783a9ed39d5adb3a94bc5
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java
@@ -0,0 +1,342 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BaseUtilizationTracker.java,v 1.10.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Shared implementation for all utilization trackers.  The base implementation
+ * keeps track of per-file utilization info only.  Subclasses keep track of
+ * per-database info.
+ */
+public abstract class BaseUtilizationTracker {
+
+    EnvironmentImpl env;
+    Cleaner cleaner;
+    long bytesSinceActivate;
+    private long activeFile;
+
+    /**
+     * The tracked files are maintained in a Map held in a volatile field; the
+     * map is "copied on write" whenever an element is added or removed.  Add
+     * and remove are called only under the log write latch, but get and
+     * iteration may be performed at any time because each published map is
+     * never modified after assignment.
+     */
+    private volatile Map<Long,TrackedFileSummary> fileSummaries;
+
+    BaseUtilizationTracker(EnvironmentImpl env, Cleaner cleaner)
+        throws DatabaseException {
+
+        assert cleaner != null;
+        this.env = env;
+        this.cleaner = cleaner;
+        fileSummaries = new HashMap<Long,TrackedFileSummary>();
+        activeFile = -1;
+    }
+
+    public EnvironmentImpl getEnvironment() {
+        return env;
+    }
+
+    /**
+     * Returns a snapshot of the files being tracked as of the last time a
+     * log entry was added.  The summary info returned is the delta since the
+     * last checkpoint, not the grand totals, and is approximate since it is
+     * changing in real time.  This method may be called without holding the
+     * log write latch.
+     *
+     * <p>If files are added or removed from the collection of tracked files in
+     * real time, the returned collection will not be changed since it is a
+     * snapshot.  But the objects contained in the collection are live and will
+     * be updated in real time under the log write latch.  The collection and
+     * the objects in the collection should not be modified by the caller.</p>
+     */
+    public Collection<TrackedFileSummary> getTrackedFiles() {
+        return fileSummaries.values();
+    }
+
+    /**
+     * Returns one file from the snapshot of tracked files, or null if the
+     * given file number is not in the snapshot.
+     *
+     * @see #getTrackedFiles
+     */
+    public TrackedFileSummary getTrackedFile(long fileNum) {
+
+        return fileSummaries.get(fileNum);
+    }
+
+    /**
+     * Counts the addition of all new log entries including LNs, and returns
+     * whether the cleaner should be woken.
+     *
+     * <p>For the global tracker, must be called under the log write latch.</p>
+     */
+    final boolean countNew(long lsn,
+                           Object databaseKey,
+                           LogEntryType type,
+                           int size) {
+        assert type != null;
+        /* Count in per-file and per-file-per-db summaries. */
+        long fileNum = DbLsn.getFileNumber(lsn);
+        FileSummary fileSummary = getFileSummary(fileNum);
+        fileSummary.totalCount += 1;
+        fileSummary.totalSize += size;
+        if (isNodeType(type)) {
+            assert databaseKey != null :
+                "No DB for lsn=" + DbLsn.getNoFormatString(lsn) +
+                " type: " + type;
+            DbFileSummary dbFileSummary =
+                getDbFileSummary(databaseKey, fileNum);
+            if (isLNType(type)) {
+                fileSummary.totalLNCount += 1;
+                fileSummary.totalLNSize += size;
+                if (dbFileSummary != null) {
+                    dbFileSummary.totalLNCount += 1;
+                    dbFileSummary.totalLNSize += size;
+                }
+            } else {
+                fileSummary.totalINCount += 1;
+                fileSummary.totalINSize += size;
+                if (dbFileSummary != null) {
+                    dbFileSummary.totalINCount += 1;
+                    dbFileSummary.totalINSize += size;
+                }
+            }
+        }
+        /* Increment bytes and indicate whether to wake up the cleaner. */
+        bytesSinceActivate += size;
+        return (bytesSinceActivate >= env.getCleaner().cleanerBytesInterval);
+    }
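+
+    /*
+     * Worked example (illustrative): with cleanerBytesInterval set to
+     * 2,500,000, logging 2,500 entries of 1,000 bytes each brings
+     * bytesSinceActivate to 2,500,000, so the 2,500th call returns true and
+     * the caller should wake the cleaner.
+     */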
+
+    /**
+     * Counts an obsolete node by incrementing the obsolete count and size.
+     * Tracks the LSN offset if trackOffset is true and the offset is non-zero.
+     *
+     * <p>For the global tracker, must be called under the log write latch.</p>
+     */
+    final void countObsolete(long lsn,
+                             Object databaseKey,
+                             LogEntryType type,
+                             int size,
+                             boolean countPerFile,
+                             boolean countPerDb,
+                             boolean trackOffset) {
+        /* Only node types are counted obsolete. */
+        assert isNodeType(type);
+        boolean isLN = isLNType(type);
+        long fileNum = DbLsn.getFileNumber(lsn);
+        if (countPerFile) {
+            TrackedFileSummary fileSummary = getFileSummary(fileNum);
+            if (isLN) {
+                fileSummary.obsoleteLNCount += 1;
+                /* The size is optional when tracking obsolete LNs. */
+                if (size > 0) {
+                    fileSummary.obsoleteLNSize += size;
+                    fileSummary.obsoleteLNSizeCounted += 1;
+                }
+            } else {
+                fileSummary.obsoleteINCount += 1;
+                /* The size is not allowed when tracking obsolete INs. */
+                assert size == 0;
+            }
+            if (trackOffset) {
+                long offset = DbLsn.getFileOffset(lsn);
+                if (offset != 0) {
+                    fileSummary.trackObsolete(offset);
+                }
+            }
+        }
+        if (countPerDb) {
+            assert databaseKey != null :
+                "No DB for lsn=" + DbLsn.getNoFormatString(lsn) +
+                " type: " + type;
+            DbFileSummary dbFileSummary =
+                getDbFileSummary(databaseKey, fileNum);
+            if (dbFileSummary != null) {
+                if (isLN) {
+                    dbFileSummary.obsoleteLNCount += 1;
+                    /* The size is optional when tracking obsolete LNs. */
+                    if (size > 0) {
+                        dbFileSummary.obsoleteLNSize += size;
+                        dbFileSummary.obsoleteLNSizeCounted += 1;
+                    }
+                } else {
+                    dbFileSummary.obsoleteINCount += 1;
+                    /* The size is not allowed when tracking obsolete INs. */
+                    assert size == 0;
+                }
+            }
+        }
+    }
+
+    /**
+     * Counts all active LSNs in a database as obsolete in the per-file
+     * utilization summaries.  This method is called during database
+     * remove/truncate or when replaying those operations during recovery.
+     *
+     * <p>For the global tracker, must be called under the log write latch.</p>
+     *
+     * @param dbFileSummaries the map of Long file number to DbFileSummary for
+     * a database that is being deleted.
+     *
+     * @param mapLnLsn is the LSN of the MapLN when recovery is replaying the
+     * truncate/remove, or NULL_LSN when called outside of recovery; obsolete
+     * totals should only be counted when this LSN is prior to the LSN of the
+     * FileSummaryLN for the file being counted.
+     */
+    public void countObsoleteDb(DbFileSummaryMap dbFileSummaries,
+                                long mapLnLsn) {
+        Iterator<Map.Entry<Long,DbFileSummary>> entries = 
+            dbFileSummaries.entrySet().iterator();
+
+        while (entries.hasNext()) {
+            Map.Entry<Long,DbFileSummary> entry = entries.next();
+            Long fileNum = entry.getKey();
+            if (isFileUncounted(fileNum, mapLnLsn)) {
+                DbFileSummary dbFileSummary = entry.getValue();
+                TrackedFileSummary fileSummary =
+                    getFileSummary(fileNum.longValue());
+
+                /*
+                 * Count as obsolete the currently active amounts in the
+                 * database, which are the total amounts minus the previously
+                 * counted obsolete amounts.
+                 */
+                int lnObsoleteCount = dbFileSummary.totalLNCount -
+                                      dbFileSummary.obsoleteLNCount;
+                int lnObsoleteSize  = dbFileSummary.totalLNSize -
+                                      dbFileSummary.obsoleteLNSize;
+                int inObsoleteCount = dbFileSummary.totalINCount -
+                                      dbFileSummary.obsoleteINCount;
+                fileSummary.obsoleteLNCount += lnObsoleteCount;
+                fileSummary.obsoleteLNSizeCounted += lnObsoleteCount;
+                fileSummary.obsoleteLNSize += lnObsoleteSize;
+                fileSummary.obsoleteINCount += inObsoleteCount;
+
+                /*
+                 * Do not update the DbFileSummary.  It will be flushed when
+                 * the MapLN is deleted.  If later replayed during recovery, we
+                 * will call this method to update the per-file utilization.
+                 */
+            }
+        }
+    }
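+
+    /*
+     * Worked example (illustrative): if the DbFileSummary for file 3 records
+     * totalLNCount=10, obsoleteLNCount=4, totalLNSize=10,000 and
+     * obsoleteLNSize=3,500, deleting the database counts the 6 still-active
+     * LNs (6,500 bytes) in file 3 as obsolete in the per-file summary.
+     */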
+
+    /**
+     * Returns whether file summary information for the given LSN is not
+     * already counted.  Outside of recovery, always returns true.  For
+     * recovery, this is overridden by RecoveryUtilizationTracker and returns
+     * whether the FileSummaryLN for the given file is prior to the given LSN.
+     */
+    boolean isFileUncounted(Long fileNum, long lsn) {
+        return true;
+    }
+
+    /**
+     * Returns a DbFileSummary for the given database key and file number,
+     * adding an empty one if the file is not already being tracked.
+     *
+     * <p>This method is implemented by subclasses which interpret the
+     * databaseKey as either the DatabaseImpl or a DatabaseId.</p>
+     *
+     * <p>For the global tracker, must be called under the log write latch.</p>
+     *
+     * @return the summary, or null if the DB should not be tracked because
+     * the file has been deleted.
+     */
+    abstract DbFileSummary getDbFileSummary(Object databaseKey, long fileNum);
+
+    /**
+     * Returns a tracked file for the given file number, adding an empty one
+     * if the file is not already being tracked.
+     *
+     * <p>For the global tracker, must be called under the log write latch.</p>
+     */
+    TrackedFileSummary getFileSummary(long fileNum) {
+
+        if (activeFile < fileNum) {
+            activeFile = fileNum;
+        }
+        Long fileNumLong = Long.valueOf(fileNum);
+        TrackedFileSummary file = fileSummaries.get(fileNumLong);
+        if (file == null) {
+            /* Assign fileSummaries field after modifying the new map. */
+            file = new TrackedFileSummary(this, fileNum, cleaner.trackDetail);
+            Map<Long, TrackedFileSummary> newFiles = 
+                new HashMap<Long,TrackedFileSummary>(fileSummaries);
+            newFiles.put(fileNumLong, file);
+            fileSummaries = newFiles;
+        }
+        return file;
+    }
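+
+    /*
+     * A minimal sketch of the copy-on-write idiom used by getFileSummary and
+     * resetFile (field names are those of this class):
+     *
+     *   // Writer, under the log write latch:
+     *   Map<Long, TrackedFileSummary> copy =
+     *       new HashMap<Long, TrackedFileSummary>(fileSummaries);
+     *   copy.put(fileNumLong, file);  // mutate only the private copy
+     *   fileSummaries = copy;         // publish via the volatile field
+     *
+     *   // Reader, any thread, no latch:
+     *   TrackedFileSummary t = fileSummaries.get(fileNumLong);
+     *
+     * The volatile write publishes a fully-populated map, so readers never
+     * observe a partially-updated one.
+     */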
+
+    /**
+     * Called after the FileSummaryLN is written to the log during checkpoint.
+     *
+     * <p>We keep the active file summary in the tracked file map, but we
+     * remove older files to prevent unbounded growth of the map.</p>
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    void resetFile(TrackedFileSummary fileSummary) {
+
+        if (fileSummary.getFileNumber() < activeFile &&
+            fileSummary.getAllowFlush()) {
+            /* Assign fileSummaries field after modifying the new map. */
+            Map<Long, TrackedFileSummary> newFiles = 
+                new HashMap<Long,TrackedFileSummary>(fileSummaries);
+            newFiles.remove(fileSummary.getFileNumber());
+            fileSummaries = newFiles;
+        }
+    }
+
+    /**
+     * Returns whether the given type is a node; a null type is assumed to be
+     * an LN.
+     */
+    boolean isNodeType(LogEntryType type) {
+        return type == null || type.isNodeType();
+    }
+
+    /**
+     * Returns whether the given type is an LN; a null type is assumed to be an
+     * LN.
+     */
+    boolean isLNType(LogEntryType type) {
+        if (type != null) {
+            int len = LogEntryType.IN_TYPES.length;
+            for (int i = 0; i < len; i += 1) {
+                if (LogEntryType.IN_TYPES[i] == type) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Update memory budgets when this tracker is closed and will never be
+     * accessed again.
+     */
+    void close() {
+        for (TrackedFileSummary t: fileSummaries.values()) {
+            t.close();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/Cleaner.java b/src/com/sleepycat/je/cleaner/Cleaner.java
new file mode 100644
index 0000000000000000000000000000000000000000..5ece0db811910545bbfcee8090ad18dbbd42abbd
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/Cleaner.java
@@ -0,0 +1,1542 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Cleaner.java,v 1.213.2.5 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.ExceptionListener;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.cleaner.FileSelector.CheckpointStartCleanerState;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.utilint.DaemonRunner;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.PropUtil;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * The Cleaner is responsible for effectively garbage collecting the JE log.
+ * It looks through log files and locates log records (INs and LNs of all
+ * flavors) that are superseded by later versions.  Those that are "current"
+ * are propagated to a newer log file so that older log files can be deleted.
+ */
+public class Cleaner implements DaemonRunner, EnvConfigObserver {
+    /* From cleaner */
+    static final String CLEAN_IN = "CleanIN:";
+    static final String CLEAN_LN = "CleanLN:";
+    static final String CLEAN_MIGRATE_LN = "CleanMigrateLN:";
+    static final String CLEAN_PENDING_LN = "CleanPendingLN:";
+
+    /**
+     * Whether to fetch LNs for files in the to-be-cleaned set during lazy
+     * migration.  This is currently enabled because we do not support the
+     * dynamic addition of cleaner threads; that way, if the configured cleaner
+     * threads cannot keep up, we use proactive migration to keep up.
+     */
+    static final boolean PROACTIVE_MIGRATION = true;
+
+    /**
+     * Whether to update the IN generation count during searches.  This is
+     * currently disabled because 1) we update the generation of the BIN when
+     * we set a MIGRATE flag and 2) if the BIN is not evicted its parents will
+     * not be, so not updating the generation during the search has no benefit.
+     * By not updating the generation during searches for which we do NOT set
+     * the MIGRATE flag, we avoid holding INs in the cache that are not needed
+     * for lazy migration.  However, we do very few searches for obsolete LNs
+     * because the obsolete tracking info prevents this, so the benefit of not
+     * updating the generation during searches is questionable.  In other
+     * words, changing this setting will have little effect.
+     */
+    static final CacheMode UPDATE_GENERATION = CacheMode.UNCHANGED;
+
+    /**
+     * Whether the cleaner should participate in critical eviction.  Ideally
+     * the cleaner would not participate in eviction, since that would reduce
+     * the cost of cleaning.  However, the cleaner can add large numbers of
+     * nodes to the cache.  By not participating in eviction, other threads
+     * could be kept in a constant state of eviction and would effectively
+     * starve.  Therefore, this setting is currently enabled.
+     */
+    static final boolean DO_CRITICAL_EVICTION = true;
+
+    /*
+     * Cumulative counters.  Updates to these counters occur in multiple
+     * threads, including FileProcessor threads, and are not synchronized.
+     * This could produce errors in counting, but avoids contention around stat
+     * updates.
+     */
+    long nCleanerRuns = 0;
+    long nCleanerDeletions = 0;
+    long nINsObsolete = 0;
+    long nINsCleaned = 0;
+    long nINsDead = 0;
+    long nINsMigrated = 0;
+    long nLNsObsolete = 0;
+    long nLNsCleaned = 0;
+    long nLNsDead = 0;
+    long nLNsLocked = 0;
+    long nLNsMigrated = 0;
+    long nLNsMarked = 0;
+    long nLNQueueHits = 0;
+    long nPendingLNsProcessed = 0;
+    long nMarkedLNsProcessed = 0;
+    long nToBeCleanedLNsProcessed = 0;
+    long nClusterLNsProcessed = 0;
+    long nPendingLNsLocked = 0;
+    long nEntriesRead = 0;
+    long nRepeatIteratorReads = 0;
+
+    /*
+     * Configuration parameters are non-private for use by FileProcessor,
+     * UtilizationTracker.
+     */
+    long lockTimeout;
+    int readBufferSize;
+    int lookAheadCacheSize;
+    long nDeadlockRetries;
+    boolean expunge;
+    boolean clusterResident;
+    boolean clusterAll;
+    int maxBatchFiles;
+    Level detailedTraceLevel;
+    long cleanerBytesInterval;
+    boolean trackDetail;
+    boolean fetchObsoleteSize;
+    boolean lazyMigration;
+
+    /**
+     * All files that are to-be-cleaned.  Used to perform proactive migration.
+     * Is read-only after assignment, so no synchronization is needed.
+     */
+    private Set<Long> toBeCleanedFiles = Collections.emptySet();
+
+    /**
+     * All files that are below the minUtilization threshold.  Used to perform
+     * clustering migration.  Is read-only after assignment, so no
+     * synchronization is needed.
+     */
+    private Set<Long> lowUtilizationFiles = Collections.emptySet();
+
+    private String name;
+    private EnvironmentImpl env;
+    private UtilizationProfile profile;
+    private UtilizationTracker tracker;
+    private FileSelector fileSelector;
+    private FileProcessor[] threads;
+
+    /*
+     * Log file deletion must check for the presence of read/only processes
+     * and ongoing backups.
+     */
+    private Object deleteFileLock;
+    private int deleteProhibited;  // protected by deleteFileLock
+
+    public Cleaner(EnvironmentImpl env, String name)
+        throws DatabaseException {
+
+        this.env = env;
+        this.name = name;
+        tracker = new UtilizationTracker(env, this);
+        profile = new UtilizationProfile(env, tracker);
+        fileSelector = new FileSelector();
+        threads = new FileProcessor[0];
+        deleteFileLock = new Object();
+
+        /*
+         * The trackDetail property is immutable because of the complexity (if
+         * it were mutable) in determining whether to update the memory budget
+         * and perform eviction.
+         */
+        trackDetail = env.getConfigManager().getBoolean
+            (EnvironmentParams.CLEANER_TRACK_DETAIL);
+
+        /* Initialize mutable properties and register for notifications. */
+        envConfigUpdate(env.getConfigManager(), null);
+        env.addConfigObserver(this);
+    }
+
+    /**
+     * Process notifications of mutable property changes.
+     */
+    public void envConfigUpdate(DbConfigManager cm,
+				EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        lockTimeout = PropUtil.microsToMillis(cm.getLong
+                (EnvironmentParams.CLEANER_LOCK_TIMEOUT));
+
+        readBufferSize = cm.getInt(EnvironmentParams.CLEANER_READ_SIZE);
+        if (readBufferSize <= 0) {
+            readBufferSize = cm.getInt
+                (EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+        }
+
+        lookAheadCacheSize = cm.getInt
+            (EnvironmentParams.CLEANER_LOOK_AHEAD_CACHE_SIZE);
+
+        nDeadlockRetries = cm.getInt(EnvironmentParams.CLEANER_DEADLOCK_RETRY);
+
+	expunge = cm.getBoolean(EnvironmentParams.CLEANER_REMOVE);
+
+	clusterResident = cm.getBoolean(EnvironmentParams.CLEANER_CLUSTER);
+
+	clusterAll = cm.getBoolean(EnvironmentParams.CLEANER_CLUSTER_ALL);
+
+        maxBatchFiles = cm.getInt(EnvironmentParams.CLEANER_MAX_BATCH_FILES);
+
+        detailedTraceLevel = Tracer.parseLevel
+            (env, EnvironmentParams.JE_LOGGING_LEVEL_CLEANER);
+
+        if (clusterResident && clusterAll) {
+            throw new IllegalArgumentException
+                ("Both " + EnvironmentParams.CLEANER_CLUSTER +
+                 " and " + EnvironmentParams.CLEANER_CLUSTER_ALL +
+                 " may not be set to true.");
+        }
+
+        int nThreads = cm.getInt(EnvironmentParams.CLEANER_THREADS);
+        assert nThreads > 0;
+
+        if (nThreads != threads.length) {
+
+            /* Shutdown threads when reducing their number. */
+            for (int i = nThreads; i < threads.length; i += 1) {
+                if (threads[i] != null) {
+                    threads[i].shutdown();
+                    threads[i] = null;
+                }
+            }
+
+            /* Copy existing threads that are still used. */
+            FileProcessor[] newThreads = new FileProcessor[nThreads];
+            for (int i = 0; i < nThreads && i < threads.length; i += 1) {
+                newThreads[i] = threads[i];
+            }
+
+            /* Don't lose track of new threads if an exception occurs. */
+            threads = newThreads;
+
+            /* Start new threads when increasing their number. */
+            for (int i = 0; i < nThreads; i += 1) {
+                if (threads[i] == null) {
+                    threads[i] = new FileProcessor
+                        (name + '-' + (i + 1),
+                         env, this, profile, fileSelector);
+                }
+            }
+        }
+
+        cleanerBytesInterval = cm.getLong
+            (EnvironmentParams.CLEANER_BYTES_INTERVAL);
+        if (cleanerBytesInterval == 0) {
+            cleanerBytesInterval = cm.getLong
+                (EnvironmentParams.LOG_FILE_MAX) / 4;
+        }
+
+        fetchObsoleteSize = cm.getBoolean
+            (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE);
+
+        /*
+         * Lazy migration of LNs is disabled if CHECKPOINTER_HIGH_PRIORITY is
+         * true.  LN migration slows down the checkpoint and so LNs are
+         * migrated by FileProcessor when high priority checkpoints are
+         * configured.
+         */
+        lazyMigration = !cm.getBoolean
+            (EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY);
+    }
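+
+    /*
+     * Worked example (illustrative): with CLEANER_BYTES_INTERVAL unset (0)
+     * and LOG_FILE_MAX at, say, 10,000,000 bytes, cleanerBytesInterval
+     * defaults to 2,500,000, so the cleaner is woken roughly four times per
+     * log file written.
+     */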
+
+    public UtilizationTracker getUtilizationTracker() {
+        return tracker;
+    }
+
+    public UtilizationProfile getUtilizationProfile() {
+        return profile;
+    }
+
+    /** For unit testing. */
+    FileSelector getFileSelector() {
+        return fileSelector;
+    }
+
+    public boolean getFetchObsoleteSize() {
+        return fetchObsoleteSize;
+    }
+
+    /*
+     * Delegate the run/pause/wakeup/shutdown DaemonRunner operations.  We
+     * always check for null to account for the possibility of exceptions
+     * during thread creation.  The cleaner daemons are never run if No
+     * Locking mode is enabled.
+     */
+    public void runOrPause(boolean run) {
+	if (!env.isNoLocking()) {
+	    for (int i = 0; i < threads.length; i += 1) {
+                FileProcessor processor = threads[i];
+		if (processor != null) {
+		    processor.runOrPause(run);
+		}
+	    }
+	}
+    }
+
+    public void wakeup() {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                threads[i].wakeup();
+            }
+        }
+    }
+
+    public void requestShutdown() {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                threads[i].requestShutdown();
+            }
+        }
+    }
+
+    public void shutdown() {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                threads[i].shutdown();
+                threads[i].clearEnv();
+                threads[i] = null;
+            }
+        }
+    }
+
+    public int getNWakeupRequests() {
+        int count = 0;
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                count += threads[i].getNWakeupRequests();
+            }
+        }
+        return count;
+    }
+
+    private boolean areThreadsRunning() {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                return threads[i].isRunning();
+            }
+        }
+        return false;
+    }
+
+    public void setExceptionListener(ExceptionListener exceptionListener) {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null) {
+                threads[i].setExceptionListener(exceptionListener);
+            }
+        }
+    }
+
+    /**
+     * Cleans selected files and returns the number of files cleaned.  This
+     * method is not invoked by a daemon thread; it is invoked
+     * programmatically.
+     *
+     * @param cleanMultipleFiles is true to clean until we're under budget,
+     * or false to clean at most one file.
+     *
+     * @param forceCleaning is true to clean even if we're not under the
+     * utilization threshold.
+     *
+     * @return the number of files cleaned, not including files cleaned
+     * unsuccessfully.
+     */
+    public int doClean(boolean cleanMultipleFiles, boolean forceCleaning)
+        throws DatabaseException {
+
+        FileProcessor processor = new FileProcessor
+            ("", env, this, profile, fileSelector);
+        return processor.doClean
+            (false /*invokedFromDaemon*/, cleanMultipleFiles, forceCleaning);
+    }
+
+    /**
+     * Load stats.
+     */
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        stat.setCleanerBacklog(getBacklog());
+        stat.setNCleanerRuns(nCleanerRuns);
+        stat.setNCleanerDeletions(nCleanerDeletions);
+        stat.setNINsObsolete(nINsObsolete);
+        stat.setNINsCleaned(nINsCleaned);
+        stat.setNINsDead(nINsDead);
+        stat.setNINsMigrated(nINsMigrated);
+        stat.setNLNsObsolete(nLNsObsolete);
+        stat.setNLNsCleaned(nLNsCleaned);
+        stat.setNLNsDead(nLNsDead);
+        stat.setNLNsLocked(nLNsLocked);
+        stat.setNLNsMigrated(nLNsMigrated);
+        stat.setNLNsMarked(nLNsMarked);
+        stat.setNLNQueueHits(nLNQueueHits);
+        stat.setNPendingLNsProcessed(nPendingLNsProcessed);
+        stat.setNMarkedLNsProcessed(nMarkedLNsProcessed);
+        stat.setNToBeCleanedLNsProcessed(nToBeCleanedLNsProcessed);
+        stat.setNClusterLNsProcessed(nClusterLNsProcessed);
+        stat.setNPendingLNsLocked(nPendingLNsLocked);
+        stat.setNCleanerEntriesRead(nEntriesRead);
+        stat.setNRepeatIteratorReads(nRepeatIteratorReads);
+        if (!config.getFast()) {
+            stat.setTotalLogSize(profile.getTotalLogSize());
+        }
+
+        if (config.getClear()) {
+            nCleanerRuns = 0;
+            nCleanerDeletions = 0;
+            nINsObsolete = 0;
+            nINsCleaned = 0;
+            nINsDead = 0;
+            nINsMigrated = 0;
+            nLNsObsolete = 0;
+            nLNsCleaned = 0;
+            nLNsDead = 0;
+            nLNsLocked = 0;
+            nLNsMigrated = 0;
+            nLNsMarked = 0;
+            nLNQueueHits = 0;
+            nPendingLNsProcessed = 0;
+            nMarkedLNsProcessed = 0;
+            nToBeCleanedLNsProcessed = 0;
+            nClusterLNsProcessed = 0;
+            nPendingLNsLocked = 0;
+            nEntriesRead = 0;
+            nRepeatIteratorReads = 0;
+        }
+    }
+
+    /**
+     * Deletes all files that are safe-to-delete, if there are no read/only
+     * processes and no concurrent backups.
+     *
+     * Deletion is coordinated by the synchronization variable deleteFileLock
+     * AND by the deleteProhibited state variable.  The reason that two
+     * different mechanisms are used is that file deletion must be prevented
+     * both inter- and intra-process.  File locks must be used for
+     * inter-process protection, and the state variable for intra-process.
+     *
+     * To guard against read/only processes, the would-be deleter tries to get
+     * an exclusive lock on the environment. This will not be possible if a
+     * read/only process exists.
+     *
+     * To guard against backup mode, the would-be deleter checks the
+     * deleteProhibited state.  Backup and file deletion can only be carried
+     * out by a read/write process, so both activities occur in the same
+     * process.  Note that file locks are not supported intra-process.  The
+     * deleteProhibited state is used rather than a simple synchronization on
+     * deleteFileLock because the start/endBackup is controlled by the
+     * application, and the copying of log files can take an arbitrarily long
+     * time. Using synchronization on deleteFileLock would make it possible to
+     * lock out a cleaner thread for an unacceptable amount of time.
+     *
+     * The deleteProhibited state is also set while a replication node is
+     * sending files to initialize another node.  More than one such
+     * initialization may be occurring at once, and a backup may also be
+     * occurring simultaneously, hence deleteProhibited may be greater than 1.
+     */
+    void deleteSafeToDeleteFiles()
+        throws DatabaseException {
+
+        /*
+         * Synchronized to prevent multiple threads from requesting the same
+         * file lock.
+         */
+        synchronized (deleteFileLock) {
+            if (deleteProhibited > 0) {
+                return; /* deletion disabled. */
+            }
+
+            Set<Long> safeFiles = fileSelector.copySafeToDeleteFiles();
+            if (safeFiles == null) {
+                return; /* Nothing to do. */
+            }
+
+            /*
+             * Fail loudly if the environment is invalid.  A
+             * RunRecoveryException must have occurred.
+             */
+            env.checkIfInvalid();
+
+            /*
+             * Fail silently if the environment is not open.
+             */
+            if (env.mayNotWrite()) {
+                return;
+            }
+
+            /*
+             * If we can't get an exclusive lock, then there are reader
+             * processes and we can't delete any cleaned files.
+             */
+            if (!env.getFileManager().lockEnvironment(false, true)) {
+                Tracer.trace
+                    (Level.SEVERE, env, "Cleaner has " + safeFiles.size() +
+                     " files not deleted because of read-only processes.");
+                return;
+            }
+
+            try {
+                for (Iterator<Long> i = safeFiles.iterator(); i.hasNext();) {
+                    Long fileNum = i.next();
+                    long fileNumValue = fileNum.longValue();
+                    boolean deleted = false;
+                    try {
+                        if (expunge) {
+                            env.getFileManager().deleteFile(fileNumValue);
+                        } else {
+                            env.getFileManager().renameFile
+                                (fileNumValue, FileManager.DEL_SUFFIX);
+                        }
+                        deleted = true;
+                    } catch (DatabaseException e) {
+                        traceFileNotDeleted(e, fileNumValue);
+                    } catch (IOException e) {
+                        traceFileNotDeleted(e, fileNumValue);
+                    }
+
+                    /*
+                     * If the log file was not deleted, leave it in the
+                     * safe-to-delete set (and the UP) so that we will retry
+                     * the deletion later.  If the log file was deleted, trace
+                     * the deletion, delete the file from the UP and from the
+                     * safe-to-delete set.
+                     *
+                     * We do not retry if an error occurs deleting the UP
+                     * database entries below.  Retrying is intended only to
+                     * solve a problem on Windows where deleting a log file
+                     * isn't always possible immediately after closing it.
+                     */
+                    if (deleted) {
+                        Tracer.trace
+                            (Level.SEVERE, env,
+                             "Cleaner deleted file 0x" +
+                             Long.toHexString(fileNumValue));
+
+                        /*
+                         * Remove the file from the profile before removing
+                         * it from the safe-to-delete set.  If we remove in the
+                         * reverse order, it may be selected for cleaning.
+                         * Always delete the file from the safe-to-delete set
+                         * (in a finally block) so that we don't attempt to
+                         * delete the file again.
+                         */
+                        try {
+                            profile.removeFile
+                                (fileNum,
+                                 fileSelector.getCleanedDatabases(fileNum));
+                        } finally {
+                            fileSelector.removeDeletedFile
+                                (fileNum, env.getMemoryBudget());
+                        }
+                        nCleanerDeletions++;
+                    }
+                }
+            } finally {
+                env.getFileManager().releaseExclusiveLock();
+            }
+        }
+    }
+
+    public void setDeleteProhibited() {
+
+        synchronized (deleteFileLock) {
+            deleteProhibited += 1;
+        }
+    }
+
+    public void clearDeleteProhibited() {
+        synchronized (deleteFileLock) {
+            deleteProhibited -= 1;
+        }
+    }
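+
+    /*
+     * Illustrative bracketing (a hypothetical application-side sketch, not
+     * JE API documentation) of the prohibit counter around an operation that
+     * must not lose log files, such as a backup:
+     *
+     *   cleaner.setDeleteProhibited();
+     *   try {
+     *       copyLogFiles(); // hypothetical backup step
+     *   } finally {
+     *       cleaner.clearDeleteProhibited();
+     *   }
+     *
+     * Because deleteProhibited is a counter rather than a boolean, a backup
+     * and one or more replica initializations may safely overlap.
+     */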
+
+    private void traceFileNotDeleted(Throwable e, long fileNum)
+        throws DatabaseException {
+
+        String msg = "Cleaner deleteSafeToDeleteFiles" +
+             "Log file 0x" + Long.toHexString(fileNum) + " could not be " +
+             (expunge ? "deleted" : "renamed") +
+             ".  This operation will be retried at the next checkpoint." +
+             e.toString() +
+             " FileSelector: " + fileSelector;
+        Tracer.trace
+            (env, "Cleaner", "deleteSafeToDeleteFiles", msg, e);
+    }
+
+    /**
+     * Returns a copy of the cleaned and processed files at the time a
+     * checkpoint starts.
+     *
+     * <p>If non-null is returned, the checkpoint should flush an extra level,
+     * and addCheckpointedFiles() should be called when the checkpoint is
+     * complete.</p>
+     */
+    public CheckpointStartCleanerState getFilesAtCheckpointStart()
+        throws DatabaseException {
+
+        /* Pending LNs can prevent file deletion. */
+        processPending();
+
+        return fileSelector.getFilesAtCheckpointStart();
+    }
+
+    /**
+     * When a checkpoint is complete, update the files that were returned at
+     * the beginning of the checkpoint.
+     */
+    public void updateFilesAtCheckpointEnd(CheckpointStartCleanerState info)
+        throws DatabaseException {
+
+        fileSelector.updateFilesAtCheckpointEnd(info);
+        deleteSafeToDeleteFiles();
+    }
+
+    /**
+     * Update the lowUtilizationFiles and toBeCleanedFiles fields with new
+     * read-only collections.
+     */
+    public void updateReadOnlyFileCollections() {
+        toBeCleanedFiles = fileSelector.getToBeCleanedFiles();
+        lowUtilizationFiles = fileSelector.getLowUtilizationFiles();
+    }
+
+    final int getBacklog() {
+        return toBeCleanedFiles.size();
+    }
+
+    /**
+     * If any LNs are pending, process them.  This method should be called
+     * often enough to prevent the pending LN set from growing too large.
+     */
+    void processPending()
+        throws DatabaseException {
+
+        DbTree dbMapTree = env.getDbTree();
+
+        LNInfo[] pendingLNs = fileSelector.getPendingLNs();
+        if (pendingLNs != null) {
+            TreeLocation location = new TreeLocation();
+
+            for (int i = 0; i < pendingLNs.length; i += 1) {
+                LNInfo info = pendingLNs[i];
+
+                DatabaseId dbId = info.getDbId();
+                DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout);
+                try {
+                    byte[] key = info.getKey();
+                    byte[] dupKey = info.getDupKey();
+                    LN ln = info.getLN();
+
+                    /* Evict before processing each entry. */
+                    if (DO_CRITICAL_EVICTION) {
+                        env.getEvictor().
+                            doCriticalEviction(true); // backgroundIO
+                    }
+
+                    processPendingLN
+                        (ln, db, key, dupKey, location);
+                } finally {
+                    dbMapTree.releaseDb(db);
+                }
+
+                /* Sleep if background read/write limit was exceeded. */
+                env.sleepAfterBackgroundIO();
+            }
+        }
+
+        DatabaseId[] pendingDBs = fileSelector.getPendingDBs();
+        if (pendingDBs != null) {
+            for (int i = 0; i < pendingDBs.length; i += 1) {
+                DatabaseId dbId = pendingDBs[i];
+                DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout);
+                try {
+                    if (db == null || db.isDeleteFinished()) {
+                        fileSelector.removePendingDB(dbId);
+                    }
+                } finally {
+                    dbMapTree.releaseDb(db);
+                }
+            }
+        }
+    }
+
+    /**
+     * Processes a pending LN, getting the lock first to ensure that the
+     * overhead of retries is minimal.
+     */
+    private void processPendingLN(LN ln,
+                                  DatabaseImpl db,
+                                  byte[] key,
+                                  byte[] dupKey,
+                                  TreeLocation location)
+        throws DatabaseException {
+
+        boolean parentFound = false;  // We found the parent BIN.
+        boolean processedHere = true; // The LN was cleaned here.
+        boolean lockDenied = false;   // The LN lock was denied.
+        boolean obsolete = false;     // The LN is no longer in use.
+        boolean completed = false;    // This method completed.
+
+        BasicLocker locker = null;
+        BIN bin = null;
+        DIN parentDIN = null;
+        try {
+            nPendingLNsProcessed++;
+
+            /*
+             * If the DB is gone, this LN is obsolete.  If delete cleanup is in
+             * progress, put the DB into the DB pending set; this LN will be
+             * declared deleted after the delete cleanup is finished.
+             */
+            if (db == null || db.isDeleted()) {
+                addPendingDB(db);
+                nLNsDead++;
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            Tree tree = db.getTree();
+            assert tree != null;
+
+            /* Get a non-blocking lock on the original node ID. */
+
+	    locker =
+		BasicLocker.createBasicLocker(env, false /*noWait*/,
+					      true /*noAPIReadLock*/);
+            LockResult lockRet = locker.nonBlockingLock
+                (ln.getNodeId(), LockType.READ, db);
+            if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+                /* Try again later. */
+                nPendingLNsLocked++;
+                lockDenied = true;
+                completed = true;
+                return;
+            }
+
+            /*
+	     * Search down to the bottom most level for the parent of this LN.
+             *
+             * We pass searchDupTree=true to search the dup tree by nodeID if
+             * necessary.  This handles the case where dupKey is null because
+             * the pending entry was a deleted single-duplicate in a BIN.
+	     */
+            parentFound = tree.getParentBINForChildLN
+                (location, key, dupKey, ln,
+                 false,  // splitsAllowed
+                 true,   // findDeletedEntries
+                 true,   // searchDupTree
+                 UPDATE_GENERATION);
+            bin = location.bin;
+            int index = location.index;
+
+            if (!parentFound) {
+                nLNsDead++;
+                obsolete = true;
+                completed = true;
+		return;
+            }
+
+            if (ln.containsDuplicates()) {
+                /* Migrate a DupCountLN. */
+                parentDIN = (DIN) bin.fetchTarget(index);
+                parentDIN.latch(UPDATE_GENERATION);
+                ChildReference dclRef = parentDIN.getDupCountLNRef();
+                processedHere = false;
+                migrateDupCountLN
+                    (db, dclRef.getLsn(), parentDIN, dclRef,
+                     true,           // wasCleaned
+                     true,           // isPending
+                     ln.getNodeId(), // lockedPendingNodeId
+                     CLEAN_PENDING_LN);
+            } else {
+                /* Migrate a plain LN. */
+                processedHere = false;
+                migrateLN
+                    (db, bin.getLsn(index), bin, index,
+                     true,           // wasCleaned
+                     true,           // isPending
+                     ln.getNodeId(), // lockedPendingNodeId
+                     true,           // backgroundIO
+                     CLEAN_PENDING_LN);
+            }
+            completed = true;
+	} catch (DatabaseException DBE) {
+	    DBE.printStackTrace();
+	    Tracer.trace(env, "com.sleepycat.je.cleaner.Cleaner", "processLN",
+			 "Exception thrown: ", DBE);
+	    throw DBE;
+        } finally {
+            if (parentDIN != null) {
+                parentDIN.releaseLatch();
+            }
+
+            if (bin != null) {
+                bin.releaseLatch();
+            }
+
+            if (locker != null) {
+                locker.operationEnd();
+            }
+
+            /*
+             * If migrateLN was not called above, remove the pending LN and
+             * perform tracing in this method.
+             */
+            if (processedHere) {
+                if (completed && !lockDenied) {
+                    fileSelector.removePendingLN(ln.getNodeId());
+                }
+                trace(detailedTraceLevel, CLEAN_PENDING_LN, ln, DbLsn.NULL_LSN,
+                      completed, obsolete, false /*migrated*/);
+            }
+        }
+    }
+
+    /**
+     * Returns whether the given BIN entry may be stripped by the evictor.
+     * True is always returned if the BIN is not dirty.  False is returned if
+     * the BIN is dirty and the entry will be migrated soon.
+     *
+     * Note that the BIN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    public boolean isEvictable(BIN bin, int index) {
+
+        if (bin.getDirty()) {
+
+            if (bin.getMigrate(index)) {
+                return false;
+            }
+
+            long lsn = bin.getLsn(index);
+            if (lsn == DbLsn.NULL_LSN) {
+
+                /*
+                 * LN is resident but never logged; no cleaning restrictions
+                 * apply.
+                 */
+                return true;
+            }
+
+            boolean isResident = (bin.getTarget(index) != null);
+            Long fileNum = Long.valueOf(DbLsn.getFileNumber(lsn));
+
+            if ((PROACTIVE_MIGRATION || isResident) &&
+                toBeCleanedFiles.contains(fileNum)) {
+                return false;
+            }
+
+            if ((clusterAll || (clusterResident && isResident)) &&
+                lowUtilizationFiles.contains(fileNum)) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * This method should be called just before logging a BIN.  LNs will be
+     * migrated if the MIGRATE flag is set, or if they are in a file to be
+     * cleaned, or if the LNs qualify according to the rules for cluster and
+     * clusterAll.
+     *
+     * <p>On return this method guarantees that no MIGRATE flag will be set on
+     * any child entry.  If this method is *not* called before logging a BIN,
+     * then the addPendingLNs method must be called.</p>
+     *
+     * @param bin is the latched BIN.  The latch will not be released by this
+     * method.
+     *
+     * @param proactiveMigration perform proactive migration if needed; this is
+     * false during a split, to reduce the delay in the user operation.
+     */
+    public void lazyMigrateLNs(final BIN bin,
+                               boolean proactiveMigration,
+                               boolean backgroundIO)
+        throws DatabaseException {
+
+        DatabaseImpl db = bin.getDatabase();
+
+        boolean isBinInDupDb = db.getSortedDuplicates() &&
+                               !bin.containsDuplicates();
+
+        /*
+         * For non-resident LNs, sort them by LSN before migrating them.
+         * Fetching in LSN order reduces physical disk I/O.
+         */
+        Integer[] sortedIndices = null;
+        int nSortedIndices = 0;
+        int nEntries = bin.getNEntries();
+
+        for (int index = 0; index < nEntries; index += 1) {
+
+            boolean migrateFlag = bin.getMigrate(index);
+            boolean isResident = (bin.getTarget(index) != null);
+            long childLsn = bin.getLsn(index);
+
+            if (childLsn != DbLsn.NULL_LSN) {
+                /* LSN could be NULL_LSN in deferred-write mode. */
+
+                if (shouldMigrateLN
+                    (migrateFlag, isResident, proactiveMigration, isBinInDupDb,
+                     childLsn)) {
+
+                    if (isResident) {
+                        migrateLN
+                            (db, childLsn, bin, index,
+                             migrateFlag, // wasCleaned
+                             false,       // isPending
+                             0,           // lockedPendingNodeId
+                             backgroundIO,
+                             CLEAN_MIGRATE_LN);
+                    } else {
+                        if (sortedIndices == null) {
+                            sortedIndices = new Integer[nEntries];
+                        }
+                        sortedIndices[nSortedIndices++] =
+                            Integer.valueOf(index);
+                    }
+                }
+            }
+        }
+
+        if (sortedIndices != null) {
+            Arrays.sort(sortedIndices,
+                        0,
+                        nSortedIndices,
+                        new Comparator<Integer>() {
+                public int compare(Integer int1, Integer int2) {
+                    return DbLsn.compareTo(bin.getLsn(int1), bin.getLsn(int2));
+                }
+            });
+            for (int i = 0; i < nSortedIndices; i += 1) {
+                int index = sortedIndices[i].intValue();
+                long childLsn = bin.getLsn(index);
+                boolean migrateFlag = bin.getMigrate(index);
+                migrateLN
+                    (db, childLsn, bin, index,
+                     migrateFlag, // wasCleaned
+                     false,       // isPending
+                     0,           // lockedPendingNodeId
+                     backgroundIO,
+                     CLEAN_MIGRATE_LN);
+            }
+        }
+    }
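+
+    /*
+     * Worked example (illustrative): if the non-resident entries selected
+     * above have LSNs (file 7, offset 900), (file 7, offset 100) and
+     * (file 9, offset 50), the sort fetches them as (7,100), (7,900),
+     * (9,50), turning scattered reads into a forward sweep of each file.
+     */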
+
+    /**
+     * This method should be called just before logging a root DIN.  The
+     * DupCountLN will be migrated if the MIGRATE flag is set, or if it is in a
+     * file to be cleaned, or if the LN qualifies according to the rules for
+     * cluster and clusterAll.
+     *
+     * <p>On return this method guarantees that the MIGRATE flag will not be
+     * set on the child entry.  If this method is *not* called before logging a
+     * root DIN, then the addPendingDupCountLN method must be called.</p>
+     *
+     * @param din is the latched DIN.  The latch will not be released by this
+     * method.
+     *
+     * @param dclRef is the reference to the DupCountLN.
+     *
+     * @param proactiveMigration perform proactive migration if needed; this is
+     * false during a split, to reduce the delay in the user operation.
+     */
+    public void lazyMigrateDupCountLN(DIN din,
+                                      ChildReference dclRef,
+                                      boolean proactiveMigration)
+        throws DatabaseException {
+
+        DatabaseImpl db = din.getDatabase();
+
+        boolean migrateFlag = dclRef.getMigrate();
+        boolean isResident = (dclRef.getTarget() != null);
+        boolean isBinInDupDb = false;
+        long childLsn = dclRef.getLsn();
+
+        if (shouldMigrateLN
+            (migrateFlag, isResident, proactiveMigration, isBinInDupDb,
+             childLsn)) {
+
+            migrateDupCountLN
+                (db, childLsn, din, dclRef,
+                 migrateFlag, // wasCleaned
+                 false,       // isPending
+                 0,           // lockedPendingNodeId
+                 CLEAN_MIGRATE_LN);
+        }
+    }
+
+    /**
+     * Returns whether an LN entry should be migrated.  Updates stats.
+     *
+     * @param migrateFlag is whether the MIGRATE flag is set on the entry.
+     *
+     * @param isResident is whether the LN is currently resident.
+     *
+     * @param proactiveMigration whether to perform proactive migration if
+     * needed; this is false during a split, to reduce the delay in the user
+     * operation.
+     *
+     * @param isBinInDupDb is whether this is a BIN entry in a database with
+     * duplicates enabled.
+     *
+     * @param childLsn is the LSN of the LN.
+     *
+     * @return whether to migrate the LN.
+     */
+    private boolean shouldMigrateLN(boolean migrateFlag,
+                                    boolean isResident,
+                                    boolean proactiveMigration,
+                                    boolean isBinInDupDb,
+                                    long childLsn) {
+        boolean doMigration = false;
+
+        if (migrateFlag) {
+
+            /*
+             * Always try to migrate if the MIGRATE flag is set, since the LN
+             * has been processed.  If we did not migrate it, we would have to
+             * add it to the pending LN set.
+             */
+            doMigration = true;
+            nMarkedLNsProcessed++;
+
+        } else if (!proactiveMigration ||
+                   isBinInDupDb ||
+                   env.isClosing()) {
+
+            /*
+             * Do nothing if proactiveMigration is false, since all further
+             * migration is optional.
+             *
+             * Do nothing if this is a BIN in a duplicate database.  We
+             * must not fetch DINs, since this BIN may be about to be
+             * evicted.  Fetching a DIN would add it as an orphan to the
+             * INList, plus an IN with non-LN children is not evictable.
+             *
+             * Do nothing if the environment is shutting down and the
+             * MIGRATE flag is not set.  Proactive migration during
+             * shutdown is counterproductive -- it prevents a short final
+             * checkpoint, and it does not allow more files to be deleted.
+             */
+
+        } else {
+
+            Long fileNum = Long.valueOf(DbLsn.getFileNumber(childLsn));
+
+            if ((PROACTIVE_MIGRATION || isResident) &&
+                toBeCleanedFiles.contains(fileNum)) {
+
+                /* Migrate because it will be cleaned soon. */
+                doMigration = true;
+                nToBeCleanedLNsProcessed++;
+
+            } else if ((clusterAll || (clusterResident && isResident)) &&
+                lowUtilizationFiles.contains(fileNum)) {
+
+                /* Migrate for clustering. */
+                doMigration = true;
+                nClusterLNsProcessed++;
+            }
+        }
+
+        return doMigration;
+    }
+
+    /**
+     * Migrate an LN in the given BIN entry, if it is not obsolete.  The BIN is
+     * latched on entry to this method and is left latched when it returns.
+     */
+    private void migrateLN(DatabaseImpl db,
+                           long lsn,
+                           BIN bin,
+                           int index,
+                           boolean wasCleaned,
+                           boolean isPending,
+                           long lockedPendingNodeId,
+                           boolean backgroundIO,
+                           String cleanAction)
+        throws DatabaseException {
+
+        /* Status variables are used to generate debug tracing info. */
+        boolean obsolete = false;    // The LN is no longer in use.
+        boolean migrated = false;    // The LN was in use and is migrated.
+        boolean lockDenied = false;  // The LN lock was denied.
+        boolean completed = false;   // This method completed.
+        boolean clearTarget = false; // Node was non-resident when called.
+
+        /*
+         * If wasCleaned is false we don't count statistics unless we migrate
+         * the LN.  This avoids double counting.
+         */
+        BasicLocker locker = null;
+        LN ln = null;
+
+        try {
+            if (lsn == DbLsn.NULL_LSN) {
+                /* This node was never written, no need to migrate. */
+                completed = true;
+                return;
+            }
+
+            /*
+             * Fetch the node, if necessary.  If it was not resident and it is
+             * an evictable LN, we will clear it after we migrate it.
+             */
+            if (!bin.isEntryKnownDeleted(index)) {
+                ln = (LN) bin.getTarget(index);
+                if (ln == null) {
+                    /* If fetchTarget returns null, a deleted LN was cleaned. */
+                    ln = (LN) bin.fetchTarget(index);
+                    clearTarget = !db.getId().equals(DbTree.ID_DB_ID);
+                }
+            }
+
+            /* Don't migrate knownDeleted or deleted cleaned LNs. */
+            if (ln == null) {
+                if (wasCleaned) {
+                    nLNsDead++;
+                }
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            /*
+             * Get a non-blocking read lock on the LN.  A pending node is
+             * already locked, but that node ID may be different than the
+             * current LN's node if a slot is reused.  We must lock the current
+             * node to guard against aborts.
+             */
+            if (lockedPendingNodeId != ln.getNodeId()) {
+                locker = BasicLocker.createBasicLocker
+                    (env, false /*noWait*/, true /*noAPIReadLock*/);
+                LockResult lockRet = locker.nonBlockingLock
+                    (ln.getNodeId(), LockType.READ, db);
+                if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+
+                    /*
+                     * LN is currently locked by another Locker, so we can't
+                     * assume anything about the value of the LSN in the bin.
+                     */
+                    if (wasCleaned) {
+                        nLNsLocked++;
+                    }
+                    lockDenied = true;
+                    completed = true;
+                    return;
+                }
+            }
+
+            /* Don't migrate deleted LNs. */
+            if (ln.isDeleted()) {
+                bin.setKnownDeletedLeaveTarget(index);
+                if (wasCleaned) {
+                    nLNsDead++;
+                }
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            /*
+             * Once we have a lock, check whether the current LSN needs to be
+             * migrated.  There is no need to migrate it if the LSN no longer
+             * qualifies for cleaning.  The LSN could have been changed by an
+             * update or delete after we set the MIGRATE flag.
+             *
+             * Note that we do not perform this optimization if the MIGRATE
+             * flag is not set, i.e., for clustering and proactive migration of
+             * resident LNs.  For these cases, we checked the conditions for
+             * migration immediately before calling this method.  Although the
+             * condition could change after locking, the window is small and
+             * a second check is not worthwhile.
+             */
+            if (bin.getMigrate(index)) {
+                Long fileNum = Long.valueOf(DbLsn.getFileNumber(lsn));
+                if (!fileSelector.isFileCleaningInProgress(fileNum)) {
+                    obsolete = true;
+                    completed = true;
+                    if (wasCleaned) {
+                        nLNsDead++;
+                    }
+                    return;
+                }
+            }
+
+            /* Migrate the LN. */
+            byte[] key = getLNMainKey(bin, index);
+            long newLNLsn = ln.log(env, db, key, lsn, locker, backgroundIO,
+                                   ReplicationContext.NO_REPLICATE);
+            bin.updateEntry(index, newLNLsn);
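+            /*
+             * The slot now references the newly logged LSN, so the old
+             * entry becomes obsolete and no longer pins the file being
+             * cleaned.
+             */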
+            nLNsMigrated++;
+            migrated = true;
+            completed = true;
+            return;
+        } finally {
+            if (isPending) {
+                if (completed && !lockDenied) {
+                    fileSelector.removePendingLN(lockedPendingNodeId);
+                }
+            } else {
+
+                /*
+                 * If a to-be-migrated LN was not processed successfully, we
+                 * must guarantee that the file will not be deleted and that we
+                 * will retry the LN later.  The retry information must be
+                 * complete or we may delete a file later without processing
+                 * all of its LNs.
+                 *
+                 * Note that the LN may be null if fetchTarget threw an
+                 * exception above. [#16039]
+                 */
+                if (bin.getMigrate(index) &&
+                    (!completed || lockDenied) &&
+                    (ln != null)) {
+
+                    byte[] key = getLNMainKey(bin, index);
+                    byte[] dupKey = getLNDupKey(bin, index, ln);
+                    fileSelector.addPendingLN(ln, db.getId(), key, dupKey);
+
+                    /* Wake up the cleaner thread to process pending LNs. */
+                    if (!areThreadsRunning()) {
+                        env.getUtilizationTracker().activateCleaner();
+                    }
+
+                    /*
+                     * If we need to retry, don't clear the target since we
+                     * would only have to fetch it again soon.
+                     */
+                    clearTarget = false;
+                }
+            }
+
+            /*
+             * Always clear the migrate flag.  If the LN could not be locked
+             * and the migrate flag was set, the LN will have been added to the
+             * pending LN set above.
+             */
+            bin.setMigrate(index, false);
+
+            /*
+             * If the node was originally non-resident, clear it now so that we
+             * don't create more work for the evictor and reduce the cache
+             * memory available to the application.
+             */
+            if (clearTarget) {
+                bin.updateNode(index, null /*node*/, null /*lnSlotKey*/);
+            }
+
+            if (locker != null) {
+                locker.operationEnd();
+            }
+
+            trace(detailedTraceLevel, cleanAction, ln, lsn,
+                  completed, obsolete, migrated);
+        }
+    }
+
+    /**
+     * Migrate the DupCountLN for the given DIN.  The DIN is latched on entry
+     * to this method and is left latched when it returns.
+     */
+    private void migrateDupCountLN(DatabaseImpl db,
+                                   long lsn,
+                                   DIN parentDIN,
+                                   ChildReference dclRef,
+                                   boolean wasCleaned,
+                                   boolean isPending,
+                                   long lockedPendingNodeId,
+                                   String cleanAction)
+        throws DatabaseException {
+
+        /* Status variables are used to generate debug tracing info. */
+        boolean obsolete = false;    // The LN is no longer in use.
+        boolean migrated = false;    // The LN was in use and is migrated.
+        boolean lockDenied = false;  // The LN lock was denied.
+        boolean completed = false;   // This method completed.
+        boolean clearTarget = false; // Node was non-resident when called.
+
+        /*
+         * If wasCleaned is false we don't count statistics unless we migrate
+         * the LN.  This avoids double counting.
+         */
+        BasicLocker locker = null;
+        LN ln = null;
+
+        try {
+            if (lsn == DbLsn.NULL_LSN) {
+                /* This node was never written, no need to migrate. */
+                completed = true;
+                return;
+            }
+
+            /*
+             * Fetch the node, if necessary.  If it was not resident and it is
+             * an evictable LN, we will clear it after we migrate it.
+             */
+            ln = (LN) dclRef.getTarget();
+            if (ln == null) {
+                ln = (LN) dclRef.fetchTarget(db, parentDIN);
+                assert ln != null;
+                clearTarget = !db.getId().equals(DbTree.ID_DB_ID);
+            }
+
+            /*
+             * Get a non-blocking read lock on the LN, if this is not an
+             * already locked pending node.
+             */
+            if (lockedPendingNodeId != ln.getNodeId()) {
+                locker = BasicLocker.createBasicLocker
+                    (env, false /*noWait*/, true /*noAPIReadLock*/);
+                LockResult lockRet = locker.nonBlockingLock
+                    (ln.getNodeId(), LockType.READ, db);
+                if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+
+                    /*
+                     * LN is currently locked by another Locker, so we can't
+                     * assume anything about the value of the LSN in the bin.
+                     */
+                    if (wasCleaned) {
+                        nLNsLocked++;
+                    }
+                    lockDenied = true;
+                    completed = true;
+                    return;
+                }
+            }
+
+            /*
+             * Once we have a lock, check whether the current LSN needs to be
+             * migrated.  There is no need to migrate it if the LSN no longer
+             * qualifies for cleaning.
+             */
+            Long fileNum = Long.valueOf(DbLsn.getFileNumber(lsn));
+            if (!fileSelector.isFileCleaningInProgress(fileNum)) {
+                obsolete = true;
+                completed = true;
+                if (wasCleaned) {
+                    nLNsDead++;
+                }
+                return;
+            }
+
+            /* Migrate the LN. */
+            byte[] key = parentDIN.getDupKey();
+            long newLNLsn = ln.log(env, db, key, lsn, locker,
+                                   false, /* backgroundIO */
+                                   ReplicationContext.NO_REPLICATE);
+            parentDIN.updateDupCountLNRef(newLNLsn);
+            nLNsMigrated++;
+            migrated = true;
+            completed = true;
+            return;
+        } finally {
+            if (isPending) {
+                if (completed && !lockDenied) {
+                    fileSelector.removePendingLN(lockedPendingNodeId);
+                }
+            } else {
+
+                /*
+                 * If a to-be-migrated LN was not processed successfully, we
+                 * must guarantee that the file will not be deleted and that we
+                 * will retry the LN later.  The retry information must be
+                 * complete or we may delete a file later without processing
+                 * all of its LNs.
+                 *
+                 * Note that the LN may be null if fetchTarget threw an
+                 * exception above. [#16039]
+                 */
+                if (dclRef.getMigrate() &&
+                    (!completed || lockDenied) &&
+                    (ln != null)) {
+
+                    byte[] key = parentDIN.getDupKey();
+                    byte[] dupKey = null;
+                    fileSelector.addPendingLN(ln, db.getId(), key, dupKey);
+
+                    /* Wake up the cleaner thread to process pending LNs. */
+                    if (!areThreadsRunning()) {
+                        env.getUtilizationTracker().activateCleaner();
+                    }
+
+                    /*
+                     * If we need to retry, don't clear the target since we
+                     * would only have to fetch it again soon.
+                     */
+                    clearTarget = false;
+                }
+            }
+
+            /*
+             * Always clear the migrate flag.  If the LN could not be locked
+             * and the migrate flag was set, the LN will have been added to the
+             * pending LN set above.
+             */
+            dclRef.setMigrate(false);
+
+            /*
+             * If the node was originally non-resident, clear it now so that we
+             * don't create more work for the evictor and reduce the cache
+             * memory available to the application.
+             */
+            if (clearTarget) {
+                parentDIN.updateDupCountLN(null);
+            }
+
+            if (locker != null) {
+                locker.operationEnd();
+            }
+
+            trace(detailedTraceLevel, cleanAction, ln, lsn,
+                  completed, obsolete, migrated);
+        }
+    }
+
+    /**
+     * Returns the main key for a given BIN entry.
+     */
+    byte[] getLNMainKey(BIN bin, int index)
+        throws DatabaseException {
+
+        if (bin.containsDuplicates()) {
+            return bin.getDupKey();
+        } else {
+            return bin.getKey(index);
+        }
+    }
+
+    /**
+     * Returns the duplicate key for a given BIN entry.
+     */
+    private byte[] getLNDupKey(BIN bin, int index, LN ln)
+        throws DatabaseException {
+
+        DatabaseImpl db = bin.getDatabase();
+
+        if (!db.getSortedDuplicates() || ln.containsDuplicates()) {
+
+            /*
+             * The dup key is not needed for a non-duplicate DB or for a
+             * DupCountLN.
+             */
+            return null;
+
+        } else if (bin.containsDuplicates()) {
+
+            /* The DBIN entry key is the dup key. */
+            return bin.getKey(index);
+
+        } else {
+
+            /*
+             * The data is the dup key if the LN is not deleted.  If the LN is
+             * deleted, this method will return null and we will do a node ID
+             * search later when processing the pending LN.
+             */
+            return ln.getData();
+        }
+    }
+
+    /**
+     * Adds the DB ID to the pending DB set if it is being deleted but deletion
+     * is not yet complete.
+     */
+    void addPendingDB(DatabaseImpl db) {
+        if (db != null && db.isDeleted() && !db.isDeleteFinished()) {
+            DatabaseId id = db.getId();
+            if (fileSelector.addPendingDB(id)) {
+                Tracer.trace
+                    (detailedTraceLevel, env, "CleanAddPendingDB " + id);
+            }
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logger.  Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    void trace(Level level,
+               String action,
+               Node node,
+               long logLsn,
+               boolean completed,
+               boolean obsolete,
+               boolean dirtiedMigrated) {
+
+        Logger logger = env.getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(action);
+            if (node != null) {
+                sb.append(" node=");
+                sb.append(node.getNodeId());
+            }
+            sb.append(" logLsn=");
+            sb.append(DbLsn.getNoFormatString(logLsn));
+            sb.append(" complete=").append(completed);
+            sb.append(" obsolete=").append(obsolete);
+            sb.append(" dirtiedOrMigrated=").append(dirtiedMigrated);
+
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Release resources and update memory budget. Should only be called
+     * when this environment is closed and will never be accessed again.
+     */
+    public void close() {
+        profile.close();
+        tracker.close();
+        fileSelector.close(env.getMemoryBudget());
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/DbFileSummary.java b/src/com/sleepycat/je/cleaner/DbFileSummary.java
new file mode 100644
index 0000000000000000000000000000000000000000..745ee1ffaf177e882f5537de3f5dbb9ad9f0d0e4
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/DbFileSummary.java
@@ -0,0 +1,231 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbFileSummary.java,v 1.5.2.2 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.LogUtils;
+
+/**
+ * Per-DB-per-file utilization counters.  The DatabaseImpl stores a persistent
+ * map of file number to DbFileSummary.
+ */
+public class DbFileSummary implements Loggable {
+
+    /* Persistent fields. */
+    public int totalINCount;    // Number of IN log entries
+    public int totalINSize;     // Byte size of IN log entries
+    public int totalLNCount;    // Number of LN log entries
+    public int totalLNSize;     // Byte size of LN log entries
+    public int obsoleteINCount; // Number of obsolete IN log entries
+    public int obsoleteLNCount; // Number of obsolete LN log entries
+    public int obsoleteLNSize;  // Byte size of obsolete LN log entries
+    public int obsoleteLNSizeCounted;  // Number of obsolete LNs with size counted
+
+    /**
+     * Creates an empty summary.
+     */
+    public DbFileSummary() {
+    }
+
+    /**
+     * Returns whether this summary contains any non-zero totals.
+     */
+    public boolean isEmpty() {
+
+        return totalLNCount == 0 &&
+               totalINCount == 0 &&
+               obsoleteINCount == 0 &&
+               obsoleteLNCount == 0;
+    }
+
+    /**
+     * Returns the approximate byte size of all obsolete LN entries.  In
+     * FileSummaryLN version 3 and greater the exact tracked size is used.
+     */
+    public int getObsoleteLNSize() {
+
+        if (totalLNCount == 0) {
+            return 0;
+        }
+
+        /*
+         * Use the tracked obsolete size for all entries for which the size was
+         * counted, plus the average size for all obsolete entries whose size
+         * was not counted.
+         */
+        int obsolete = obsoleteLNSize;
+        int notCounted = obsoleteLNCount - obsoleteLNSizeCounted;
+        if (notCounted > 0) {
+            /* Use long arithmetic. */
+            long total = totalLNSize;
+            /* Scale by 256 to reduce integer truncation error. */
+            total <<= 8;
+            long avgSizePerLN = total / totalLNCount;
+            obsolete += (int) ((notCounted * avgSizePerLN) >> 8);
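+            /*
+             * Worked example with hypothetical numbers: totalLNSize=1000,
+             * totalLNCount=7, notCounted=6.  Plain integer math would give
+             * 6 * (1000 / 7) = 852, while the scaled math gives
+             * (6 * ((1000 << 8) / 7)) >> 8 = 857, close to the exact 857.14.
+             */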
+        }
+        return obsolete;
+    }
+
+    /**
+     * Returns the approximate byte size of all obsolete IN entries.
+     */
+    public int getObsoleteINSize() {
+
+        if (totalINCount == 0) {
+            return 0;
+        }
+        /* Use long arithmetic. */
+        long size = totalINSize;
+        /* Scale by 256 to reduce integer truncation error. */
+        size <<= 8;
+        long avgSizePerIN = size / totalINCount;
+        return (int) ((obsoleteINCount * avgSizePerIN) >> 8);
+    }
+
+    /**
+     * Returns an estimate of the total bytes that are obsolete.
+     */
+    public int getObsoleteSize() {
+        return getObsoleteLNSize() + getObsoleteINSize();
+    }
+
+    /**
+     * Returns the number of non-obsolete LN and IN entries.
+     */
+    public int getNonObsoleteCount() {
+        return totalLNCount +
+               totalINCount -
+               obsoleteLNCount -
+               obsoleteINCount;
+    }
+
+    /**
+     * Reset all totals to zero.
+     */
+    public void reset() {
+
+        totalINCount = 0;
+        totalINSize = 0;
+        totalLNCount = 0;
+        totalLNSize = 0;
+        obsoleteINCount = 0;
+        obsoleteLNCount = 0;
+        obsoleteLNSize = 0;
+        obsoleteLNSizeCounted = 0;
+    }
+
+    /**
+     * Add the totals of the given summary object to the totals of this object.
+     */
+    public void add(DbFileSummary o) {
+
+        totalINCount += o.totalINCount;
+        totalINSize += o.totalINSize;
+        totalLNCount += o.totalLNCount;
+        totalLNSize += o.totalLNSize;
+        obsoleteINCount += o.obsoleteINCount;
+        obsoleteLNCount += o.obsoleteLNCount;
+        obsoleteLNSize += o.obsoleteLNSize;
+        obsoleteLNSizeCounted += o.obsoleteLNSizeCounted;
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getPackedIntLogSize(totalINCount) +
+            LogUtils.getPackedIntLogSize(totalINSize) +
+            LogUtils.getPackedIntLogSize(totalLNCount) +
+            LogUtils.getPackedIntLogSize(totalLNSize) +
+            LogUtils.getPackedIntLogSize(obsoleteINCount) +
+            LogUtils.getPackedIntLogSize(obsoleteLNCount) +
+            LogUtils.getPackedIntLogSize(obsoleteLNSize) +
+            LogUtils.getPackedIntLogSize(obsoleteLNSizeCounted);
+    }
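+
+    /*
+     * Note that getLogSize, writeToLog and readFromLog must agree field for
+     * field: all three walk the same eight packed ints in the same order.
+     */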
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer buf) {
+
+        LogUtils.writePackedInt(buf, totalINCount);
+        LogUtils.writePackedInt(buf, totalINSize);
+        LogUtils.writePackedInt(buf, totalLNCount);
+        LogUtils.writePackedInt(buf, totalLNSize);
+        LogUtils.writePackedInt(buf, obsoleteINCount);
+        LogUtils.writePackedInt(buf, obsoleteLNCount);
+        LogUtils.writePackedInt(buf, obsoleteLNSize);
+        LogUtils.writePackedInt(buf, obsoleteLNSizeCounted);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer buf, byte entryTypeVersion) {
+
+        totalINCount = LogUtils.readPackedInt(buf);
+        totalINSize = LogUtils.readPackedInt(buf);
+        totalLNCount = LogUtils.readPackedInt(buf);
+        totalLNSize = LogUtils.readPackedInt(buf);
+        obsoleteINCount = LogUtils.readPackedInt(buf);
+        obsoleteLNCount = LogUtils.readPackedInt(buf);
+        obsoleteLNSize = LogUtils.readPackedInt(buf);
+        obsoleteLNSizeCounted = LogUtils.readPackedInt(buf);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer buf, boolean verbose) {
+
+        buf.append("<summary totalINCount=\"");
+        buf.append(totalINCount);
+        buf.append("\" totalINSize=\"");
+        buf.append(totalINSize);
+        buf.append("\" totalLNCount=\"");
+        buf.append(totalLNCount);
+        buf.append("\" totalLNSize=\"");
+        buf.append(totalLNSize);
+        buf.append("\" obsoleteINCount=\"");
+        buf.append(obsoleteINCount);
+        buf.append("\" obsoleteLNCount=\"");
+        buf.append(obsoleteLNCount);
+        buf.append("\" obsoleteLNSize=\"");
+        buf.append(obsoleteLNSize);
+        buf.append("\" obsoleteLNSizeCounted=\"");
+        buf.append(obsoleteLNSizeCounted);
+        buf.append("\"/>");
+    }
+
+    /**
+     * Never called.
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        dumpLog(buf, true);
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java b/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..3c454757c79fc6c50ad6c64f492b01ad0517266e
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbFileSummaryMap.java,v 1.6.2.3 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.FileManager;
+
+public class DbFileSummaryMap {
+
+    private static final int FILE_ENTRY_OVERHEAD =
+        MemoryBudget.HASHMAP_ENTRY_OVERHEAD +
+        MemoryBudget.LONG_OVERHEAD +
+        MemoryBudget.DBFILESUMMARY_OVERHEAD;
+
+    private Map<Long, DbFileSummary> map;
+    private int memSize;
+    private MemoryBudget budget;
+
+    /**
+     * Creates a map of Long file number to DbFileSummary.  The init() method
+     * must be called after creating this object.
+     *
+     * <p>Always counts this object and its contained objects in the memory
+     * budget.  If countParentMapEntry is true, also counts a single HashMap
+     * entry that contains this object.  This option allows all memory budget
+     * adjustments for LocalUtilizationTracker to be contained in this
+     * class.</p>
+     */
+    public DbFileSummaryMap(boolean countParentMapEntry) {
+        map = new HashMap<Long, DbFileSummary>();
+        memSize = MemoryBudget.HASHMAP_OVERHEAD;
+        if (countParentMapEntry) {
+            memSize += MemoryBudget.HASHMAP_ENTRY_OVERHEAD;
+        }
+    }
+
+    /**
+     * Starts memory budgeting.  The map and its entries will be counted in
+     * the budget.  When adding entries via the get() method prior to calling
+     * this method, the adjustMemBudget parameter must be false.  After calling
+     * this method, the adjustMemBudget parameter must be true.
+     *
+     * <p>This method is separate from the constructor so that the map may be
+     * read from the log without having the EnvironmentImpl object
+     * available.</p>
+     */
+    public void init(EnvironmentImpl env) {
+        budget = env.getMemoryBudget();
+        budget.updateTreeAdminMemoryUsage(memSize);
+    }
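+
+    /*
+     * Typical lifecycle, as a sketch ("env" and "fileNum" stand for an
+     * available EnvironmentImpl and a Long file number):
+     *
+     *   DbFileSummaryMap map = new DbFileSummaryMap(false);
+     *   map.get(fileNum, false, false, null); // before init: not budgeted
+     *   map.init(env);                        // start memory budgeting
+     *   map.get(fileNum, true, false, null);  // after init: budgeted
+     */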
+
+    /**
+     * Returns the DbFileSummary for the given file, allocating it if
+     * necessary.
+     *
+     * <p>Must be called under the log write latch.</p>
+     *
+     * @param fileNum the file identifying the summary.
+     *
+     * @param adjustMemBudget see init().
+     *
+     * @param checkResurrected is true if this method should check fileNum and
+     * return null if the file does not exist.
+     *
+     * @param fileManager is used to check for resurrected files and may be null
+     * if checkResurrected is false.
+     */
+    public DbFileSummary get(Long fileNum,
+                             boolean adjustMemBudget,
+                             boolean checkResurrected,
+                             FileManager fileManager) {
+
+        assert adjustMemBudget == (budget != null);
+
+        /*
+         * Note that both calls below to isFileValid (which calls File.exists)
+         * only occur if the file number is less than the last file in the log,
+         * and the file is not already present in the map.  When the file is
+         * not the last file, we are recording obsoleteness and the file should
+         * already be in the map.  So we only incur the overhead of File.exists
+         * when resurrecting a file, which should be pretty rare.
+         */
+        DbFileSummary summary = map.get(fileNum);
+        if (summary == null) {
+            if (checkResurrected &&
+                fileNum < fileManager.getCurrentFileNum() &&
+                !fileManager.isFileValid(fileNum)) {
+                /* Will return null. */
+            } else {
+                summary = new DbFileSummary();
+                Object oldVal = map.put(fileNum, summary);
+                assert oldVal == null;
+                memSize += FILE_ENTRY_OVERHEAD;
+                if (adjustMemBudget) {
+                    budget.updateTreeAdminMemoryUsage(FILE_ENTRY_OVERHEAD);
+                }
+                /* Double-check that we never add a deleted file. */
+                assert fileManager == null ||
+                       fileNum == fileManager.getCurrentFileNum() ||
+                       fileManager.isFileValid(fileNum) :
+                       "Resurrected file: 0x" + Long.toHexString(fileNum);
+
+            }
+        }
+        return summary;
+    }
+
+    /**
+     * Removes the DbFileSummary for the given file.
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    public boolean remove(Long fileNum) {
+        if (map.remove(fileNum) != null) {
+            budget.updateTreeAdminMemoryUsage(0 - FILE_ENTRY_OVERHEAD);
+            memSize -= FILE_ENTRY_OVERHEAD;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /*
+     * Get this map's memory size. Usually it's built up over time and added to
+     * the global memory budget, but this is used to reinitialize the memory
+     * budget after recovery, when DbFileSummaryMaps may be cut adrift by the
+     * process of overlaying new portions of the btree.
+     */
+    public long getMemorySize() {
+        return memSize;
+    }
+
+    public void subtractFromMemoryBudget() {
+        /* May not have been initialized if it was read by a FileReader */
+        if (budget != null) {
+            budget.updateTreeAdminMemoryUsage(0 - memSize);
+            memSize = 0;
+        }
+    }
+
+    public Set<Map.Entry<Long,DbFileSummary>> entrySet() {
+        return map.entrySet();
+    }
+
+    public boolean contains(Long fileNum) {
+        return map.containsKey(fileNum);
+    }
+
+    public int size() {
+        return map.size();
+    }
+
+    @Override
+    public String toString() {
+        return map.toString();
+    }
+
+    /**
+     * Removes entries for deleted files that were created by JE 3.3.74 and
+     * earlier.  [#16610]
+     */
+    public void repair(EnvironmentImpl env) {
+        Long[] existingFiles = env.getFileManager().getAllFileNumbers();
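+        /*
+         * Arrays.binarySearch below relies on getAllFileNumbers returning
+         * the file numbers in ascending sorted order.
+         */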
+        Iterator<Long> iter = map.keySet().iterator();
+        while (iter.hasNext()) {
+            Long fileNum = iter.next();
+            if (Arrays.binarySearch(existingFiles, fileNum) < 0) {
+                iter.remove();
+                budget.updateTreeAdminMemoryUsage(0 - FILE_ENTRY_OVERHEAD);
+                memSize -= FILE_ENTRY_OVERHEAD;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/FileProcessor.java b/src/com/sleepycat/je/cleaner/FileProcessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..f671461bfc725468114a55d4a42bcaa351313d2e
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/FileProcessor.java
@@ -0,0 +1,1312 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileProcessor.java,v 1.44.2.5 2010/01/30 01:10:55 mark Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.logging.Level;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.CleanerFileReader;
+import com.sleepycat.je.log.LogFileNotFoundException;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.utilint.DaemonThread;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Reads all entries in a log file and either determines them to be obsolete or
+ * marks them for migration.  LNs are marked for migration by setting the BIN
+ * entry MIGRATE flag.  INs are marked for migration by setting the dirty flag.
+ *
+ * May be invoked explicitly by calling doClean, or woken up if used as a
+ * daemon thread.
+ */
+class FileProcessor extends DaemonThread {
+
+    /**
+     * The number of LN log entries after we process pending LNs.  If we do
+     * this too seldom, the pending LN queue may grow large, and it isn't
+     * budgeted memory.  If we process it too often, we will repeatedly request
+     * a non-blocking lock for the same locked node.
+     */
+    private static final int PROCESS_PENDING_EVERY_N_LNS = 100;
+
+    /**
+     * Whether to prohibit BINDeltas for a BIN that is fetched by the cleaner.
+     * The theory is that when fetching a BIN during cleaning we normally
+     * expect that the BIN will be evicted soon, and a delta during checkpoint
+     * would be wasted.  However, this does not take into account use of the
+     * BIN by the application after fetching; the BIN could become hot and then
+     * deltas may be profitable.  To be safe we currently allow deltas when
+     * fetching.
+     */
+    private static final boolean PROHIBIT_DELTAS_WHEN_FETCHING = false;
+
+    private static final boolean DEBUG_TRACING = false;
+
+    private EnvironmentImpl env;
+    private Cleaner cleaner;
+    private FileSelector fileSelector;
+    private UtilizationProfile profile;
+
+    /* Log version for the target file. */
+    private int fileLogVersion;
+
+    /* Per-run counters.  Reset before each file is processed. */
+    private int nINsObsoleteThisRun = 0;
+    private int nINsCleanedThisRun = 0;
+    private int nINsDeadThisRun = 0;
+    private int nINsMigratedThisRun = 0;
+    private int nLNsObsoleteThisRun = 0;
+    private int nLNsCleanedThisRun = 0;
+    private int nLNsDeadThisRun = 0;
+    private int nLNsLockedThisRun = 0;
+    private int nLNsMigratedThisRun = 0;
+    private int nLNsMarkedThisRun = 0;
+    private int nLNQueueHitsThisRun = 0;
+    private int nEntriesReadThisRun;
+    private long nRepeatIteratorReadsThisRun;
+
+    FileProcessor(String name,
+                  EnvironmentImpl env,
+                  Cleaner cleaner,
+                  UtilizationProfile profile,
+                  FileSelector fileSelector) {
+        super(0, name, env);
+        this.env = env;
+        this.cleaner = cleaner;
+        this.fileSelector = fileSelector;
+        this.profile = profile;
+    }
+
+    public void clearEnv() {
+        env = null;
+        cleaner = null;
+        fileSelector = null;
+        profile = null;
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries()
+        throws DatabaseException {
+
+        return cleaner.nDeadlockRetries;
+    }
+
+    /**
+     * Activates the cleaner.  Normally called when je.cleaner.bytesInterval
+     * bytes have been written to the log.
+     */
+    public void onWakeup()
+        throws DatabaseException {
+
+        doClean(true,   // invokedFromDaemon
+                true,   // cleanMultipleFiles
+                false); // forceCleaning
+    }
+
+    /**
+     * Cleans selected files and returns the number of files cleaned.  May be
+     * called by the daemon thread or programmatically.
+     *
+     * @param invokedFromDaemon currently has no effect.
+     *
+     * @param cleanMultipleFiles is true to clean until we're under budget,
+     * or false to clean at most one file.
+     *
+     * @param forceCleaning is true to clean even if we're not under the
+     * utilization threshold.
+     *
+     * @return the number of files cleaned, not including files cleaned
+     * unsuccessfully.
+     */
+    public synchronized int doClean(boolean invokedFromDaemon,
+                                    boolean cleanMultipleFiles,
+                                    boolean forceCleaning)
+        throws DatabaseException {
+
+        if (env.isClosed()) {
+            return 0;
+        }
+
+        /* Clean until no more files are selected.  */
+        int nOriginalLogFiles = profile.getNumberOfFiles();
+        int nFilesCleaned = 0;
+        while (true) {
+
+            /* Don't clean forever. */
+            if (nFilesCleaned >= nOriginalLogFiles) {
+                break;
+            }
+
+            /* Stop if the daemon is paused or the environment is closing. */
+            if ((invokedFromDaemon && isPaused()) || env.isClosing()) {
+                break;
+            }
+
+            /*
+             * Process pending LNs periodically.  Pending LNs can prevent file
+             * deletion.  Do not call deleteSafeToDeleteFiles here, since
+             * cleaner threads will block while the checkpointer deletes log
+             * files, which can be time-consuming.
+             */
+            cleaner.processPending();
+
+            /*
+             * Select the next file for cleaning and update the Cleaner's
+             * read-only file collections.
+             */
+            boolean needLowUtilizationSet =
+                cleaner.clusterResident || cleaner.clusterAll;
+
+            Long fileNum = fileSelector.selectFileForCleaning
+                (profile, forceCleaning, needLowUtilizationSet,
+                 cleaner.maxBatchFiles);
+
+            cleaner.updateReadOnlyFileCollections();
+
+            /*
+             * If no file was selected, the total utilization is under the
+             * threshold and we can stop cleaning.
+             */
+            if (fileNum == null) {
+                break;
+            }
+
+            /*
+             * Clean the selected file.
+             */
+            resetPerRunCounters();
+            boolean finished = false;
+            boolean fileDeleted = false;
+            long fileNumValue = fileNum.longValue();
+            long runId = ++cleaner.nCleanerRuns;
+            MemoryBudget budget = env.getMemoryBudget();
+            try {
+
+                String traceMsg =
+                    "CleanerRun " + runId +
+                    " on file 0x" + Long.toHexString(fileNumValue) +
+                    " begins backlog=" + cleaner.getBacklog();
+                Tracer.trace(Level.INFO, env, traceMsg);
+                if (DEBUG_TRACING) {
+                    System.out.println("\n" + traceMsg);
+                }
+
+                /* Clean all log entries in the file. */
+                Set<DatabaseId> databases = new HashSet<DatabaseId>();
+                if (processFile(fileNum, databases)) {
+                    /* File is fully processed, update status information. */
+                    fileSelector.addCleanedFile(fileNum, databases, budget);
+                    nFilesCleaned += 1;
+                    accumulatePerRunCounters();
+                    finished = true;
+                }
+            } catch (LogFileNotFoundException e) {
+
+                /*
+                 * File was deleted.  Although it is possible that the file was
+                 * deleted externally it is much more likely that the file was
+                 * deleted normally after being cleaned earlier.  This can
+                 * occur when tracked obsolete information is collected and
+                 * processed after the file has been cleaned and deleted.
+                 * Since the file does not exist, ignore the error so that the
+                 * cleaner will continue.  The tracing below will indicate that
+                 * the file was deleted.  Remove the file completely from the
+                 * FileSelector and UtilizationProfile so that we don't
+                 * repeatedly attempt to process it. [#15528]
+                 */
+                fileDeleted = true;
+                profile.removeFile(fileNum, null /*databases*/);
+                fileSelector.removeAllFileReferences(fileNum, budget);
+            } catch (IOException e) {
+                Tracer.trace(env, "Cleaner", "doClean", "", e);
+                throw new DatabaseException(e);
+            } finally {
+                if (!finished && !fileDeleted) {
+                    fileSelector.putBackFileForCleaning(fileNum);
+                }
+                String traceMsg =
+                    "CleanerRun " + runId +
+                    " on file 0x" + Long.toHexString(fileNumValue) +
+                    " invokedFromDaemon=" + invokedFromDaemon +
+                    " finished=" + finished +
+                    " fileDeleted=" + fileDeleted +
+                    " nEntriesRead=" + nEntriesReadThisRun +
+                    " nINsObsolete=" + nINsObsoleteThisRun +
+                    " nINsCleaned=" + nINsCleanedThisRun +
+                    " nINsDead=" + nINsDeadThisRun +
+                    " nINsMigrated=" + nINsMigratedThisRun +
+                    " nLNsObsolete=" + nLNsObsoleteThisRun +
+                    " nLNsCleaned=" + nLNsCleanedThisRun +
+                    " nLNsDead=" + nLNsDeadThisRun +
+                    " nLNsMigrated=" + nLNsMigratedThisRun +
+                    " nLNsMarked=" + nLNsMarkedThisRun +
+                    " nLNQueueHits=" + nLNQueueHitsThisRun +
+                    " nLNsLocked=" + nLNsLockedThisRun;
+                Tracer.trace(Level.SEVERE, env, traceMsg);
+                if (DEBUG_TRACING) {
+                    System.out.println("\n" + traceMsg);
+                }
+            }
+
+            /* If we should only clean one file, stop now. */
+            if (!cleanMultipleFiles) {
+                break;
+            }
+        }
+
+        return nFilesCleaned;
+    }
+
+    /**
+     * Process all log entries in the given file.
+     *
+     * Note that we check for obsolete entries using the active TFS
+     * (TrackedFileSummary) for a file while it is being processed, and we
+     * prohibit flushing (eviction) of that offset information until file
+     * processing is complete.  An entry could become obsolete because (1)
+     * normal application activity deletes or updates the entry, (2) proactive
+     * migration migrates the entry before we process it, or (3) trackDetail
+     * is false.  However, checking the TFS is expensive if it has many
+     * entries, because we perform a linear search.  There is a tradeoff
+     * between the cost of the TFS lookup and its benefit, which is to avoid a
+     * tree search if the entry is obsolete.  Note that many more lookups for
+     * non-obsolete entries than obsolete entries will typically be done.  In
+     * spite of that we check the tracked summary to avoid the situation where
+     * eviction does proactive migration, and evicts a BIN that is very soon
+     * afterward fetched during cleaning.
+     *
+     * @param fileNum the file being cleaned.
+     * @param databases on return will contain the DatabaseIds of all entries
+     * in the cleaned file.
+     * @return false if we aborted file processing because the environment is
+     * being closed.
+     */
+    private boolean processFile(Long fileNum, Set<DatabaseId> databases)
+        throws DatabaseException, IOException {
+
+        /* Get the current obsolete offsets for this file. */
+        PackedOffsets obsoleteOffsets = new PackedOffsets();
+        TrackedFileSummary tfs =
+            profile.getObsoleteDetail(fileNum,
+                                      obsoleteOffsets,
+                                      true /* logUpdate */);
+        PackedOffsets.Iterator obsoleteIter = obsoleteOffsets.iterator();
+        long nextObsolete = -1;
+
+        /* Keep in local variables because they are mutable properties. */
+        final int readBufferSize = cleaner.readBufferSize;
+        int lookAheadCacheSize = cleaner.lookAheadCacheSize;
+
+        /*
+         * Add the overhead of this method to the budget.  Two read buffers are
+         * allocated by the file reader. The log size of the offsets happens to
+         * be the same as the memory overhead.
+         */
+        int adjustMem = (2 * readBufferSize) +
+                        obsoleteOffsets.getLogSize() +
+                        lookAheadCacheSize;
+        MemoryBudget budget = env.getMemoryBudget();
+        budget.updateAdminMemoryUsage(adjustMem);
+
+        /* Evict after updating the budget. */
+        if (Cleaner.DO_CRITICAL_EVICTION) {
+            env.getEvictor().doCriticalEviction(true); // backgroundIO
+        }
+
+        /*
+         * We keep a look-ahead cache of non-obsolete LNs.  When we look up a
+         * BIN in processLN, we also process any other LNs in that BIN that are
+         * in the cache.  This can reduce the number of tree lookups.
+         */
+        LookAheadCache lookAheadCache = new LookAheadCache(lookAheadCacheSize);
+
+        /*
+         * For obsolete entries we must check for pending deleted DBs.  To
+         * avoid the overhead of DbTree.getDb on every entry we keep a set of
+         * all DB IDs encountered and do the check once per DB at the end.
+         */
+        Set<DatabaseId> checkPendingDbSet = new HashSet<DatabaseId>();
+
+        /*
+         * Use local caching to reduce DbTree.getDb overhead.  Do not call
+         * releaseDb after getDb with the dbCache, since the entire dbCache
+         * will be released at the end of this method.
+         */
+        Map<DatabaseId, DatabaseImpl> dbCache =
+            new HashMap<DatabaseId, DatabaseImpl>();
+        DbTree dbMapTree = env.getDbTree();
+
+        try {
+            /* Create the file reader. */
+            CleanerFileReader reader = new CleanerFileReader
+                (env, readBufferSize, DbLsn.NULL_LSN, fileNum);
+            /* Validate all entries before ever deleting a file. */
+            reader.setAlwaysValidateChecksum(true);
+
+            TreeLocation location = new TreeLocation();
+
+            int nProcessedLNs = 0;
+            while (reader.readNextEntry()) {
+                cleaner.nEntriesRead += 1;
+                long logLsn = reader.getLastLsn();
+                long fileOffset = DbLsn.getFileOffset(logLsn);
+                boolean isLN = reader.isLN();
+                boolean isIN = reader.isIN();
+                boolean isRoot = reader.isRoot();
+                boolean isObsolete = false;
+
+                /* Maintain a set of all databases encountered. */
+                DatabaseId dbId = reader.getDatabaseId();
+                if (dbId != null) {
+                    databases.add(dbId);
+                }
+
+                if (reader.isFileHeader()) {
+                    fileLogVersion = reader.getFileHeader().getLogVersion();
+                }
+
+                /* Stop if the daemon is shut down. */
+                if (env.isClosing()) {
+                    return false;
+                }
+
+                /* Update background reads. */
+                int nReads = reader.getAndResetNReads();
+                if (nReads > 0) {
+                    env.updateBackgroundReads(nReads);
+                }
+
+                /* Sleep if background read/write limit was exceeded. */
+                env.sleepAfterBackgroundIO();
+
+                /* Check for a known obsolete node. */
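+                /*
+                 * The packed obsolete offsets are sorted, so we can advance
+                 * the iterator in lock step with the sequential file reader.
+                 */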
+                while (nextObsolete < fileOffset && obsoleteIter.hasNext()) {
+                    nextObsolete = obsoleteIter.next();
+                }
+                if (nextObsolete == fileOffset) {
+                    isObsolete = true;
+                }
+
+                /* Check for the entry type next because it is very cheap. */
+                if (!isObsolete &&
+                    !isLN &&
+                    !isIN &&
+                    !isRoot) {
+                    /* Consider all entries we do not process as obsolete. */
+                    isObsolete = true;
+                }
+
+                /*
+                 * SR 14583: In JE 2.0 and later we can assume that all
+                 * deleted LNs are obsolete. Either the delete committed and
+                 * the BIN parent is marked with a pending deleted bit, or the
+                 * delete rolled back, in which case there is no reference
+                 * to this entry. JE 1.7.1 and earlier require a tree lookup
+                 * because deleted LNs may still be reachable through their BIN
+                 * parents.
+                 */
+                if (!isObsolete &&
+                    isLN &&
+                    reader.getLN().isDeleted() &&
+                    fileLogVersion > 2) {
+                    /* Deleted LNs are always obsolete. */
+                    isObsolete = true;
+                }
+
+                /* Check the current tracker last, as it is more expensive. */
+                if (!isObsolete &&
+                    tfs != null &&
+                    tfs.containsObsoleteOffset(fileOffset)) {
+                    isObsolete = true;
+                }
+
+                /* Skip known obsolete nodes. */
+                if (isObsolete) {
+                    /* Count obsolete stats. */
+                    if (isLN) {
+                        nLNsObsoleteThisRun++;
+                    } else if (isIN) {
+                        nINsObsoleteThisRun++;
+                    }
+                    /* Must update the pending DB set for obsolete entries. */
+                    if (dbId != null) {
+                        checkPendingDbSet.add(dbId);
+                    }
+                    continue;
+                }
+
+                /* Evict before processing each entry. */
+                if (Cleaner.DO_CRITICAL_EVICTION) {
+                    env.getEvictor().doCriticalEviction(true); // backgroundIO
+                }
+
+                /* The entry is not known to be obsolete -- process it now. */
+                if (isLN) {
+
+                    LN targetLN = reader.getLN();
+                    byte[] key = reader.getKey();
+                    byte[] dupKey = reader.getDupTreeKey();
+
+                    lookAheadCache.add
+                        (Long.valueOf(DbLsn.getFileOffset(logLsn)),
+                         new LNInfo(targetLN, dbId, key, dupKey));
+
+                    if (lookAheadCache.isFull()) {
+                        processLN(fileNum, location, lookAheadCache, dbCache);
+                    }
+
+                    /*
+                     * Process pending LNs before proceeding in order to
+                     * prevent the pending list from growing too large.
+                     */
+                    nProcessedLNs += 1;
+                    if (nProcessedLNs % PROCESS_PENDING_EVERY_N_LNS == 0) {
+                        cleaner.processPending();
+                    }
+
+                } else if (isIN) {
+
+                    IN targetIN = reader.getIN();
+                    DatabaseImpl db = dbMapTree.getDb
+                        (dbId, cleaner.lockTimeout, dbCache);
+                    targetIN.setDatabase(db);
+
+                    processIN(targetIN, db, logLsn);
+
+                } else if (isRoot) {
+
+                    env.rewriteMapTreeRoot(logLsn);
+                } else {
+                    assert false;
+                }
+            }
+
+            /* Process remaining queued LNs. */
+            while (!lookAheadCache.isEmpty()) {
+                if (Cleaner.DO_CRITICAL_EVICTION) {
+                    env.getEvictor().doCriticalEviction(true); // backgroundIO
+                }
+                processLN(fileNum, location, lookAheadCache, dbCache);
+                /* Sleep if background read/write limit was exceeded. */
+                env.sleepAfterBackgroundIO();
+            }
+
+            /* Update the pending DB set. */
+            for (Iterator<DatabaseId> i = checkPendingDbSet.iterator();
+                 i.hasNext();) {
+                DatabaseId pendingDbId = i.next();
+                DatabaseImpl db = dbMapTree.getDb
+                    (pendingDbId, cleaner.lockTimeout, dbCache);
+                cleaner.addPendingDB(db);
+            }
+
+            /* Update reader stats. */
+            nEntriesReadThisRun = reader.getNumRead();
+            nRepeatIteratorReadsThisRun = reader.getNRepeatIteratorReads();
+
+        } finally {
+            /* Subtract the overhead of this method from the budget. */
+            budget.updateAdminMemoryUsage(0 - adjustMem);
+
+            /* Release all cached DBs. */
+            dbMapTree.releaseDbs(dbCache);
+
+            /* Allow flushing of TFS when cleaning is complete. */
+            if (tfs != null) {
+                tfs.setAllowFlush(true);
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Unit testing.  Simulates processing of a single LN.
+     */
+    void testProcessLN(LN targetLN,
+                       long logLsn,
+                       byte[] key,
+                       byte[] dupKey,
+                       DatabaseId dbId,
+                       Map<DatabaseId, DatabaseImpl> dbCache)
+        throws DatabaseException {
+
+        LookAheadCache lookAheadCache = new LookAheadCache(1);
+
+        lookAheadCache.add
+            (Long.valueOf(DbLsn.getFileOffset(logLsn)),
+             new LNInfo(targetLN, dbId, key, dupKey));
+
+        processLN(DbLsn.getFileNumber(logLsn), new TreeLocation(),
+                  lookAheadCache, dbCache);
+    }
+
+    /**
+     * Processes the first LN in the look ahead cache and removes it from the
+     * cache.  While the BIN is latched, look through the BIN for other LNs in
+     * the cache; if any match, process them to avoid a tree search later.
+     */
+    private void processLN(Long fileNum,
+                           TreeLocation location,
+                           LookAheadCache lookAheadCache,
+                           Map<DatabaseId, DatabaseImpl> dbCache)
+        throws DatabaseException {
+
+        nLNsCleanedThisRun++;
+
+        /* Get the first LN from the queue. */
+        Long offset = lookAheadCache.nextOffset();
+        LNInfo info = lookAheadCache.remove(offset);
+
+        LN ln = info.getLN();
+        byte[] key = info.getKey();
+        byte[] dupKey = info.getDupKey();
+
+        long logLsn = DbLsn.makeLsn
+            (fileNum.longValue(), offset.longValue());
+
+        /*
+         * Do not call releaseDb after this getDb, since the entire dbCache
+         * will be released later.
+         */
+        DatabaseImpl db = env.getDbTree().getDb
+            (info.getDbId(), cleaner.lockTimeout, dbCache);
+
+        /* Status variables are used to generate debug tracing info. */
+        boolean processedHere = true; // The LN was cleaned here.
+        boolean obsolete = false;     // The LN is no longer in use.
+        boolean completed = false;    // This method completed.
+
+        BIN bin = null;
+        DIN parentDIN = null;      // for DupCountLNs
+        try {
+
+            /*
+             * If the DB is gone, this LN is obsolete.  If delete cleanup is in
+             * progress, put the DB into the DB pending set; this LN will be
+             * declared deleted after the delete cleanup is finished.
+             */
+            if (db == null || db.isDeleted()) {
+                cleaner.addPendingDB(db);
+                nLNsDeadThisRun++;
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            Tree tree = db.getTree();
+            assert tree != null;
+
+            /*
+             * Search down to the bottommost level for the parent of this LN.
+             */
+            boolean parentFound = tree.getParentBINForChildLN
+                (location, key, dupKey, ln,
+                 false,  // splitsAllowed
+                 true,   // findDeletedEntries
+                 false,  // searchDupTree
+                 Cleaner.UPDATE_GENERATION);
+            bin = location.bin;
+            int index = location.index;
+
+            if (!parentFound) {
+
+                nLNsDeadThisRun++;
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            /*
+             * Now we're at the parent for this LN, whether BIN, DBIN or DIN.
+             * If knownDeleted, LN is deleted and can be purged.
+             */
+            if (bin.isEntryKnownDeleted(index)) {
+                nLNsDeadThisRun++;
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            /*
+             * Determine whether the parent is the current BIN, or in the case
+             * of a DupCountLN, a DIN.  Get the tree LSN in either case.
+             */
+            boolean isDupCountLN = ln.containsDuplicates();
+            long treeLsn;
+            if (isDupCountLN) {
+                parentDIN = (DIN) bin.fetchTarget(index);
+                parentDIN.latch(Cleaner.UPDATE_GENERATION);
+                ChildReference dclRef = parentDIN.getDupCountLNRef();
+                treeLsn = dclRef.getLsn();
+            } else {
+                treeLsn = bin.getLsn(index);
+            }
+
+            /* Process this LN that was found in the tree. */
+            processedHere = false;
+            processFoundLN(info, logLsn, treeLsn, bin, index, parentDIN);
+            completed = true;
+
+            /*
+             * For all other non-deleted LNs in this BIN, look up their LSNs
+             * in the LN queue and process any matches.
+             */
+            if (!isDupCountLN) {
+
+                /*
+                 * For deferred write DBs with duplicates, the entry for an LSN
+                 * that matches may contain a DIN, and should not be processed.
+                 * This occurs when the LN has been moved from the BIN into a
+                 * duplicate subtree and the DIN has not been logged. [#16039]
+                 */
+                boolean isBinInDupDwDb = db.isDeferredWriteMode() &&
+                                         db.getSortedDuplicates() &&
+                                         !bin.containsDuplicates();
+
+                for (int i = 0; i < bin.getNEntries(); i += 1) {
+                    long binLsn = bin.getLsn(i);
+                    if (i != index &&
+                        !bin.isEntryKnownDeleted(i) &&
+                        !bin.isEntryPendingDeleted(i) &&
+                        DbLsn.getFileNumber(binLsn) == fileNum.longValue()) {
+
+                        Long myOffset =
+                            Long.valueOf(DbLsn.getFileOffset(binLsn));
+                        LNInfo myInfo;
+                        if (isBinInDupDwDb &&
+                            bin.getTarget(i) instanceof DIN) {
+                            /* LN is in the dup subtree, it's not a match. */
+                            myInfo = null;
+                        } else {
+                            /* If the offset is in the cache, it's a match. */
+                            myInfo = lookAheadCache.remove(myOffset);
+                        }
+
+                        if (myInfo != null) {
+                            nLNQueueHitsThisRun++;
+                            nLNsCleanedThisRun++;
+                            processFoundLN
+                                (myInfo, binLsn, binLsn, bin, i, null);
+                        }
+                    }
+                }
+            }
+            return;
+
+        } finally {
+            if (parentDIN != null) {
+                parentDIN.releaseLatch();
+            }
+
+            if (bin != null) {
+                bin.releaseLatch();
+            }
+
+            if (processedHere) {
+                cleaner.trace
+                    (cleaner.detailedTraceLevel, Cleaner.CLEAN_LN, ln, logLsn,
+                     completed, obsolete, false /*migrated*/);
+            }
+        }
+    }
+
+    /**
+     * Processes an LN that was found in the tree.  Lock the LN's node ID and
+     * then set the entry's MIGRATE flag if the LSN of the LN log entry is the
+     * active LSN in the tree.
+     *
+     * @param info identifies the LN log entry.
+     *
+     * @param logLsn is the LSN of the log entry.
+     *
+     * @param treeLsn is the LSN found in the tree.
+     *
+     * @param bin is the BIN found in the tree; is latched on method entry and
+     * exit.
+     *
+     * @param index is the BIN index found in the tree.
+     *
+     * @param parentDIN is non-null for a DupCountLN only; if non-null, is
+     * latched on method entry and exit.
+     */
+    private void processFoundLN(LNInfo info,
+                                long logLsn,
+                                long treeLsn,
+                                BIN bin,
+                                int index,
+                                DIN parentDIN)
+        throws DatabaseException {
+
+        LN lnFromLog = info.getLN();
+        byte[] key = info.getKey();
+        byte[] dupKey = info.getDupKey();
+
+        DatabaseImpl db = bin.getDatabase();
+        boolean isTemporary = db.isTemporary();
+        boolean isDupCountLN = parentDIN != null;
+
+        /* Status variables are used to generate debug tracing info. */
+        boolean obsolete = false;  // The LN is no longer in use.
+        boolean migrated = false;  // The LN was in use and is migrated.
+        boolean lockDenied = false;// The LN lock was denied.
+        boolean completed = false; // This method completed.
+
+        long nodeId = lnFromLog.getNodeId();
+        BasicLocker locker = null;
+        try {
+            Tree tree = db.getTree();
+            assert tree != null;
+
+            /*
+             * If the tree and log LSNs are equal, then we can be fairly
+             * certain that the log entry is current; in that case, it is
+             * wasteful to lock the LN here if we will perform lazy migration
+             * -- it is better to lock only once during lazy migration.  But if
+             * the tree and log LSNs differ, it is likely that another thread
+             * has updated or deleted the LN and the log LSN is now obsolete;
+             * in this case we can avoid dirtying the BIN by checking for
+             * obsoleteness here, which requires locking.  The latter case can
+             * occur frequently if trackDetail is false.
+             *
+             * 1. If the LSN in the tree and in the log are the same, we will
+             * attempt to migrate it.
+             *
+             * 2. If the LSN in the tree is < the LSN in the log, the log entry
+             * is obsolete, because this LN has been rolled back to a previous
+             * version by a txn that aborted.
+             *
+             * 3. If the LSN in the tree is > the LSN in the log, the log entry
+             * is obsolete, because the LN was advanced forward by some
+             * now-committed txn.
+             *
+             * 4. If the LSN in the tree is a null LSN, the log entry is
+             * obsolete. A slot can only have a null LSN if the record has
+             * never been written to disk in a deferred write database, and
+             * in that case the log entry must be for a past, deleted version
+             * of that record.
+             */
+            if (lnFromLog.isDeleted() &&
+                (treeLsn == logLsn) &&
+                fileLogVersion <= 2) {
+
+                /*
+                 * SR 14583: After JE 2.0, deleted LNs are never found in the
+                 * tree, since we can assume they're obsolete and correctly
+                 * marked as such in the obsolete offset tracking. JE 1.7.1 and
+                 * earlier did not use the pending deleted bit, so deleted LNs
+                 * may still be reachable through their BIN parents.
+                 */
+                obsolete = true;
+                nLNsDeadThisRun++;
+                bin.setPendingDeleted(index);
+            } else if (treeLsn == DbLsn.NULL_LSN) {
+
+                /*
+                 * Case 4: The LN in the tree is a never-written LN for a
+                 * deferred-write db, so the LN in the file is obsolete.
+                 */
+                obsolete = true;
+            } else if (treeLsn != logLsn && isTemporary) {
+
+                /*
+                 * Temporary databases are always non-transactional.  If the
+                 * tree and log LSNs are different then we know that the logLsn
+                 * is obsolete.  Even if the LN is locked, the tree cannot be
+                 * restored to the logLsn because no abort is possible without
+                 * a transaction.  We should consider a similar optimization in
+                 * the future for non-transactional durable databases.
+                 */
+                nLNsDeadThisRun++;
+                obsolete = true;
+            } else if (treeLsn != logLsn || !cleaner.lazyMigration) {
+
+                /*
+                 * Get a lock on the LN if the treeLsn and logLsn are different
+                 * to determine definitively whether the logLsn is obsolete.
+                 * We must also get a lock if we will migrate the LN now
+                 * (lazyMigration is false).
+                 *
+                 * We can hold the latch on the BIN (and DIN) since we always
+                 * attempt to acquire a non-blocking read lock.
+                 */
+                locker = BasicLocker.createBasicLocker
+                    (env, false /*noWait*/, true /*noAPIReadLock*/);
+                LockResult lockRet = locker.nonBlockingLock
+                    (nodeId, LockType.READ, db);
+                if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+
+                    /*
+                     * LN is currently locked by another Locker, so we can't
+                     * assume anything about the value of the LSN in the bin.
+                     */
+                    nLNsLockedThisRun++;
+                    lockDenied = true;
+                } else if (treeLsn != logLsn) {
+                    /* The LN is obsolete and can be purged. */
+                    nLNsDeadThisRun++;
+                    obsolete = true;
+                }
+            }
+
+            /*
+             * At this point either obsolete==true, lockDenied==true, or
+             * treeLsn==logLsn.
+             */
+            if (!obsolete && !lockDenied) {
+                assert treeLsn == logLsn;
+
+                /*
+                 * If lazyMigration is true, set the migrate flag and dirty
+                 * the parent IN.  The evictor or checkpointer will migrate the
+                 * LN later.  If lazyMigration is false, migrate the LN now.
+                 *
+                 * We have a lock on the LN if we are going to migrate it now,
+                 * but not if we will set the migrate flag.
+                 *
+                 * When setting the migrate flag, also populate the target node
+                 * so it does not have to be fetched when it is migrated, if
+                 * the tree and log LSNs are equal and the target is not
+                 * resident.  We must call postFetchInit to initialize MapLNs
+                 * that have not been fully initialized yet [#13191].
+                 *
+                 * For temporary databases, do not rely on the LN migration
+                 * mechanism because temporary databases are not checkpointed
+                 * or recovered.  Instead, dirty the LN to ensure it is
+                 * flushed before its parent is written, and set the LSN to
+                 * NULL_LSN to ensure that it is not tracked or otherwise
+                 * referenced.  Because we do not attempt to lock temporary
+                 * database LNs (see above) we know that if it is non-obsolete,
+                 * the tree and log LSNs are equal.  We will always restore the
+                 * LN to the BIN slot here, and always log the dirty LN when
+                 * logging the BIN.
+                 *
+                 * Also for temporary databases, make both the target LN and
+                 * the BIN or IN parent dirty. Otherwise, when the BIN or IN is
+                 * evicted in the future, it will be written to disk without
+                 * flushing its dirty, migrated LNs.  [#18227]
+                 */
+                if (isDupCountLN) {
+                    ChildReference dclRef = parentDIN.getDupCountLNRef();
+                    if (dclRef.getTarget() == null) {
+                        lnFromLog.postFetchInit(db, logLsn);
+                        parentDIN.updateDupCountLN(lnFromLog);
+                    } 
+
+                    if (isTemporary) {
+                        ((LN) dclRef.getTarget()).setDirty();
+                        dclRef.setLsn(DbLsn.NULL_LSN);
+                        parentDIN.setDirty(true);
+                    } else if (cleaner.lazyMigration) {
+                        dclRef.setMigrate(true);
+                        parentDIN.setDirty(true);
+                    } else {
+                        LN targetLn = (LN) dclRef.getTarget();
+                        assert targetLn != null;
+                        byte[] targetKey = parentDIN.getDupKey();
+                        long newLNLsn = targetLn.log
+                            (env, db, targetKey, logLsn, locker,
+                             true /*backgroundIO*/,
+                             ReplicationContext.NO_REPLICATE);
+                        parentDIN.updateDupCountLNRef(newLNLsn);
+                        /* Evict LN if we populated it with the log LN. */
+                        if (lnFromLog == targetLn) {
+                            parentDIN.updateDupCountLN(null);
+                        }
+                    }
+                } else {
+                    if (bin.getTarget(index) == null) {
+                        lnFromLog.postFetchInit(db, logLsn);
+                        /* Ensure keys are transactionally correct. [#15704] */
+                        byte[] lnSlotKey = bin.containsDuplicates() ?
+                            dupKey : key;
+                        bin.updateNode(index, lnFromLog, lnSlotKey);
+                    }
+
+                    if (isTemporary) {
+                        ((LN) bin.getTarget(index)).setDirty();
+                        bin.clearLsn(index);
+                        bin.setDirty(true);
+                    } else if (cleaner.lazyMigration) {
+                        bin.setMigrate(index, true);
+                        bin.setDirty(true);
+                    } else {
+                        LN targetLn = (LN) bin.getTarget(index);
+                        assert targetLn != null;
+                        byte[] targetKey = cleaner.getLNMainKey(bin, index);
+                        long newLNLsn = targetLn.log
+                            (env, db, targetKey, logLsn, locker,
+                             true /*backgroundIO*/,
+                             ReplicationContext.NO_REPLICATE);
+                        bin.updateEntry(index, newLNLsn);
+                        /* Evict LN if we populated it with the log LN. */
+                        if (lnFromLog == targetLn) {
+                            bin.updateNode(index, null, null);
+                        }
+                    }
+
+                    /*
+                     * If the generation is zero, we fetched this BIN just for
+                     * cleaning.
+                     */
+                    if (PROHIBIT_DELTAS_WHEN_FETCHING &&
+                        bin.getGeneration() == 0) {
+                        bin.setProhibitNextDelta();
+                    }
+
+                    /*
+                     * Update the generation so that the BIN is not evicted
+                     * immediately.  This allows the cleaner to fill in as many
+                     * entries as possible before eviction, as to-be-cleaned
+                     * files are processed.
+                     */
+                    bin.setGeneration(CacheMode.DEFAULT);
+                }
+
+                nLNsMarkedThisRun++;
+                migrated = true;
+            }
+            completed = true;
+        } finally {
+            if (locker != null) {
+                locker.operationEnd();
+            }
+
+            /*
+             * If a write lock is held, it is likely that the log LSN will
+             * become obsolete.  It is more efficient to process this via the
+             * pending list than to set the MIGRATE flag, dirty the BIN, and
+             * cause the BIN to be logged unnecessarily.
+             */
+            if (completed && lockDenied) {
+                assert !isTemporary;
+                fileSelector.addPendingLN(lnFromLog, db.getId(), key, dupKey);
+            }
+
+            cleaner.trace
+                (cleaner.detailedTraceLevel, Cleaner.CLEAN_LN, lnFromLog,
+                 logLsn, completed, obsolete, migrated);
+        }
+    }
+
+    /**
+     * If an IN is still in use in the in-memory tree, dirty it. The checkpoint
+     * invoked at the end of the cleaning run will end up rewriting it.
+     */
+    private void processIN(IN inClone, DatabaseImpl db, long logLsn)
+        throws DatabaseException {
+
+        boolean obsolete = false;
+        boolean dirtied = false;
+        boolean completed = false;
+
+        try {
+            nINsCleanedThisRun++;
+
+            /*
+             * If the DB is gone, this IN is obsolete.  If delete cleanup is
+             * in progress, put the DB into the DB pending set; this IN will
+             * be declared deleted after the delete cleanup is finished.
+             */
+            if (db == null || db.isDeleted()) {
+                cleaner.addPendingDB(db);
+                nINsDeadThisRun++;
+                obsolete = true;
+                completed = true;
+                return;
+            }
+
+            Tree tree = db.getTree();
+            assert tree != null;
+
+            IN inInTree = findINInTree(tree, db, inClone, logLsn);
+
+            if (inInTree == null) {
+                /* IN is no longer in the tree.  Do nothing. */
+                nINsDeadThisRun++;
+                obsolete = true;
+            } else {
+
+                /*
+                 * IN is still in the tree.  Dirty it.  Checkpoint or eviction
+                 * will write it out.  Prohibit the next delta, since the
+                 * original version must be made obsolete.
+                 */
+                nINsMigratedThisRun++;
+                inInTree.setDirty(true);
+                inInTree.setProhibitNextDelta();
+                inInTree.releaseLatch();
+                dirtied = true;
+            }
+
+            completed = true;
+        } finally {
+            cleaner.trace
+                (cleaner.detailedTraceLevel, Cleaner.CLEAN_IN, inClone, logLsn,
+                 completed, obsolete, dirtied);
+        }
+    }
+
+    /**
+     * Given a clone of an IN that has been taken out of the log, try to find
+     * it in the tree and verify that it is the current version.  Returns the
+     * node in the tree if it is found and its LSN is current; returns null
+     * if the clone is not found in the tree or is not the latest version.
+     * The caller is responsible for unlatching the returned IN.
+     */
+    private IN findINInTree(Tree tree,
+                            DatabaseImpl db,
+                            IN inClone,
+                            long logLsn)
+        throws DatabaseException {
+
+        /* Check if inClone is the root. */
+        if (inClone.isDbRoot()) {
+            IN rootIN = isRoot(tree, db, inClone, logLsn);
+            if (rootIN == null) {
+
+                /*
+                 * inClone is a root, but no longer in use.  Return now,
+                 * because a call to tree.getParentINForChildIN would return
+                 * something unexpected, since it would try to find a parent.
+                 */
+                return null;
+            } else {
+                return rootIN;
+            }
+        }
+
+        /* It's not the root.  Can we find it, and if so, is it current? */
+        inClone.latch(Cleaner.UPDATE_GENERATION);
+        SearchResult result = null;
+        try {
+
+            result = tree.getParentINForChildIN
+                (inClone,
+                 true,   // requireExactMatch
+                 Cleaner.UPDATE_GENERATION,
+                 inClone.getLevel(),
+                 null);  // trackingList
+
+            if (!result.exactParentFound) {
+                return null;
+            }
+
+            long treeLsn = result.parent.getLsn(result.index);
+
+            /*
+             * The IN in the tree is a never-written IN for a DW db so the IN
+             * in the file is obsolete. [#15588]
+             */
+            if (treeLsn == DbLsn.NULL_LSN) {
+                return null;
+            }
+
+            int compareVal = DbLsn.compareTo(treeLsn, logLsn);
+
+            if (compareVal > 0) {
+                /* Log entry is obsolete. */
+                return null;
+            } else {
+
+                /*
+                 * The log entry is the same as or newer than what's in the
+                 * tree.  Dirty the IN and let the checkpoint write it out.
+                 */
+                IN in;
+                if (compareVal == 0) {
+                    /* We can reuse the log entry if the LSNs are equal. */
+                    in = (IN) result.parent.getTarget(result.index);
+                    if (in == null) {
+                        in = inClone;
+                        in.postFetchInit(db, logLsn);
+                        result.parent.updateNode
+                            (result.index, in, null /*lnSlotKey*/);
+                    }
+                } else {
+                    in = (IN) result.parent.fetchTarget(result.index);
+                }
+                in.latch(Cleaner.UPDATE_GENERATION);
+                return in;
+            }
+        } finally {
+            if ((result != null) && (result.exactParentFound)) {
+                result.parent.releaseLatch();
+            }
+        }
+    }
+
+    /**
+     * Returns the current root of the tree if the inClone is that root, or
+     * null if it is not.
+     */
+    private static class RootDoWork implements WithRootLatched {
+        private DatabaseImpl db;
+        private IN inClone;
+        private long logLsn;
+
+        RootDoWork(DatabaseImpl db, IN inClone, long logLsn) {
+            this.db = db;
+            this.inClone = inClone;
+            this.logLsn = logLsn;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            if (root == null ||
+                (root.getLsn() == DbLsn.NULL_LSN) || // deferred write root
+                (root.fetchTarget(db, null).getNodeId() !=
+                 inClone.getNodeId())) {
+                return null;
+            }
+
+            /*
+             * A root LSN less than the log LSN must be an artifact of when we
+             * didn't properly propagate the logging of the rootIN up to the
+             * root ChildReference.  We still do this for compatibility with
+             * old log versions but may be able to remove it in the future.
+             */
+            if (DbLsn.compareTo(root.getLsn(), logLsn) <= 0) {
+                IN rootIN = (IN) root.fetchTarget(db, null);
+                rootIN.latch(Cleaner.UPDATE_GENERATION);
+                return rootIN;
+            } else {
+                return null;
+            }
+        }
+    }
+
+    /**
+     * Check if the cloned IN is the same node as the root in the tree.
+     * Return the real root if it is, null otherwise.  If non-null is
+     * returned, the returned IN (the root) is latched -- the caller is
+     * responsible for unlatching it.
+     */
+    private IN isRoot(Tree tree, DatabaseImpl db, IN inClone, long lsn)
+        throws DatabaseException {
+
+        RootDoWork rdw = new RootDoWork(db, inClone, lsn);
+        return tree.withRootLatchedShared(rdw);
+    }
+
+    /**
+     * Reset per-run counters.
+     */
+    private void resetPerRunCounters() {
+        nINsObsoleteThisRun = 0;
+        nINsCleanedThisRun = 0;
+        nINsDeadThisRun = 0;
+        nINsMigratedThisRun = 0;
+        nLNsObsoleteThisRun = 0;
+        nLNsCleanedThisRun = 0;
+        nLNsDeadThisRun = 0;
+        nLNsMigratedThisRun = 0;
+        nLNsMarkedThisRun = 0;
+        nLNQueueHitsThisRun = 0;
+        nLNsLockedThisRun = 0;
+        nEntriesReadThisRun = 0;
+        nRepeatIteratorReadsThisRun = 0;
+    }
+
+    /**
+     * Add per-run counters to total counters.
+     */
+    private void accumulatePerRunCounters() {
+        cleaner.nINsObsolete +=         nINsObsoleteThisRun;
+        cleaner.nINsCleaned +=          nINsCleanedThisRun;
+        cleaner.nINsDead +=             nINsDeadThisRun;
+        cleaner.nINsMigrated +=         nINsMigratedThisRun;
+        cleaner.nLNsObsolete +=         nLNsObsoleteThisRun;
+        cleaner.nLNsCleaned +=          nLNsCleanedThisRun;
+        cleaner.nLNsDead +=             nLNsDeadThisRun;
+        cleaner.nLNsMigrated +=         nLNsMigratedThisRun;
+        cleaner.nLNsMarked +=           nLNsMarkedThisRun;
+        cleaner.nLNQueueHits +=         nLNQueueHitsThisRun;
+        cleaner.nLNsLocked +=           nLNsLockedThisRun;
+        cleaner.nRepeatIteratorReads += nRepeatIteratorReadsThisRun;
+    }
+
+    /**
+     * A cache of LNInfo by LSN offset.  Used to hold a set of LNs that are
+     * to be processed.  Keeps track of memory used, and when it is full
+     * (over budget) the next offset should be queried and its entry removed.
+     */
+    private static class LookAheadCache {
+
+        private SortedMap<Long,LNInfo> map;
+        private int maxMem;
+        private int usedMem;
+
+        LookAheadCache(int lookAheadCacheSize) {
+            map = new TreeMap<Long,LNInfo>();
+            maxMem = lookAheadCacheSize;
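+            /* Budgets are in bytes; start with the empty TreeMap overhead. */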
+            usedMem = MemoryBudget.TREEMAP_OVERHEAD;
+        }
+
+        boolean isEmpty() {
+            return map.isEmpty();
+        }
+
+        boolean isFull() {
+            return usedMem >= maxMem;
+        }
+
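+        /*
+         * Returns the lowest LSN offset in the cache.  The map is sorted, so
+         * LNs are processed in ascending file offset order.
+         */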
+        Long nextOffset() {
+            return map.firstKey();
+        }
+
+        void add(Long lsnOffset, LNInfo info) {
+            map.put(lsnOffset, info);
+            usedMem += info.getMemorySize();
+            usedMem += MemoryBudget.TREEMAP_ENTRY_OVERHEAD;
+        }
+
+        LNInfo remove(Long offset) {
+            LNInfo info = map.remove(offset);
+            if (info != null) {
+                usedMem -= info.getMemorySize();
+                usedMem -= MemoryBudget.TREEMAP_ENTRY_OVERHEAD;
+            }
+            return info;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/FileSelector.java b/src/com/sleepycat/je/cleaner/FileSelector.java
new file mode 100644
index 0000000000000000000000000000000000000000..cc2f3a1e4e86b6e2e03ba0d2f9c19340751ccecc
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/FileSelector.java
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileSelector.java,v 1.24.2.10 2010/01/04 15:30:27 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * Keeps track of the status of files for which cleaning is in progress.
+ */
+public class FileSelector {
+
+    /*
+     * Each file for which cleaning is in progress is in one of the following
+     * collections.  File numbers migrate from one collection to another as
+     * their status changes, in order:
+     *
+     * toBeCleaned -> beingCleaned -> cleaned ->
+     * checkpointed -> fullyProcessed -> safeToDelete
+     *
+     * Access to these collections is synchronized to guarantee that the status
+     * is atomically updated.
+     */
+
+    /*
+     * A file is initially to-be-cleaned when it is selected as part of a batch
+     * of files that, when deleted, will bring total utilization down to the
+     * minimum configured value.  All files in this collection will be cleaned
+     * in lowest-cost-to-clean order.  For two files of equal cost to clean,
+     * the lower numbered (oldest) file is selected; this is why the set is
+     * sorted.
+     */
+    private SortedSet<Long> toBeCleanedFiles;
+
+    /*
+     * When a file is selected for processing by FileProcessor from the
+     * to-be-cleaned list, it is added to this processing set.  This
+     * distinction is used to prevent a file from being processed by more than
+     * one thread.
+     */
+    private Set<Long> beingCleanedFiles;
+
+    /*
+     * A file moves to the cleaned set when all log entries have been read and
+     * processed.  However, entries needing migration will be marked with the
+     * BIN entry MIGRATE flag, entries that could not be locked will be in the
+     * pending LN set, and the DBs that were pending deletion will be in the
+     * pending DB set.
+     */
+    private Set<Long> cleanedFiles;
+    private Map<Long,Set<DatabaseId>> cleanedFilesDatabases;
+
+    /*
+     * A file moves to the checkpointed set at the end of a checkpoint if it
+     * was in the cleaned set at the beginning of the checkpoint.  Because all
+     * dirty BINs are flushed during the checkpoints, no files in this set
+     * will have entries with the MIGRATE flag set.  However, some entries may
+     * be in the pending LN set and some DBs may be in the pending DB set.
+     */
+    private Set<Long> checkpointedFiles;
+
+    /*
+     * A file is moved from the checkpointed set to the fully-processed set
+     * when the pending LN/DB sets become empty.  Since a pending LN was not
+     * locked successfully, we don't know its original file.  But we do know
+     * that when no pending LNs are present for any file, all log entries in
+     * checkpointed files are either obsolete or have been migrated.  Note,
+     * however, that the parent BINs of the migrated entries may not have been
+     * logged yet.
+     *
+     * No special handling is required to coordinate syncing of deferred write
+     * databases for pending, deferred write LNs, because non-temporary
+     * deferred write DBs are always synced during checkpoints, and temporary
+     * deferred write DBs are not recovered.  Note that although DW databases
+     * are non-transactional, their LNs may be pended because of lock
+     * collisions.
+     */
+    private Set<Long> fullyProcessedFiles;
+
+    /*
+     * A file moves to the safe-to-delete set at the end of a checkpoint if it
+     * was in the fully-processed set at the beginning of the checkpoint.  All
+     * parent BINs of migrated entries have now been logged.
+     */
+    private Set<Long> safeToDeleteFiles;
+
+    /*
+     * Pending LNs are stored in a map of {NodeID -> LNInfo}.  These are LNs
+     * that could not be locked, either during processing or during migration.
+     */
+    private Map<Long,LNInfo> pendingLNs;
+
+    /*
+     * For processed entries with DBs that are pending deletion, we consider
+     * them to be obsolete but we store their DatabaseIds in a set.  Until the
+     * DB deletion is complete, we can't delete the log files containing those
+     * entries.
+     */
+    private Set<DatabaseId> pendingDBs;
+
+    /*
+     * If during a checkpoint there are no pending LNs or DBs added, we can
+     * move cleaned files to the safe-to-delete set at the end of the
+     * checkpoint.  This optimization allows deleting files more quickly when
+     * possible. In particular this impacts the checkpoint during environment
+     * close, since no user operations are active during that checkpoint; this
+     * optimization allows us to delete all cleaned files after the final
+     * checkpoint.
+     */
+    private boolean anyPendingDuringCheckpoint;
+
+    /*
+     * As a side effect of file selection a set of low utilization files is
+     * determined.  This set is guaranteed to be non-null and read-only, so no
+     * synchronization is needed when accessing it.
+     */
+    private Set<Long> lowUtilizationFiles;
+
+    /* For unit tests */
+    private TestHook fileChosenHook;
+
+    FileSelector() {
+        toBeCleanedFiles = new TreeSet<Long>();
+        cleanedFiles = new HashSet<Long>();
+        cleanedFilesDatabases = new HashMap<Long,Set<DatabaseId>>();
+        checkpointedFiles = new HashSet<Long>();
+        fullyProcessedFiles = new HashSet<Long>();
+        safeToDeleteFiles = new HashSet<Long>();
+        pendingLNs = new HashMap<Long,LNInfo>();
+        pendingDBs = new HashSet<DatabaseId>();
+        lowUtilizationFiles = Collections.emptySet();
+        beingCleanedFiles = new HashSet<Long>();
+    }
+
+    /* For unit testing only. */
+    public void setFileChosenHook(TestHook hook) {
+        fileChosenHook = hook;
+    }
+
+    /**
+     * Returns the best file that qualifies for cleaning, or null if no file
+     * qualifies.
+     *
+     * @param forceCleaning is true to always select a file, even if its
+     * utilization is above the minimum utilization threshold.
+     *
+     * @param calcLowUtilizationFiles whether to recalculate the set of files
+     * that are below the minimum utilization threshold.
+     *
+     * @param maxBatchFiles is the maximum number of files to be selected at
+     * one time, or zero if there is no limit.
+     *
+     * @return the next file to be cleaned, or null if no file needs cleaning.
+     */
+    Long selectFileForCleaning(UtilizationProfile profile,
+                               boolean forceCleaning,
+                               boolean calcLowUtilizationFiles,
+                               int maxBatchFiles)
+        throws DatabaseException {
+
+        /*
+         * Note that because it calls UtilizationProfile methods, this method
+         * itself cannot be synchronized.  Synchronization is done elsewhere in
+         * the order [UtilizationProfile, FileSelector], so this method can't
+         * synchronize first on FileSelector and then call a UtilizationProfile
+         * method without causing a deadlock.  However, it must synchronize
+         * while accessing private fields.  Retries are performed when
+         * necessary to work around the lack of synchronization.  See below.
+         */
+        Set<Long> newLowUtilizationFiles = calcLowUtilizationFiles ?
+            (new HashSet<Long>()) : null;
+
+        /*
+         * Add files until we reach the theoretical minimum utilization
+         * threshold.
+         */
+        while (true) {
+
+            int toBeCleanedSize;
+            synchronized (this) {
+                toBeCleanedSize = toBeCleanedFiles.size();
+            }
+            if (maxBatchFiles > 0 &&
+                toBeCleanedSize >= maxBatchFiles) {
+                break;
+            }
+
+            Long fileNum = profile.getBestFileForCleaning
+                (this, forceCleaning, newLowUtilizationFiles,
+                 toBeCleanedSize > 0 /*isBacklog*/);
+
+            if (fileNum == null) {
+                break;
+            }
+
+            assert TestHookExecute.doHookIfSet(fileChosenHook);
+
+            /*
+             * Because we don't synchronize on the FileSelector while calling
+             * UtilizationProfile.getBestFileForCleaning, another thread may
+             * have selected this file.  Add the file only if not already being
+             * cleaned.  [#17079]
+             *
+             * There is also a possibility that another thread will have
+             * already cleaned and deleted the file.  In that case, we will
+             * return it and attempt to clean it again, but
+             * FileProcessor.doClean will detect this situation and recover.
+             */
+            synchronized (this) {
+                if (!isFileCleaningInProgress(fileNum)) {
+                    toBeCleanedFiles.add(fileNum);
+                }
+            }
+        }
+
+        /* Update the read-only set. */
+        if (newLowUtilizationFiles != null) {
+            lowUtilizationFiles = newLowUtilizationFiles;
+        }
+
+        /*
+         * Select the cheapest file to clean from a copy of the to-be-cleaned
+         * set.  Move the selected file from the to-be-cleaned set to the
+         * being-cleaned set.
+         *
+         * Because we don't synchronize on the FileSelector while calling
+         * UtilizationProfile.getCheapestFileToClean, another thread may have
+         * selected this file.  Select the file only if it is waiting to be
+         * cleaned.  [#17079]
+         */
+        SortedSet<Long> availableFiles;
+        synchronized (this) {
+            availableFiles = new TreeSet<Long>(toBeCleanedFiles);
+        }
+        while (availableFiles.size() > 0) {
+            Long fileNum = profile.getCheapestFileToClean(availableFiles);
+            if (fileNum == null) {
+                return null;
+            }
+            synchronized (this) {
+                if (toBeCleanedFiles.remove(fileNum)) {
+                    beingCleanedFiles.add(fileNum);
+                    return fileNum;
+                }
+                availableFiles.remove(fileNum);
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Returns whether the file is in any stage of the cleaning process.
+     */
+    synchronized boolean isFileCleaningInProgress(Long file) {
+        return toBeCleanedFiles.contains(file) ||
+            beingCleanedFiles.contains(file) ||
+            cleanedFiles.contains(file) ||
+            checkpointedFiles.contains(file) ||
+            fullyProcessedFiles.contains(file) ||
+            safeToDeleteFiles.contains(file);
+    }
+
+    private boolean isFileCleaningInProgress(Collection<Long> files) {
+        for (Long file : files) {
+            if (isFileCleaningInProgress(file)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Removes all references to a file.
+     */
+    synchronized void removeAllFileReferences(Long file, MemoryBudget budget) {
+        toBeCleanedFiles.remove(file);
+        beingCleanedFiles.remove(file);
+        cleanedFiles.remove(file);
+        Set<DatabaseId> oldDatabases = cleanedFilesDatabases.remove(file);
+        adjustMemoryBudget(budget, oldDatabases, null /*newDatabases*/);
+        checkpointedFiles.remove(file);
+        fullyProcessedFiles.remove(file);
+        safeToDeleteFiles.remove(file);
+    }
+
+    /**
+     * When file cleaning is aborted, move the file back from the being-cleaned
+     * set to the to-be-cleaned set.
+     */
+    synchronized void putBackFileForCleaning(Long fileNum) {
+        beingCleanedFiles.remove(fileNum);
+        assert !isFileCleaningInProgress(fileNum);
+        toBeCleanedFiles.add(fileNum);
+    }
+
+    /**
+     * For unit testing.
+     */
+    synchronized void injectFileForCleaning(Long fileNum) {
+        if (!isFileCleaningInProgress(fileNum)) {
+            toBeCleanedFiles.add(fileNum);
+        }
+    }
+
+    /**
+     * When cleaning is complete, move the file from the being-cleaned set to
+     * the cleaned set.
+     */
+    synchronized void addCleanedFile(Long fileNum,
+                                     Set<DatabaseId> databases,
+                                     MemoryBudget budget) {
+        beingCleanedFiles.remove(fileNum);
+        assert !isFileCleaningInProgress(fileNum);
+        cleanedFiles.add(fileNum);
+        Set<DatabaseId> oldDatabases =
+            cleanedFilesDatabases.put(fileNum, databases);
+        adjustMemoryBudget(budget, oldDatabases, databases);
+    }
+
+    /**
+     * Returns a read-only set of low utilization files that can be accessed
+     * without synchronization.
+     */
+    Set<Long> getLowUtilizationFiles() {
+        /* This set is read-only, so there is no need to make a copy. */
+        return lowUtilizationFiles;
+    }
+
+    /**
+     * Returns a read-only copy of to-be-cleaned files that can be accessed
+     * without synchronization.
+     */
+    synchronized Set<Long> getToBeCleanedFiles() {
+        return new HashSet<Long>(toBeCleanedFiles);
+    }
+
+    /**
+     * Returns a copy of the cleaned and fully-processed files at the time a
+     * checkpoint starts.
+     */
+    synchronized CheckpointStartCleanerState getFilesAtCheckpointStart() {
+
+        anyPendingDuringCheckpoint = !pendingLNs.isEmpty() ||
+            !pendingDBs.isEmpty();
+
+        CheckpointStartCleanerState info = new CheckpointStartCleanerState
+            (cleanedFiles, fullyProcessedFiles);
+        return info;
+    }
+
+    /**
+     * When a checkpoint is complete, move the previously cleaned and
+     * fully-processed files to the checkpointed and safe-to-delete sets.
+     */
+    synchronized void
+        updateFilesAtCheckpointEnd(CheckpointStartCleanerState info) {
+
+        if (!info.isEmpty()) {
+
+            Set<Long> previouslyCleanedFiles = info.getCleanedFiles();
+            if (previouslyCleanedFiles != null) {
+                cleanedFiles.removeAll(previouslyCleanedFiles);
+                assert !isFileCleaningInProgress(previouslyCleanedFiles) :
+                    toString();
+                if (anyPendingDuringCheckpoint) {
+                    checkpointedFiles.addAll(previouslyCleanedFiles);
+                } else {
+                    safeToDeleteFiles.addAll(previouslyCleanedFiles);
+                }
+            }
+
+            Set<Long> previouslyProcessedFiles =
+                info.getFullyProcessedFiles();
+            if (previouslyProcessedFiles != null) {
+                fullyProcessedFiles.removeAll(previouslyProcessedFiles);
+                assert !isFileCleaningInProgress(previouslyProcessedFiles) :
+                    toString();
+                safeToDeleteFiles.addAll(previouslyProcessedFiles);
+            }
+
+            updateProcessedFiles();
+        }
+    }
+
+    /**
+     * Adds the given LN info to the pending LN set.
+     */
+    synchronized boolean addPendingLN(LN ln,
+                                      DatabaseId dbId,
+                                      byte[] key,
+                                      byte[] dupKey) {
+        assert ln != null;
+
+        boolean added = pendingLNs.put
+            (Long.valueOf(ln.getNodeId()),
+             new LNInfo(ln, dbId, key, dupKey)) != null;
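+        /*
+         * Map.put returns the previous value, so 'added' is true only when
+         * an existing entry for this node ID was replaced.
+         */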
+
+        anyPendingDuringCheckpoint = true;
+        return added;
+    }
+
+    /**
+     * Returns an array of LNInfo for LNs that could not be migrated in a
+     * prior cleaning attempt, or null if no LNs are pending.
+     */
+    synchronized LNInfo[] getPendingLNs() {
+
+        if (pendingLNs.size() > 0) {
+            LNInfo[] lns = new LNInfo[pendingLNs.size()];
+            pendingLNs.values().toArray(lns);
+            return lns;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Removes the LN for the given node ID from the pending LN set.
+     */
+    synchronized void removePendingLN(long nodeId) {
+
+        pendingLNs.remove(nodeId);
+        updateProcessedFiles();
+    }
+
+    /**
+     * Adds the given DatabaseId to the pending DB set.
+     */
+    synchronized boolean addPendingDB(DatabaseId dbId) {
+
+        boolean added = pendingDBs.add(dbId);
+
+        anyPendingDuringCheckpoint = true;
+        return added;
+    }
+
+    /**
+     * Returns an array of DatabaseIds for DBs that were pending deletion in a
+     * prior cleaning attempt, or null if no DBs are pending.
+     */
+    synchronized DatabaseId[] getPendingDBs() {
+
+        if (pendingDBs.size() > 0) {
+            DatabaseId[] dbs = new DatabaseId[pendingDBs.size()];
+            pendingDBs.toArray(dbs);
+            return dbs;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Removes the DatabaseId from the pending DB set.
+     */
+    synchronized void removePendingDB(DatabaseId dbId) {
+
+        pendingDBs.remove(dbId);
+        updateProcessedFiles();
+    }
+
+    /**
+     * Returns a copy of the safe-to-delete files.
+     */
+    synchronized Set<Long> copySafeToDeleteFiles() {
+        if (safeToDeleteFiles.size() == 0) {
+            return null;
+        } else {
+            return new HashSet<Long>(safeToDeleteFiles);
+        }
+    }
+
+    /**
+     * Returns a copy of the databases for a cleaned file.
+     */
+    synchronized Set<DatabaseId> getCleanedDatabases(Long fileNum) {
+        return new HashSet<DatabaseId>(cleanedFilesDatabases.get(fileNum));
+    }
+
+    /**
+     * Removes file from the safe-to-delete set after the file itself has
+     * finally been deleted.
+     */
+    synchronized void removeDeletedFile(Long fileNum, MemoryBudget budget) {
+        /* Ensure that no remnants of a deleted file remain. [#17752] */
+        removeAllFileReferences(fileNum, budget);
+    }
+
+    /**
+     * Update memory budgets when the environment is closed and will never be
+     * accessed again.
+     */
+    synchronized void close(MemoryBudget budget) {
+        for (Set<DatabaseId> oldDatabases : cleanedFilesDatabases.values()) {
+            adjustMemoryBudget(budget, oldDatabases, null /*newDatabases*/);
+        }
+    }
+
+    /**
+     * If there are no pending LNs or DBs outstanding, move the checkpointed
+     * files to the fully-processed set.  The check for pending LNs/DBs and the
+     * copying of the checkpointed files must be done atomically in a
+     * synchronized block.  All methods that call this method are synchronized.
+     */
+    private void updateProcessedFiles() {
+        if (pendingLNs.isEmpty() && pendingDBs.isEmpty()) {
+            fullyProcessedFiles.addAll(checkpointedFiles);
+            checkpointedFiles.clear();
+        }
+    }
+
+    /**
+     * Adjust the memory budget when an entry is added to or removed from the
+     * cleanedFilesDatabases map.
+     */
+    private void adjustMemoryBudget(MemoryBudget budget,
+                                    Set<DatabaseId> oldDatabases,
+                                    Set<DatabaseId> newDatabases) {
+        long adjustMem = 0;
+        if (oldDatabases != null) {
+            adjustMem -= getCleanedFilesDatabaseEntrySize(oldDatabases);
+        }
+        if (newDatabases != null) {
+            adjustMem += getCleanedFilesDatabaseEntrySize(newDatabases);
+        }
+        budget.updateAdminMemoryUsage(adjustMem);
+    }
+
+    /**
+     * Returns the size of a HashMap entry that contains the given set of
+     * DatabaseIds.  We don't count the DatabaseId size because it is likely
+     * that it is also stored (and budgeted) in the DatabaseImpl.
+     */
+    private long getCleanedFilesDatabaseEntrySize(Set<DatabaseId> databases) {
+        return MemoryBudget.HASHMAP_ENTRY_OVERHEAD +
+               MemoryBudget.HASHSET_OVERHEAD +
+               (databases.size() * MemoryBudget.HASHSET_ENTRY_OVERHEAD);
+    }
+
+    /**
+     * Holds copy of all checkpoint-dependent cleaner state.
+     */
+    public static class CheckpointStartCleanerState {
+
+        /* A snapshot of the cleaner collections at the checkpoint start. */
+        private Set<Long> cleanedFiles;
+        private Set<Long> fullyProcessedFiles;
+
+        CheckpointStartCleanerState(Set<Long> cleanedFiles,
+                                    Set<Long> fullyProcessedFiles) {
+
+            /*
+             * Create snapshots of the collections of various files at the
+             * beginning of the checkpoint.
+             */
+            this.cleanedFiles = new HashSet<Long>(cleanedFiles);
+            this.fullyProcessedFiles = new HashSet<Long>(fullyProcessedFiles);
+        }
+
+        public boolean isEmpty() {
+            return ((cleanedFiles.size() == 0) &&
+                    (fullyProcessedFiles.size() == 0));
+        }
+
+        public Set<Long> getCleanedFiles() {
+            return cleanedFiles;
+        }
+
+        public Set<Long> getFullyProcessedFiles() {
+            return fullyProcessedFiles;
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "toBeCleanedFiles = " + toBeCleanedFiles +
+               " beingCleanedFiles = " + beingCleanedFiles +
+               " cleanedFiles = " + cleanedFiles +
+               " cleanedFilesDatabases = " + cleanedFilesDatabases +
+               " checkpointedFiles = " + checkpointedFiles +
+               " fullyProcessedFiles = " + fullyProcessedFiles +
+               " safeToDeleteFiles = " + safeToDeleteFiles +
+               " pendingLNs = " + pendingLNs +
+               " pendingDBs = " + pendingDBs +
+               " anyPendingDuringCheckpoint = " + anyPendingDuringCheckpoint +
+               " lowUtilizationFiles = " + lowUtilizationFiles;
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/FileSummary.java b/src/com/sleepycat/je/cleaner/FileSummary.java
new file mode 100644
index 0000000000000000000000000000000000000000..c4649dd6996cf4a381b3962b55fc253c2cfdb033
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/FileSummary.java
@@ -0,0 +1,287 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileSummary.java,v 1.25.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.LogUtils;
+
+/**
+ * Per-file utilization counters.  The UtilizationProfile stores a persistent
+ * map of file number to FileSummary.
+ */
+public class FileSummary implements Loggable {
+
+    /* Persistent fields. */
+    public int totalCount;      // Total # of log entries
+    public int totalSize;       // Total bytes in log file
+    public int totalINCount;    // Number of IN log entries
+    public int totalINSize;     // Byte size of IN log entries
+    public int totalLNCount;    // Number of LN log entries
+    public int totalLNSize;     // Byte size of LN log entries
+    public int obsoleteINCount; // Number of obsolete IN log entries
+    public int obsoleteLNCount; // Number of obsolete LN log entries
+    public int obsoleteLNSize;  // Byte size of obsolete LN log entries
+    public int obsoleteLNSizeCounted;  // Obsolete LNs whose size was counted
+
+    /**
+     * Creates an empty summary.
+     */
+    public FileSummary() {
+    }
+
+    /**
+     * Returns whether this summary contains any non-zero totals.
+     */
+    public boolean isEmpty() {
+
+        return totalCount == 0 &&
+               totalSize == 0 &&
+               obsoleteINCount == 0 &&
+               obsoleteLNCount == 0;
+    }
+
+    /**
+     * Returns the approximate byte size of all obsolete LN entries.  In
+     * FileSummaryLN version 3 and greater the exact tracked size is used.
+     */
+    public int getObsoleteLNSize() {
+
+        if (totalLNCount == 0) {
+            return 0;
+        }
+
+        /*
+         * Use the tracked obsolete size for all entries for which the size was
+         * counted, plus the average size for all obsolete entries whose size
+         * was not counted.
+         */
+        int obsolete = obsoleteLNSize;
+        int notCounted = obsoleteLNCount - obsoleteLNSizeCounted;
+        if (notCounted > 0) {
+            /* Use long arithmetic. */
+            long total = totalLNSize;
+            /* Scale by 256 (left shift by 8) to reduce truncation error. */
+            total <<= 8;
+            long avgSizePerLN = total / totalLNCount;
+            obsolete += (int) ((notCounted * avgSizePerLN) >> 8);
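+            /*
+             * For example, with totalLNSize=1000 and totalLNCount=50, the
+             * scaled average is (1000 << 8) / 50 = 5120, and each uncounted
+             * obsolete LN contributes 5120 >> 8 = 20 bytes.
+             */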
+        }
+        return obsolete;
+    }
+
+    /**
+     * Returns the approximate byte size of all obsolete IN entries.
+     */
+    public int getObsoleteINSize() {
+
+        if (totalINCount == 0) {
+            return 0;
+        }
+        /* Use long arithmetic. */
+        long size = totalINSize;
+        /* Scale by 256 (left shift by 8) to reduce truncation error. */
+        size <<= 8;
+        long avgSizePerIN = size / totalINCount;
+        return (int) ((obsoleteINCount * avgSizePerIN) >> 8);
+    }
+
+    /**
+     * Returns an estimate of the total bytes that are obsolete.
+     */
+    public int getObsoleteSize()
+        throws DatabaseException {
+
+        if (totalSize > 0) {
+            /* Leftover (non-IN non-LN) space is considered obsolete. */
+            int leftoverSize = totalSize - (totalINSize + totalLNSize);
+            int obsoleteSize = getObsoleteLNSize() +
+                               getObsoleteINSize() +
+                               leftoverSize;
+
+            /*
+             * Don't report more obsolete bytes than the total.  We may
+             * calculate more than the total because of (intentional)
+             * double-counting during recovery.
+             */
+            if (obsoleteSize > totalSize) {
+                obsoleteSize = totalSize;
+            }
+            return obsoleteSize;
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Returns the total number of entries counted.  This value is guaranteed
+     * to increase whenever the tracking information about a file changes,
+     * since each count only grows until the summary is reset.  It is used as
+     * a key discriminator for FileSummaryLN records.
+     */
+    public int getEntriesCounted() {
+        return totalCount + obsoleteLNCount + obsoleteINCount;
+    }
+
+    /**
+     * Returns the number of non-obsolete LN and IN entries.
+     */
+    public int getNonObsoleteCount() {
+        return totalLNCount +
+               totalINCount -
+               obsoleteLNCount -
+               obsoleteINCount;
+    }
+
+    /**
+     * Resets all totals to zero.
+     */
+    public void reset() {
+
+        totalCount = 0;
+        totalSize = 0;
+        totalINCount = 0;
+        totalINSize = 0;
+        totalLNCount = 0;
+        totalLNSize = 0;
+        obsoleteINCount = 0;
+        obsoleteLNCount = 0;
+        obsoleteLNSize = 0;
+        obsoleteLNSizeCounted = 0;
+    }
+
+    /**
+     * Adds the totals of the given summary object to the totals of this object.
+     */
+    public void add(FileSummary o) {
+
+        totalCount += o.totalCount;
+        totalSize += o.totalSize;
+        totalINCount += o.totalINCount;
+        totalINSize += o.totalINSize;
+        totalLNCount += o.totalLNCount;
+        totalLNSize += o.totalLNSize;
+        obsoleteINCount += o.obsoleteINCount;
+        obsoleteLNCount += o.obsoleteLNCount;
+        obsoleteLNSize += o.obsoleteLNSize;
+        obsoleteLNSizeCounted += o.obsoleteLNSizeCounted;
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+
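+        /* One int for each of the 10 persistent fields. */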
+        return 10 * LogUtils.getIntLogSize();
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer buf) {
+
+        LogUtils.writeInt(buf, totalCount);
+        LogUtils.writeInt(buf, totalSize);
+        LogUtils.writeInt(buf, totalINCount);
+        LogUtils.writeInt(buf, totalINSize);
+        LogUtils.writeInt(buf, totalLNCount);
+        LogUtils.writeInt(buf, totalLNSize);
+        LogUtils.writeInt(buf, obsoleteINCount);
+        LogUtils.writeInt(buf, obsoleteLNCount);
+        LogUtils.writeInt(buf, obsoleteLNSize);
+        LogUtils.writeInt(buf, obsoleteLNSizeCounted);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer buf, byte entryVersion) {
+
+        totalCount = LogUtils.readInt(buf);
+        totalSize = LogUtils.readInt(buf);
+        totalINCount = LogUtils.readInt(buf);
+        totalINSize = LogUtils.readInt(buf);
+        totalLNCount = LogUtils.readInt(buf);
+        totalLNSize = LogUtils.readInt(buf);
+        obsoleteINCount = LogUtils.readInt(buf);
+        if (obsoleteINCount == -1) {
+
+            /*
+             * If INs were not counted in an older log file written by 1.5.3 or
+             * earlier, consider all INs to be obsolete.  This causes the file
+             * to be cleaned, and then IN counting will be accurate.
+             */
+            obsoleteINCount = totalINCount;
+        }
+        obsoleteLNCount = LogUtils.readInt(buf);
+
+        /*
+         * obsoleteLNSize and obsoleteLNSizeCounted were added in FileSummaryLN
+         * version 3.
+         */
+        if (entryVersion >= 3) {
+            obsoleteLNSize = LogUtils.readInt(buf);
+            obsoleteLNSizeCounted = LogUtils.readInt(buf);
+        } else {
+            obsoleteLNSize = 0;
+            obsoleteLNSizeCounted = 0;
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer buf, boolean verbose) {
+
+        buf.append("<summary totalCount=\"");
+        buf.append(totalCount);
+        buf.append("\" totalSize=\"");
+        buf.append(totalSize);
+        buf.append("\" totalINCount=\"");
+        buf.append(totalINCount);
+        buf.append("\" totalINSize=\"");
+        buf.append(totalINSize);
+        buf.append("\" totalLNCount=\"");
+        buf.append(totalLNCount);
+        buf.append("\" totalLNSize=\"");
+        buf.append(totalLNSize);
+        buf.append("\" obsoleteINCount=\"");
+        buf.append(obsoleteINCount);
+        buf.append("\" obsoleteLNCount=\"");
+        buf.append(obsoleteLNCount);
+        buf.append("\" obsoleteLNSize=\"");
+        buf.append(obsoleteLNSize);
+        buf.append("\" obsoleteLNSizeCounted=\"");
+        buf.append(obsoleteLNSizeCounted);
+        buf.append("\"/>");
+    }
+
+    /**
+     * Never called.
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        dumpLog(buf, true);
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/LNInfo.java b/src/com/sleepycat/je/cleaner/LNInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..6879a942cdb2cd51b0e51a710d35d50370c0ac04
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/LNInfo.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LNInfo.java,v 1.7.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.tree.LN;
+
+/**
+ * The information necessary to lookup an LN.  Used for pending LNs that are
+ * locked and must be migrated later, or cannot be migrated immediately during
+ * a split.  Also used in a look ahead cache in FileProcessor.
+ *
+ * Is public for Sizeof only.
+ */
+public final class LNInfo {
+
+    private LN ln;
+    private DatabaseId dbId;
+    private byte[] key;
+    private byte[] dupKey;
+
+    public LNInfo(LN ln, DatabaseId dbId, byte[] key, byte[] dupKey) {
+        this.ln = ln;
+        this.dbId = dbId;
+        this.key = key;
+        this.dupKey = dupKey;
+    }
+
+    LN getLN() {
+        return ln;
+    }
+
+    DatabaseId getDbId() {
+        return dbId;
+    }
+
+    byte[] getKey() {
+        return key;
+    }
+
+    byte[] getDupKey() {
+        return dupKey;
+    }
+
+    int getMemorySize() {
+        int size = MemoryBudget.LN_INFO_OVERHEAD;
+        if (ln != null) {
+            size += ln.getMemorySizeIncludedByParent();
+        }
+        if (key != null) {
+            size += MemoryBudget.byteArraySize(key.length);
+        }
+        if (dupKey != null) {
+            size += MemoryBudget.byteArraySize(dupKey.length);
+        }
+        return size;
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java b/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..18d70b3bbb253e319a7f5233138f8c2ca1fe05aa
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java
@@ -0,0 +1,117 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LocalUtilizationTracker.java,v 1.4.2.3 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.IdentityHashMap;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * Accumulates changes to the utilization profile locally in a single thread.
+ *
+ * <p>Per-database information is keyed by DatabaseImpl so that no tree lookup
+ * of a database is required (as when a DatabaseId is used).</p>
+ *
+ * <p>The countNewLogEntry, countObsoleteNode and countObsoleteNodeInexact
+ * methods may be called without taking the log write latch.  Totals and offsets
+ * are accumulated locally in this object only, not in DatabaseImpl
+ * objects.</p>
+ *
+ * <p>When finished with this object, its information should be added to the
+ * Environment's UtilizationTracker and DatabaseImpl objects by calling
+ * transferToUtilizationTracker under the log write latch.  This is done in the
+ * Checkpointer, Evictor and INCompressor by calling
+ * UtilizationProfile.flushLocalTracker which calls
+ * LogManager.transferToUtilizationTracker which calls
+ * BaseLocalUtilizationTracker.transferToUtilizationTracker.</p>
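+ *
+ * <p>A rough usage sketch (hypothetical variables; the flush call follows
+ * the path described above):</p>
+ * <pre>
+ *    LocalUtilizationTracker local = new LocalUtilizationTracker(env);
+ *    local.countNewLogEntry(lsn, type, size, db);  // no latch required
+ *    // ... more counting ...
+ *    env.getUtilizationProfile().flushLocalTracker(local);
+ * </pre>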
+ */
+public class LocalUtilizationTracker extends BaseLocalUtilizationTracker {
+
+    public LocalUtilizationTracker(EnvironmentImpl env)
+        throws DatabaseException {
+
+        super(env, new IdentityHashMap<Object,DbFileSummaryMap>());
+    }
+
+    /**
+     * Counts the addition of all new log entries including LNs.
+     */
+    public void countNewLogEntry(long lsn,
+                                 LogEntryType type,
+                                 int size,
+                                 DatabaseImpl db) {
+        countNew(lsn, db, type, size);
+    }
+
+    /**
+     * Counts a node that has become obsolete and tracks the LSN offset, if
+     * non-zero, to avoid a lookup during cleaning.
+     *
+     * <p>A zero LSN offset is used as a special value when obsolete offset
+     * tracking is not desired. [#15365]  The file header entry (at offset
+     * zero) is never counted as obsolete; it is assumed to be obsolete by the
+     * cleaner.</p>
+     *
+     * <p>This method should only be called for LNs and INs (i.e., only for
+     * nodes).  If type is null we assume it is an LN.</p>
+     */
+    public void countObsoleteNode(long lsn,
+                                  LogEntryType type,
+                                  int size,
+                                  DatabaseImpl db) {
+        countObsolete
+            (lsn, db, type, size,
+             true,   // countPerFile
+             true,   // countPerDb
+             true);  // trackOffset
+    }
+
+    /**
+     * Counts as countObsoleteNode does, but since the LSN may be inexact, does
+     * not track the obsolete LSN offset.
+     *
+     * <p>This method should only be called for LNs and INs (i.e., only for
+     * nodes).  If type is null we assume it is an LN.</p>
+     */
+    public void countObsoleteNodeInexact(long lsn,
+                                         LogEntryType type,
+                                         int size,
+                                         DatabaseImpl db) {
+        countObsolete
+            (lsn, db, type, size,
+             true,   // countPerFile
+             true,   // countPerDb
+             false); // trackOffset
+    }
+
+    public Set<Object> getTrackedDbs() {
+        return getDatabaseMap().keySet();
+    }
+
+    /**
+     * Returns the DatabaseImpl from the database key, which in this case is
+     * the DatabaseImpl.
+     */
+    DatabaseImpl databaseKeyToDatabaseImpl(Object databaseKey)
+        throws DatabaseException {
+
+        return (DatabaseImpl) databaseKey;
+    }
+
+    /**
+     * Do nothing, since DbTree.getDb was not called by
+     * databaseKeyToDatabaseImpl.
+     */
+    void releaseDatabaseImpl(DatabaseImpl db) {
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/OffsetList.java b/src/com/sleepycat/je/cleaner/OffsetList.java
new file mode 100644
index 0000000000000000000000000000000000000000..24e7d52dab2288ed270152b795ca36071302aeca
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/OffsetList.java
@@ -0,0 +1,210 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: OffsetList.java,v 1.14.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * List of LSN offsets as a linked list of segments.  The reasons for using a
+ * list of this type and not a java.util.List are:
+ * <ul>
+ * <li>Segments reduce memory overhead by storing long primitives rather than
+ * Long objects.  Storing many longs per segment reduces link overhead.</li>
+ * <li>Memory is only allocated for new segments, reducing the number of calls
+ * to update the memory budget.</li>
+ * <li>This is an append-only list that supports a single appender thread and
+ * multiple unsynchronized reader threads.  The caller is responsible for
+ * synchronizing such that only one thread calls add() at one time.  The reader
+ * threads see data as it is changing but do not see inconsistent data (corrupt
+ * longs) and do not require synchronization for thread safety.</li>
+ * </ul>
+ *
+ * <p>The algorithms here use traversal of the list segments rather than
+ * recursion to avoid using a lot of stack space.</p>
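+ *
+ * <p>A minimal usage sketch (variable names are illustrative):</p>
+ * <pre>
+ *    OffsetList list = new OffsetList();
+ *    list.add(0x1234L, false);          // single appender thread
+ *    long[] snapshot = list.toArray();  // safe from any reader thread
+ * </pre>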
+ */
+public class OffsetList {
+
+    static final int SEGMENT_CAPACITY = 100;
+
+    private Segment head;
+    private Segment tail;
+    private int size;
+
+    public OffsetList() {
+        head = new Segment();
+        tail = head;
+    }
+
+    /**
+     * Adds the given value and returns whether a new segment was allocated.
+     */
+    public boolean add(long value, boolean checkDupOffsets) {
+
+        /* Each value added should be unique. */
+        if (checkDupOffsets) {
+            assert (!contains(value)) :
+                Tracer.getStackTrace(new Exception("Dup Offset " +
+                                                   Long.toHexString(value)));
+        }
+
+        /*
+         * Do not increment the size until the value is added so that reader
+         * threads do not try to read a value before it has been added.
+         */
+        Segment oldTail = tail;
+        tail = tail.add(value);
+        size += 1;
+        return tail != oldTail;
+    }
+
+    public int size() {
+        return size;
+    }
+
+    /**
+     * Merges the given list and returns whether a segment was freed.
+     */
+    boolean merge(OffsetList other) {
+
+        boolean oneSegFreed = true;
+        Segment seg = other.head;
+        while (true) {
+            Segment next = seg.next();
+            if (next != null) {
+                /* Insert a full segment at the beginning of the list. */
+                seg.setNext(head);
+                head = seg;
+                seg = next;
+            } else {
+                /* Copy the last segment and discard it. */
+                for (int i = 0; i < seg.size(); i += 1) {
+                    if (add(seg.get(i), false)) {
+                        /* The two partial segments did not fit into one. */
+                        assert oneSegFreed;
+                        oneSegFreed = false;
+                    }
+                }
+                break;
+            }
+        }
+        return oneSegFreed;
+    }
+
+    /**
+     * Returns an array of all values as longs.  If a writer thread is
+ * appending to the list while this method is executing, some values may be
+     * missing from the returned array, but the operation is safe.
+     */
+    public long[] toArray() {
+
+        long[] a = new long[size];
+        int next = 0;
+
+        segments: for (Segment seg = head; seg != null; seg = seg.next()) {
+            for (int i = 0; i < seg.size(); i += 1) {
+                if (next >= a.length) {
+                    break segments;
+                }
+                a[next] = seg.get(i);
+                next += 1;
+            }
+        }
+
+        return a;
+    }
+
+    /**
+     * Returns whether this list contains the given offset.
+     */
+    boolean contains(long offset) {
+
+        for (Segment seg = head; seg != null; seg = seg.next()) {
+            for (int i = 0; i < seg.size(); i += 1) {
+                if (seg.get(i) == offset) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * One segment of an OffsetList, containing at most SEGMENT_CAPACITY
+     * values.  Public for Sizeof.
+     */
+    public static class Segment {
+
+        private int index;
+        private Segment next;
+        private int[] values;
+
+        /* Public for Sizeof. */
+        public Segment() {
+            values = new int[SEGMENT_CAPACITY];
+        }
+
+        /**
+         * Call this method on the tail segment.  Returns this segment, or
+         * the newly allocated tail segment if this one was full.
+         */
+        Segment add(long value) {
+            if (index < values.length) {
+
+                /*
+                 * Increment index after adding the offset so that reader
+                 * threads won't see a partial long value.
+                 */
+                values[index] = (int) value;
+                index += 1;
+                return this;
+            } else {
+
+                /*
+                 * Add the value to the new segment before assigning the next
+                 * field so that reader threads can rely on more values being
+                 * available whenever the next field is non-null.
+                 */
+                Segment seg = new Segment();
+                seg.values[0] = (int) value;
+                seg.index = 1;
+                next = seg;
+                return seg;
+            }
+        }
+
+        /**
+         * Returns the value at the given index from this segment only.
+         */
+        long get(int i) {
+            /* Use a long mask; an int mask would sign-extend. */
+            return ((long) values[i]) & 0xFFFFFFFFL;
+        }
+
+        /**
+         * Returns the next segment or null if this is the tail segment.
+         */
+        Segment next() {
+            return next;
+        }
+
+        /**
+         * Sets the next pointer during a merge.
+         */
+        void setNext(Segment next) {
+            this.next = next;
+        }
+
+        /**
+         * Returns the number of values in this segment.
+         */
+        int size() {
+            return index;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/PackedOffsets.java b/src/com/sleepycat/je/cleaner/PackedOffsets.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f5f09b2f30a4c576d4aefc1308099a17af930ee
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/PackedOffsets.java
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PackedOffsets.java,v 1.16.2.3 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.LogUtils;
+
+/**
+ * Stores a sorted list of LSN offsets in a packed short representation.  Each
+ * stored value is the difference between two consecutive offsets.  Each value
+ * is stored as one or more shorts, 15 bits per short, in LSB-first order.  A
+ * short is negated if more shorts for the same value follow; this works
+ * because offsets are always positive values.
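+ *
+ * <p>For example, a delta of 74565 (0x12345) packs into two shorts: the low
+ * 15 bits are 9029, stored negated as -9030 because more bits follow; the
+ * next 15 bits are 2, stored as-is to terminate.  Unpacking computes
+ * (-1 - (-9030)) + (2 &lt;&lt; 15) = 9029 + 65536 = 74565.</p>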
+ */
+public class PackedOffsets implements Loggable {
+
+    private short[] data;
+    private int size;
+
+    /**
+     * Creates an empty object.
+     */
+    public PackedOffsets() {
+
+        /*
+         * Verify assumption in FileSummaryLN that a new PackedOffsets instance
+         * has no extra memory that must be budgeted.
+         */
+        assert getExtraMemorySize() == 0;
+    }
+
+    /**
+     * Returns an iterator over all offsets.
+     */
+    Iterator iterator() {
+        return new Iterator();
+    }
+
+    /**
+     * Packs the given offsets, replacing any offsets stored in this object.
+     */
+    public void pack(long[] offsets) {
+
+        /*
+         * Allocate a maximum sized new data array: three shorts (45 bits)
+         * per value suffices, since offsets (and hence deltas) fit in 32
+         * bits.
+         */
+        short[] newData = new short[offsets.length * 3];
+
+        /* Pack the sorted offsets. */
+        Arrays.sort(offsets);
+        int dataIndex = 0;
+        long priorVal = 0;
+        for (int i = 0; i < offsets.length; i += 1) {
+            long val = offsets[i];
+            dataIndex = append(newData, dataIndex, val - priorVal);
+            priorVal = val;
+        }
+
+        /* Copy in the exact sized new data. */
+        data = new short[dataIndex];
+        System.arraycopy(newData, 0, data, 0, dataIndex);
+        size = offsets.length;
+    }
+
+    /**
+     * Returns the unpacked offsets.
+     */
+    long[] toArray() {
+        long[] offsets = new long[size];
+        int index = 0;
+        Iterator iter = iterator();
+        while (iter.hasNext()) {
+            offsets[index++] = iter.next();
+        }
+        assert index == size;
+        return offsets;
+    }
+
+    /**
+     * Copies the given value as a packed long to the array starting at the
+     * given index.  Returns the index of the next position in the array.
+     */
+    private int append(short[] to, int index, long val) {
+
+        assert val >= 0;
+
+        while (true) {
+            short s = (short) (val & 0x7fff);
+            val >>>= 15;
+            if (val > 0) {
+                to[index++] = (short) (-1 - s);
+            } else {
+                to[index++] = s;
+                break;
+            }
+        }
+        return index;
+    }
+
+    /**
+     * An iterator over all offsets.
+     */
+    class Iterator {
+
+        private int index;
+        private long priorVal;
+
+        private Iterator() {
+        }
+
+        boolean hasNext() {
+            return data != null && index < data.length;
+        }
+
+        long next() {
+            long val = priorVal;
+            for (int shift = 0;; shift += 15) {
+                long s = data[index++];
+                if (s < 0) {
+                    val += (-1 - s) << shift;
+                } else {
+                    val += s << shift;
+                    break;
+                }
+            }
+            priorVal = val;
+            return val;
+        }
+    }
+
+    /**
+     * Returns the extra memory used by this object when the pack() method has
+     * been called to allocate the data array.
+     */
+    public int getExtraMemorySize() {
+        if (data != null) {
+            return MemoryBudget.shortArraySize(data.length);
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+
+        int len = (data != null) ? data.length : 0;
+        return  (LogUtils.getPackedIntLogSize(size) +
+                 LogUtils.getPackedIntLogSize(len) +
+                 (len * LogUtils.SHORT_BYTES));
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer buf) {
+
+        LogUtils.writePackedInt(buf, size);
+        if (data != null) {
+            LogUtils.writePackedInt(buf, data.length);
+            for (int i = 0; i < data.length; i += 1) {
+                LogUtils.writeShort(buf, data[i]);
+            }
+        } else {
+            LogUtils.writePackedInt(buf, 0);
+        }
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer buf, byte entryVersion) {
+
+        boolean unpacked = (entryVersion < 6);
+        size = LogUtils.readInt(buf, unpacked);
+        int len = LogUtils.readInt(buf, unpacked);
+        if (len > 0) {
+            data = new short[len];
+            for (int i = 0; i < len; i += 1) {
+                data[i] = LogUtils.readShort(buf);
+            }
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer buf, boolean verbose) {
+
+        if (size > 0) {
+            Iterator i = iterator();
+            buf.append("<offsets size=\"");
+            buf.append(size);
+            buf.append("\">");
+            while (i.hasNext()) {
+                buf.append("0x");
+                buf.append(Long.toHexString(i.next()));
+                buf.append(' ');
+            }
+            buf.append("</offsets>");
+        } else {
+            buf.append("<offsets size=\"0\"/>");
+        }
+    }
+
+    /**
+     * Never called.
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return -1;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        dumpLog(buf, true);
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java b/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..971fa208551e9e077fea7c5c1d677d71e78bedf5
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryUtilizationTracker.java,v 1.9.2.3 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogFileNotFoundException;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Accumulates changes to the utilization profile during recovery.
+ *
+ * <p>Per-database information is keyed by DatabaseId because the DatabaseImpl
+ * is not always available during recovery.  In fact this is the only reason
+ * that a "local" tracker is used during recovery -- to avoid requiring that
+ * the DatabaseImpl is available, which is necessary to use the "global"
+ * UtilizationTracker.  There is no requirement to accumulate totals locally,
+ * since recovery is single threaded.</p>
+ *
+ * <p>When finished with this object, its information should be added to the
+ * Environment's UtilizationTracker and DatabaseImpl objects by calling
+ * transferToUtilizationTracker.  This is done at the end of recovery, just
+ * prior to the checkpoint.  It does not have to be done under the log write
+ * latch, since recovery is single threaded.</p>
+ */
+public class RecoveryUtilizationTracker extends BaseLocalUtilizationTracker {
+
+    /* File number -> LSN of FileSummaryLN */
+    private Map<Long, Long> fileSummaryLsns;
+    /* DatabaseId  -> LSN of MapLN */
+    private Map<DatabaseId, Long> databaseLsns;
+
+    public RecoveryUtilizationTracker(EnvironmentImpl env)
+        throws DatabaseException {
+
+        super(env, new HashMap<Object,DbFileSummaryMap>());
+        fileSummaryLsns = new HashMap<Long, Long>();
+        databaseLsns = new HashMap<DatabaseId, Long>();
+    }
+
+    /**
+     * Saves the LSN of the last logged FileSummaryLN.
+     */
+    public void saveLastLoggedFileSummaryLN(long fileNum, long lsn) {
+        fileSummaryLsns.put(Long.valueOf(fileNum), Long.valueOf(lsn));
+    }
+
+    /**
+     * Saves the LSN of the last logged MapLN.
+     */
+    public void saveLastLoggedMapLN(DatabaseId dbId, long lsn) {
+        databaseLsns.put(dbId, Long.valueOf(lsn));
+    }
+
+    /**
+     * Counts the addition of all new log entries including LNs.
+     */
+    public void countNewLogEntry(long lsn,
+                                 LogEntryType type,
+                                 int size,
+                                 DatabaseId dbId) {
+        countNew(lsn, dbId, type, size);
+    }
+
+    /**
+     * Counts the LSN of a node as obsolete unconditionally.
+     */
+    public void countObsoleteUnconditional(long lsn,
+                                           LogEntryType type,
+                                           int size,
+                                           DatabaseId dbId,
+                                           boolean countExact) {
+        countObsolete
+            (lsn, dbId, type, size,
+             true,   // countPerFile
+             true,   // countPerDb
+             countExact);
+    }
+
+    /**
+     * Counts the oldLsn of a node as obsolete if it has not already been
+     * counted at the point of newLsn in the log.
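+     *
+     * <p>For example (hypothetical scenario): when an LN at oldLsn in file 7
+     * is superseded by a version at newLsn, file 7's obsolete count is
+     * incremented only if the last FileSummaryLN for file 7 was logged
+     * before newLsn; otherwise that FileSummaryLN already reflects the
+     * obsolescence.</p>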
+     *
+     * @return whether the file was previously uncounted.
+     */
+    public boolean countObsoleteIfUncounted(long oldLsn,
+                                            long newLsn,
+                                            LogEntryType type,
+                                            int size,
+                                            DatabaseId dbId,
+                                            boolean countExact) {
+        Long fileNum = Long.valueOf(DbLsn.getFileNumber(oldLsn));
+        boolean fileUncounted = isFileUncounted(fileNum, newLsn);
+        boolean dbUncounted = isDbUncounted(dbId, newLsn);
+        countObsolete
+            (oldLsn, dbId, type, size,
+             fileUncounted, // countPerFile
+             dbUncounted,   // countPerDb
+             countExact);   // trackOffset
+        return fileUncounted;
+    }
+
+    /**
+     * Fetches the LN to get its size only if necessary and so configured.
+     */
+    public int fetchLNSize(int size, long lsn)
+        throws DatabaseException {
+
+        if (size == 0 && env.getCleaner().getFetchObsoleteSize()) {
+            try {
+                LN ln = (LN) env.getLogManager().get(lsn);
+                size = ln.getLastLoggedSize();
+            } catch (LogFileNotFoundException e) {
+                /* Ignore errors if the file was cleaned. */
+            }
+        }
+        return size;
+    }
+
+    /**
+     * Overrides this method for recovery and returns whether the FileSummaryLN
+     * for the given file is prior to the given LSN.
+     */
+    @Override
+    boolean isFileUncounted(Long fileNum, long lsn) {
+        long fsLsn = DbLsn.longToLsn(fileSummaryLsns.get(fileNum));
+        int cmpFsLsnToNewLsn = (fsLsn != DbLsn.NULL_LSN) ?
+            DbLsn.compareTo(fsLsn, lsn) : -1;
+        return cmpFsLsnToNewLsn < 0;
+    }
+
+    /**
+     * Returns whether the MapLN for the given database ID is prior to the
+     * given LSN.
+     */
+    private boolean isDbUncounted(DatabaseId dbId, long lsn) {
+        long dbLsn = DbLsn.longToLsn(databaseLsns.get(dbId));
+        int cmpDbLsnToLsn = (dbLsn != DbLsn.NULL_LSN) ?
+            DbLsn.compareTo(dbLsn, lsn) : -1;
+        return cmpDbLsnToLsn < 0;
+    }
+
+    /**
+     * Clears all accumulated utilization info for the given file.
+     */
+    public void resetFileInfo(long fileNum) {
+        TrackedFileSummary trackedSummary = getTrackedFile(fileNum);
+        if (trackedSummary != null) {
+            trackedSummary.reset();
+        }
+    }
+
+    /**
+     * Clears all accumulated utilization info for the given database.
+     */
+    public void resetDbInfo(DatabaseId dbId) {
+        removeDbFileSummaries(dbId);
+    }
+
+    /**
+     * Returns the DatabaseImpl from the database key, which in this case is
+     * the DatabaseId.
+     */
+    @Override
+    DatabaseImpl databaseKeyToDatabaseImpl(Object databaseKey)
+        throws DatabaseException {
+
+        DatabaseId dbId = (DatabaseId) databaseKey;
+        return env.getDbTree().getDb(dbId);
+    }
+
+    /**
+     * Must release the database, since DbTree.getDb was called by
+     * databaseKeyToDatabaseImpl.
+     */
+    @Override
+    void releaseDatabaseImpl(DatabaseImpl db) {
+        env.getDbTree().releaseDb(db);
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/TrackedFileSummary.java b/src/com/sleepycat/je/cleaner/TrackedFileSummary.java
new file mode 100644
index 0000000000000000000000000000000000000000..54f5d5f5fb36474dd7b9b59c019097228945d0cd
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/TrackedFileSummary.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TrackedFileSummary.java,v 1.17.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * Delta file summary info for a tracked file.  Tracked files are managed by
+ * the UtilizationTracker.
+ *
+ * <p>The methods in this class for reading obsolete offsets may be used by
+ * multiple threads without synchronization even while another thread is adding
+ * offsets.  This is possible because elements are never deleted from the
+ * lists.  The thread adding obsolete offsets does so under the log write
+ * latch to prevent multiple threads from adding concurrently.</p>
+ */
+public class TrackedFileSummary extends FileSummary {
+
+    private BaseUtilizationTracker tracker;
+    private long fileNum;
+    private OffsetList obsoleteOffsets;
+    private int memSize;
+    private boolean trackDetail;
+    private boolean allowFlush = true;
+
+    /**
+     * Creates an empty tracked summary.
+     */
+    TrackedFileSummary(BaseUtilizationTracker tracker,
+                       long fileNum,
+                       boolean trackDetail) {
+        this.tracker = tracker;
+        this.fileNum = fileNum;
+        this.trackDetail = trackDetail;
+    }
+
+    /**
+     * Returns whether this summary is allowed to be flushed or evicted
+     * during cleaning.  By default, flushing is allowed.
+     */
+    public boolean getAllowFlush() {
+        return allowFlush;
+    }
+
+    /**
+     * Allows or prohibits this summary from being flushed or evicted during
+     * cleaning.  By default, flushing is allowed.
+     */
+    void setAllowFlush(boolean allowFlush) {
+        this.allowFlush = allowFlush;
+    }
+
+    /**
+     * Returns the file number being tracked.
+     */
+    public long getFileNumber() {
+        return fileNum;
+    }
+
+    /**
+     * Returns the total memory size for this object.  We only bother to budget
+     * obsolete detail, not the overhead for this object, for two reasons:
+     * 1) The number of these objects is very small, and 2) unit tests disable
+     * detail tracking as a way to prevent budget adjustments here.
+     */
+    int getMemorySize() {
+        return memSize;
+    }
+
+    /**
+     * Overrides reset for a tracked file, and is called when a FileSummaryLN
+     * is written to the log.
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    @Override
+    public void reset() {
+
+        obsoleteOffsets = null;
+
+        tracker.resetFile(this);
+
+        if (memSize > 0) {
+            updateMemoryBudget(0 - memSize);
+        }
+
+        super.reset();
+    }
+
+    /**
+     * Tracks the given offset as obsolete or non-obsolete.
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    void trackObsolete(long offset) {
+
+        if (!trackDetail) {
+            return;
+        }
+        int adjustMem = 0;
+        if (obsoleteOffsets == null) {
+            obsoleteOffsets = new OffsetList();
+            adjustMem += MemoryBudget.TFS_LIST_INITIAL_OVERHEAD;
+        }
+        if (obsoleteOffsets.add(offset, tracker.getEnvironment().isOpen())) {
+            adjustMem += MemoryBudget.TFS_LIST_SEGMENT_OVERHEAD;
+        }
+        if (adjustMem != 0) {
+            updateMemoryBudget(adjustMem);
+        }
+    }
+
+    /**
+     * Adds the obsolete offsets as well as the totals of the given object.
+     */
+    void addTrackedSummary(TrackedFileSummary other) {
+
+        /* Add the totals. */
+        add(other);
+
+        /*
+         * Add the offsets and the memory used [#15505] by the other tracker.
+         * The memory budget has already been updated for the offsets to be
+         * added, so we only need to account for a possible difference of one
+         * segment when we merge them.
+         */
+        memSize += other.memSize;
+        if (other.obsoleteOffsets != null) {
+            if (obsoleteOffsets != null) {
+                /* Merge the other offsets into our list. */
+                if (obsoleteOffsets.merge(other.obsoleteOffsets)) {
+                    /* There is one segment less as a result of the merge. */
+                    updateMemoryBudget
+                        (- MemoryBudget.TFS_LIST_SEGMENT_OVERHEAD);
+                }
+            } else {
+                /* Adopt the other's offsets as our own. */
+                obsoleteOffsets = other.obsoleteOffsets;
+            }
+        }
+    }
+
+    /**
+     * Returns obsolete offsets as an array of longs, or null if none.
+     */
+    public long[] getObsoleteOffsets() {
+
+        if (obsoleteOffsets != null) {
+            return obsoleteOffsets.toArray();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Returns whether the given offset is present in the tracked offsets.
+     * This does not indicate whether the offset is obsolete in general, but
+     * only if it is known to be obsolete in this version of the tracked
+     * information.
+     */
+    boolean containsObsoleteOffset(long offset) {
+
+        if (obsoleteOffsets != null) {
+            return obsoleteOffsets.contains(offset);
+        } else {
+            return false;
+        }
+    }
+
+    private void updateMemoryBudget(int delta) {
+        memSize += delta;
+        tracker.env.getMemoryBudget().updateAdminMemoryUsage(delta);
+    }
+
+    /**
+     * Updates memory budgets when this tracker is closed and will never be
+     * accessed again.
+     */
+    void close() {
+        tracker.env.getMemoryBudget().updateAdminMemoryUsage(-memSize);
+        memSize = 0;
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/UtilizationProfile.java b/src/com/sleepycat/je/cleaner/UtilizationProfile.java
new file mode 100644
index 0000000000000000000000000000000000000000..2fbeef1bef9e9cdf484211d9abad6649f4575965
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/UtilizationProfile.java
@@ -0,0 +1,1707 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: UtilizationProfile.java,v 1.84.2.5 2010/01/22 00:29:56 mark Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.StringTokenizer;
+import java.util.TreeMap;
+import java.util.logging.Level;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * The UP tracks utilization summary information for all log files.
+ *
+ * <p>Unlike the UtilizationTracker, the UP is not accessed under the log write
+ * latch and is instead synchronized on itself for protecting the cache.  It is
+ * not accessed during the primary data access path, except for when flushing
+ * (writing) file summary LNs.  This occurs in the following cases:
+ * <ol>
+ * <li>The summary information is flushed at the end of a checkpoint.  This
+ * allows tracking to occur in memory between checkpoints and to be replayed
+ * during recovery.</li>
+ * <li>When committing the truncateDatabase and removeDatabase operations, the
+ * summary information is flushed because detail tracking for those operations
+ * is not replayed during recovery.</li>
+ * <li>The evictor will ask the UtilizationTracker to flush the largest summary
+ * if the memory taken by the tracker exceeds its budget.</li>
+ * </ol>
+ *
+ * <p>The cache is populated by the RecoveryManager just before performing the
+ * initial checkpoint.  The UP must be open and populated in order to respond
+ * to requests to flush summaries and to evict tracked detail, even if the
+ * cleaner is disabled.</p>
+ *
+ * <p>WARNING: While synchronized on this object, eviction is not permitted.
+ * If it were, this could cause deadlocks because the order of locking would be
+ * the UP object and then the evictor.  During normal eviction the order is to
+ * first lock the evictor and then the UP, when evicting tracked detail.</p>
+ *
+ * <p>The methods in this class synchronize to protect the cached summary
+ * information.  Some methods also access the UP database.  However, because
+ * eviction must not occur while synchronized, UP database access is not
+ * performed while synchronized except in one case: when inserting a new
+ * summary record.  In that case we disallow eviction during the database
+ * operation.</p>
+ */
+public class UtilizationProfile implements EnvConfigObserver {
+
+    /*
+     * Note that age is a distance between files, not a number of files; that
+     * is, deleted files are counted in the age.
+     */
+    private EnvironmentImpl env;
+    private UtilizationTracker tracker;
+    private DatabaseImpl fileSummaryDb;
+    private SortedMap<Long,FileSummary> fileSummaryMap;
+    private boolean cachePopulated;
+    private boolean rmwFixEnabled;
+    private FilesToMigrate filesToMigrate;
+
+    /**
+     * Minimum overall utilization threshold that triggers cleaning.  Is
+     * non-private for unit tests.
+     */
+    int minUtilization;
+
+    /**
+     * Minimum utilization threshold for an individual log file that triggers
+     * cleaning.  Is non-private for unit tests.
+     */
+    int minFileUtilization;
+
+    /**
+     * Minimum age to qualify for cleaning.  If the first active LSN file is 5
+     * and the minimum age is 2, file 4 won't qualify but file 3 will.  Must be
+     * greater than zero because we never clean the first active LSN file.  Is
+     * non-private for unit tests.
+     */
+    int minAge;
+
+    /**
+     * Creates an empty UP.
+     */
+    public UtilizationProfile(EnvironmentImpl env,
+                              UtilizationTracker tracker)
+        throws DatabaseException {
+
+        this.env = env;
+        this.tracker = tracker;
+        fileSummaryMap = new TreeMap<Long,FileSummary>();
+        filesToMigrate = new FilesToMigrate();
+
+        rmwFixEnabled = env.getConfigManager().getBoolean
+            (EnvironmentParams.CLEANER_RMW_FIX);
+
+        /* Initialize mutable properties and register for notifications. */
+        envConfigUpdate(env.getConfigManager(), null);
+        env.addConfigObserver(this);
+    }
+
+    /**
+     * Process notifications of mutable property changes.
+     */
+    public void envConfigUpdate(DbConfigManager cm,
+                                EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        minAge = cm.getInt(EnvironmentParams.CLEANER_MIN_AGE);
+        minUtilization = cm.getInt(EnvironmentParams.CLEANER_MIN_UTILIZATION);
+        minFileUtilization = cm.getInt
+            (EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION);
+    }
+
+    /**
+     * @see EnvironmentParams#CLEANER_RMW_FIX
+     * @see FileSummaryLN#postFetchInit
+     */
+    public boolean isRMWFixEnabled() {
+        return rmwFixEnabled;
+    }
+
+    /**
+     * Returns the number of files in the profile.
+     */
+    synchronized int getNumberOfFiles()
+        throws DatabaseException {
+
+        return fileSummaryMap.size();
+    }
+
+    /**
+     * Returns an approximation of the total log size.  Used for stats.
+     */
+    long getTotalLogSize() {
+
+        /* Start with the size from the profile. */
+        long size = 0;
+        synchronized (this) {
+            for (FileSummary summary : fileSummaryMap.values()) {
+                size += summary.totalSize;
+            }
+        }
+
+        /*
+         * Add sizes that are known to the tracker but are not yet in the
+         * profile.  The FileSummary.totalSize field is the delta for new
+         * log entries added.  Typically the last log file is the only one
+         * that will have a delta, but earlier files may also not have been
+         * added to the profile yet.
+         */
+        for (TrackedFileSummary summary : tracker.getTrackedFiles()) {
+            size += summary.totalSize;
+        }
+
+        return size;
+    }
+
+    /**
+     * Returns the cheapest file to clean from the given list of files.  This
+     * method is used to select the first file to be cleaned in the batch of
+     * to-be-cleaned files.
+     */
+    synchronized Long getCheapestFileToClean(SortedSet<Long> files)
+        throws DatabaseException {
+
+        if (files.size() == 1) {
+            return files.first();
+        }
+
+        assert cachePopulated;
+
+        Long bestFile = null;
+        int bestCost = Integer.MAX_VALUE;
+
+        final SortedMap<Long,FileSummary> currentFileSummaryMap =
+            getFileSummaryMap(true /*includeTrackedFiles*/);
+
+        for (Iterator<Long> iter = files.iterator(); iter.hasNext();) {
+            Long file = iter.next();
+            FileSummary summary = currentFileSummaryMap.get(file);
+
+            /*
+             * Return this file if it no longer exists in the profile.  Deleted
+             * files should be selected ASAP to remove them from the backlog.
+             * [#18179] For details, see where FileProcessor.doClean handles
+             * LogFileNotFoundException.
+             */
+            if (summary == null) {
+                return file;
+            }
+
+            /* Calculate this file's cost to clean. */
+            int thisCost = summary.getNonObsoleteCount();
+
+            /* Select this file if it has the lowest cost so far. */
+            if (bestFile == null || thisCost < bestCost) {
+                bestFile = file;
+                bestCost = thisCost;
+            }
+        }
+
+        return bestFile;
+    }
+
+    /**
+     * Returns the best file that qualifies for cleaning, or null if no file
+     * qualifies.
+     *
+     * @param fileSelector is used to determine valid cleaning candidates.
+     *
+     * @param forceCleaning is true to always select a file, even if its
+     * utilization is above the minimum utilization threshold.
+     *
+     * @param lowUtilizationFiles is a returned set of files that are below the
+     * minimum utilization threshold.
+     */
+    synchronized Long getBestFileForCleaning(FileSelector fileSelector,
+                                             boolean forceCleaning,
+                                             Set<Long> lowUtilizationFiles,
+                                             boolean isBacklog)
+        throws DatabaseException {
+
+        /* Start with an empty set. */
+        if (lowUtilizationFiles != null) {
+            lowUtilizationFiles.clear();
+        }
+
+        assert cachePopulated;
+
+        /*
+         * Get all file summaries including tracked files.  Tracked files may
+         * be ready for cleaning if there is a large cache and many files have
+         * not yet been flushed and do not yet appear in the profile map.
+         */
+        SortedMap<Long,FileSummary> currentFileSummaryMap =
+            getFileSummaryMap(true /*includeTrackedFiles*/);
+
+        /* Paranoia.  There should always be at least 1 file. */
+        if (currentFileSummaryMap.size() == 0) {
+            return null;
+        }
+
+        /*
+         * Use local variables for mutable properties.  Using values that are
+         * changing during a single file selection pass would not produce a
+         * well defined result.
+         */
+        final int useMinUtilization = minUtilization;
+        final int useMinFileUtilization = minFileUtilization;
+        final int useMinAge = minAge;
+
+        /*
+         * Cleaning must refrain from rearranging the portion of the log
+         * processed at recovery time.  Do not clean a file greater than or
+         * equal to the first active file used in recovery, which is either
+         * the last log file or the file of the first active LSN in an active
+         * transaction, whichever is earlier.
+         *
+         * TxnManager.getFirstActiveLsn() (firstActiveTxnLsn below) is
+         * guaranteed to be earlier or equal to the first active LSN of the
+         * checkpoint that will be performed before deleting the selected log
+         * file.  By selecting a file prior to this point we ensure that we
+         * will not clean any entry that may be replayed by recovery.
+         *
+         * For example:
+         * 200 ckptA start, determines that ckpt's firstActiveLsn = 100
+         * 400 ckptA end
+         * 600 ckptB start, determines that ckpt's firstActiveLsn = 300
+         * 800 ckptB end
+         *
+         * Any cleaning that executes before ckpt A start will be constrained
+         * to files <= lsn 100, because it will have checked the TxnManager.
+         * If cleaning executes after ckptA start, it may indeed clean after
+         * ckptA's firstActiveLsn, but the cleaning run will wait until ckptB
+         * ends before deleting files.
+         */
+        long firstActiveFile = currentFileSummaryMap.lastKey().longValue();
+        long firstActiveTxnLsn = env.getTxnManager().getFirstActiveLsn();
+        if (firstActiveTxnLsn != DbLsn.NULL_LSN) {
+            long firstActiveTxnFile = DbLsn.getFileNumber(firstActiveTxnLsn);
+            if (firstActiveFile > firstActiveTxnFile) {
+                firstActiveFile = firstActiveTxnFile;
+            }
+        }
+
+        /*
+         * Note that minAge is at least one and may be configured to a higher
+         * value to prevent cleaning recently active files.
+         */
+        long lastFileToClean = firstActiveFile - useMinAge;
+
+        /* Calculate totals and find the best file. */
+        Iterator<Map.Entry<Long,FileSummary>> iter =
+            currentFileSummaryMap.entrySet().iterator();
+        Long bestFile = null;
+        int bestUtilization = 101;
+        long totalSize = 0;
+        long totalObsoleteSize = 0;
+
+        while (iter.hasNext()) {
+            Map.Entry<Long,FileSummary> entry = iter.next();
+            Long file = entry.getKey();
+            long fileNum = file.longValue();
+
+            /* Calculate this file's utilization. */
+            FileSummary summary = entry.getValue();
+            int obsoleteSize = summary.getObsoleteSize();
+
+            /*
+             * If the file is already being cleaned, only total the
+             * non-obsolete amount.  This is an optimistic prediction of the
+             * results of cleaning, and is used to prevent over-cleaning.
+             * Update the total obsolete size to include the utilization DB
+             * records that will be deleted when the log file is deleted.
+             */
+            if (fileSelector.isFileCleaningInProgress(file)) {
+                totalSize += summary.totalSize - obsoleteSize;
+                totalObsoleteSize += estimateUPObsoleteSize(summary);
+                continue;
+            }
+
+            /* Add this file's value to the totals. */
+            totalSize += summary.totalSize;
+            totalObsoleteSize += obsoleteSize;
+
+            /* Skip files that are too young to be cleaned. */
+            if (fileNum > lastFileToClean) {
+                continue;
+            }
+
+            /* Select this file if it has the lowest utilization so far. */
+            int thisUtilization = utilization(obsoleteSize, summary.totalSize);
+            if (bestFile == null || thisUtilization < bestUtilization) {
+                bestFile = file;
+                bestUtilization = thisUtilization;
+            }
+
+            /* Return all low utilization files. */
+            if (lowUtilizationFiles != null &&
+                thisUtilization < useMinUtilization) {
+                lowUtilizationFiles.add(file);
+            }
+        }
+
+        /*
+         * The first priority is to clean the log up to the minimum utilization
+         * level, so if we're below the minimum overall (or any individual
+         * file is below the per-file minimum), then we clean the lowest
+         * utilization
+         * (best) file.  Otherwise, if there are more files to migrate, we
+         * clean the next file to be migrated.  Otherwise, if cleaning is
+         * forced (for unit testing), we clean the lowest utilization file.
+         */
+        int totalUtilization = utilization(totalObsoleteSize, totalSize);
+        if (totalUtilization < useMinUtilization ||
+            bestUtilization < useMinFileUtilization) {
+            return bestFile;
+        } else if (!isBacklog && filesToMigrate.hasNext()) {
+            return filesToMigrate.next();
+        } else if (forceCleaning) {
+            return bestFile;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Calculate the utilization percentage.
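+     * For example, with totalSize=100 and obsoleteSize=25 the result is 75.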
+     */
+    public static int utilization(long obsoleteSize, long totalSize) {
+        if (totalSize != 0) {
+            return (int) (((totalSize - obsoleteSize) * 100) / totalSize);
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Estimate the log size that will be made obsolete when a log file is
+     * deleted and we delete its UP records.
+     *
+     * Note that we do not count the space taken by the deleted FileSummaryLN
+     * records written during log file deletion.  These add the same amount to
+     * the total log size and the obsolete log size, and therefore have a small
+     * impact on total utilization.
+     */
+    private int estimateUPObsoleteSize(FileSummary summary) {
+
+        /* Disabled for now; needs more testing. */
+        if (true) {
+            return 0;
+        }
+
+        /*
+         * FileSummaryLN overhead:
+         *  14 Header
+         *   8 Node
+         *   1 Deleted
+         *   4 Data Length (0)
+         *  32 Base Summary (8 X 4)
+         *   8 PackedOffsets size and length (2 * 4)
+         *   8 PackedOffsets first offset
+         */
+        final int OVERHEAD = 75;
+
+        /*
+         * Make an arbitrary estimate of the number of offsets per
+         * FileSummaryLN.  Then estimate the total byte size, assuming all
+         * short (2 byte) offsets.  Round up the total number of log entries.
+         */
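+        /*
+         * E.g. with hypothetical counts: 2500 total nodes yield 3 log
+         * entries of 2075 bytes each, for an estimate of 6225 bytes.
+         */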
+        int OFFSETS_PER_LN = 1000;
+        int BYTES_PER_LN = OVERHEAD + (OFFSETS_PER_LN * 2 /* Size of short */);
+        int totalNodes = summary.totalLNCount + summary.totalINCount;
+        int logEntries = (totalNodes / OFFSETS_PER_LN) + 1 /* Round up */;
+        return logEntries * BYTES_PER_LN;
+    }
+
+    /**
+     * Gets the base summary from the cached map.  Adds the tracked summary,
+     * if one exists, to the base summary.  Sets all entries obsolete if the
+     * file is in the migrateFiles set.
+     */
+    private synchronized FileSummary getFileSummary(Long file) {
+
+        /* Get base summary. */
+        FileSummary summary = fileSummaryMap.get(file);
+
+        /* Add tracked summary */
+        TrackedFileSummary trackedSummary = tracker.getTrackedFile(file);
+        if (trackedSummary != null) {
+            FileSummary totals = new FileSummary();
+            totals.add(summary);
+            totals.add(trackedSummary);
+            summary = totals;
+        }
+
+        return summary;
+    }
+
+    /**
+     * Count the given locally tracked info as obsolete and then log the file
+     * and database info.
+     */
+    public void flushLocalTracker(LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        /* Count tracked info under the log write latch. */
+        env.getLogManager().transferToUtilizationTracker(localTracker);
+
+        /* Write out the modified file and database info. */
+        flushFileUtilization(localTracker.getTrackedFiles());
+        flushDbUtilization(localTracker);
+    }
+
+    /**
+     * Flush a FileSummaryLN node for each TrackedFileSummary that is currently
+     * active in the given tracker.
+     */
+    public void flushFileUtilization(Collection<TrackedFileSummary>
+                                     activeFiles)
+        throws DatabaseException {
+
+        /* Utilization flushing may be disabled for unit tests. */
+        if (!DbInternal.getCheckpointUP
+            (env.getConfigManager().getEnvironmentConfig())) {
+            return;
+        }
+
+        /* Write out the modified file summaries. */
+        for (TrackedFileSummary activeFile : activeFiles) {
+            long fileNum = activeFile.getFileNumber();
+            TrackedFileSummary tfs = tracker.getTrackedFile(fileNum);
+            if (tfs != null) {
+                flushFileSummary(tfs);
+            }
+        }
+    }
+
+    /**
+     * Flush a MapLN for each database that has dirty utilization in the given
+     * tracker.
+     */
+    private void flushDbUtilization(LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        /* Utilization flushing may be disabled for unit tests. */
+        if (!DbInternal.getCheckpointUP
+            (env.getConfigManager().getEnvironmentConfig())) {
+            return;
+        }
+
+        /* Write out the modified MapLNs. */
+        Iterator<Object> dbs = localTracker.getTrackedDbs().iterator();
+        while (dbs.hasNext()) {
+            DatabaseImpl db = (DatabaseImpl) dbs.next();
+            if (!db.isDeleted() && db.isDirtyUtilization()) {
+                env.getDbTree().modifyDbRoot(db);
+            }
+        }
+    }
+
+    /**
+     * Returns a copy of the current file summary map, optionally including
+     * tracked summary information, for use by the DbSpace utility and by unit
+     * tests.  The returned map's key is a Long file number and its value is a
+     * FileSummary.
+     */
+    public synchronized SortedMap<Long,FileSummary> getFileSummaryMap(
+                                                boolean includeTrackedFiles)
+        throws DatabaseException {
+
+        assert cachePopulated;
+
+        if (includeTrackedFiles) {
+
+            /*
+             * Copy the fileSummaryMap to a new map, adding in the tracked
+             * summary information for each entry.
+             */
+            TreeMap<Long, FileSummary> map = new TreeMap<Long, FileSummary>();
+            for (Long file : fileSummaryMap.keySet()) {
+                FileSummary summary = getFileSummary(file);
+                map.put(file, summary);
+            }
+
+            /* Add tracked files that are not in fileSummaryMap yet. */
+            for (TrackedFileSummary summary : tracker.getTrackedFiles()) {
+                Long fileNum = Long.valueOf(summary.getFileNumber());
+                if (!map.containsKey(fileNum)) {
+                    map.put(fileNum, summary);
+                }
+            }
+            return map;
+        } else {
+            return new TreeMap<Long,FileSummary>(fileSummaryMap);
+        }
+    }
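+
+    /*
+     * A minimal usage sketch (hypothetical caller; assumes the
+     * FileSummary.getObsoleteSize accessor): per-file utilization can be
+     * reported by combining this map with the static utilization method
+     * above.
+     *
+     *   SortedMap<Long, FileSummary> map = profile.getFileSummaryMap(true);
+     *   for (Map.Entry<Long, FileSummary> entry : map.entrySet()) {
+     *       FileSummary fs = entry.getValue();
+     *       int pct = UtilizationProfile.utilization
+     *           (fs.getObsoleteSize(), fs.totalSize);
+     *       System.out.println("file 0x" +
+     *                          Long.toHexString(entry.getKey()) +
+     *                          " utilization=" + pct + "%");
+     *   }
+     */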
+
+    /**
+     * Clears the cache of file summary info.  The cache starts out unpopulated
+     * and is populated on the first call to getBestFileForCleaning.
+     */
+    public synchronized void clearCache() {
+
+        int memorySize = fileSummaryMap.size() *
+            MemoryBudget.UTILIZATION_PROFILE_ENTRY;
+        MemoryBudget mb = env.getMemoryBudget();
+        mb.updateAdminMemoryUsage(0 - memorySize);
+
+        fileSummaryMap = new TreeMap<Long,FileSummary>();
+        cachePopulated = false;
+    }
+
+    /**
+     * Removes a file from the utilization database and the profile, after it
+     * has been deleted by the cleaner.
+     */
+    void removeFile(Long fileNum, Set<DatabaseId> databases)
+        throws DatabaseException {
+
+        /* Synchronize to update the cache. */
+        synchronized (this) {
+            assert cachePopulated;
+
+            /* Remove from the cache. */
+            FileSummary oldSummary = fileSummaryMap.remove(fileNum);
+            if (oldSummary != null) {
+                MemoryBudget mb = env.getMemoryBudget();
+                mb.updateAdminMemoryUsage
+                    (0 - MemoryBudget.UTILIZATION_PROFILE_ENTRY);
+            }
+        }
+
+        /* Do not synchronize during LN deletion, to permit eviction. */
+        deleteFileSummary(fileNum, databases);
+    }
+
+    /**
+     * Deletes all FileSummaryLNs for the file and updates all MapLNs to remove
+     * the DbFileSummary for the file.  This method performs eviction and is
+     * not synchronized.
+     */
+    private void deleteFileSummary(final Long fileNum,
+                                   Set<DatabaseId> databases)
+        throws DatabaseException {
+
+        /*
+         * Update the MapLNs before deleting FileSummaryLNs in case there is an
+         * error during this process.  If a FileSummaryLN exists, we will redo
+         * this process during the next recovery (populateCache).
+         */
+        final LogManager logManager = env.getLogManager();
+        final DbTree dbTree = env.getDbTree();
+        /* Only call logMapTreeRoot once for ID and NAME DBs. */
+        DatabaseImpl idDatabase = dbTree.getDb(DbTree.ID_DB_ID);
+        DatabaseImpl nameDatabase = dbTree.getDb(DbTree.NAME_DB_ID);
+        boolean logRoot = false;
+        if (logManager.removeDbFileSummary(idDatabase, fileNum)) {
+            logRoot = true;
+        }
+        if (logManager.removeDbFileSummary(nameDatabase, fileNum)) {
+            logRoot = true;
+        }
+        if (logRoot) {
+            env.logMapTreeRoot();
+        }
+        /* Use DB ID set if available to avoid full scan of ID DB. */
+        if (databases != null) {
+            for (DatabaseId dbId : databases) {
+                if (!dbId.equals(DbTree.ID_DB_ID) &&
+                    !dbId.equals(DbTree.NAME_DB_ID)) {
+                    DatabaseImpl db = dbTree.getDb(dbId);
+                    try {
+                        if (db != null &&
+                            logManager.removeDbFileSummary(db, fileNum)) {
+                            dbTree.modifyDbRoot(db);
+                        }
+                    } finally {
+                        dbTree.releaseDb(db);
+                    }
+                }
+            }
+        } else {
+
+            /*
+             * Use LockType.NONE for traversing the ID DB so that a lock is not
+             * held when calling modifyDbRoot, which must release locks to
+             * handle deadlocks.
+             */
+            CursorImpl.traverseDbWithCursor(idDatabase,
+                                            LockType.NONE,
+                                            true /*allowEviction*/,
+                                            new CursorImpl.WithCursor() {
+                public boolean withCursor(CursorImpl cursor,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data)
+                    throws DatabaseException {
+
+                    MapLN mapLN = (MapLN) cursor.getCurrentLN(LockType.NONE);
+                    if (mapLN != null) {
+                        DatabaseImpl db = mapLN.getDatabase();
+                        if (logManager.removeDbFileSummary(db, fileNum)) {
+
+                            /*
+                             * Because we're using dirty-read, silently do
+                             * nothing if the DB does not exist
+                             * (mustExist=false).
+                             */
+                            dbTree.modifyDbRoot
+                                (db, DbLsn.NULL_LSN /*ifBeforeLsn*/,
+                                 false /*mustExist*/);
+                        }
+                    }
+                    return true;
+                }
+            });
+        }
+
+        /* Now delete all FileSummaryLNs. */
+        Locker locker = null;
+        CursorImpl cursor = null;
+        boolean clearedTrackedFile = false;
+        try {
+            locker = BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                                   true /*noAPIReadLock*/);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+            /* Perform eviction in unsynchronized methods. */
+            cursor.setAllowEviction(true);
+
+            DatabaseEntry keyEntry = new DatabaseEntry();
+            DatabaseEntry dataEntry = new DatabaseEntry();
+            long fileNumVal = fileNum.longValue();
+
+            /* Search by file number. */
+            OperationStatus status = OperationStatus.SUCCESS;
+            if (!getFirstFSLN
+                (cursor, fileNumVal, keyEntry, dataEntry, LockType.WRITE)) {
+                status = OperationStatus.NOTFOUND;
+            }
+
+            /* Delete all LNs for this file number. */
+            while (status == OperationStatus.SUCCESS) {
+
+                /* Perform eviction once per operation. */
+                env.getEvictor().doCriticalEviction(true); // backgroundIO
+
+                FileSummaryLN ln = (FileSummaryLN)
+                    cursor.getCurrentLN(LockType.NONE);
+
+                if (ln != null) {
+                    /* Stop if the file number changes. */
+                    if (fileNumVal != ln.getFileNumber(keyEntry.getData())) {
+                        break;
+                    }
+
+                    TrackedFileSummary tfs =
+                        tracker.getTrackedFile(fileNumVal);
+                    /* Associate the tracked summary so it will be cleared. */
+                    if (tfs != null) {
+                        ln.setTrackedSummary(tfs);
+                        clearedTrackedFile = true;
+                    }
+
+                    /*
+                     * Do not evict after deleting since the compressor would
+                     * have to fetch it again.
+                     */
+                    cursor.latchBIN();
+                    cursor.delete(ReplicationContext.NO_REPLICATE);
+                }
+
+                status = cursor.getNext
+                    (keyEntry, dataEntry, LockType.WRITE,
+                     true,    // forward
+                     false);  // alreadyLatched
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.releaseBINs();
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+
+        /*
+         * If LN.setTrackedSummary was not called above, the file will not be
+         * removed from the UtilizationTracker.  This can happen if a file is
+         * resurrected in the tracker after being cleaned, deleted and removed
+         * from the profile.  We'll end up here because FileProcessor.doClean
+         * calls removeFile when it gets a LogFileNotFoundException.  For this
+         * case we explicitly remove the file from the tracker below. [#16928]
+         */
+        if (!clearedTrackedFile) {
+            TrackedFileSummary tfs = tracker.getTrackedFile(fileNum);
+            if (tfs != null) {
+                env.getLogManager().removeTrackedFile(tfs);
+            }
+        }
+    }
+
+    /**
+     * Updates and stores the FileSummary for a given tracked file, if flushing
+     * of the summary is allowed.
+     */
+    public void flushFileSummary(TrackedFileSummary tfs)
+        throws DatabaseException {
+
+        if (tfs.getAllowFlush()) {
+            putFileSummary(tfs);
+        }
+    }
+
+    /**
+     * Updates and stores the FileSummary for a given tracked file.  This
+     * method is synchronized and may not perform eviction.
+     */
+    private synchronized PackedOffsets putFileSummary(TrackedFileSummary tfs)
+        throws DatabaseException {
+
+        if (env.isReadOnly()) {
+            throw new DatabaseException
+                ("Cannot write file summary in a read-only environment");
+        }
+
+        if (tfs.isEmpty()) {
+            return null; // no delta
+        }
+
+        if (!cachePopulated) {
+            /* Db does not exist and this is a read-only environment. */
+            return null;
+        }
+
+        long fileNum = tfs.getFileNumber();
+        Long fileNumLong = Long.valueOf(fileNum);
+
+        /* Get existing file summary or create an empty one. */
+        FileSummary summary = fileSummaryMap.get(fileNumLong);
+        if (summary == null) {
+
+            /*
+             * An obsolete node may have been counted after its file was
+             * deleted, for example, when compressing a BIN.  Do not insert a
+             * new profile record if no corresponding log file exists.  But if
+             * the file number is greater than the last known file, this is a
+             * new file that has been buffered but not yet flushed to disk; in
+             * that case we should insert a new profile record.
+             */
+            if (!fileSummaryMap.isEmpty() &&
+                fileNum < fileSummaryMap.lastKey() &&
+                !env.getFileManager().isFileValid(fileNum)) {
+
+                /*
+                 * File was deleted by the cleaner.  Remove it from the
+                 * UtilizationTracker and return.  Note that a file is normally
+                 * removed from the tracker by FileSummaryLN.writeToLog method
+                 * when it is called via insertFileSummary below. [#15512]
+                 */
+                env.getLogManager().removeTrackedFile(tfs);
+                return null;
+            }
+
+            summary = new FileSummary();
+        }
+
+        /*
+         * The key discriminator is a sequence that must be increasing over the
+         * life of the file.  We use the sum of all entries counted.  We must
+         * add the tracked and current summaries here to calculate the key.
+         */
+        FileSummary tmp = new FileSummary();
+        tmp.add(summary);
+        tmp.add(tfs);
+        int sequence = tmp.getEntriesCounted();
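+
+        /*
+         * For example (hypothetical counts): if the stored summary has
+         * counted 1000 entries and the tracked delta adds 250 more, the
+         * sequence is 1250; because entry counts only grow, the sequence is
+         * increasing over the life of the file, as the key requires.
+         */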
+
+        /* Insert an LN with the existing and tracked summary info. */
+        FileSummaryLN ln = new FileSummaryLN(env, summary);
+        ln.setTrackedSummary(tfs);
+        insertFileSummary(ln, fileNum, sequence);
+
+        /* Cache the updated summary object.  */
+        summary = ln.getBaseSummary();
+        if (fileSummaryMap.put(fileNumLong, summary) == null) {
+            MemoryBudget mb = env.getMemoryBudget();
+            mb.updateAdminMemoryUsage
+                (MemoryBudget.UTILIZATION_PROFILE_ENTRY);
+        }
+
+        return ln.getObsoleteOffsets();
+    }
+
+    /**
+     * Returns the stored/packed obsolete offsets and the tracked obsolete
+     * offsets for the given file.  The tracked summary object returned can be
+     * used to test for obsolete offsets that are being added during cleaning
+     * by other threads participating in lazy migration.  The caller must call
+     * TrackedFileSummary.setAllowFlush(true) when cleaning is complete.
+     * This method performs eviction and is not synchronized.
+     * @param logUpdate if true, log any updates to the utilization profile. If
+     * false, only retrieve the new information.
+     */
+    TrackedFileSummary getObsoleteDetail(Long fileNum,
+                                         PackedOffsets packedOffsets,
+                                         boolean logUpdate)
+        throws DatabaseException {
+
+        /* Return if no detail is being tracked. */
+        if (!env.getCleaner().trackDetail) {
+            return null;
+        }
+
+        assert cachePopulated;
+
+        long fileNumVal = fileNum.longValue();
+        List<long[]> list = new ArrayList<long[]>();
+
+        /*
+         * Get an unflushable summary that will remain valid for the duration
+         * of file cleaning.
+         */
+        TrackedFileSummary tfs =
+            env.getLogManager().getUnflushableTrackedSummary(fileNumVal);
+
+        /* Read the summary db. */
+        Locker locker = null;
+        CursorImpl cursor = null;
+        try {
+            locker = BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                                   true /*noAPIReadLock*/);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+            /* Perform eviction in unsynchronized methods. */
+            cursor.setAllowEviction(true);
+
+            DatabaseEntry keyEntry = new DatabaseEntry();
+            DatabaseEntry dataEntry = new DatabaseEntry();
+
+            /* Search by file number. */
+            OperationStatus status = OperationStatus.SUCCESS;
+            if (!getFirstFSLN
+                (cursor, fileNumVal, keyEntry, dataEntry, LockType.NONE)) {
+                status = OperationStatus.NOTFOUND;
+            }
+
+            /* Read all LNs for this file number. */
+            while (status == OperationStatus.SUCCESS) {
+
+                /* Perform eviction once per operation. */
+                env.getEvictor().doCriticalEviction(true); // backgroundIO
+
+                FileSummaryLN ln = (FileSummaryLN)
+                    cursor.getCurrentLN(LockType.NONE);
+                if (ln != null) {
+                    /* Stop if the file number changes. */
+                    if (fileNumVal != ln.getFileNumber(keyEntry.getData())) {
+                        break;
+                    }
+
+                    PackedOffsets offsets = ln.getObsoleteOffsets();
+                    if (offsets != null) {
+                        list.add(offsets.toArray());
+                    }
+
+                    /* Always evict after using a file summary LN. */
+                    cursor.evict();
+                }
+
+                status = cursor.getNext
+                    (keyEntry, dataEntry, LockType.NONE,
+                     true,    // forward
+                     false);  // alreadyLatched
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.releaseBINs();
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+
+        /*
+         * Write out tracked detail, if any, and add its offsets to the list.
+         */
+        if (!tfs.isEmpty()) {
+            PackedOffsets offsets = null;
+            if (logUpdate) {
+                offsets = putFileSummary(tfs);
+                if (offsets != null) {
+                    list.add(offsets.toArray());
+                }
+            } else {
+                long[] offsetList = tfs.getObsoleteOffsets();
+                if (offsetList != null) {
+                    list.add(offsetList);
+                }
+            }
+        }
+
+        /* Merge all offsets into a single array and pack the result. */
+        int size = 0;
+        for (int i = 0; i < list.size(); i += 1) {
+            long[] a = list.get(i);
+            size += a.length;
+        }
+        long[] offsets = new long[size];
+        int index = 0;
+        for (int i = 0; i < list.size(); i += 1) {
+            long[] a = list.get(i);
+            System.arraycopy(a, 0, offsets, index, a.length);
+            index += a.length;
+        }
+        assert index == offsets.length;
+
+        packedOffsets.pack(offsets);
+
+        return tfs;
+    }
+
+    /**
+     * Populate the profile for file selection.  This method performs eviction
+     * and is not synchronized.  It must be called before recovery is complete
+     * so that synchronization is unnecessary.  It must be called before the
+     * recovery checkpoint so that the checkpoint can flush file summary
+     * information.
+     */
+    public boolean populateCache()
+        throws DatabaseException {
+
+        assert !cachePopulated;
+
+        /* Open the file summary db on first use. */
+        if (!openFileSummaryDatabase()) {
+            /* Db does not exist and this is a read-only environment. */
+            return false;
+        }
+
+        int oldMemorySize = fileSummaryMap.size() *
+            MemoryBudget.UTILIZATION_PROFILE_ENTRY;
+
+        /*
+         * It is possible to have an undeleted FileSummaryLN in the database
+         * for a deleted log file if we crash after deleting a file but before
+         * deleting the FileSummaryLN.  Iterate through all FileSummaryLNs and
+         * add them to the cache if their corresponding log file exists.  But
+         * delete those records that have no corresponding log file.
+         */
+        Long[] existingFiles = env.getFileManager().getAllFileNumbers();
+        Locker locker = null;
+        CursorImpl cursor = null;
+        try {
+            locker = BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                                   true /*noAPIReadLock*/);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+            /* Perform eviction in unsynchronized methods. */
+            cursor.setAllowEviction(true);
+
+            DatabaseEntry keyEntry = new DatabaseEntry();
+            DatabaseEntry dataEntry = new DatabaseEntry();
+
+            if (cursor.positionFirstOrLast(true, null)) {
+
+                /* Retrieve the first record. */
+                OperationStatus status =
+                    cursor.getCurrentAlreadyLatched(keyEntry, dataEntry,
+                                                    LockType.NONE, true);
+                if (status != OperationStatus.SUCCESS) {
+                    /* The record we're pointing at may be deleted. */
+                    status = cursor.getNext(keyEntry, dataEntry, LockType.NONE,
+                                            true,   // go forward
+                                            false); // do need to latch
+                }
+
+                while (status == OperationStatus.SUCCESS) {
+
+                    /*
+                     * Perform eviction once per operation.  Pass false for
+                     * backgroundIO because this is done during recovery and
+                     * there is no reason to sleep.
+                     */
+                    env.getEvictor().doCriticalEviction(false); // backgroundIO
+
+                    FileSummaryLN ln = (FileSummaryLN)
+                        cursor.getCurrentLN(LockType.NONE);
+
+                    if (ln == null) {
+                        /* Advance past a cleaned record. */
+                        status = cursor.getNext
+                            (keyEntry, dataEntry, LockType.NONE,
+                             true,   // go forward
+                             false); // do need to latch
+                        continue;
+                    }
+
+                    byte[] keyBytes = keyEntry.getData();
+                    boolean isOldVersion = ln.hasStringKey(keyBytes);
+                    long fileNum = ln.getFileNumber(keyBytes);
+                    Long fileNumLong = Long.valueOf(fileNum);
+
+                    if (Arrays.binarySearch(existingFiles, fileNumLong) >= 0) {
+
+                        /* File exists, cache the FileSummaryLN. */
+                        FileSummary summary = ln.getBaseSummary();
+                        fileSummaryMap.put(fileNumLong, summary);
+
+                        /*
+                         * Update old version records to the new version.  A
+                         * zero sequence number is used to distinguish the
+                         * converted records and to ensure that later records
+                         * will have a greater sequence number.
+                         */
+                        if (isOldVersion && !env.isReadOnly()) {
+                            insertFileSummary(ln, fileNum, 0);
+                            cursor.latchBIN();
+                            cursor.delete(ReplicationContext.NO_REPLICATE);
+                        } else {
+                            /* Always evict after using a file summary LN. */
+                            cursor.evict();
+                        }
+                    } else {
+
+                        /*
+                         * File does not exist, remove the summary from the map
+                         * and delete all FileSummaryLN records.
+                         */
+                        fileSummaryMap.remove(fileNumLong);
+
+                        if (!env.isReadOnly()) {
+                            if (isOldVersion) {
+                                cursor.latchBIN();
+                                cursor.delete(ReplicationContext.NO_REPLICATE);
+                            } else {
+                                deleteFileSummary(fileNumLong,
+                                                  null /*databases*/);
+                            }
+                        }
+
+                        /*
+                         * Do not evict after deleting since the compressor
+                         * would have to fetch it again.
+                         */
+                    }
+
+                    /* Go on to the next entry. */
+                    if (isOldVersion) {
+
+                        /* Advance past the single old version record. */
+                        status = cursor.getNext
+                            (keyEntry, dataEntry, LockType.NONE,
+                             true,   // go forward
+                             false); // do need to latch
+                    } else {
+
+                        /*
+                         * Skip over other records for this file by adding one
+                         * to the file number and doing a range search.
+                         */
+                        if (!getFirstFSLN
+                            (cursor,
+                             fileNum + 1,
+                             keyEntry, dataEntry,
+                             LockType.NONE)) {
+                            status = OperationStatus.NOTFOUND;
+                        }
+                    }
+                }
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.releaseBINs();
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+
+            int newMemorySize = fileSummaryMap.size() *
+                MemoryBudget.UTILIZATION_PROFILE_ENTRY;
+            MemoryBudget mb = env.getMemoryBudget();
+            mb.updateAdminMemoryUsage(newMemorySize - oldMemorySize);
+        }
+
+        cachePopulated = true;
+        return true;
+    }
+
+    /**
+     * Positions at the most recent LN for the given file number.
+     */
+    private boolean getFirstFSLN(CursorImpl cursor,
+                                 long fileNum,
+                                 DatabaseEntry keyEntry,
+                                 DatabaseEntry dataEntry,
+                                 LockType lockType)
+        throws DatabaseException {
+
+        byte[] keyBytes = FileSummaryLN.makePartialKey(fileNum);
+        keyEntry.setData(keyBytes);
+
+        int result = cursor.searchAndPosition(keyEntry,
+                                              dataEntry,
+                                              SearchMode.SET_RANGE,
+                                              lockType);
+        if ((result & CursorImpl.FOUND) == 0) {
+            return false;
+        }
+
+        boolean exactKeyMatch = ((result & CursorImpl.EXACT_KEY) != 0);
+
+        if (exactKeyMatch &&
+            cursor.getCurrentAlreadyLatched
+                 (keyEntry, dataEntry, lockType, true) !=
+                    OperationStatus.KEYEMPTY) {
+            return true;
+        }
+
+        /* Always evict after using a file summary LN. */
+        cursor.evict(!exactKeyMatch); // alreadyLatched
+
+        OperationStatus status = cursor.getNext
+            (keyEntry, dataEntry, lockType,
+             true,             // forward
+             !exactKeyMatch);  // alreadyLatched
+
+        return status == OperationStatus.SUCCESS;
+    }
+
+    /**
+     * If the file summary db is already open, return, otherwise attempt to
+     * open it.  If the environment is read-only and the database doesn't
+     * exist, return false.  If the environment is read-write the database will
+     * be created if it doesn't exist.
+     */
+    private boolean openFileSummaryDatabase()
+        throws DatabaseException {
+
+        if (fileSummaryDb != null) {
+            return true;
+        }
+        DbTree dbTree = env.getDbTree();
+        Locker autoTxn = null;
+        boolean operationOk = false;
+        try {
+            autoTxn = Txn.createAutoTxn(env, new TransactionConfig(),
+                                        true, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+
+            /*
+             * releaseDb is not called after this getDb or createDb because we
+             * want to prohibit eviction of this database until the environment
+             * is closed.
+             */
+            DatabaseImpl db = dbTree.getDb
+                (autoTxn, DbTree.UTILIZATION_DB_NAME, null);
+            if (db == null) {
+                if (env.isReadOnly()) {
+                    return false;
+                }
+                db = dbTree.createInternalDb
+                    (autoTxn, DbTree.UTILIZATION_DB_NAME,
+                     new DatabaseConfig());
+            }
+            fileSummaryDb = db;
+            operationOk = true;
+            return true;
+        } finally {
+            if (autoTxn != null) {
+                autoTxn.operationEnd(operationOk);
+            }
+        }
+    }
+
+    /**
+     * For unit testing.
+     */
+    public DatabaseImpl getFileSummaryDb() {
+        return fileSummaryDb;
+    }
+
+    /**
+     * Insert the given LN with the given key values.  This method is
+     * synchronized and may not perform eviction.
+     * 
+     * Is public only for unit testing.
+     */
+    public synchronized boolean insertFileSummary(FileSummaryLN ln,
+                                                  long fileNum,
+                                                  int sequence)
+        throws DatabaseException {
+
+        byte[] keyBytes = FileSummaryLN.makeFullKey(fileNum, sequence);
+
+        Locker locker = null;
+        CursorImpl cursor = null;
+        try {
+            locker = BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                                   true /*noAPIReadLock*/);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+
+            /* Insert the LN. */
+            OperationStatus status = cursor.putLN
+                (keyBytes,
+                 ln,
+                 false, // allowDuplicates
+                 ReplicationContext.NO_REPLICATE);
+
+            if (status == OperationStatus.KEYEXIST) {
+                env.getLogger().log
+                    (Level.SEVERE,
+                     "Cleaner duplicate key sequence file=0x" +
+                     Long.toHexString(fileNum) + " sequence=0x" +
+                     Long.toHexString(sequence));
+                return false;
+            }
+
+            /* Account for FileSummaryLN's extra marshaled memory. [#17462] */
+            BIN bin = cursor.latchBIN();
+            ln.addExtraMarshaledMemorySize(bin);
+            cursor.releaseBIN();
+
+            /* Always evict after using a file summary LN. */
+            cursor.evict();
+            return true;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+    }
+
+    /**
+     * Checks that all FSLN offsets are indeed obsolete.  Assumes that the
+     * system is quiescent (does not lock LNs).  This method is not
+     * synchronized (because it doesn't access fileSummaryMap) and eviction is
+     * allowed.
+     *
+     * @return true if no verification failures.
+     */
+    public boolean verifyFileSummaryDatabase()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        openFileSummaryDatabase();
+        Locker locker = null;
+        CursorImpl cursor = null;
+        boolean ok = true;
+
+        try {
+            locker = BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                                   true /*noAPIReadLock*/);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+            cursor.setAllowEviction(true);
+
+            if (cursor.positionFirstOrLast(true, null)) {
+
+                OperationStatus status = cursor.getCurrentAlreadyLatched
+                    (key, data, LockType.NONE, true);
+
+                /* Iterate over all file summary lns. */
+                while (status == OperationStatus.SUCCESS) {
+
+                    /* Perform eviction once per operation. */
+                    env.getEvictor().doCriticalEviction(true); // backgroundIO
+
+                    FileSummaryLN ln = (FileSummaryLN)
+                        cursor.getCurrentLN(LockType.NONE);
+
+                    if (ln != null) {
+                        long fileNumVal = ln.getFileNumber(key.getData());
+                        PackedOffsets offsets = ln.getObsoleteOffsets();
+
+                        /*
+                         * Check every offset in the FSLN to make sure it's
+                         * truly obsolete.
+                         */
+                        if (offsets != null) {
+                            long[] vals = offsets.toArray();
+                            for (int i = 0; i < vals.length; i++) {
+                                long lsn = DbLsn.makeLsn(fileNumVal, vals[i]);
+                                if (!verifyLsnIsObsolete(lsn)) {
+                                    ok = false;
+                                }
+                            }
+                        }
+
+                        cursor.evict();
+                    }
+
+                    /*
+                     * Advance unconditionally so that a null (cleaned)
+                     * record cannot cause an infinite loop.
+                     */
+                    status = cursor.getNext(key, data, LockType.NONE,
+                                            true,   // forward
+                                            false); // already latched
+                }
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+
+        return ok;
+    }
+
+    /*
+     * Return true if the LN at this lsn is obsolete.
+     */
+    private boolean verifyLsnIsObsolete(long lsn)
+        throws DatabaseException {
+
+        /* Read the whole entry out of the log. */
+        Object o = env.getLogManager().getLogEntry(lsn);
+        if (!(o instanceof LNLogEntry)) {
+            return true;
+        }
+        LNLogEntry entry = (LNLogEntry)o;
+
+        /* All deleted LNs are obsolete. */
+        if (entry.getLN().isDeleted()) {
+            return true;
+        }
+
+        /* Find the owning database. */
+        DatabaseId dbId = entry.getDbId();
+        DatabaseImpl db = env.getDbTree().getDb(dbId);
+
+        /*
+         * Search down to the bottom most level for the parent of this LN.
+         */
+        BIN bin = null;
+        try {
+
+            /*
+             * The whole database is gone, so this LN is obsolete. No need
+             * to worry about delete cleanup; this is just verification and
+             * no cleaning is done.
+             */
+            if (db == null || db.isDeleted()) {
+                return true;
+            }
+
+            Tree tree = db.getTree();
+            TreeLocation location = new TreeLocation();
+            boolean parentFound = tree.getParentBINForChildLN
+                (location,
+                 entry.getKey(),
+                 entry.getDupKey(),
+                 entry.getLN(),
+                 false,  // splitsAllowed
+                 true,   // findDeletedEntries
+                 false,  // searchDupTree ???
+                 CacheMode.UNCHANGED);
+            bin = location.bin;
+            int index = location.index;
+
+            /* Is bin latched ? */
+            if (!parentFound) {
+                return true;
+            }
+
+            /*
+             * Now we're at the parent for this LN, whether BIN, DBIN or DIN.
+             * If knownDeleted, LN is deleted and can be purged.
+             */
+            if (bin.isEntryKnownDeleted(index)) {
+                return true;
+            }
+
+            if (bin.getLsn(index) != lsn) {
+                return true;
+            }
+
+            /*
+             * This LSN is in the tree, so it is not obsolete.  Should this
+             * print, or trace?
+             */
+            System.err.println("lsn " + DbLsn.getNoFormatString(lsn) +
+                               " was found in tree.");
+            return false;
+        } finally {
+            env.getDbTree().releaseDb(db);
+            if (bin != null) {
+                bin.releaseLatch();
+            }
+        }
+    }
+
+    /**
+     * Update memory budgets when this profile is closed and will never be
+     * accessed again.
+     */
+    void close() {
+        clearCache();
+        if (fileSummaryDb != null) {
+            fileSummaryDb.releaseTreeAdminMemory();
+        }
+    }
+
+    /**
+     * Iterator over files that should be migrated by cleaning them, even if
+     * they don't need to be cleaned for other reasons.
+     *
+     * Files are migrated either because they are named in the
+     * CLEANER_FORCE_CLEAN_FILES parameter or their log version is prior to the
+     * CLEANER_UPGRADE_TO_LOG_VERSION parameter.
+     *
+     * An iterator is used rather than finding the entire set at startup to
+     * avoid opening a large number of files to examine their log version.  For
+     * example, if all files are being migrated in a very large data set, this
+     * would involve opening a very large number of files in order to read
+     * their header.  This could significantly delay application startup.
+     *
+     * Because we don't have the entire set at startup, we can't select the
+     * lowest utilization file from the set to clean next.  Instead we iterate
+     * in file number order to increase the odds of cleaning lower utilization
+     * files first.
+     */
+    private class FilesToMigrate {
+
+        /**
+         * An array of pairs of file numbers, where each pair is a range of
+         * files to be force cleaned.  Index i is the from value and i+1 is the
+         * to value, both inclusive.
+         */
+        private long[] forceCleanFiles;
+
+        /** Log version to upgrade to, or zero if none. */
+        private int upgradeToVersion;
+
+        /** Whether to continue checking the log version. */
+        private boolean checkLogVersion;
+
+        /** Whether hasNext() has prepared a valid nextFile. */
+        private boolean nextAvailable;
+
+        /** File to return; set by hasNext() and returned by next(). */
+        private long nextFile;
+
+        FilesToMigrate()
+            throws DatabaseException {
+
+            String forceCleanProp = env.getConfigManager().get
+                (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES);
+            parseForceCleanFiles(forceCleanProp);
+
+            upgradeToVersion = env.getConfigManager().getInt
+                (EnvironmentParams.CLEANER_UPGRADE_TO_LOG_VERSION);
+            if (upgradeToVersion == -1) {
+                upgradeToVersion = LogEntryType.LOG_VERSION;
+            }
+
+            checkLogVersion = (upgradeToVersion != 0);
+            nextAvailable = false;
+            nextFile = -1;
+        }
+
+        /**
+         * Returns whether there are more files to be migrated.  Must be called
+         * while synchronized on the UtilizationProfile.
+         */
+        boolean hasNext()
+            throws DatabaseException {
+
+            if (nextAvailable) {
+                /* hasNext() has returned true since the last next(). */
+                return true;
+            }
+            long foundFile = -1;
+            for (long file : fileSummaryMap.tailMap(nextFile + 1).keySet()) {
+                if (isForceCleanFile(file)) {
+                    /* Found a file to force clean. */
+                    foundFile = file;
+                    break;
+                } else if (checkLogVersion) {
+                    try {
+                        int logVersion =
+                            env.getFileManager().getFileLogVersion(file);
+                        if (logVersion < upgradeToVersion) {
+                            /* Found a file to migrate. */
+                            foundFile = file;
+                            break;
+                        } else {
+
+                            /*
+                             * All following files have a log version greater
+                             * or equal to this one; stop checking.
+                             */
+                            checkLogVersion = false;
+                        }
+                    } catch (DatabaseException e) {
+                        /* Throw exception but allow iterator to continue. */
+                        nextFile = file;
+                        throw e;
+                    }
+                }
+            }
+            if (foundFile != -1) {
+                nextFile = foundFile;
+                nextAvailable = true;
+                return true;
+            } else {
+                return false;
+            }
+        }
+
+        /**
+         * Returns the next file to be migrated.  Must be called while
+         * synchronized on the UtilizationProfile.
+         */
+        long next()
+            throws NoSuchElementException, DatabaseException {
+
+            if (hasNext()) {
+                nextAvailable = false;
+                return nextFile;
+            } else {
+                throw new NoSuchElementException();
+            }
+        }
+
+        /**
+         * Returns whether the given file is in the forceCleanFiles set.
+         */
+        private boolean isForceCleanFile(long file) {
+
+            if (forceCleanFiles != null) {
+                for (int i = 0; i < forceCleanFiles.length; i += 2) {
+                    long from = forceCleanFiles[i];
+                    long to = forceCleanFiles[i + 1];
+                    if (file >= from && file <= to) {
+                        return true;
+                    }
+                }
+            }
+            return false;
+        }
+
+        /**
+         * Parses the je.cleaner.forceCleanFiles property value and initializes
+         * the forceCleanFiles field.
+         */
+        private void parseForceCleanFiles(String propValue)
+            throws IllegalArgumentException {
+
+            if (propValue == null || propValue.length() == 0) {
+                forceCleanFiles = null;
+            } else {
+                String errPrefix = "Error in " +
+                    EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName() +
+                    "=" + propValue + ": ";
+
+                StringTokenizer tokens = new StringTokenizer
+                    (propValue, ",-", true /*returnDelims*/);
+
+                /* Resulting list of Long file numbers. */
+                List<Long> list = new ArrayList<Long>();
+
+                while (tokens.hasMoreTokens()) {
+
+                    /* Get "from" file number. */
+                    String fromStr = tokens.nextToken();
+                    long fromNum;
+                    try {
+                        fromNum = Long.parseLong(fromStr, 16);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException
+                            (errPrefix + "Invalid hex file number: " +
+                             fromStr);
+                    }
+
+                    long toNum = -1;
+                    if (tokens.hasMoreTokens()) {
+
+                        /* Get delimiter. */
+                        String delim = tokens.nextToken();
+                        if (",".equals(delim)) {
+                            toNum = fromNum;
+                        } else if ("-".equals(delim)) {
+
+                            /* Get "to" file number. */
+                            if (tokens.hasMoreTokens()) {
+                                String toStr = tokens.nextToken();
+                                try {
+                                    toNum = Long.parseLong(toStr, 16);
+                                } catch (NumberFormatException e) {
+                                    throw new IllegalArgumentException
+                                        (errPrefix +
+                                         "Invalid hex file number: " +
+                                         toStr);
+                                }
+
+                                /*
+                                 * Consume the "," that may follow a range,
+                                 * so that the next token is a file number;
+                                 * otherwise "a-b,c" would fail when the ","
+                                 * is read as a file number.
+                                 */
+                                if (tokens.hasMoreTokens()) {
+                                    String sep = tokens.nextToken();
+                                    if (!",".equals(sep)) {
+                                        throw new IllegalArgumentException
+                                            (errPrefix + "Expected ',': " +
+                                             sep);
+                                    }
+                                }
+                            } else {
+                                throw new IllegalArgumentException
+                                    (errPrefix + "Expected file number: " +
+                                     delim);
+                            }
+                        } else {
+                            throw new IllegalArgumentException
+                                (errPrefix + "Expected '-' or ',': " + delim);
+                        }
+                    } else {
+                        toNum = fromNum;
+                    }
+
+                    assert toNum != -1;
+                    list.add(Long.valueOf(fromNum));
+                    list.add(Long.valueOf(toNum));
+                }
+
+                forceCleanFiles = new long[list.size()];
+                for (int i = 0; i < forceCleanFiles.length; i += 1) {
+                    forceCleanFiles[i] = list.get(i).longValue();
+                }
+            }
+        }
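+
+        /*
+         * Example (illustrative values): the property "3,7-9,b" parses to
+         * forceCleanFiles == { 0x3, 0x3, 0x7, 0x9, 0xb, 0xb }, i.e. file
+         * 0x3, files 0x7 through 0x9, and file 0xb are force cleaned.  File
+         * numbers are hex; "," separates entries and "-" denotes a range.
+         */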
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/UtilizationTracker.java b/src/com/sleepycat/je/cleaner/UtilizationTracker.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0a5f848d4ba41ab506ab07410d1420971979cbd
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/UtilizationTracker.java
@@ -0,0 +1,202 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: UtilizationTracker.java,v 1.27.2.3 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * Tracks changes to the utilization profile since the last checkpoint.  This
+ * is the "global" tracker for an environment that tracks changes as they
+ * occur in live operations.  Other "local" tracker classes are used to count
+ * utilization locally and then later transfer the information to this global
+ * tracker.
+ *
+ * <p>All changes to this object must occur under the log write latch.  It is
+ * possible to read tracked info without holding the latch.  This is done by
+ * the cleaner when selecting a file and by the checkpointer when determining
+ * what FileSummaryLNs need to be written.  To read tracked info outside the
+ * log write latch, call getTrackedFile or getTrackedFiles.  activateCleaner
+ * can also be called outside the latch.</p>
+ */
+public class UtilizationTracker extends BaseUtilizationTracker {
+
+    /**
+     * Creates an empty tracker.  The cleaner field of the environment object
+     * must be initialized before using this constructor.
+     */
+    public UtilizationTracker(EnvironmentImpl env)
+        throws DatabaseException {
+
+        super(env, env.getCleaner());
+    }
+
+    /**
+     * Constructor used by the cleaner constructor, prior to setting the
+     * cleaner field of the environment.
+     */
+    UtilizationTracker(EnvironmentImpl env, Cleaner cleaner)
+        throws DatabaseException {
+
+        super(env, cleaner);
+    }
+
+    @Override
+    public EnvironmentImpl getEnvironment() {
+        return env;
+    }
+
+    /**
+     * Evicts tracked detail if the budget for the tracker is exceeded.  Evicts
+     * only one file summary LN at most to keep eviction batches small.
+     * Returns the number of bytes freed.
+     *
+     * <p>When flushFileSummary is called, the TrackedFileSummary is cleared
+     * via its reset method, which is called by FileSummaryLN.writeToLog.  This
+     * is how memory is subtracted from the budget.</p>
+     */
+    public long evictMemory()
+        throws DatabaseException {
+
+        /* If not tracking detail, there is nothing to evict. */
+        if (!cleaner.trackDetail) {
+            return 0;
+        }
+
+        /*
+         * Do not start eviction until after recovery, since the
+         * UtilizationProfile will not be initialized properly.  UP
+         * initialization requires that all LNs have been replayed.
+         */
+        if (!env.isOpen()) {
+            return 0;
+        }
+
+        MemoryBudget mb = env.getMemoryBudget();
+        long totalEvicted = 0;
+        long totalBytes = 0;
+        int largestBytes = 0;
+        TrackedFileSummary bestFile = null;
+
+        for (TrackedFileSummary tfs : getTrackedFiles()) {
+            int mem = tfs.getMemorySize();
+            totalBytes += mem;
+            if (mem > largestBytes && tfs.getAllowFlush()) {
+                largestBytes = mem;
+                bestFile = tfs;
+            }
+        }
+
+        if (bestFile != null && totalBytes > mb.getTrackerBudget()) {
+            env.getUtilizationProfile().flushFileSummary(bestFile);
+            totalEvicted += largestBytes;
+        }
+        return totalEvicted;
+    }
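+
+    /*
+     * Sketch of the policy above (hypothetical numbers): if three files
+     * track 10 KB, 30 KB and 20 KB of detail and the tracker budget is
+     * 50 KB, the 30 KB summary is flushed, since it is the largest
+     * flushable one; its reset then subtracts that memory from the budget.
+     */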
+
+    /**
+     * Wake up the cleaner thread and reset the log byte counter.
+     */
+    public void activateCleaner() {
+        env.getCleaner().wakeup();
+        bytesSinceActivate = 0;
+    }
+
+    /**
+     * Counts the addition of all new log entries including LNs, and returns
+     * whether the cleaner should be woken.
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    public boolean countNewLogEntry(long lsn,
+                                    LogEntryType type,
+                                    int size,
+                                    DatabaseImpl db) {
+        return countNew(lsn, db, type, size);
+    }
+
+    /**
+     * Counts a node that has become obsolete and tracks the LSN offset, if
+     * non-zero, to avoid a lookup during cleaning.
+     *
+     * <p>A zero LSN offset is used as a special value when obsolete offset
+     * tracking is not desired. [#15365]  The file header entry (at offset
+     * zero) is never counted as obsolete; it is assumed to be obsolete by the
+     * cleaner.</p>
+     *
+     * <p>This method should only be called for LNs and INs (i.e., only for
+     * nodes).  If type is null we assume it is an LN.</p>
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    public void countObsoleteNode(long lsn,
+                                  LogEntryType type,
+                                  int size,
+                                  DatabaseImpl db) {
+        countObsolete
+            (lsn, db, type, size,
+             true,   // countPerFile
+             true,   // countPerDb
+             true);  // trackOffset
+    }
+
+    /**
+     * Counts as countObsoleteNode does, but since the LSN may be inexact, does
+     * not track the obsolete LSN offset.
+     *
+     * <p>This method should only be called for LNs and INs (i.e., only for
+     * nodes).  If type is null we assume it is an LN.</p>
+     *
+     * <p>Must be called under the log write latch.</p>
+     */
+    public void countObsoleteNodeInexact(long lsn,
+                                         LogEntryType type,
+                                         int size,
+                                         DatabaseImpl db) {
+        countObsolete
+            (lsn, db, type, size,
+             true,   // countPerFile
+             true,   // countPerDb
+             false); // trackOffset
+    }
+
+    /**
+     * Returns a tracked summary for the given file which will not be flushed.
+     * Used for watching changes that occur while a file is being cleaned.
+     */
+    public TrackedFileSummary getUnflushableTrackedSummary(long fileNum)
+        throws DatabaseException {
+
+        TrackedFileSummary file = getFileSummary(fileNum);
+        file.setAllowFlush(false);
+        return file;
+    }
+
+    /**
+     * Allocates DbFileSummary information in the DatabaseImpl, which is the
+     * database key.
+     *
+     * <p>Must be called under the log write latch.</p>
+     *
+     * @return the summary, or null if the DB should not be tracked because
+     * the file has been deleted, or null if the databaseKey param is null.
+     */
+    DbFileSummary getDbFileSummary(Object databaseKey, long fileNum) {
+        DatabaseImpl db = (DatabaseImpl) databaseKey;
+        if (db != null) {
+            return db.getDbFileSummary
+                (Long.valueOf(fileNum), true /*willModify*/);
+        } else {
+            return null;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/cleaner/VerifyUtils.java b/src/com/sleepycat/je/cleaner/VerifyUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..e71d2dccafcda3f91c2082c6d5a224661806f3a4
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/VerifyUtils.java
@@ -0,0 +1,403 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2006,2008 Oracle.  All rights reserved.
+ *
+ * $Id: VerifyUtils.java,v 1.17 2008/05/13 01:44:49 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.UtilizationFileReader;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Verify cleaner data structures.
+ */
+public class VerifyUtils {
+
+    private static final boolean DEBUG = false;
+
+    /**
+     * Compare the LSNs referenced by a given Database to the LSNs held
+     * in the utilization profile.  Assumes that the database and
+     * environment are quiescent, and that there is no current cleaner
+     * activity.
+     */
+    public static void checkLsns(Database db)
+        throws DatabaseException {
+
+        checkLsns(DbInternal.dbGetDatabaseImpl(db), System.out);
+    }
+
+    /**
+     * Compare the LSNs referenced by a given Database to the LSNs held
+     * in the utilization profile.  Assumes that the database and
+     * environment are quiescent, and that there is no current cleaner
+     * activity.
+     */
+    public static void checkLsns(DatabaseImpl dbImpl,
+                                 PrintStream out)
+        throws DatabaseException {
+
+        /* Get all the LSNs in the database. */
+        GatherLSNs gatherLsns = new GatherLSNs();
+        long rootLsn = dbImpl.getTree().getRootLsn();
+        List<DatabaseException> savedExceptions = 
+            new ArrayList<DatabaseException>();
+
+        SortedLSNTreeWalker walker =
+            new SortedLSNTreeWalker(dbImpl,
+                                    false, // don't set db state
+                                    rootLsn,
+                                    gatherLsns,
+                                    savedExceptions,
+                                    null);
+        walker.walk();
+
+        /* Print out any exceptions seen during the walk. */
+        if (savedExceptions.size() > 0) {
+            out.println(savedExceptions.size() +
+                        " problems seen during tree walk for checkLsns");
+            Iterator<DatabaseException> iter = savedExceptions.iterator();
+            while (iter.hasNext()) {
+                out.println("  " + iter.next());
+            }
+        }
+
+        Set<Long> lsnsInTree = gatherLsns.getLsns();
+        if (rootLsn != DbLsn.NULL_LSN) {
+            lsnsInTree.add(rootLsn);
+        }
+
+        /* Get all the files used by this database. */
+        Iterator<Long> iter = lsnsInTree.iterator();
+        Set<Long> fileNums = new HashSet<Long>();
+
+        while (iter.hasNext()) {
+            long lsn = iter.next();
+            fileNums.add(DbLsn.getFileNumber(lsn));
+        }
+
+        /* Gather up the obsolete LSNs in these file summary LNs. */
+        iter = fileNums.iterator();
+        Set<Long> obsoleteLsns = new HashSet<Long>();
+        UtilizationProfile profile =
+            dbImpl.getDbEnvironment().getUtilizationProfile();
+
+        while (iter.hasNext()) {
+            Long fileNum = iter.next();
+
+            PackedOffsets obsoleteOffsets = new PackedOffsets();
+            @SuppressWarnings("unused")
+            TrackedFileSummary tfs =
+                profile.getObsoleteDetail(fileNum,
+                                          obsoleteOffsets,
+                                          false /* logUpdate */);
+            PackedOffsets.Iterator obsoleteIter = obsoleteOffsets.iterator();
+            while (obsoleteIter.hasNext()) {
+                long offset = obsoleteIter.next();
+                Long oneLsn =
+                    Long.valueOf(DbLsn.makeLsn(fileNum.longValue(), offset));
+                obsoleteLsns.add(oneLsn);
+                if (DEBUG) {
+                    out.println("Adding 0x" +
+                                Long.toHexString(oneLsn.longValue()));
+                }
+            }
+        }
+
+        /* Check that none of the LSNs in the tree is in the UP. */
+        boolean error = false;
+        iter = lsnsInTree.iterator();
+        while (iter.hasNext()) {
+            Long lsn = iter.next();
+            if (obsoleteLsns.contains(lsn)) {
+                out.println("Obsolete LSN set contains valid LSN " +
+                            DbLsn.getNoFormatString(lsn.longValue()));
+                error = true;
+            }
+        }
+
+        /*
+         * Check that none of the obsolete LSNs in the file summary LNs is
+         * in the tree.
+         */
+        iter = obsoleteLsns.iterator();
+        while (iter.hasNext()) {
+            Long lsn = iter.next();
+            if (lsnsInTree.contains(lsn)) {
+                out.println("Tree contains obsolete LSN " +
+                            DbLsn.getNoFormatString(lsn.longValue()));
+                error = true;
+            }
+        }
+
+        if (error) {
+            throw new DatabaseException("Lsn mismatch");
+        }
+
+        if (savedExceptions.size() > 0) {
+            throw new DatabaseException("Sorted LSN Walk problem");
+        }
+    }
+
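+    /*
+     * A minimal usage sketch, assuming a quiescent environment with no
+     * cleaner activity (env and dbConfig are hypothetical handles):
+     *
+     *   Database db = env.openDatabase(null, "testDb", dbConfig);
+     *   // ... load data, checkpoint, let cleaning finish ...
+     *   VerifyUtils.checkLsns(db);  // throws DatabaseException on mismatch
+     */
+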
+    private static class GatherLSNs implements TreeNodeProcessor {
+        private Set<Long> lsns = new HashSet<Long>();
+
+        public void processLSN(long childLSN,
+                               LogEntryType childType,
+                               Node ignore,
+                               byte[] ignore2)
+            throws DatabaseException {
+
+            lsns.add(childLSN);
+        }
+
+        /* ignore */
+        public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey)
+            throws DatabaseException {
+        }
+
+        /* ignore */
+        public void processDupCount(int ignore) {
+        }
+
+        public Set<Long> getLsns() {
+            return lsns;
+        }
+    }
+
+    /**
+     * Compare utilization as calculated by UtilizationProfile to utilization
+     * as calculated by UtilizationFileReader.  Also check that per-database
+     * and per-file utilization match.
+     * @throws IllegalStateException if there are mismatches
+     */
+    public static void verifyUtilization(EnvironmentImpl envImpl,
+                                         boolean expectAccurateObsoleteLNCount,
+                                         boolean expectAccurateObsoleteLNSize,
+                                         boolean expectAccurateDbUtilization)
+        throws DatabaseException {
+
+        Map<Long,FileSummary> profileMap = envImpl.getCleaner()
+            .getUtilizationProfile()
+            .getFileSummaryMap(true);
+
+        /* Flush the log before reading. */
+        envImpl.getLogManager().flushNoSync();
+
+        /* Create per-file map of recalculated utilization info. */
+        Map<Long,FileSummary> recalcMap;
+        try {
+            recalcMap = UtilizationFileReader.calcFileSummaryMap(envImpl);
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+        /* Create per-file map derived from per-database utilization. */
+        Map<Long,DbFileSummary> dbDerivedMap = null;
+        if (expectAccurateDbUtilization) {
+            dbDerivedMap = calcDbDerivedUtilization(envImpl);
+        }
+
+        /*
+         * Loop through each file in the per-file profile, checking it against
+         * the recalculated map and database derived maps.
+         */
+        Iterator<Map.Entry<Long,FileSummary>> i = 
+            profileMap.entrySet().iterator();
+        while (i.hasNext()) {
+            Map.Entry<Long,FileSummary> entry = i.next();
+            Long file = entry.getKey();
+            String fileStr = file.toString();
+            FileSummary profileSummary = entry.getValue();
+            FileSummary recalcSummary = recalcMap.remove(file);
+            check(fileStr, recalcSummary != null);
+            /*
+            if (expectAccurateObsoleteLNCount &&
+                profileSummary.obsoleteLNCount !=
+                recalcSummary.obsoleteLNCount) {
+                System.out.println("file=" + file);
+                System.out.println("profile=" + profileSummary);
+                System.out.println("recalc=" + recalcSummary);
+            }
+            //*/
+            check(fileStr, 
+                  recalcSummary.totalCount == profileSummary.totalCount);
+            check(fileStr, 
+                  recalcSummary.totalSize == profileSummary.totalSize);
+            check(fileStr, 
+                  recalcSummary.totalINCount == profileSummary.totalINCount);
+            check(fileStr, 
+                  recalcSummary.totalINSize == profileSummary.totalINSize);
+            check(fileStr, 
+                  recalcSummary.totalLNCount == profileSummary.totalLNCount);
+            check(fileStr, 
+                  recalcSummary.totalLNSize == profileSummary.totalLNSize);
+
+            /*
+             * Currently we cannot verify obsolete INs because
+             * UtilizationFileReader does not count them accurately.
+             */
+            if (false) {
+                check(fileStr,
+                      recalcSummary.obsoleteINCount == 
+                      profileSummary.obsoleteINCount);
+            }
+
+            /*
+             * The obsolete LN count/size is inaccurate when a deleted LN is
+             * not counted properly by recovery because its parent INs were
+             * flushed and the obsolete LN was not found in the tree.
+             */
+            if (expectAccurateObsoleteLNCount) {
+                check(fileStr,
+                      recalcSummary.obsoleteLNCount ==
+                      profileSummary.obsoleteLNCount);
+
+                /*
+                 * The obsolete LN size is inaccurate when a tree walk is
+                 * performed for truncate/remove or an abortLsn is counted by
+                 * recovery.
+                 */
+                if (expectAccurateObsoleteLNSize) {
+                    check(fileStr,
+                          recalcSummary.getObsoleteLNSize() ==
+                          profileSummary.obsoleteLNSize);
+                }
+            }
+
+            /*
+             * The per-database and per-file info normally match.  They do
+             * not match, and expectAccurateDbUtilization is passed as false,
+             * when we have truncated or removed a database, since that
+             * database's information is now gone.
+             */
+            if (expectAccurateDbUtilization) {
+                DbFileSummary dbSummary =
+                    (DbFileSummary) dbDerivedMap.remove(file);
+                if (dbSummary == null) {
+                    dbSummary = new DbFileSummary();
+                }
+                check(fileStr,
+                      profileSummary.totalINCount == dbSummary.totalINCount);
+                check(fileStr,
+                      profileSummary.totalLNCount == dbSummary.totalLNCount);
+                check(fileStr,
+                      profileSummary.totalINSize == dbSummary.totalINSize);
+                check(fileStr,
+                      profileSummary.totalLNSize == dbSummary.totalLNSize);
+                check(fileStr,
+                      profileSummary.obsoleteINCount ==
+                      dbSummary.obsoleteINCount);
+                if (expectAccurateObsoleteLNCount) {
+                    check(fileStr,
+                          profileSummary.obsoleteLNCount ==
+                          dbSummary.obsoleteLNCount);
+                    if (expectAccurateObsoleteLNSize) {
+                        check(fileStr,
+                              profileSummary.obsoleteLNSize ==
+                              dbSummary.obsoleteLNSize);
+                        check(fileStr,
+                              profileSummary.obsoleteLNSizeCounted ==
+                              dbSummary.obsoleteLNSizeCounted);
+                    }
+                }
+            }
+        }
+        check(recalcMap.toString(), recalcMap.isEmpty());
+        if (expectAccurateDbUtilization) {
+            check(dbDerivedMap.toString(), dbDerivedMap.isEmpty());
+        }
+    }
+
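+    /*
+     * A minimal usage sketch, assuming fully accurate obsolete counting is
+     * expected (envImpl is obtained via the internal API, e.g. through
+     * DbInternal):
+     *
+     *   VerifyUtils.verifyUtilization(envImpl,
+     *                                 true,   // expectAccurateObsoleteLNCount
+     *                                 true,   // expectAccurateObsoleteLNSize
+     *                                 true);  // expectAccurateDbUtilization
+     *   // throws IllegalStateException on any per-file mismatch
+     */
+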
+    private static void check(String errorMessage, boolean checkIsTrue) {
+        if (!checkIsTrue) {
+            throw new IllegalStateException(errorMessage);
+        }
+    }
+
+    /**
+     * Adds up the per-file totals from the utilization information for each
+     * database to make a total per-file count.
+     *
+     * @return aggregation of per-file information.
+     */
+    private static Map<Long,DbFileSummary> calcDbDerivedUtilization
+                                               (EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        final Map<Long,DbFileSummary> grandTotalsMap =
+            new HashMap<Long,DbFileSummary>();
+
+        DbTree dbTree = envImpl.getDbTree();
+
+        /* Add in the special id and name database. */
+        addDbDerivedTotals(dbTree.getDb(DbTree.ID_DB_ID), grandTotalsMap);
+        addDbDerivedTotals(dbTree.getDb(DbTree.NAME_DB_ID), grandTotalsMap);
+
+        /* Walk through all the regular databases. */
+        CursorImpl.traverseDbWithCursor(dbTree.getDb(DbTree.ID_DB_ID),
+                                        LockType.NONE,
+                                        true /*allowEviction*/,
+                                        new CursorImpl.WithCursor() {
+            public boolean withCursor(CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                MapLN mapLN = (MapLN) cursor.getCurrentLN(LockType.NONE);
+                addDbDerivedTotals(mapLN.getDatabase(), grandTotalsMap);
+                return true;
+            }
+        });
+        return grandTotalsMap;
+    }
+
+    /**
+     * Walk through the DbFileSummaryMap associated with a single database and
+     * aggregate all the per-file, per-database information into a single
+     * per-file grandTotals map.
+     */
+    private static void addDbDerivedTotals
+        (DatabaseImpl dbImpl,
+         Map<Long,DbFileSummary> grandTotalsMap) {
+        Iterator<Map.Entry<Long,DbFileSummary>> entries = 
+            dbImpl.getDbFileSummaries().entrySet().iterator();
+        while (entries.hasNext()) {
+            Map.Entry<Long,DbFileSummary> entry = entries.next();
+            Long fileNum = entry.getKey();
+            DbFileSummary dbTotals = entry.getValue();
+            DbFileSummary grandTotals = grandTotalsMap.get(fileNum);
+            if (grandTotals == null) {
+                grandTotals = new DbFileSummary();
+                grandTotalsMap.put(fileNum, grandTotals);
+            }
+            grandTotals.add(dbTotals);
+        }
+    }
+
+}
diff --git a/src/com/sleepycat/je/config/BooleanConfigParam.java b/src/com/sleepycat/je/config/BooleanConfigParam.java
new file mode 100644
index 0000000000000000000000000000000000000000..a81511c65d1343e89f1cd2422c2438a737209eb7
--- /dev/null
+++ b/src/com/sleepycat/je/config/BooleanConfigParam.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BooleanConfigParam.java,v 1.31.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a boolean value.
+ */
+public class BooleanConfigParam extends ConfigParam {
+
+    private static final String DEBUG_NAME =
+        BooleanConfigParam.class.getName();
+
+    /**
+     * Set a boolean parameter with a default value.
+     * @param configName the name of the parameter
+     * @param defaultValue the default setting
+     * @param mutable true if the parameter may be changed at runtime
+     * @param forReplication true if param is for replication
+     */
+    BooleanConfigParam(String configName,
+                       boolean defaultValue,
+                       boolean mutable,
+                       boolean forReplication) {
+        // defaultValue must not be null
+        super(configName,
+              Boolean.valueOf(defaultValue).toString(),
+              mutable,
+              forReplication);
+    }
+
+    /**
+     * Make sure that value is a valid string for booleans.
+     */
+    @Override
+    public void validateValue(String value)
+        throws IllegalArgumentException {
+
+        if (!value.trim().equalsIgnoreCase(Boolean.FALSE.toString()) &&
+            !value.trim().equalsIgnoreCase(Boolean.TRUE.toString())) {
+            throw new IllegalArgumentException
+                (DEBUG_NAME + ": " + value + " not valid boolean " + name);
+        }
+    }
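+
+    /*
+     * Behavior sketch for validateValue() above, which accepts only "true"
+     * or "false", ignoring case and surrounding whitespace:
+     *
+     *   param.validateValue(" TRUE ");  // ok
+     *   param.validateValue("yes");     // throws IllegalArgumentException
+     */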
+}
diff --git a/src/com/sleepycat/je/config/ConfigParam.java b/src/com/sleepycat/je/config/ConfigParam.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e8b86d6806e489b8fb615ce587e10459be8b4c3
--- /dev/null
+++ b/src/com/sleepycat/je/config/ConfigParam.java
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ConfigParam.java,v 1.32 2008/06/10 02:52:09 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A ConfigParam embodies the metadata about a JE configuration parameter:
+ * the parameter name, default value, and a validation method.
+ *
+ * Validation can be done in the scope of this parameter, or as a function of
+ * other parameters.
+ */
+public class ConfigParam {
+
+    protected String name;
+    private String defaultValue;
+    private boolean mutable;
+    private boolean forReplication;
+    private boolean isMultiValueParam;
+
+    /*
+     * Create a String parameter.
+     */
+    public ConfigParam(String configName,
+                       String configDefault,
+                       boolean mutable,
+                       boolean forReplication)
+        throws IllegalArgumentException {
+
+        if (configName == null) {
+            name = null;
+        } else {
+
+            /*
+             * For Multi-Value params (i.e. those whose names end with ".#"),
+             * strip the .# off the end of the name before storing and flag it
+             * with isMultiValueParam=true.
+             */
+            int mvFlagIdx = configName.indexOf(".#");
+            if (mvFlagIdx < 0) {
+                name = configName;
+                isMultiValueParam = false;
+            } else {
+                name = configName.substring(0, mvFlagIdx);
+                isMultiValueParam = true;
+            }
+        }
+
+        defaultValue = configDefault;
+        this.mutable = mutable;
+        this.forReplication = forReplication;
+
+        /* Check that the name and default value are valid */
+        validateName(configName);
+        validateValue(configDefault);
+
+        /* Add it to the list of supported environment parameters. */
+        EnvironmentParams.addSupportedParam(this);
+    }
+
+    /*
+     * Return the parameter name of a multi-value parameter.  e.g.
+     * "je.rep.remote.address.foo" => "je.rep.remote.address"
+     */
+    public static String multiValueParamName(String paramName) {
+        int mvParamIdx = paramName.lastIndexOf('.');
+        if (mvParamIdx < 0) {
+            return null;
+        }
+        return paramName.substring(0, mvParamIdx);
+    }
+
+    /*
+     * Return the label of a multi-value parameter.  e.g.
+     * "je.rep.remote.address.foo" => "foo".
+     */
+    public static String mvParamIndex(String paramName) {
+
+        int mvParamIdx = paramName.lastIndexOf('.');
+        return paramName.substring(mvParamIdx + 1);
+    }
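+
+    /*
+     * A small sketch of how the two helpers above decompose a multi-value
+     * parameter name:
+     *
+     *   String p = "je.rep.remote.address.foo";
+     *   ConfigParam.multiValueParamName(p);  // "je.rep.remote.address"
+     *   ConfigParam.mvParamIndex(p);         // "foo"
+     */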
+
+    public String getName() {
+        return name;
+    }
+
+    public String getDefault() {
+        return defaultValue;
+    }
+
+    public boolean isMutable() {
+        return mutable;
+    }
+
+    public boolean isForReplication() {
+        return forReplication;
+    }
+
+    public void setForReplication(boolean forReplication) {
+        this.forReplication = forReplication;
+    }
+
+    public boolean isMultiValueParam() {
+        return isMultiValueParam;
+    }
+
+    /**
+     * Validate yourself.
+     */
+    public void validate()
+        throws IllegalArgumentException {
+
+        validateName(name);
+        validateValue(defaultValue);
+    }
+
+    /*
+     * A param name can't be null or 0 length
+     */
+    private void validateName(String name)
+        throws IllegalArgumentException {
+
+        if ((name == null) || (name.length() < 1)) {
+            throw new IllegalArgumentException(" A configuration parameter" +
+                                               " name can't be null or 0" +
+                                               " length");
+        }
+    }
+
+    /*
+     * Validate your value. (No default validation for strings.)
+     * May be overridden for (e.g.) Multi-value params.
+     */
+    public void validateValue(String value)
+        throws IllegalArgumentException {
+
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/config/EnvironmentParams.java b/src/com/sleepycat/je/config/EnvironmentParams.java
new file mode 100644
index 0000000000000000000000000000000000000000..dfa46c902428519a6a4f6426d53ef771a8a210d2
--- /dev/null
+++ b/src/com/sleepycat/je/config/EnvironmentParams.java
@@ -0,0 +1,876 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentParams.java,v 1.108.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Static metadata for all JE environment configuration parameters,
+ * including the registry of supported parameter names.
+ */
+public class EnvironmentParams {
+
+    /*
+     * The map of supported environment parameters where the key is parameter
+     * name and the data is the configuration parameter object. Put first,
+     * before any declarations of ConfigParams.
+     */
+    public final static Map<String,ConfigParam> SUPPORTED_PARAMS = 
+        new HashMap<String,ConfigParam>();
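+
+    /*
+     * Each ConfigParam constructor registers itself here via
+     * addSupportedParam(this), which is why this map must be initialized
+     * before any of the parameter declarations below.  A minimal sketch of
+     * that registration, assuming it simply stores the param by name:
+     *
+     *   public static void addSupportedParam(ConfigParam param) {
+     *       SUPPORTED_PARAMS.put(param.getName(), param);
+     *   }
+     */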
+
+    /*
+     * Only environment parameters that are part of the public API are
+     * represented by String constants in EnvironmentConfig.
+     */
+    public static final LongConfigParam MAX_MEMORY =
+        new LongConfigParam(EnvironmentConfig.MAX_MEMORY,
+                            null,           // min
+                            null,           // max
+                            Long.valueOf(0),// default uses je.maxMemoryPercent
+                            true,           // mutable
+                            false);         // forReplication
+
+    public static final IntConfigParam MAX_MEMORY_PERCENT =
+        new IntConfigParam(EnvironmentConfig.MAX_MEMORY_PERCENT,
+                           Integer.valueOf(1),  // min
+                           Integer.valueOf(90), // max
+                           Integer.valueOf(60), // default
+                           true,                // mutable
+                           false);              // forReplication
+
+    public static final BooleanConfigParam ENV_SHARED_CACHE =
+        new BooleanConfigParam(EnvironmentConfig.SHARED_CACHE,
+                               false,         // default
+                               false,         // mutable
+                               false);        // forReplication
+
+    /**
+     * Used by utilities, not exposed in the API.
+     *
+     * If true, an environment is created with recovery and the related daemon
+     * threads are enabled.
+     */
+    public static final BooleanConfigParam ENV_RECOVERY =
+        new BooleanConfigParam("je.env.recovery",
+                               true,          // default
+                               false,         // mutable
+                               false);        // forReplication
+
+    public static final BooleanConfigParam ENV_RECOVERY_FORCE_CHECKPOINT =
+        new BooleanConfigParam(EnvironmentConfig.ENV_RECOVERY_FORCE_CHECKPOINT,
+                               false,         // default
+                               false,         // mutable
+                               false);        // forReplication
+
+    public static final BooleanConfigParam ENV_RUN_INCOMPRESSOR =
+        new BooleanConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR,
+                               true,          // default
+                               true,          // mutable
+                               false);        // forReplication
+
+    /**
+     * As of 2.0, eviction is performed in-line.
+     *
+     * If true, starts up the evictor.  This parameter is false by default.
+     */
+    public static final BooleanConfigParam ENV_RUN_EVICTOR =
+        new BooleanConfigParam("je.env.runEvictor",
+                               false,        // default
+                               true,         // mutable
+                               false);       // forReplication
+
+    public static final BooleanConfigParam ENV_RUN_CHECKPOINTER =
+        new BooleanConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER,
+                               true,        // default
+                               true,        // mutable
+                               false);      // forReplication
+
+    public static final BooleanConfigParam ENV_RUN_CLEANER =
+        new BooleanConfigParam(EnvironmentConfig.ENV_RUN_CLEANER,
+                               true,        // default
+                               true,        // mutable
+                               false);      // forReplication
+
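+    /*
+     * A minimal sketch of how applications set these parameters by name
+     * through the public API (config is a hypothetical handle):
+     *
+     *   EnvironmentConfig config = new EnvironmentConfig();
+     *   config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
+     *   // mutable params may later be changed via EnvironmentMutableConfig
+     */
+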
+    public static final IntConfigParam ENV_BACKGROUND_READ_LIMIT =
+        new IntConfigParam(EnvironmentConfig.ENV_BACKGROUND_READ_LIMIT,
+                            Integer.valueOf(0),                 // min
+                            Integer.valueOf(Integer.MAX_VALUE), // max
+                            Integer.valueOf(0),                 // default
+                            true,                           // mutable
+                            false);                         // forReplication
+
+    public static final IntConfigParam ENV_BACKGROUND_WRITE_LIMIT =
+        new IntConfigParam(EnvironmentConfig.ENV_BACKGROUND_WRITE_LIMIT,
+                            Integer.valueOf(0),                 // min
+                            Integer.valueOf(Integer.MAX_VALUE), // max
+                            Integer.valueOf(0),                 // default
+                            true,                           // mutable
+                            false);                         // forReplication
+
+    public static final IntConfigParam ENV_LOCKOUT_TIMEOUT =
+        new IntConfigParam(EnvironmentConfig.ENV_LOCKOUT_TIMEOUT,
+                            Integer.valueOf(0),                 // min
+                            Integer.valueOf(Integer.MAX_VALUE), // max
+                            Integer.valueOf(Integer.MAX_VALUE), // default
+                            true,                           // mutable
+                            false);                         // forReplication
+
+    public static final LongConfigParam ENV_BACKGROUND_SLEEP_INTERVAL =
+        new LongConfigParam(EnvironmentConfig.ENV_BACKGROUND_SLEEP_INTERVAL,
+                           Long.valueOf(1000),                  // min
+                           Long.valueOf(Long.MAX_VALUE),        // max
+                           Long.valueOf(1000),                  // default
+                           true,                            // mutable
+                           false);                          // forReplication
+
+    public static final BooleanConfigParam ENV_CHECK_LEAKS =
+        new BooleanConfigParam(EnvironmentConfig.ENV_CHECK_LEAKS,
+                               true,              // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    public static final BooleanConfigParam ENV_FORCED_YIELD =
+        new BooleanConfigParam(EnvironmentConfig.ENV_FORCED_YIELD,
+                               false,             // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    public static final BooleanConfigParam ENV_INIT_TXN =
+        new BooleanConfigParam(EnvironmentConfig.ENV_IS_TRANSACTIONAL,
+                               false,             // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    public static final BooleanConfigParam ENV_INIT_LOCKING =
+        new BooleanConfigParam(EnvironmentConfig.ENV_IS_LOCKING,
+                               true,              // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    public static final BooleanConfigParam ENV_RDONLY =
+        new BooleanConfigParam(EnvironmentConfig.ENV_READ_ONLY,
+                               false,             // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    public static final BooleanConfigParam ENV_FAIR_LATCHES =
+        new BooleanConfigParam(EnvironmentConfig.ENV_FAIR_LATCHES,
+                               false,             // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    /**
+     * Not part of the public API. As of 3.3, it is true by default.
+     *
+     * If true (the default), use shared latches for Btree Internal Nodes (INs)
+     * to improve concurrency.
+     */
+    public static final BooleanConfigParam ENV_SHARED_LATCHES =
+        new BooleanConfigParam("je.env.sharedLatches",
+                               true,             // default
+                               false,            // mutable
+                               false);           // forReplication
+
+    public static final BooleanConfigParam ENV_DB_EVICTION =
+        new BooleanConfigParam(EnvironmentConfig.ENV_DB_EVICTION,
+                               true,             // default
+                               false,            // mutable
+                               false);           // forReplication
+
+    public static final IntConfigParam ADLER32_CHUNK_SIZE =
+        new IntConfigParam(EnvironmentConfig.ADLER32_CHUNK_SIZE,
+                           Integer.valueOf(0),       // min
+                           Integer.valueOf(1 << 20), // max
+                           Integer.valueOf(0),       // default
+                           true,                 // mutable
+                           false);               // forReplication
+
+    /*
+     * Database Logs
+     */
+    /* default: 2k * NUM_LOG_BUFFERS */
+    public static final int MIN_LOG_BUFFER_SIZE = 2048;
+    public static final int NUM_LOG_BUFFERS_DEFAULT = 3;
+    public static final long LOG_MEM_SIZE_MIN =
+        NUM_LOG_BUFFERS_DEFAULT * MIN_LOG_BUFFER_SIZE;
+    public static final String LOG_MEM_SIZE_MIN_STRING =
+        Long.toString(LOG_MEM_SIZE_MIN);
+
+    public static final LongConfigParam LOG_MEM_SIZE =
+        new LongConfigParam(EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES,
+                            Long.valueOf(LOG_MEM_SIZE_MIN),// min
+                            null,              // max
+                            Long.valueOf(0),       // by default computed
+                                               // from je.maxMemory
+                            false,             // mutable
+                            false);            // forReplication
+
+    public static final IntConfigParam NUM_LOG_BUFFERS =
+        new IntConfigParam(EnvironmentConfig.LOG_NUM_BUFFERS,
+                           Integer.valueOf(2),     // min
+                           null,               // max
+                           Integer.valueOf(NUM_LOG_BUFFERS_DEFAULT), // default
+                           false,              // mutable
+                           false);             // forReplication
+
+    public static final IntConfigParam LOG_BUFFER_MAX_SIZE =
+        new IntConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE,
+                           Integer.valueOf(1<<10),  // min
+                           null,                // max
+                           Integer.valueOf(1<<20),  // default
+                           false,               // mutable
+                           false);              // forReplication
+
+    public static final IntConfigParam LOG_FAULT_READ_SIZE =
+        new IntConfigParam(EnvironmentConfig.LOG_FAULT_READ_SIZE,
+                           Integer.valueOf(32),   // min
+                           null,              // max
+                           Integer.valueOf(2048), // default
+                           false,             // mutable
+                           false);            // forReplication
+
+    public static final IntConfigParam LOG_ITERATOR_READ_SIZE =
+        new IntConfigParam(EnvironmentConfig.LOG_ITERATOR_READ_SIZE,
+                           Integer.valueOf(128),  // min
+                           null,              // max
+                           Integer.valueOf(8192), // default
+                           false,             // mutable
+                           false);            // forReplication
+
+    public static final IntConfigParam LOG_ITERATOR_MAX_SIZE =
+        new IntConfigParam(EnvironmentConfig.LOG_ITERATOR_MAX_SIZE,
+                           Integer.valueOf(128),  // min
+                           null,              // max
+                           Integer.valueOf(16777216), // default
+                           false,             // mutable
+                           false);            // forReplication
+
+    public static final LongConfigParam LOG_FILE_MAX =
+        (EnvironmentImpl.IS_DALVIK ?
+        new LongConfigParam(EnvironmentConfig.LOG_FILE_MAX,
+                            Long.valueOf(10000),       // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(100000),      // default
+                            false,                 // mutable
+                            false) :               // forReplication
+        new LongConfigParam(EnvironmentConfig.LOG_FILE_MAX,
+                            Long.valueOf(1000000),     // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(10000000),    // default
+                            false,                 // mutable
+                            false));               // forReplication
+
+    public static final BooleanConfigParam LOG_CHECKSUM_READ =
+        new BooleanConfigParam(EnvironmentConfig.LOG_CHECKSUM_READ,
+                               true,               // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam LOG_VERIFY_CHECKSUMS =
+        new BooleanConfigParam(EnvironmentConfig.LOG_VERIFY_CHECKSUMS,
+                               false,              // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam LOG_MEMORY_ONLY =
+        new BooleanConfigParam(EnvironmentConfig.LOG_MEM_ONLY,
+                               false,              // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final IntConfigParam LOG_FILE_CACHE_SIZE =
+        new IntConfigParam(EnvironmentConfig.LOG_FILE_CACHE_SIZE,
+                           Integer.valueOf(3),    // min
+                           null,              // max
+                           Integer.valueOf(100),  // default
+                           false,             // mutable
+                           false);            // forReplication
+
+    public static final LongConfigParam LOG_FSYNC_TIMEOUT =
+        new LongConfigParam(EnvironmentConfig.LOG_FSYNC_TIMEOUT,
+                            Long.valueOf(10000L),  // min
+                            null,              // max
+                            Long.valueOf(500000L), // default
+                            false,             // mutable
+                            false);            // forReplication
+
+    public static final BooleanConfigParam LOG_USE_ODSYNC =
+        new BooleanConfigParam(EnvironmentConfig.LOG_USE_ODSYNC,
+                               false,          // default
+                               false,          // mutable
+                               false);         // forReplication
+
+    public static final BooleanConfigParam LOG_USE_NIO =
+        new BooleanConfigParam(EnvironmentConfig.LOG_USE_NIO,
+                               false,          // default
+                               false,          // mutable
+                               false);         // forReplication
+
+    public static final BooleanConfigParam LOG_DIRECT_NIO =
+        new BooleanConfigParam(EnvironmentConfig.LOG_DIRECT_NIO,
+                               false,          // default
+                               false,          // mutable
+                               false);         // forReplication
+
+    public static final LongConfigParam LOG_CHUNKED_NIO =
+        new LongConfigParam(EnvironmentConfig.LOG_CHUNKED_NIO,
+                            Long.valueOf(0L),      // min
+                            Long.valueOf(1 << 26), // max (64M)
+                            Long.valueOf(0L),      // default (no chunks)
+                            false,             // mutable
+                            false);            // forReplication
+
+    /**
+     * @deprecated As of 3.3, no longer used
+     *
+     * Optimize cleaner operation for temporary deferred write DBs.
+     */
+    public static final BooleanConfigParam LOG_DEFERREDWRITE_TEMP =
+        new BooleanConfigParam("je.deferredWrite.temp",
+                               false,          // default
+                               false,          // mutable
+                               false);         // forReplication
+
+    /*
+     * Tree
+     */
+    public static final IntConfigParam NODE_MAX =
+        new IntConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES,
+                           Integer.valueOf(4),     // min
+                           Integer.valueOf(32767), // max
+                           Integer.valueOf(128),   // default
+                           false,              // mutable
+                           false);             // forReplication
+
+    public static final IntConfigParam NODE_MAX_DUPTREE =
+        new IntConfigParam(EnvironmentConfig.NODE_DUP_TREE_MAX_ENTRIES,
+                           Integer.valueOf(4),     // min
+                           Integer.valueOf(32767), // max
+                           Integer.valueOf(128),   // default
+                           false,              // mutable
+                           false);             // forReplication
+
+    public static final IntConfigParam BIN_MAX_DELTAS =
+        new IntConfigParam(EnvironmentConfig.TREE_MAX_DELTA,
+                           Integer.valueOf(0),     // min
+                           Integer.valueOf(100),   // max
+                           Integer.valueOf(10),    // default
+                           false,              // mutable
+                           false);             // forReplication
+
+    public static final IntConfigParam BIN_DELTA_PERCENT =
+        new IntConfigParam(EnvironmentConfig.TREE_BIN_DELTA,
+                           Integer.valueOf(0),     // min
+                           Integer.valueOf(75),    // max
+                           Integer.valueOf(25),    // default
+                           false,              // mutable
+                           false);             // forReplication
+
+    public static final LongConfigParam MIN_TREE_MEMORY =
+        new LongConfigParam(EnvironmentConfig.TREE_MIN_MEMORY,
+                            Long.valueOf(50 * 1024),   // min
+                            null,                  // max
+                            Long.valueOf(500 * 1024),  // default
+                            true,                  // mutable
+                            false);                // forReplication
+
+    /*
+     * IN Compressor
+     */
+    public static final LongConfigParam COMPRESSOR_WAKEUP_INTERVAL =
+        new LongConfigParam(EnvironmentConfig.COMPRESSOR_WAKEUP_INTERVAL,
+                            Long.valueOf(1000000),     // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(5000000),     // default
+                            false,                 // mutable
+                            false);                // forReplication
+
+    public static final IntConfigParam COMPRESSOR_RETRY =
+        new IntConfigParam(EnvironmentConfig.COMPRESSOR_DEADLOCK_RETRY,
+                           Integer.valueOf(0),                // min
+                           Integer.valueOf(Integer.MAX_VALUE),// max
+                           Integer.valueOf(3),                // default
+                           false,                         // mutable
+                           false);                        // forReplication
+
+    public static final LongConfigParam COMPRESSOR_LOCK_TIMEOUT =
+        new LongConfigParam(EnvironmentConfig.COMPRESSOR_LOCK_TIMEOUT,
+                            Long.valueOf(0),           // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(500000L),     // default
+                            false,                 // mutable
+                            false);                // forReplication
+
+    public static final BooleanConfigParam COMPRESSOR_PURGE_ROOT =
+        new BooleanConfigParam(EnvironmentConfig.COMPRESSOR_PURGE_ROOT,
+                               false,              // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    /*
+     * Evictor
+     */
+    public static final LongConfigParam EVICTOR_EVICT_BYTES =
+        new LongConfigParam(EnvironmentConfig.EVICTOR_EVICT_BYTES,
+                             Long.valueOf(1024),       // min
+                             null,                 // max
+                             Long.valueOf(524288),     // default
+                             false,                // mutable
+                             false);               // forReplication
+
+    /**
+     * @deprecated As of 2.0, this is replaced by je.evictor.evictBytes
+     *
+     * When eviction happens, the evictor will push memory usage to this
+     * percentage of je.maxMemory.
+     */
+    public static final IntConfigParam EVICTOR_USEMEM_FLOOR =
+        new IntConfigParam("je.evictor.useMemoryFloor",
+                           Integer.valueOf(50),        // min
+                           Integer.valueOf(100),       // max
+                           Integer.valueOf(95),        // default
+                           false,                  // mutable
+                           false);                 // forReplication
+
+    /**
+     * @deprecated As of 1.7.2, this is replaced by je.evictor.nodesPerScan 
+     *
+     * The evictor percentage of total nodes to scan per wakeup.
+     */
+    public static final IntConfigParam EVICTOR_NODE_SCAN_PERCENTAGE =
+        new IntConfigParam("je.evictor.nodeScanPercentage",
+                           Integer.valueOf(1),          // min
+                           Integer.valueOf(100),        // max
+                           Integer.valueOf(10),         // default
+                           false,                   // mutable
+                           false);                  // forReplication
+
+    /**
+     * @deprecated As of 1.7.2, 1 node is chosen per scan.
+     *
+     * The evictor percentage of scanned nodes to evict per wakeup.
+     */
+    public static final
+        IntConfigParam EVICTOR_EVICTION_BATCH_PERCENTAGE =
+        new IntConfigParam("je.evictor.evictionBatchPercentage",
+                           Integer.valueOf(1),          // min
+                           Integer.valueOf(100),        // max
+                           Integer.valueOf(10),         // default
+                           false,                   // mutable
+                           false);                  // forReplication
+
+    public static final IntConfigParam EVICTOR_NODES_PER_SCAN =
+        new IntConfigParam(EnvironmentConfig.EVICTOR_NODES_PER_SCAN,
+                           Integer.valueOf(1),           // min
+                           Integer.valueOf(1000),        // max
+                           Integer.valueOf(10),          // default
+                           false,                    // mutable
+                           false);                   // forReplication
+
+    /**
+     * Not part of public API. As of 2.0, eviction is performed in-line.
+     *
+     * At this percentage over the allotted cache, critical eviction will
+     * start.
+     */
+    public static final IntConfigParam EVICTOR_CRITICAL_PERCENTAGE =
+        new IntConfigParam("je.evictor.criticalPercentage",
+                           Integer.valueOf(0),           // min
+                           Integer.valueOf(1000),        // max
+                           Integer.valueOf(0),           // default
+                           false,                    // mutable
+                           false);                   // forReplication
+
+    public static final IntConfigParam EVICTOR_RETRY =
+        new IntConfigParam(EnvironmentConfig.EVICTOR_DEADLOCK_RETRY,
+                           Integer.valueOf(0),                // min
+                           Integer.valueOf(Integer.MAX_VALUE),// max
+                           Integer.valueOf(3),                // default
+                           false,                         // mutable
+                           false);                        // forReplication
+
+    public static final BooleanConfigParam EVICTOR_LRU_ONLY =
+        new BooleanConfigParam(EnvironmentConfig.EVICTOR_LRU_ONLY,
+                               true,                  // default
+                               false,                 // mutable
+                               false);                // forReplication
+
+    public static final BooleanConfigParam EVICTOR_FORCED_YIELD =
+        new BooleanConfigParam(EnvironmentConfig.EVICTOR_FORCED_YIELD,
+                               false,             // default
+                               false,             // mutable
+                               false);            // forReplication
+
+    /*
+     * Checkpointer
+     */
+    public static final LongConfigParam CHECKPOINTER_BYTES_INTERVAL =
+        new LongConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL,
+                            Long.valueOf(0),               // min
+                            Long.valueOf(Long.MAX_VALUE),  // max
+                            (EnvironmentImpl.IS_DALVIK ?
+                             Long.valueOf(200000) :
+                             Long.valueOf(20000000)),      // default
+                            false,                     // mutable
+                            false);                    // forReplication
+
+    public static final LongConfigParam CHECKPOINTER_WAKEUP_INTERVAL =
+        new LongConfigParam(EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL,
+                            Long.valueOf(1000000),     // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(0),           // default
+                            false,                 // mutable
+                            false);                // forReplication
+
+    public static final IntConfigParam CHECKPOINTER_RETRY =
+        new IntConfigParam(EnvironmentConfig.CHECKPOINTER_DEADLOCK_RETRY,
+                           Integer.valueOf(0),                 // min
+                           Integer.valueOf(Integer.MAX_VALUE), // max
+                           Integer.valueOf(3),                 // default
+                           false,                          // mutable
+                           false);                         // forReplication
+
+    public static final BooleanConfigParam CHECKPOINTER_HIGH_PRIORITY =
+        new BooleanConfigParam(EnvironmentConfig.CHECKPOINTER_HIGH_PRIORITY,
+                               false, // default
+                               true,  // mutable
+                               false);// forReplication
+
+    /*
+     * Cleaner
+     */
+    public static final IntConfigParam CLEANER_MIN_UTILIZATION =
+        new IntConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION,
+                           Integer.valueOf(0),           // min
+                           Integer.valueOf(90),          // max
+                           Integer.valueOf(50),          // default
+                           true,                     // mutable
+                           false);                   // forReplication
+
+    public static final IntConfigParam CLEANER_MIN_FILE_UTILIZATION =
+        new IntConfigParam(EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION,
+                           Integer.valueOf(0),           // min
+                           Integer.valueOf(50),          // max
+                           Integer.valueOf(5),           // default
+                           true,                     // mutable
+                           false);                   // forReplication
+
+    public static final LongConfigParam CLEANER_BYTES_INTERVAL =
+        new LongConfigParam(EnvironmentConfig.CLEANER_BYTES_INTERVAL,
+                            Long.valueOf(0),              // min
+                            Long.valueOf(Long.MAX_VALUE), // max
+                            Long.valueOf(0),              // default
+                            true,                     // mutable
+                            false);                   // forReplication
+
+    public static final BooleanConfigParam CLEANER_FETCH_OBSOLETE_SIZE =
+        new BooleanConfigParam(EnvironmentConfig.CLEANER_FETCH_OBSOLETE_SIZE,
+                               false, // default
+                               true,  // mutable
+                               false);// forReplication
+
+    public static final IntConfigParam CLEANER_DEADLOCK_RETRY =
+        new IntConfigParam(EnvironmentConfig.CLEANER_DEADLOCK_RETRY,
+                           Integer.valueOf(0),                // min
+                           Integer.valueOf(Integer.MAX_VALUE),// max
+                           Integer.valueOf(3),                // default
+                           true,                          // mutable
+                           false);                        // forReplication
+
+    public static final LongConfigParam CLEANER_LOCK_TIMEOUT =
+        new LongConfigParam(EnvironmentConfig.CLEANER_LOCK_TIMEOUT,
+                            Long.valueOf(0),            // min
+                            Long.valueOf(4294967296L),  // max
+                            Long.valueOf(500000L),      // default
+                            true,                   // mutable
+                            false);                 // forReplication
+
+    public static final BooleanConfigParam CLEANER_REMOVE =
+        new BooleanConfigParam(EnvironmentConfig.CLEANER_EXPUNGE,
+                               true,                 // default
+                               true,                 // mutable
+                               false);               // forReplication
+
+    /**
+     * @deprecated As of 1.7.1, no longer used.
+     */
+    public static final IntConfigParam CLEANER_MIN_FILES_TO_DELETE =
+        new IntConfigParam("je.cleaner.minFilesToDelete",
+                           Integer.valueOf(1),           // min
+                           Integer.valueOf(1000000),     // max
+                           Integer.valueOf(5),           // default
+                           false,                    // mutable
+                           false);        // forReplication
+
+    /**
+     * @deprecated As of 2.0, no longer used.
+     */
+    public static final IntConfigParam CLEANER_RETRIES =
+        new IntConfigParam("je.cleaner.retries",
+                           Integer.valueOf(0),           // min
+                           Integer.valueOf(1000),        // max
+                           Integer.valueOf(10),          // default
+                           false,                    // mutable
+                           false);        // forReplication
+
+    /**
+     * @deprecated As of 2.0, no longer used.
+     */
+    public static final IntConfigParam CLEANER_RESTART_RETRIES =
+        new IntConfigParam("je.cleaner.restartRetries",
+                           Integer.valueOf(0),           // min
+                           Integer.valueOf(1000),        // max
+                           Integer.valueOf(5),           // default
+                           false,                    // mutable
+                           false);        // forReplication
+
+    public static final IntConfigParam CLEANER_MIN_AGE =
+        new IntConfigParam(EnvironmentConfig.CLEANER_MIN_AGE,
+                           Integer.valueOf(1),           // min
+                           Integer.valueOf(1000),        // max
+                           Integer.valueOf(2),           // default
+                           true,                     // mutable
+                           false);                   // forReplication
+
+    /**
+     * Experimental and may be removed in a future release -- not exposed in
+     * the public API.
+     *
+     * If true, eviction and checkpointing will cluster records by key
+     * value, migrating them from low utilization files if they are
+     * resident.
+     * The cluster and clusterAll properties may not both be set to true.
+     */
+    public static final BooleanConfigParam CLEANER_CLUSTER =
+        new BooleanConfigParam("je.cleaner.cluster",
+                               false,               // default
+                               true,                // mutable
+                               false);              // forReplication
+
+    /**
+     * Experimental and may be removed in a future release -- not exposed in
+     * the public API.
+     *
+     * If true, eviction and checkpointing will cluster records by key
+     * value, migrating them from low utilization files whether or not
+     * they are resident.
+     * The cluster and clusterAll properties may not both be set to true.
+     */
+    public static final BooleanConfigParam CLEANER_CLUSTER_ALL =
+        new BooleanConfigParam("je.cleaner.clusterAll",
+                               false,              // default
+                               true,               // mutable
+                               false);             // forReplication
+
+    public static final IntConfigParam CLEANER_MAX_BATCH_FILES =
+        new IntConfigParam(EnvironmentConfig.CLEANER_MAX_BATCH_FILES,
+                           Integer.valueOf(0),         // min
+                           Integer.valueOf(100000),    // max
+                           Integer.valueOf(0),         // default
+                           true,                   // mutable
+                           false);                 // forReplication
+
+    public static final IntConfigParam CLEANER_READ_SIZE =
+        new IntConfigParam(EnvironmentConfig.CLEANER_READ_SIZE,
+                           Integer.valueOf(128),  // min
+                           null,              // max
+                           Integer.valueOf(0),    // default
+                           true,              // mutable
+                           false);            // forReplication
+
+    /**
+     * Not part of public API.
+     *
+     * If true, the cleaner tracks and stores detailed information that is used
+     * to decrease the cost of cleaning.
+     */
+    public static final BooleanConfigParam CLEANER_TRACK_DETAIL =
+        new BooleanConfigParam("je.cleaner.trackDetail",
+                               true,          // default
+                               false,         // mutable
+                               false);        // forReplication
+
+    public static final IntConfigParam CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE =
+    new IntConfigParam(EnvironmentConfig.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE,
+                           Integer.valueOf(1),    // min
+                           Integer.valueOf(90),   // max
+                           Integer.valueOf(2),    // default
+                           true,              // mutable
+                           false);            // forReplication
+
+    /**
+     * Not part of public API, since it applies to a very old bug.
+     *
+     * If true, detail information is discarded that was added by earlier
+     * versions of JE (specifically 2.0.42 and 2.0.54) if it may be invalid.
+     * This may be set to false for increased performance when those versions
+     * of JE were used but LockMode.RMW was never used.
+     */
+    public static final BooleanConfigParam CLEANER_RMW_FIX =
+        new BooleanConfigParam("je.cleaner.rmwFix",
+                               true,          // default
+                               false,         // mutable
+                               false);        // forReplication
+
+    public static final ConfigParam CLEANER_FORCE_CLEAN_FILES =
+        new ConfigParam(EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES,
+                        "",                  // default
+                        false,               // mutable
+                        false);              // forReplication
+
+    public static final IntConfigParam CLEANER_UPGRADE_TO_LOG_VERSION =
+        new IntConfigParam(EnvironmentConfig.CLEANER_UPGRADE_TO_LOG_VERSION,
+                           Integer.valueOf(-1),  // min
+                           null,             // max
+                           Integer.valueOf(0),   // default
+                           false,            // mutable
+                           false);           // forReplication
+
+    public static final IntConfigParam CLEANER_THREADS =
+        new IntConfigParam(EnvironmentConfig.CLEANER_THREADS,
+                           Integer.valueOf(1),   // min
+                           null,             // max
+                           Integer.valueOf(1),   // default
+                           true,             // mutable
+                           false);           // forReplication
+
+    public static final IntConfigParam CLEANER_LOOK_AHEAD_CACHE_SIZE =
+        new IntConfigParam(EnvironmentConfig.CLEANER_LOOK_AHEAD_CACHE_SIZE,
+                           Integer.valueOf(0),    // min
+                           null,              // max
+                           Integer.valueOf(8192), // default
+                           true,              // mutable
+                           false);            // forReplication
+
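+    /*
+     * Illustrative sketch (not part of the original source): parameters
+     * declared mutable above, such as je.cleaner.clusterAll, can be changed
+     * on an open Environment roughly as follows:
+     *
+     *   EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+     *   mutableConfig.setConfigParam("je.cleaner.clusterAll", "true");
+     *   env.setMutableConfig(mutableConfig);
+     *
+     * Parameters declared with mutable=false can only be set before the
+     * Environment is opened.
+     */
+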
+    /*
+     * Transactions
+     */
+    public static final IntConfigParam N_LOCK_TABLES =
+        new IntConfigParam(EnvironmentConfig.LOCK_N_LOCK_TABLES,
+                           Integer.valueOf(1),    // min
+                           Integer.valueOf(32767),// max
+                           Integer.valueOf(1),    // default
+                           false,             // mutable
+                           false);            // forReplication
+
+    public static final LongConfigParam LOCK_TIMEOUT =
+        new LongConfigParam(EnvironmentConfig.LOCK_TIMEOUT,
+                            Long.valueOf(0),           // min
+                            Long.valueOf(4294967296L), // max
+                            Long.valueOf(500000L),     // default
+                            false,                 // mutable
+                            false);                // forReplication
+
+    public static final LongConfigParam TXN_TIMEOUT =
+        new LongConfigParam(EnvironmentConfig.TXN_TIMEOUT,
+                            Long.valueOf(0),           // min
+                            Long.valueOf(4294967296L), // max_value
+                            Long.valueOf(0),           // default
+                            false,                 // mutable
+                            false);                // forReplication
+
+    public static final BooleanConfigParam TXN_SERIALIZABLE_ISOLATION =
+        new BooleanConfigParam(EnvironmentConfig.TXN_SERIALIZABLE_ISOLATION,
+                               false,              // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam TXN_DEADLOCK_STACK_TRACE =
+        new BooleanConfigParam(EnvironmentConfig.TXN_DEADLOCK_STACK_TRACE,
+                               false,              // default
+                               true,               // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam TXN_DUMPLOCKS =
+        new BooleanConfigParam(EnvironmentConfig.TXN_DUMP_LOCKS,
+                               false,              // default
+                               true,               // mutable
+                               false);             // forReplication
+
+    /*
+     * Debug tracing system
+     */
+    public static final BooleanConfigParam JE_LOGGING_FILE =
+        new BooleanConfigParam(EnvironmentConfig.TRACE_FILE,
+                               false,              // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam JE_LOGGING_CONSOLE =
+        new BooleanConfigParam(EnvironmentConfig.TRACE_CONSOLE,
+                               false,             // default
+                               false,             // mutable
+                               false);             // forReplication
+
+    public static final BooleanConfigParam JE_LOGGING_DBLOG =
+        new BooleanConfigParam(EnvironmentConfig.TRACE_DB,
+                               true,               // default
+                               false,              // mutable
+                               false);             // forReplication
+
+    public static final IntConfigParam JE_LOGGING_FILE_LIMIT =
+        new IntConfigParam(EnvironmentConfig.TRACE_FILE_LIMIT,
+                           Integer.valueOf(1000),       // min
+                           Integer.valueOf(1000000000), // max
+                           Integer.valueOf(10000000),   // default
+                           false,                   // mutable
+                           false);                  // forReplication
+
+    public static final IntConfigParam JE_LOGGING_FILE_COUNT =
+        new IntConfigParam(EnvironmentConfig.TRACE_FILE_COUNT,
+                           Integer.valueOf(1),         // min
+                           null,                   // max
+                           Integer.valueOf(10),        // default
+                           false,                  // mutable
+                           false);                 // forReplication
+
+    public static final ConfigParam JE_LOGGING_LEVEL =
+        new ConfigParam(EnvironmentConfig.TRACE_LEVEL,
+                        "INFO",
+                        false,                     // mutable
+                        false);                    // forReplication
+
+    public static final ConfigParam JE_LOGGING_LEVEL_LOCKMGR =
+        new ConfigParam(EnvironmentConfig.TRACE_LEVEL_LOCK_MANAGER,
+                        "FINE",
+                        false,                    // mutable
+                        false);                   // forReplication
+
+    public static final ConfigParam JE_LOGGING_LEVEL_RECOVERY =
+        new ConfigParam(EnvironmentConfig.TRACE_LEVEL_RECOVERY,
+                        "FINE",
+                         false,                   // mutable
+                        false);                   // forReplication
+
+    public static final ConfigParam JE_LOGGING_LEVEL_EVICTOR =
+        new ConfigParam(EnvironmentConfig.TRACE_LEVEL_EVICTOR,
+                        "FINE",
+                         false,                   // mutable
+                        false);                   // forReplication
+
+    public static final ConfigParam JE_LOGGING_LEVEL_CLEANER =
+        new ConfigParam(EnvironmentConfig.TRACE_LEVEL_CLEANER,
+                        "FINE",
+                         true,                    // mutable
+                        false);                   // forReplication
+
+    /*
+     * Replication params are in com.sleepycat.je.rep.impl.ReplicatorParams
+     */
+
+    /*
+     * Add a configuration parameter to the set supported by an
+     * environment.
+     */
+    public static void addSupportedParam(ConfigParam param) {
+        SUPPORTED_PARAMS.put(param.getName(), param);
+    }
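+
+    /*
+     * Illustrative sketch (not in the original source): a caller such as the
+     * replication module mentioned above could register an extra parameter
+     * at class-load time, using the same constructor arguments shown
+     * throughout this file.  The parameter name below is hypothetical.
+     *
+     *   static {
+     *       addSupportedParam(
+     *           new BooleanConfigParam("je.rep.exampleParam",  // name
+     *                                  false,                  // default
+     *                                  true,                   // mutable
+     *                                  true));                 // forReplication
+     *   }
+     */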
+}
diff --git a/src/com/sleepycat/je/config/IntConfigParam.java b/src/com/sleepycat/je/config/IntConfigParam.java
new file mode 100644
index 0000000000000000000000000000000000000000..f81b217c68d5f43c8b48ef4f3e211d5740153611
--- /dev/null
+++ b/src/com/sleepycat/je/config/IntConfigParam.java
@@ -0,0 +1,76 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IntConfigParam.java,v 1.31.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with an integer value.
+ */
+public class IntConfigParam extends ConfigParam {
+
+    private static final String DEBUG_NAME = IntConfigParam.class.getName();
+
+    private Integer min;
+    private Integer max;
+
+    public IntConfigParam(String configName,
+                          Integer minVal,
+                          Integer maxVal,
+                          Integer defaultValue,
+                          boolean mutable,
+                          boolean forReplication) {
+        // defaultValue must not be null
+        super(configName, defaultValue.toString(), mutable, forReplication);
+        min = minVal;
+        max = maxVal;
+    }
+
+    /*
+     * Self validate. Check min and max bounds.
+     */
+    private void validate(Integer value)
+	throws IllegalArgumentException {
+
+        if (value != null) {
+            if (min != null) {
+                if (value.compareTo(min) < 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " +
+			 value +
+			 " is less than min of "+
+			 min);
+                }
+            }
+            if (max != null) {
+                if (value.compareTo(max) > 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " +
+			 value +
+			 " is greater than max of " +
+			 max);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void validateValue(String value)
+        throws IllegalArgumentException {
+
+        try {
+            validate(new Integer(value));
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		(DEBUG_NAME + ": " +  value + " not valid value for " + name);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/config/LongConfigParam.java b/src/com/sleepycat/je/config/LongConfigParam.java
new file mode 100644
index 0000000000000000000000000000000000000000..b97e369d66b1a263cfc5697956a66fbf06a12020
--- /dev/null
+++ b/src/com/sleepycat/je/config/LongConfigParam.java
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LongConfigParam.java,v 1.29.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a long value.
+ */
+public class LongConfigParam extends ConfigParam {
+
+    private static final String DEBUG_NAME = LongConfigParam.class.getName();
+
+    private Long min;
+    private Long max;
+
+    LongConfigParam(String configName,
+                    Long minVal,
+                    Long maxVal,
+                    Long defaultValue,
+                    boolean mutable,
+                    boolean forReplication) {
+
+        // defaultValue must not be null
+        super(configName, defaultValue.toString(), mutable, forReplication);
+        min = minVal;
+        max = maxVal;
+    }
+
+    /*
+     * Self validate. Check min and max bounds.
+     */
+    private void validate(Long value)
+	throws IllegalArgumentException {
+
+        if (value != null) {
+            if (min != null) {
+                if (value.compareTo(min) < 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " +
+			 value +
+			 " is less than min of "
+			 + min);
+                }
+            }
+            if (max != null) {
+                if (value.compareTo(max) > 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " +
+			 value +
+			 " is greater than max "+
+			 " of " +  max);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void validateValue(String value)
+        throws IllegalArgumentException {
+
+        try {
+            validate(new Long(value));
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		(DEBUG_NAME + ": " +  value + " not valid value for " + name);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/config/ShortConfigParam.java b/src/com/sleepycat/je/config/ShortConfigParam.java
new file mode 100644
index 0000000000000000000000000000000000000000..ca55cfc3a1bea42a2a05294740725ce50d191ce7
--- /dev/null
+++ b/src/com/sleepycat/je/config/ShortConfigParam.java
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2006 Oracle.  All rights reserved.
+ *
+ * $Id: ShortConfigParam.java,v 1.26 2008/06/10 02:52:09 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a short value.
+ */
+public class ShortConfigParam extends ConfigParam {
+    
+    private static final String DEBUG_NAME =
+        ShortConfigParam.class.getName();
+
+    private Short min;
+    private Short max;
+
+    public ShortConfigParam(String configName,
+                     Short minVal,
+                     Short maxVal,
+                     Short defaultValue,
+                     boolean mutable,
+                     boolean forReplication) {
+        // defaultValue must not be null
+        super(configName, defaultValue.toString(), mutable, forReplication);
+
+        min = minVal;
+        max = maxVal;
+    }
+
+    /*
+     * Self validate. Check min and max bounds.
+     */
+    private void validate(Short value)
+	throws IllegalArgumentException {
+
+        if (value != null) {
+            if (min != null) {
+                if (value.compareTo(min) < 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " + value +
+			 " is less than min of " + min);
+                }
+            }
+            if (max != null) {
+                if (value.compareTo(max) > 0) {
+                    throw new IllegalArgumentException
+			(DEBUG_NAME + ":" +
+			 " param " + name +
+			 " doesn't validate, " + value +
+			 " is greater than max of " +
+			 max);
+                }
+            }
+        }
+    }
+
+    @Override
+    public void validateValue(String value)
+        throws IllegalArgumentException {
+
+        try {
+            validate(new Short(value));
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+		(DEBUG_NAME + ": " +  value +
+		 " not valid value for " + name);
+        }
+    }
+
+    public Short getMin() {
+        return min;
+    }
+}
diff --git a/src/com/sleepycat/je/config/package.html b/src/com/sleepycat/je/config/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..837af0c5e1d15a444bc3389f51b5cc6e6c53fbbc
--- /dev/null
+++ b/src/com/sleepycat/je/config/package.html
@@ -0,0 +1,30 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.11.2.2 2010/01/04 15:30:28 cwl Exp $
+
+-->
+</head>
+<body bgcolor="white">
+Environment configuration parameter support.
+<h2>Package Specification</h2>
+Attributes of Berkeley DB, Java Edition may be set programmatically or
+through a properties file in the environment home directory. In
+general, applications that want to set environment attributes 
+will do so through the setter methods provided in configuration classes such
+as com.sleepycat.je.EnvironmentConfig.
+<p>
+Occasionally the application will choose to set a less frequently used
+attribute through EnvironmentConfig.setConfigParam(). The classes in
+this package are used with that method.
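+For example (an illustrative sketch, not a normative recipe):
+<pre>
+    EnvironmentConfig config = new EnvironmentConfig();
+    // Set a less frequently used attribute by its string name.
+    config.setConfigParam("je.cleaner.clusterAll", "true");
+    Environment env = new Environment(envHomeDirectory, config);
+</pre>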
+<p>
+<!-- Put @see and @since tags down here. -->
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/dbi/CursorImpl.java b/src/com/sleepycat/je/dbi/CursorImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..1b72c4716eea6f780bd26ab4199ab803bbd05e8a
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/CursorImpl.java
@@ -0,0 +1,2989 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorImpl.java,v 1.348.2.5 2010/03/26 13:23:55 mark Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockNotGrantedException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.latch.LatchNotHeldException;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.BINBoundary;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.DupCountLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeWalkerStatsAccumulator;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * A CursorImpl is the internal implementation of the cursor.
+ */
+public class CursorImpl implements Cloneable {
+
+    private static final boolean DEBUG = false;
+
+    private static final byte CURSOR_NOT_INITIALIZED = 1;
+    private static final byte CURSOR_INITIALIZED = 2;
+    private static final byte CURSOR_CLOSED = 3;
+    private static final String TRACE_DELETE = "Delete";
+    private static final String TRACE_MOD = "Mod:";
+
+    /*
+     * Cursor location in the databaseImpl, represented by a BIN and an index
+     * in the BIN.  bin/index must have a non-null/non-negative value if dupBin
+     * is set to non-null.
+     */
+    volatile private BIN bin;
+    volatile private int index;
+
+    /*
+     * Cursor location in a given duplicate set.  If the cursor is not
+     * referencing a duplicate set then these are null.
+     */
+    volatile private DBIN dupBin;
+    volatile private int dupIndex;
+
+    /*
+     * BIN and DBIN that are no longer referenced by this cursor but have not
+     * yet been removed.  If non-null, the BIN/DBIN will be removed soon.
+     * BIN.adjustCursors should ignore cursors that are to be removed.
+     */
+    volatile private BIN binToBeRemoved;
+    volatile private DBIN dupBinToBeRemoved;
+
+    /*
+     * The cursor location used for a given operation.
+     */
+    private BIN targetBin;
+    private int targetIndex;
+    private byte[] dupKey;
+
+    /* The databaseImpl behind the handle. */
+    private DatabaseImpl databaseImpl;
+
+    /* Owning transaction. */
+    private Locker locker;
+    private CursorImpl lockerPrev; // lockPrev, lockNext used for simple Locker
+    private CursorImpl lockerNext; // chain.
+    private boolean retainNonTxnLocks;
+
+    /* State of the cursor. See CURSOR_XXX above. */
+    private byte status;
+
+    private CacheMode cacheMode;
+    private boolean allowEviction;
+    private TestHook testHook;
+
+    private boolean nonCloning = false;
+
+    /*
+     * Unique id that we can return as a hashCode to prevent calls to
+     * Object.hashCode(). [#13896]
+     */
+    private int thisId;
+
+    /*
+     * Allocate hashCode ids from this. [#13896]
+     */
+    private static long lastAllocatedId = 0;
+
+    private ThreadLocal<TreeWalkerStatsAccumulator> treeStatsAccumulatorTL;
+
+    /*
+     * Allocate a new hashCode id.  Doesn't need to be synchronized since it's
+     * ok for two objects to have the same hashcode.
+     */
+    private static long getNextCursorId() {
+        return ++lastAllocatedId;
+    }
+
+    public int hashCode() {
+        return thisId;
+    }
+
+    private void maybeInitTreeStatsAccumulator() {
+        if (treeStatsAccumulatorTL == null) {
+            treeStatsAccumulatorTL =
+                new ThreadLocal<TreeWalkerStatsAccumulator>();
+        }
+    }
+
+    private TreeWalkerStatsAccumulator getTreeStatsAccumulator() {
+        if (EnvironmentImpl.getThreadLocalReferenceCount() > 0) {
+            maybeInitTreeStatsAccumulator();
+            return treeStatsAccumulatorTL.get();
+        } else {
+            return null;
+        }
+    }
+
+    public void incrementLNCount() {
+        TreeWalkerStatsAccumulator treeStatsAccumulator =
+            getTreeStatsAccumulator();
+        if (treeStatsAccumulator != null) {
+            treeStatsAccumulator.incrementLNCount();
+        }
+    }
+
+    /**
+     * Prevents this cursor from being cloned for any reason.  [#13879]
+     *
+     * NonCloning is an optimization used for Database.get/put operations.
+     * Normally cloning is used before an operation to allow use of the old
+     * cursor position after the operation fails.  With the Database
+     * operations, if an operation fails the cursor is simply discarded.
+     *
+     * Note that Cursor.dup may not be called when NonCloning is set to true.
+     */
+    public void setNonCloning(boolean nonCloning) {
+        this.nonCloning = nonCloning;
+    }
+
+    /**
+     * public for Cursor et al
+     */
+    public static class SearchMode {
+        public static final SearchMode SET =
+            new SearchMode(true, false, "SET");
+        public static final SearchMode BOTH =
+            new SearchMode(true, true, "BOTH");
+        public static final SearchMode SET_RANGE =
+            new SearchMode(false, false, "SET_RANGE");
+        public static final SearchMode BOTH_RANGE =
+            new SearchMode(false, true, "BOTH_RANGE");
+
+        private boolean exactSearch;
+        private boolean dataSearch;
+        private String name;
+
+        private SearchMode(boolean exactSearch,
+                           boolean dataSearch,
+                           String name) {
+            this.exactSearch = exactSearch;
+            this.dataSearch = dataSearch;
+            this.name = "SearchMode." + name;
+        }
+
+        /**
+         * Returns true when the key or key/data search is exact, i.e., for SET
+         * and BOTH.
+         */
+        public final boolean isExactSearch() {
+            return exactSearch;
+        }
+
+        /**
+         * Returns true when the data value is included in the search, i.e.,
+         * for BOTH and BOTH_RANGE.
+         */
+        public final boolean isDataSearch() {
+            return dataSearch;
+        }
+
+        public String toString() {
+            return name;
+        }
+    }
+
+    /**
+     * Holder for an OperationStatus and a keyChange flag.  Is used for search
+     * and getNextWithKeyChangeStatus operations.
+     */
+    public static class KeyChangeStatus {
+
+        /**
+         * Operation status.
+         */
+        public OperationStatus status;
+
+        /**
+         * Whether the operation moved to a new key.
+         */
+        public boolean keyChange;
+
+        public KeyChangeStatus(OperationStatus status, boolean keyChange) {
+            this.status = status;
+            this.keyChange = keyChange;
+        }
+    }
+
+    /**
+     * Creates a cursor with retainNonTxnLocks=true.
+     */
+    public CursorImpl(DatabaseImpl database, Locker locker)
+        throws DatabaseException {
+
+        this(database, locker, true);
+    }
+
+    /**
+     * Creates a cursor.
+     *
+     * A cursor always retains transactional locks when it is reset or closed.
+     * Non-transaction locks may be retained or not, depending on the
+     * retainNonTxnLocks parameter value.
+     *
+     * Normally a user-created non-transactional Cursor releases locks on reset
+     * and close, and a ThreadLocker is normally used.  However, by passing
+     * true for retainNonTxnLocks a ThreadLocker can be made to retain locks;
+     * this capability is used by SecondaryCursor.readPrimaryAfterGet.
+     *
+     * For internal (non-user) cursors, a BasicLocker is often used and locks
+     * are retained.  BasicLocker does not currently support releasing locks
+     * per cursor operation, so true must be passed for retainNonTxnLocks. In
+     * addition, in these internal use cases the caller explicitly calls
+     * BasicLocker.operationEnd, and retainNonTxnLocks is set to true to
+     * prevent operationEnd from being called when the cursor is closed.
+     *
+     * BasicLocker is also used for NameLN operations while opening a Database
+     * handle.  Database handle locks must be retained, even if the Database is
+     * opened non-transactionally.
+     *
+     * @param retainNonTxnLocks is true if non-transactional locks should be
+     * retained (not released automatically) when the cursor is reset or
+     * closed.
+     */
+    public CursorImpl(DatabaseImpl databaseImpl,
+                      Locker locker,
+                      boolean retainNonTxnLocks)
+        throws DatabaseException {
+
+        thisId = (int) getNextCursorId();
+        bin = null;
+        index = -1;
+        dupBin = null;
+        dupIndex = -1;
+
+        this.retainNonTxnLocks = retainNonTxnLocks;
+
+        /* Associate this cursor with the databaseImpl. */
+        this.databaseImpl = databaseImpl;
+        this.locker = locker;
+        this.locker.registerCursor(this);
+
+        this.cacheMode = CacheMode.DEFAULT;
+
+        status = CURSOR_NOT_INITIALIZED;
+
+        /*
+         * Do not perform eviction here because we may be synchronized on the
+         * Database instance. For example, this happens when we call
+         * Database.openCursor().  Also eviction may be disabled after the
+         * cursor is constructed.
+         */
+    }
+
+    /**
+     * Disables or enables eviction during cursor operations.  For example, a
+     * cursor used to implement eviction (e.g., in some UtilizationProfile and
+     * most DbTree methods) should not itself perform eviction, but eviction
+     * should be enabled for user cursors.  Eviction is disabled by default.
+     */
+    public void setAllowEviction(boolean allowed) {
+        allowEviction = allowed;
+    }
+
+    /**
+     * Shallow copy.  addCursor() is optionally called.
+     */
+    public CursorImpl cloneCursor(boolean addCursor)
+        throws DatabaseException {
+
+        return cloneCursor(addCursor, null);
+    }
+
+    /**
+     * Performs a shallow copy.
+     *
+     * @param addCursor If true, addCursor() is called to register the new
+     * cursor with the BINs.  This is done after the usePosition parameter is
+     * applied, if any.  There are two cases where you may not want addCursor()
+     * to be called: 1) When creating a fresh uninitialzed cursor as in when
+     * Cursor.dup(false) is called, or 2) when the caller will call addCursor()
+     * as part of a larger operation.
+     *
+     * @param usePosition Is null to duplicate the position of this cursor, or
+     * non-null to duplicate the position of the given cursor.
+     */
+    public CursorImpl cloneCursor(boolean addCursor, CursorImpl usePosition)
+        throws DatabaseException {
+
+        CursorImpl ret = null;
+        if (nonCloning) {
+            ret = this;
+        } else {
+            try {
+                latchBINs();
+                ret = (CursorImpl) super.clone();
+
+                if (!retainNonTxnLocks) {
+                    ret.locker = locker.newNonTxnLocker();
+                }
+
+                ret.locker.registerCursor(ret);
+                if (usePosition != null &&
+                    usePosition.status == CURSOR_INITIALIZED) {
+                    ret.bin = usePosition.bin;
+                    ret.index = usePosition.index;
+                    ret.dupBin = usePosition.dupBin;
+                    ret.dupIndex = usePosition.dupIndex;
+                }
+                if (addCursor) {
+                    ret.addCursor();
+                }
+            } catch (CloneNotSupportedException cannotOccur) {
+                return null;
+            } finally {
+                releaseBINs();
+            }
+        }
+
+        /* Perform eviction before and after each cursor operation. */
+        if (allowEviction) {
+            databaseImpl.getDbEnvironment().getEvictor().doCriticalEviction
+                (false); // backgroundIO
+        }
+        return ret;
+    }
+
+    public int getIndex() {
+        return index;
+    }
+
+    public void setIndex(int idx) {
+        index = idx;
+    }
+
+    public BIN getBIN() {
+        return bin;
+    }
+
+    public void setBIN(BIN newBin) {
+        bin = newBin;
+    }
+
+    public BIN getBINToBeRemoved() {
+        return binToBeRemoved;
+    }
+
+    public int getDupIndex() {
+        return dupIndex;
+    }
+
+    public void setDupIndex(int dupIdx) {
+        dupIndex = dupIdx;
+    }
+
+    public DBIN getDupBIN() {
+        return dupBin;
+    }
+
+    public void setDupBIN(DBIN newDupBin) {
+        dupBin = newDupBin;
+    }
+
+    public DBIN getDupBINToBeRemoved() {
+        return dupBinToBeRemoved;
+    }
+
+    public CacheMode getCacheMode() {
+        return cacheMode;
+    }
+
+    public void setCacheMode(CacheMode cacheMode) {
+        this.cacheMode = cacheMode;
+    }
+
+    public void setTreeStatsAccumulator(TreeWalkerStatsAccumulator tSA) {
+        maybeInitTreeStatsAccumulator();
+        treeStatsAccumulatorTL.set(tSA);
+    }
+
+    /**
+     * Figure out which BIN/index set to use.
+     */
+    private boolean setTargetBin() {
+        targetBin = null;
+        targetIndex = 0;
+        boolean isDup = (dupBin != null);
+        dupKey = null;
+        if (isDup) {
+            targetBin = dupBin;
+            targetIndex = dupIndex;
+            dupKey = dupBin.getDupKey();
+        } else {
+            targetBin = bin;
+            targetIndex = index;
+        }
+        return isDup;
+    }
+
+    /**
+     * Advance a cursor.  Used so that verify can advance a cursor even in the
+     * face of an exception [12932].
+     * @param key on return contains the key if available, or null.
+     * @param data on return contains the data if available, or null.
+     */
+    public boolean advanceCursor(DatabaseEntry key, DatabaseEntry data) {
+
+        BIN oldBin = bin;
+        BIN oldDupBin = dupBin;
+        int oldIndex = index;
+        int oldDupIndex = dupIndex;
+
+        key.setData(null);
+        data.setData(null);
+
+        try {
+            getNext(key, data, LockType.NONE,
+                    true /* forward */,
+                    false /* alreadyLatched */);
+        } catch (DatabaseException ignored) {
+            /* Klockwork - ok */
+        }
+
+        /*
+         * If the position changed, regardless of an exception, then we believe
+         * that we have advanced the cursor.
+         */
+        if (bin != oldBin ||
+            dupBin != oldDupBin ||
+            index != oldIndex ||
+            dupIndex != oldDupIndex) {
+
+            /*
+             * Return the key and data from the BIN entries, if we were not
+             * able to read it above.
+             */
+            if (key.getData() == null &&
+                bin != null &&
+                index > 0) {
+                setDbt(key, bin.getKey(index));
+            }
+            if (data.getData() == null &&
+                dupBin != null &&
+                dupIndex > 0) {
+                setDbt(data, dupBin.getKey(dupIndex));
+            }
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    public BIN latchBIN()
+        throws DatabaseException {
+
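+        /*
+         * The bin field may be changed by another thread (for example, when
+         * cursors are adjusted during a BIN split) while we wait for the
+         * latch, so latch the current value, re-check it after latching, and
+         * retry if it moved.
+         */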
+        while (bin != null) {
+            BIN waitingOn = bin;
+            waitingOn.latch(cacheMode);
+            if (bin == waitingOn) {
+                return bin;
+            }
+            waitingOn.releaseLatch();
+        }
+
+        return null;
+    }
+
+    public void releaseBIN()
+        throws LatchNotHeldException {
+
+        if (bin != null) {
+            bin.releaseLatchIfOwner();
+        }
+    }
+
+    public void latchBINs()
+        throws DatabaseException {
+
+        latchBIN();
+        latchDBIN();
+    }
+
+    public void releaseBINs()
+        throws LatchNotHeldException {
+
+        releaseBIN();
+        releaseDBIN();
+    }
+
+    public DBIN latchDBIN()
+        throws DatabaseException {
+
+        while (dupBin != null) {
+            BIN waitingOn = dupBin;
+            waitingOn.latch(cacheMode);
+            if (dupBin == waitingOn) {
+                return dupBin;
+            }
+            waitingOn.releaseLatch();
+        }
+        return null;
+    }
+
+    public void releaseDBIN()
+        throws LatchNotHeldException {
+
+        if (dupBin != null) {
+            dupBin.releaseLatchIfOwner();
+        }
+    }
+
+    public Locker getLocker() {
+        return locker;
+    }
+
+    public void addCursor(BIN bin) {
+        if (bin != null) {
+            assert bin.isLatchOwnerForWrite();
+            bin.addCursor(this);
+        }
+    }
+
+    /**
+     * Register this cursor with its current DBIN (for dups) and BIN.
+     */
+    public void addCursor() {
+        if (dupBin != null) {
+            addCursor(dupBin);
+        }
+        if (bin != null) {
+            addCursor(bin);
+        }
+    }
+
+    /*
+     * Update a cursor to refer to a new BIN or DBIN following an insert.
+     * Don't bother removing this cursor from the previous bin; Cursor will do
+     * that with a cursor swap, thereby preventing latch deadlocks down here.
+     */
+    public void updateBin(BIN bin, int index)
+        throws DatabaseException {
+
+        removeCursorDBIN();
+        setDupIndex(-1);
+        setDupBIN(null);
+        setIndex(index);
+        setBIN(bin);
+        addCursor(bin);
+    }
+
+    public void updateDBin(DBIN dupBin, int dupIndex) {
+        setDupIndex(dupIndex);
+        setDupBIN(dupBin);
+        addCursor(dupBin);
+    }
+
+    private void removeCursor()
+        throws DatabaseException {
+
+        removeCursorBIN();
+        removeCursorDBIN();
+    }
+
+    private void removeCursorBIN()
+        throws DatabaseException {
+
+        BIN abin = latchBIN();
+        if (abin != null) {
+            abin.removeCursor(this);
+            abin.releaseLatch();
+        }
+    }
+
+    private void removeCursorDBIN()
+        throws DatabaseException {
+
+        DBIN abin = latchDBIN();
+        if (abin != null) {
+            abin.removeCursor(this);
+            abin.releaseLatch();
+        }
+    }
+
+    /**
+     * Clear the reference to the dup tree, if any.
+     */
+    public void clearDupBIN(boolean alreadyLatched)
+        throws DatabaseException {
+
+        if (dupBin != null) {
+            if (alreadyLatched) {
+                dupBin.removeCursor(this);
+                dupBin.releaseLatch();
+            } else {
+                removeCursorDBIN();
+            }
+            dupBin = null;
+            dupIndex = -1;
+        }
+    }
+
+    public void dumpTree()
+        throws DatabaseException {
+
+        databaseImpl.getTree().dump();
+    }
+
+    /**
+     * @return true if this cursor is closed
+     */
+    public boolean isClosed() {
+        return (status == CURSOR_CLOSED);
+    }
+
+    /**
+     * @return true if this cursor is not initialized
+     */
+    public boolean isNotInitialized() {
+        return (status == CURSOR_NOT_INITIALIZED);
+    }
+
+    /**
+     * Reset a cursor to an uninitialized state, but unlike close(), allow it
+     * to be used further.
+     */
+    public void reset()
+        throws DatabaseException {
+
+        removeCursor();
+
+        if (!retainNonTxnLocks) {
+            locker.releaseNonTxnLocks();
+        }
+
+        bin = null;
+        index = -1;
+        dupBin = null;
+        dupIndex = -1;
+
+        status = CURSOR_NOT_INITIALIZED;
+
+        /* Perform eviction before and after each cursor operation. */
+        if (allowEviction) {
+            databaseImpl.getDbEnvironment().getEvictor().doCriticalEviction
+                (false); // backgroundIO
+        }
+    }
+
+    /**
+     * Close a cursor.
+     *
+     * @throws DatabaseException if the cursor was previously closed.
+     */
+    public void close()
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        if (cacheMode == CacheMode.EVICT_LN) {
+            evict();
+        }
+
+        removeCursor();
+        locker.unRegisterCursor(this);
+
+        if (!retainNonTxnLocks) {
+	    locker.nonTxnOperationEnd();
+        }
+
+        status = CURSOR_CLOSED;
+
+        /* Perform eviction before and after each cursor operation. */
+        if (allowEviction) {
+            databaseImpl.getDbEnvironment().getEvictor().
+		doCriticalEviction(false); // backgroundIO
+        }
+    }
+
+    public int count(LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        if (!databaseImpl.getSortedDuplicates()) {
+            return 1;
+        }
+
+        if (bin == null) {
+            return 0;
+        }
+
+        latchBIN();
+        try {
+            if (bin.getNEntries() <= index) {
+                return 0;
+            }
+
+            /* If fetchTarget returns null, a deleted LN was cleaned. */
+            Node n = bin.fetchTarget(index);
+            if (n != null && n.containsDuplicates()) {
+                DIN dupRoot = (DIN) n;
+
+                /* Latch couple down the tree. */
+                dupRoot.latch(cacheMode);
+                releaseBIN();
+                DupCountLN dupCountLN = (DupCountLN)
+                    dupRoot.getDupCountLNRef().fetchTarget(databaseImpl,
+                                                           dupRoot);
+
+                /* We can't hold latches when we acquire locks. */
+                dupRoot.releaseLatch();
+
+                /*
+                 * Call lock directly.  There is no need to call lockLN because
+                 * the node ID cannot change (a slot cannot be reused) for a
+                 * DupCountLN.
+                 */
+                if (lockType != LockType.NONE) {
+                    locker.lock
+                        (dupCountLN.getNodeId(), lockType, false /*noWait*/,
+                         databaseImpl);
+                }
+                return dupCountLN.getDupCount();
+            } else {
+                /* If an LN is in the slot, the count is one. */
+                return 1;
+            }
+        } finally {
+            releaseBIN();
+        }
+    }
+
+    /**
+     * Delete the item pointed to by the cursor. If the cursor is not
+     * initialized or the item is already deleted, return the appropriate
+     * status. bin and dupBin are latched as needed during the operation,
+     * but the method returns with nothing latched.
+     *
+     * @return SUCCESS if the record was deleted, or KEYEMPTY if the cursor
+     * position is empty or the record was already deleted.
+     */
+    public OperationStatus delete(ReplicationContext repContext)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+        boolean isDup = setTargetBin();
+
+        /* If nothing at current position, return. */
+        if (targetBin == null) {
+            return OperationStatus.KEYEMPTY;
+        }
+
+        /*
+         * Check if this is already deleted. We may know that the record is
+         * deleted w/out seeing the LN.
+         */
+        if (targetBin.isEntryKnownDeleted(targetIndex)) {
+            releaseBINs();
+            return OperationStatus.KEYEMPTY;
+        }
+
+        /* If fetchTarget returns null, a deleted LN was cleaned. */
+        LN ln = (LN) targetBin.fetchTarget(targetIndex);
+        if (ln == null) {
+            releaseBINs();
+            return OperationStatus.KEYEMPTY;
+        }
+
+        /* Get a write lock. */
+        LockResult lockResult = lockLN(ln, LockType.WRITE);
+        ln = lockResult.getLN();
+
+        /* Check LN deleted status under the protection of a write lock. */
+        if (ln == null) {
+            releaseBINs();
+            return OperationStatus.KEYEMPTY;
+        }
+
+        /* Lock the DupCountLN before logging any LNs. */
+        LockResult dclLockResult = null;
+        DIN dupRoot = null;
+        boolean dupRootIsLatched = false;
+        try {
+            isDup = (dupBin != null);
+            if (isDup) {
+                dupRoot = getLatchedDupRoot(true /*isDBINLatched*/);
+                dclLockResult = lockDupCountLN(dupRoot, LockType.WRITE);
+                /* Don't mark latched until after locked. */
+                dupRootIsLatched = true;
+
+                /*
+                 * Refresh the dupRoot variable because it may have changed
+                 * during locking, but is sure to be resident and latched by
+                 * lockDupCountLN.
+                 */
+                dupRoot = (DIN) bin.getTarget(index);
+                /* Release BIN to increase concurrency. */
+                releaseBIN();
+            }
+
+            /*
+             * Between the release of the BIN latch and acquiring the write
+             * lock any number of operations may have executed which would
+             * result in a new abort LSN for this record. Therefore, wait until
+             * now to get the abort LSN.
+             */
+            setTargetBin();
+            long oldLsn = targetBin.getLsn(targetIndex);
+            byte[] lnKey = targetBin.getKey(targetIndex);
+            lockResult.setAbortLsn
+                (oldLsn, targetBin.isEntryKnownDeleted(targetIndex));
+
+            /* Log the LN. */
+            long oldLNSize = ln.getMemorySizeIncludedByParent();
+            long newLsn = ln.delete(databaseImpl, lnKey,
+                                    dupKey, oldLsn, locker,
+                                    repContext);
+
+            /*
+             * Now update the parent of the LN (be it BIN or DBIN) to correctly
+             * reference the LN and adjust the memory sizing.  Be sure to do
+             * this update of the LSN before updating the dup count LN. In case
+             * we encounter problems there we need the LSN to match the latest
+             * version to ensure that undo works.
+             */
+            targetBin.updateNode
+                (targetIndex, ln, oldLNSize, newLsn, null /*lnSlotKey*/);
+            targetBin.setPendingDeleted(targetIndex);
+            releaseBINs();
+
+            if (isDup) {
+                dupRoot.incrementDuplicateCount
+                    (dclLockResult, dupKey, locker, false /*increment*/);
+                dupRoot.releaseLatch();
+                dupRootIsLatched = false;
+                dupRoot = null;
+
+                locker.addDeleteInfo(dupBin, new Key(lnKey));
+            } else {
+                locker.addDeleteInfo(bin, new Key(lnKey));
+            }
+
+            trace(Level.FINER, TRACE_DELETE, targetBin,
+                  ln, targetIndex, oldLsn, newLsn);
+        } finally {
+            if (dupRoot != null &&
+                dupRootIsLatched) {
+                dupRoot.releaseLatch();
+            }
+        }
+
+        return OperationStatus.SUCCESS;
+    }
+
+    /**
+     * Return a new copy of the cursor.
+     *
+     * @param samePosition If true, position the returned cursor at the same
+     * position as this cursor; if false, return a new uninitialized cursor.
+     */
+    public CursorImpl dup(boolean samePosition)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+        assert !nonCloning;
+
+        CursorImpl ret = cloneCursor(samePosition /*addCursor*/);
+
+        if (!samePosition) {
+            ret.bin = null;
+            ret.index = -1;
+            ret.dupBin = null;
+            ret.dupIndex = -1;
+
+            ret.status = CURSOR_NOT_INITIALIZED;
+        }
+
+        return ret;
+    }
+
+    /**
+     * Evict the LN node at the cursor position.  This is used for internal
+     * databases only.
+     */
+    public void evict()
+        throws DatabaseException {
+
+        evict(false); // alreadyLatched
+    }
+
+    /**
+     * Evict the LN node at the cursor position.  This is used for internal
+     * databases only.
+     */
+    public void evict(boolean alreadyLatched)
+        throws DatabaseException {
+
+        try {
+            if (!alreadyLatched) {
+                latchBINs();
+            }
+            setTargetBin();
+            if (targetIndex >= 0) {
+                targetBin.evictLN(targetIndex);
+            }
+        } finally {
+            if (!alreadyLatched) {
+                releaseBINs();
+            }
+        }
+    }
+
+    /*
+     * Puts
+     */
+
+    /**
+     * Search for the next key (or duplicate) following the given key (and
+     * datum), and acquire a range insert lock on it.  If there are no more
+     * records following the given key and datum, lock the special EOF node
+     * for the databaseImpl.
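+     *
+     * (Presumably this next-key/EOF locking is what prevents phantoms for
+     * inserts under serializable isolation.)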
+     */
+    public void lockNextKeyForInsert(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        DatabaseEntry tempKey = new DatabaseEntry
+            (key.getData(), key.getOffset(), key.getSize());
+        DatabaseEntry tempData = new DatabaseEntry
+            (data.getData(), data.getOffset(), data.getSize());
+        tempKey.setPartial(0, 0, true);
+        tempData.setPartial(0, 0, true);
+        boolean lockedNextKey = false;
+
+        /* Don't search for data if duplicates are not configured. */
+        SearchMode searchMode = databaseImpl.getSortedDuplicates() ?
+            SearchMode.BOTH_RANGE : SearchMode.SET_RANGE;
+        boolean latched = true;
+        try {
+            /* Search. */
+            int searchResult = searchAndPosition
+                (tempKey, tempData, searchMode, LockType.RANGE_INSERT);
+            if ((searchResult & FOUND) != 0 &&
+                (searchResult & FOUND_LAST) == 0) {
+
+                /*
+                 * If searchAndPosition found a record (other than the last
+                 * one), in all cases we should advance to the next record:
+                 *
+                 * 1- found a deleted record,
+                 * 2- found an exact match, or
+                 * 3- found the record prior to the given key/data.
+                 *
+                 * If we didn't match the key, skip over duplicates to the next
+                 * key with getNextNoDup.
+                 */
+                OperationStatus status;
+                if ((searchResult & EXACT_KEY) != 0) {
+                    status = getNext
+                        (tempKey, tempData, LockType.RANGE_INSERT, true, true);
+                } else {
+                    status = getNextNoDup
+                        (tempKey, tempData, LockType.RANGE_INSERT, true, true);
+                }
+                if (status == OperationStatus.SUCCESS) {
+                    lockedNextKey = true;
+                }
+                latched = false;
+            }
+        } finally {
+            if (latched) {
+                releaseBINs();
+            }
+        }
+
+        /* Lock the EOF node if no next key was found. */
+        if (!lockedNextKey) {
+            lockEofNode(LockType.RANGE_INSERT);
+        }
+    }
+
+    /**
+     * Insert the given LN in the tree or return KEYEXIST if the key is already
+     * present.
+     *
+     * <p>This method is called directly internally for putting tree map LNs
+     * and file summary LNs.  It should not be used otherwise, and in the
+     * future we should find a way to remove this special case.</p>
+     */
+    public OperationStatus putLN(byte[] key,
+                                 LN ln,
+                                 boolean allowDuplicates,
+                                 ReplicationContext repContext)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        assert LatchSupport.countLatchesHeld() == 0;
+        LockResult lockResult = locker.lock
+            (ln.getNodeId(), LockType.WRITE, false /*noWait*/, databaseImpl);
+
+        /*
+         * We'll set abortLsn down in Tree.insert when we know whether we're
+         * re-using a BIN entry or not.
+         */
+        if (databaseImpl.getTree().insert
+            (ln, key, allowDuplicates, this, lockResult, repContext)) {
+            status = CURSOR_INITIALIZED;
+            return OperationStatus.SUCCESS;
+        } else {
+            locker.releaseLock(ln.getNodeId());
+            return OperationStatus.KEYEXIST;
+        }
+    }
+
+    /**
+     * Insert or overwrite the key/data pair.
+     * @param key
+     * @param data
+     * @return SUCCESS if successful, another OperationStatus value otherwise
+     */
+    public OperationStatus put(DatabaseEntry key,
+                               DatabaseEntry data,
+                               DatabaseEntry foundData)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        OperationStatus result = putLN(Key.makeKey(key),
+                                       new LN(data,
+                                              databaseImpl.getDbEnvironment(),
+                                              databaseImpl.isReplicated()),
+                                       databaseImpl.getSortedDuplicates(),
+                                       databaseImpl.getRepContext());
+        if (result == OperationStatus.KEYEXIST) {
+            status = CURSOR_INITIALIZED;
+
+            /*
+             * If dups are allowed and putLN() returns KEYEXIST, the duplicate
+             * already exists.  However, we still need to get a write lock, and
+             * calling putCurrent does that.  Without duplicates, we have to
+             * update the data of course.
+             */
+            result = putCurrent(data,
+                                null,      // foundKey
+                                foundData,
+                                databaseImpl.getRepContext());
+        }
+        return result;
+    }
+
+    /**
+     * Insert the replicated LN. TODO: very similar to put(), refactor for
+     * HA release.
+     */
+    public OperationStatus putReplicatedLN(LNLogEntry lnEntry,
+                                           ReplicationContext repContext)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        LN ln = lnEntry.getLN();
+        OperationStatus result = putLN(lnEntry.getKey(),
+                                       ln,
+                                       databaseImpl.getSortedDuplicates(),
+                                       repContext);
+        if (result == OperationStatus.KEYEXIST) {
+            status = CURSOR_INITIALIZED;
+
+            /*
+             * If dups are allowed and putLN() returns KEYEXIST, the duplicate
+             * already exists.  However, we still need to get a write lock, and
+             * calling putCurrent does that.  Without duplicates, we have to
+             * update the data of course.
+             */
+            result = putCurrent(new DatabaseEntry(ln.getData()),
+                                null, // foundKey
+                                new DatabaseEntry(), // foundData
+                                repContext);
+        }
+        return result;
+    }
+
+    /**
+     * Delete the replicated LN.
+     */
+    public OperationStatus deleteReplicatedLN(LNLogEntry lnEntry,
+                                              ReplicationContext repContext)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(lnEntry.getKey());
+        byte[] dupKey = lnEntry.getDupKey();
+        DatabaseEntry data = null;
+        SearchMode searchMode = SearchMode.SET;
+        if (dupKey != null) {
+            data = new DatabaseEntry(dupKey);
+            searchMode = SearchMode.BOTH;
+        }
+
+        /*
+         * Position this cursor at the required record. This should
+         * always be successful.
+         */
+        try {
+            int searchResult = searchAndPosition(key,
+                                                 data,
+                                                 searchMode,
+                                                 LockType.WRITE);
+
+            if ((searchResult & FOUND) != 0) {
+                /*
+                 * SearchAndPosition leaves the target BIN or DBIN
+                 * latched. Usually we unlatch before returning to the user. In
+                 * this case, add the step of latching the BIN in a dup tree
+                 * situation so we can perform a delete.
+                 */
+                if (dupBin != null) {
+                    latchBIN();
+                }
+                return delete(repContext);
+            } else {
+                return OperationStatus.NOTFOUND;
+            }
+        } finally {
+            releaseBINs();
+        }
+    }
+
+    /**
+     * Insert the key/data pair in No Overwrite mode.
+     * @param key
+     * @param data
+     * @return SUCCESS if successful, or KEYEXIST if the key already exists
+     */
+    public OperationStatus putNoOverwrite(DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        return putLN(Key.makeKey(key),
+                     new LN(data,
+                            databaseImpl.getDbEnvironment(),
+                            databaseImpl.isReplicated()),
+                     false,   // allowDuplicates
+                     databaseImpl.getRepContext());
+    }
+
+    /**
+     * Insert the key/data pair as long as no entry for key/data exists yet.
+     */
+    public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        if (!databaseImpl.getSortedDuplicates()) {
+            throw new DatabaseException
+                ("putNoDupData() called, but databaseImpl is not configured " +
+                 "for duplicate data.");
+        }
+        return putLN(Key.makeKey(key),
+                     new LN(data,
+                            databaseImpl.getDbEnvironment(),
+                            databaseImpl.isReplicated()),
+                     true,  // allowDuplicates
+                     databaseImpl.getRepContext());
+    }
+
+    /**
+     * Modify the current record with this data.
+     * @param data
+     */
+    public OperationStatus putCurrent(DatabaseEntry data,
+                                      DatabaseEntry foundKey,
+                                      DatabaseEntry foundData,
+                                      ReplicationContext repContext)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        if (foundKey != null) {
+            foundKey.setData(null);
+        }
+        if (foundData != null) {
+            foundData.setData(null);
+        }
+
+        if (bin == null) {
+            return OperationStatus.KEYEMPTY;
+        }
+
+        latchBINs();
+        boolean isDup = setTargetBin();
+
+        try {
+
+            /*
+             * Find the existing entry and get a reference to all BIN fields
+             * while latched.
+             */
+            LN ln = (LN) targetBin.fetchTarget(targetIndex);
+            byte[] lnKey = targetBin.getKey(targetIndex);
+
+            /* If fetchTarget returned null, a deleted LN was cleaned. */
+            if (targetBin.isEntryKnownDeleted(targetIndex) ||
+                ln == null) {
+                releaseBINs();
+                return OperationStatus.NOTFOUND;
+            }
+
+            /* Get a write lock. */
+            LockResult lockResult = lockLN(ln, LockType.WRITE);
+            ln = lockResult.getLN();
+
+            /* Check LN deleted status under the protection of a write lock. */
+            if (ln == null) {
+                releaseBINs();
+                return OperationStatus.NOTFOUND;
+            }
+
+            /*
+             * If the cursor points at a dup, we can only replace the entry
+             * with a new entry that is "equal" to the old one.  Since a
+             * user-defined comparison function may compare two byte sequences
+             * as equal even when they differ, we still have to do the
+             * replace.  Arguably we could skip the replacement when there is
+             * no user-defined comparison function and the new data is
+             * identical.
+             */
+            byte[] foundDataBytes;
+            byte[] foundKeyBytes;
+            isDup = setTargetBin();
+            if (isDup) {
+                foundDataBytes = lnKey;
+                foundKeyBytes = targetBin.getDupKey();
+            } else {
+                foundDataBytes = ln.getData();
+                foundKeyBytes = lnKey;
+            }
+            byte[] newData;
+
+            /* Resolve partial puts. */
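+            /*
+             * Worked example (hypothetical values): old data {1,2,3,4,5},
+             * a partial put with doff=1, dlen=2 and new bytes {9} yields
+             * {1,9,4,5}; len = max(doff + dlen, origlen) - dlen + size
+             * = 5 - 2 + 1 = 4.
+             */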
+            if (data.getPartial()) {
+                int dlen = data.getPartialLength();
+                int doff = data.getPartialOffset();
+                int origlen = (foundDataBytes != null) ?
+                    foundDataBytes.length : 0;
+                int oldlen = (doff + dlen > origlen) ? doff + dlen : origlen;
+                int len = oldlen - dlen + data.getSize();
+
+                if (len == 0) {
+                    newData = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+                } else {
+                    newData = new byte[len];
+                }
+                int pos = 0;
+
+                /*
+                 * Keep 0..doff of the old data (truncating if doff > length).
+                 */
+                int slicelen = (doff < origlen) ? doff : origlen;
+                if (slicelen > 0)
+                    System.arraycopy(foundDataBytes, 0, newData,
+                                     pos, slicelen);
+                pos += doff;
+
+                /* Copy in the new data. */
+                slicelen = data.getSize();
+                System.arraycopy(data.getData(), data.getOffset(),
+                                 newData, pos, slicelen);
+                pos += slicelen;
+
+                /* Append the rest of the old data (if any). */
+                slicelen = origlen - (doff + dlen);
+                if (slicelen > 0)
+                    System.arraycopy(foundDataBytes, doff + dlen, newData, pos,
+                                     slicelen);
+            } else {
+                int len = data.getSize();
+                if (len == 0) {
+                    newData = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+                } else {
+                    newData = new byte[len];
+                }
+                System.arraycopy(data.getData(), data.getOffset(),
+                                 newData, 0, len);
+            }
+
+            if (databaseImpl.getSortedDuplicates()) {
+                /* Check that data compares equal before replacing it. */
+                boolean keysEqual = false;
+                if (foundDataBytes != null) {
+                    keysEqual = Key.compareKeys
+                        (foundDataBytes, newData,
+                         databaseImpl.getDuplicateComparator()) == 0;
+                }
+
+                if (!keysEqual) {
+                    revertLock(ln, lockResult);
+                    throw new DatabaseException
+                        ("Can't replace a duplicate with different data.");
+                }
+            }
+
+            if (foundData != null) {
+                setDbt(foundData, foundDataBytes);
+            }
+            if (foundKey != null) {
+                setDbt(foundKey, foundKeyBytes);
+            }
+
+            /*
+             * Between the release of the BIN latch and acquiring the write
+             * lock any number of operations may have executed which would
+             * result in a new abort LSN for this record. Therefore, wait until
+             * now to get the abort LSN.
+             */
+            long oldLsn = targetBin.getLsn(targetIndex);
+            lockResult.setAbortLsn
+                (oldLsn, targetBin.isEntryKnownDeleted(targetIndex));
+
+            /*
+             * The modify has to be inside the latch so that the BIN is updated
+             * inside the latch.
+             */
+            long oldLNSize = ln.getMemorySizeIncludedByParent();
+            byte[] newKey = (isDup ? targetBin.getDupKey() : lnKey);
+            long newLsn = ln.modify(newData, databaseImpl, newKey,
+                                    oldLsn, locker, repContext);
+
+            /*
+             * Update the parent BIN.  Update the data-as-key, if changed, for
+             * a DBIN. [#15704]
+             */
+            targetBin.updateNode
+                (targetIndex, ln, oldLNSize, newLsn, isDup ? newData : null);
+            releaseBINs();
+
+            trace(Level.FINER, TRACE_MOD, targetBin,
+                  ln, targetIndex, oldLsn, newLsn);
+
+            status = CURSOR_INITIALIZED;
+            return OperationStatus.SUCCESS;
+        } finally {
+            releaseBINs();
+        }
+    }
+
+    /*
+     * Get operations.
+     */
+
+    /**
+     * Retrieve the current record.
+     */
+    public OperationStatus getCurrent(DatabaseEntry foundKey,
+                                      DatabaseEntry foundData,
+                                      LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        /* If not pointing at a valid entry, return failure. */
+        if (bin == null) {
+            return OperationStatus.KEYEMPTY;
+        }
+
+        if (dupBin == null) {
+            latchBIN();
+        } else {
+            latchDBIN();
+        }
+
+        return getCurrentAlreadyLatched(foundKey, foundData, lockType, true);
+    }
+
+    /**
+     * Retrieve the current record, assuming the BIN is already latched.
+     * Returns with the target BIN unlatched.
+     */
+    public OperationStatus getCurrentAlreadyLatched(DatabaseEntry foundKey,
+                                                    DatabaseEntry foundData,
+                                                    LockType lockType,
+                                                    boolean first)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+        assert checkAlreadyLatched(true) : dumpToString(true);
+
+        try {
+            return fetchCurrent(foundKey, foundData, lockType, first);
+        } finally {
+            releaseBINs();
+        }
+    }
+
+    /**
+     * Retrieve the current LN, returning with the target BIN unlatched.
+     */
+    public LN getCurrentLN(LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        if (bin == null) {
+            return null;
+        } else {
+            latchBIN();
+            return getCurrentLNAlreadyLatched(lockType);
+        }
+    }
+
+    /**
+     * Retrieve the current LN, assuming the BIN is already latched.  Return
+     * with the target BIN unlatched.
+     */
+    public LN getCurrentLNAlreadyLatched(LockType lockType)
+        throws DatabaseException {
+
+        try {
+            assert assertCursorState(true) : dumpToString(true);
+            assert checkAlreadyLatched(true) : dumpToString(true);
+
+            if (bin == null) {
+                return null;
+            }
+
+            /*
+             * Get a reference to the LN under the latch.  Check the deleted
+             * flag in the BIN.  If fetchTarget returns null, a deleted LN was
+             * cleaned.
+             */
+            LN ln = null;
+            if (!bin.isEntryKnownDeleted(index)) {
+                ln = (LN) bin.fetchTarget(index);
+            }
+            if (ln == null) {
+                releaseBIN();
+                return null;
+            }
+
+            addCursor(bin);
+
+            /* Lock LN.  */
+            LockResult lockResult = lockLN(ln, lockType);
+            ln = lockResult.getLN();
+
+            /* Don't set abort LSN for a read operation! */
+            return ln;
+
+        } finally {
+            releaseBINs();
+        }
+    }
+
+    public OperationStatus getNext(DatabaseEntry foundKey,
+                                   DatabaseEntry foundData,
+                                   LockType lockType,
+                                   boolean forward,
+                                   boolean alreadyLatched)
+        throws DatabaseException {
+
+        return getNextWithKeyChangeStatus
+            (foundKey, foundData, lockType, forward, alreadyLatched).status;
+    }
+
+    /**
+     * Move the cursor forward and return the next record. This will cross BIN
+     * boundaries and dip into duplicate sets.
+     *
+     * @param foundKey DatabaseEntry to use for returning key
+     *
+     * @param foundData DatabaseEntry to use for returning data
+     *
+     * @param lockType the type of lock to take on the returned record
+     *
+     * @param forward if true, move forward, else move backwards
+     *
+     * @param alreadyLatched if true, the bin that we're on is already
+     * latched.
+     *
+     * @return the status and an indication of whether we advanced to a new
+     * key during the operation.
+     */
+    public KeyChangeStatus
+        getNextWithKeyChangeStatus(DatabaseEntry foundKey,
+                                   DatabaseEntry foundData,
+                                   LockType lockType,
+                                   boolean forward,
+                                   boolean alreadyLatched)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+        assert checkAlreadyLatched(alreadyLatched) : dumpToString(true);
+
+        KeyChangeStatus result =
+            new KeyChangeStatus(OperationStatus.NOTFOUND, true);
+
+        try {
+            while (bin != null) {
+
+                /* Are we positioned on a DBIN? */
+                if (dupBin != null) {
+                    if (DEBUG) {
+                        verifyCursor(dupBin);
+                    }
+                    if (getNextDuplicate(foundKey, foundData, lockType,
+                                         forward, alreadyLatched) ==
+                        OperationStatus.SUCCESS) {
+                        result.status = OperationStatus.SUCCESS;
+                        /* We returned a duplicate. */
+                        result.keyChange = false;
+                        break;
+                    } else {
+                        removeCursorDBIN();
+                        alreadyLatched = false;
+                        dupBin = null;
+                        dupIndex = -1;
+                        continue;
+                    }
+                }
+
+                assert checkAlreadyLatched(alreadyLatched) :
+                    dumpToString(true);
+                if (!alreadyLatched) {
+                    latchBIN();
+                } else {
+                    alreadyLatched = false;
+                }
+
+                if (DEBUG) {
+                    verifyCursor(bin);
+                }
+
+                /* Is there anything left on this BIN? */
+                if ((forward && ++index < bin.getNEntries()) ||
+                    (!forward && --index > -1)) {
+
+                    OperationStatus ret =
+                        getCurrentAlreadyLatched(foundKey, foundData,
+                                                 lockType, forward);
+                    if (ret == OperationStatus.SUCCESS) {
+                        incrementLNCount();
+                        result.status = OperationStatus.SUCCESS;
+                        break;
+                    } else {
+                        assert LatchSupport.countLatchesHeld() == 0;
+
+                        if (binToBeRemoved != null) {
+                            flushBINToBeRemoved();
+                        }
+
+                        continue;
+                    }
+
+                } else {
+
+                    /*
+                     * binToBeRemoved is used to release a BIN earlier in the
+                     * traversal chain when we move onto the next BIN. When
+                     * we traverse across BINs, there is a point when two BINs
+                     * point to the same cursor.
+                     *
+                     * Example:  BINa(empty) BINb(empty) BINc(populated)
+                     *           Cursor (C) is traversing
+                     * loop, leaving BINa:
+                     *   binToBeRemoved is null, C points to BINa, BINa
+                     *   points to C; set binToBeRemoved to BINa
+                     *   find BINb, make BINb point to C
+                     *   note that BINa and BINb point to C.
+                     * loop, leaving BINb:
+                     *   binToBeRemoved == BINa, remove C from BINa
+                     *   set binToBeRemoved to BINb
+                     *   find BINc, make BINc point to C
+                     *   note that BINb and BINc point to C
+                     * finally, when leaving this method, remove C from BINb.
+                     */
+                    if (binToBeRemoved != null) {
+                        releaseBIN();
+                        flushBINToBeRemoved();
+                        latchBIN();
+                    }
+                    binToBeRemoved = bin;
+                    bin = null;
+
+                    BIN newBin;
+
+                    /*
+                     * SR #12736
+                     * Prune away the old BIN. The assert below has an
+                     * intentional side effect.
+                     */
+                    assert TestHookExecute.doHookIfSet(testHook);
+
+                    if (forward) {
+                        newBin = databaseImpl.getTree().getNextBin
+                            (binToBeRemoved,
+                             false /*traverseWithinDupTree*/,
+                             cacheMode);
+                    } else {
+                        newBin = databaseImpl.getTree().getPrevBin
+                            (binToBeRemoved,
+                             false /*traverseWithinDupTree*/,
+                             cacheMode);
+                    }
+                    if (newBin == null) {
+                        result.status = OperationStatus.NOTFOUND;
+                        break;
+                    } else {
+                        if (forward) {
+                            index = -1;
+                        } else {
+                            index = newBin.getNEntries();
+                        }
+                        addCursor(newBin);
+                        /* Ensure that setting bin is under newBin's latch */
+                        bin = newBin;
+                        alreadyLatched = true;
+                    }
+                }
+            }
+        } finally {
+            assert LatchSupport.countLatchesHeld() == 0 :
+                LatchSupport.latchesHeldToString();
+            if (binToBeRemoved != null) {
+                flushBINToBeRemoved();
+            }
+        }
+        return result;
+    }
+
+    private void flushBINToBeRemoved()
+        throws DatabaseException {
+
+        binToBeRemoved.latch(cacheMode);
+        binToBeRemoved.removeCursor(this);
+        binToBeRemoved.releaseLatch();
+        binToBeRemoved = null;
+    }
+
+    public OperationStatus getNextNoDup(DatabaseEntry foundKey,
+                                        DatabaseEntry foundData,
+                                        LockType lockType,
+                                        boolean forward,
+                                        boolean alreadyLatched)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        if (dupBin != null) {
+            clearDupBIN(alreadyLatched);
+            alreadyLatched = false;
+        }
+
+        return getNext(foundKey, foundData, lockType, forward, alreadyLatched);
+    }
+
+    /**
+     * Retrieve the first duplicate at the current cursor position.
+     */
+    public OperationStatus getFirstDuplicate(DatabaseEntry foundKey,
+                                             DatabaseEntry foundData,
+                                             LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+
+        /*
+         * By clearing the dupBin, the next call to fetchCurrent will move to
+         * the first duplicate.
+         */
+        if (dupBin != null) {
+            removeCursorDBIN();
+            dupBin = null;
+            dupIndex = -1;
+        }
+
+        return getCurrent(foundKey, foundData, lockType);
+    }
+
+    /**
+     * Enter with dupBin unlatched.  Pass foundKey == null to just advance
+     * cursor to next duplicate without fetching data.
+     */
+    public OperationStatus getNextDuplicate(DatabaseEntry foundKey,
+                                            DatabaseEntry foundData,
+                                            LockType lockType,
+                                            boolean forward,
+                                            boolean alreadyLatched)
+        throws DatabaseException {
+
+        assert assertCursorState(true) : dumpToString(true);
+        assert checkAlreadyLatched(alreadyLatched) : dumpToString(true);
+        try {
+            while (dupBin != null) {
+                if (!alreadyLatched) {
+                    latchDBIN();
+                } else {
+                    alreadyLatched = false;
+                }
+
+                if (DEBUG) {
+                    verifyCursor(dupBin);
+                }
+
+                /* Are we still on this DBIN? */
+                if ((forward && ++dupIndex < dupBin.getNEntries()) ||
+                    (!forward && --dupIndex > -1)) {
+
+                    OperationStatus ret = OperationStatus.SUCCESS;
+                    if (foundKey != null) {
+                        ret = getCurrentAlreadyLatched(foundKey, foundData,
+                                                       lockType, forward);
+                    } else {
+                        releaseDBIN();
+                    }
+                    if (ret == OperationStatus.SUCCESS) {
+                        incrementLNCount();
+                        return ret;
+                    } else {
+                        assert LatchSupport.countLatchesHeld() == 0;
+
+                        if (dupBinToBeRemoved != null) {
+                            flushDBINToBeRemoved();
+                        }
+
+                        continue;
+                    }
+
+                } else {
+
+                    /*
+                     * We need to go to the next DBIN.  Remove the cursor and
+                     * be sure to change the dupBin field after removing the
+                     * cursor.
+                     */
+                    if (dupBinToBeRemoved != null) {
+                        flushDBINToBeRemoved();
+                    }
+                    dupBinToBeRemoved = dupBin;
+
+                    dupBin = null;
+                    dupBinToBeRemoved.releaseLatch();
+
+                    TreeWalkerStatsAccumulator treeStatsAccumulator =
+                        getTreeStatsAccumulator();
+                    if (treeStatsAccumulator != null) {
+                        latchBIN();
+                        try {
+                            if (index < 0) {
+                                /* This duplicate tree has been deleted. */
+                                return OperationStatus.NOTFOUND;
+                            }
+
+                            DIN duplicateRoot = (DIN) bin.fetchTarget(index);
+                            duplicateRoot.latch(cacheMode);
+                            try {
+                                DupCountLN dcl = duplicateRoot.getDupCountLN();
+                                if (dcl != null) {
+                                    dcl.accumulateStats(treeStatsAccumulator);
+                                }
+                            } finally {
+                                duplicateRoot.releaseLatch();
+                            }
+                        } finally {
+                            releaseBIN();
+                        }
+                    }
+                    assert (LatchSupport.countLatchesHeld() == 0);
+
+                    dupBinToBeRemoved.latch(cacheMode);
+                    DBIN newDupBin;
+
+                    if (forward) {
+                        newDupBin = (DBIN) databaseImpl.getTree().getNextBin
+                            (dupBinToBeRemoved,
+                             true /*traverseWithinDupTree*/,
+                             cacheMode);
+                    } else {
+                        newDupBin = (DBIN) databaseImpl.getTree().getPrevBin
+                            (dupBinToBeRemoved,
+                             true /*traverseWithinDupTree*/,
+                             cacheMode);
+                    }
+
+                    if (newDupBin == null) {
+                        return OperationStatus.NOTFOUND;
+                    } else {
+                        if (forward) {
+                            dupIndex = -1;
+                        } else {
+                            dupIndex = newDupBin.getNEntries();
+                        }
+                        addCursor(newDupBin);
+
+                        /*
+                         * Ensure that setting dupBin is under newDupBin's
+                         * latch.
+                         */
+                        dupBin = newDupBin;
+                        alreadyLatched = true;
+                    }
+                }
+            }
+        } finally {
+            assert LatchSupport.countLatchesHeld() == 0;
+            if (dupBinToBeRemoved != null) {
+                flushDBINToBeRemoved();
+            }
+        }
+
+        return OperationStatus.NOTFOUND;
+    }
+
+    private void flushDBINToBeRemoved()
+        throws DatabaseException {
+
+        dupBinToBeRemoved.latch(cacheMode);
+        dupBinToBeRemoved.removeCursor(this);
+        dupBinToBeRemoved.releaseLatch();
+        dupBinToBeRemoved = null;
+    }
+
+    /**
+     * Position the cursor at the first or last record of the databaseImpl.
+     * It's okay if this record is deleted. Returns with the target BIN
+     * latched.
+     *
+     * @return true if a first or last position is found, false if the
+     * tree being searched is empty.
+     */
+    public boolean positionFirstOrLast(boolean first, DIN duplicateRoot)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        IN in = null;
+        boolean found = false;
+        try {
+            if (duplicateRoot == null) {
+                removeCursorBIN();
+                if (first) {
+                    in = databaseImpl.getTree().getFirstNode(cacheMode);
+                } else {
+                    in = databaseImpl.getTree().getLastNode(cacheMode);
+                }
+
+                if (in != null) {
+
+                    assert (in instanceof BIN);
+
+                    dupBin = null;
+                    dupIndex = -1;
+                    bin = (BIN) in;
+                    index = (first ? 0 : (bin.getNEntries() - 1));
+                    addCursor(bin);
+
+                    TreeWalkerStatsAccumulator treeStatsAccumulator =
+                        getTreeStatsAccumulator();
+
+                    if (bin.getNEntries() == 0) {
+
+                        /*
+                         * An IN was found. Even if it's empty, let Cursor
+                         * handle moving to the first non-deleted entry.
+                         */
+                        found = true;
+                    } else {
+
+                        /*
+                         * See if we need to descend further.  If fetchTarget
+                         * returns null, a deleted LN was cleaned.
+                         */
+                        Node n = null;
+                        if (!in.isEntryKnownDeleted(index)) {
+                            n = in.fetchTarget(index);
+                        }
+
+                        if (n != null && n.containsDuplicates()) {
+                            DIN dupRoot = (DIN) n;
+                            dupRoot.latch(cacheMode);
+                            in.releaseLatch();
+                            in = null;
+                            found = positionFirstOrLast(first, dupRoot);
+                        } else {
+
+                            /*
+                             * Even if the entry is deleted, just leave our
+                             * position here and return.
+                             */
+                            if (treeStatsAccumulator != null) {
+                                if (n == null || ((LN) n).isDeleted()) {
+                                    treeStatsAccumulator.
+                                        incrementDeletedLNCount();
+                                } else {
+                                    treeStatsAccumulator.
+                                        incrementLNCount();
+                                }
+                            }
+                            found = true;
+                        }
+                    }
+                }
+            } else {
+                removeCursorDBIN();
+                if (first) {
+                    in = databaseImpl.getTree().
+                        getFirstNode(duplicateRoot, cacheMode);
+                } else {
+                    in = databaseImpl.getTree().
+                        getLastNode(duplicateRoot, cacheMode);
+                }
+
+                if (in != null) {
+
+                    /*
+                     * An IN was found. Even if it's empty, let Cursor handle
+                     * moving to the first non-deleted entry.
+                     */
+                    /*
+                     * assert (in instanceof DBIN);
+                     * Will always be true since Tree.getFirst/LastNode always
+                     * returns a DBIN.
+                     */
+
+                    dupBin = (DBIN) in;
+                    dupIndex = (first ? 0 : (dupBin.getNEntries() - 1));
+                    addCursor(dupBin);
+                    found = true;
+                }
+            }
+            status = CURSOR_INITIALIZED;
+            return found;
+        } catch (DatabaseException e) {
+            /* Release latch on error. */
+            if (in != null) {
+                in.releaseLatch();
+            }
+            throw e;
+        }
+    }
+
+    public static final int FOUND = 0x1;
+    /* Exact match on the key portion. */
+    public static final int EXACT_KEY = 0x2;
+    /* Exact match on the DATA portion when searchAndPositionBoth is used. */
+    public static final int EXACT_DATA = 0x4;
+    /* Record found is the last one in the databaseImpl. */
+    public static final int FOUND_LAST = 0x8;
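+
+    /*
+     * Sketch of how a caller might decode the multi-part return value of
+     * searchAndPosition (illustrative only, not part of this class):
+     *
+     *   int result = cursor.searchAndPosition(key, data, searchMode, lock);
+     *   if ((result & FOUND) != 0) {
+     *       boolean exactKey  = (result & EXACT_KEY) != 0;
+     *       boolean exactData = (result & EXACT_DATA) != 0;
+     *       boolean last      = (result & FOUND_LAST) != 0;
+     *   }
+     */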
+
+    /**
+     * Position the cursor at the key. This returns a multi-part value that's
+     * bitwise OR'ed into the int. We find out if there was any kind of match
+     * and if the match was exact. Note that this match focuses on whether the
+     * search criteria (key, or key and data, depending on the search type)
+     * are met.
+     *
+     * <p>Note this returns with the BIN latched!</p>
+     *
+     * <p>If this method returns without the FOUND bit set, the caller can
+     * assume that no match is possible.  Otherwise, if the FOUND bit is set,
+     * the caller should check the EXACT_KEY and EXACT_DATA bits.  If EXACT_KEY
+     * is not set (or for BOTH and BOTH_RANGE, if EXACT_DATA is not set), an
+     * approximate match was found.  In an approximate match, the cursor is
+     * always positioned before the target key/data.  This allows the caller to
+     * perform a 'next' operation to advance to the value that is equal or
+     * higher than the target key/data.</p>
+     *
+     * <p>Even if the search returns an exact result, the record may be
+     * deleted.  The caller must therefore check for both an approximate match
+     * and for whether the cursor is positioned on a deleted record.</p>
+     *
+     * <p>If SET or BOTH is specified, the FOUND bit will only be returned if
+     * an exact match is found.  However, the record found may be deleted.</p>
+     *
+     * <p>There is one special case where this method may be called without
+     * checking the EXACT_KEY (and EXACT_DATA) bits and without checking for a
+     * deleted record:  If SearchMode.SET is specified then only the FOUND bit
+     * need be checked.  When SET is specified and FOUND is returned, it is
+     * guaranteed to be an exact match on a non-deleted record.  It is for this
+     * case only that this method is public.</p>
+     *
+     * <p>If FOUND is set, FOUND_LAST may also be set if the cursor is
+     * positioned on the last record in the databaseImpl.  Note that this state
+     * can only be counted on as long as the BIN is latched, so it is not set
+     * if this method must release the latch to lock the record.  Therefore, it
+     * should only be used for optimizations.  If FOUND_LAST is set, the cursor
+     * is positioned on the last record and the BIN is latched.  If FOUND_LAST
+     * is not set, the cursor may or may not be positioned on the last record.
+     * Note that exact searches always perform an unlatch and a lock, so
+     * FOUND_LAST will only be set for inexact (range) searches.</p>
+     *
+     * <p>Be aware that when an approximate match is returned, the index or
+     * dupIndex may be set to -1.  This is done intentionally so that a 'next'
+     * operation will increment it.</p>
+     */
+    public int searchAndPosition(DatabaseEntry matchKey,
+                                 DatabaseEntry matchData,
+                                 SearchMode searchMode,
+                                 LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        removeCursor();
+
+        /* Reset the cursor. */
+        bin = null;
+        dupBin = null;
+        dupIndex = -1;
+
+        boolean foundSomething = false;
+        boolean foundExactKey = false;
+        boolean foundExactData = false;
+        boolean foundLast = false;
+        boolean exactSearch = searchMode.isExactSearch();
+        BINBoundary binBoundary = new BINBoundary();
+
+        try {
+            byte[] key = Key.makeKey(matchKey);
+            bin = (BIN) databaseImpl.getTree().search
+                (key, Tree.SearchType.NORMAL, -1, binBoundary, cacheMode);
+
+            if (bin != null) {
+                addCursor(bin);
+
+                /*
+                 * If we're doing an exact search, tell bin.findEntry we
+                 * require an exact match. If it's a range search, we don't
+                 * need that exact match.
+                 */
+                index = bin.findEntry(key, true, exactSearch);
+
+                /*
+                 * If we're doing an exact search, as a starting point, we'll
+                 * assume that we haven't found anything. If this is a range
+                 * search, we'll assume the opposite, that we have found a
+                 * record. That's because for a range search, the higher level
+                 * will take care of sorting out whether anything is really
+                 * there or not.
+                 */
+                foundSomething = !exactSearch;
+                boolean containsDuplicates = false;
+
+                if (index >= 0) {
+                    if ((index & IN.EXACT_MATCH) != 0) {
+
+                        /*
+                         * The binary search told us we had an exact match.
+                         * Note that this really only tells us that the key
+                         * matched. The underlying LN may be deleted or the
+                         * reference may be knownDeleted, or maybe there's a
+                         * dup tree w/no entries, but the next layer up will
+                         * find these cases.
+                         */
+                        foundExactKey = true;
+
+                        /*
+                         * Now turn off the exact match bit so the index will
+                         * be a valid value, before we use it to retrieve the
+                         * child reference from the bin.
+                         */
+                        index &= ~IN.EXACT_MATCH;
+                    }
+
+                    /*
+                     * If fetchTarget returns null, a deleted LN was cleaned.
+                     */
+                    Node n = null;
+                    if (!bin.isEntryKnownDeleted(index)) {
+                        n = bin.fetchTarget(index);
+                    }
+                    if (n != null) {
+                        containsDuplicates = n.containsDuplicates();
+                        if (searchMode.isDataSearch()) {
+                            if (foundExactKey) {
+                                /* If the key matches, try the data. */
+                                int searchResult = searchAndPositionBoth
+                                    (containsDuplicates, n, matchData,
+                                     exactSearch, lockType);
+                                foundSomething =
+                                    (searchResult & FOUND) != 0;
+                                foundExactData =
+                                    (searchResult & EXACT_DATA) != 0;
+                            }
+                        } else {
+                            foundSomething = true;
+                            if (!containsDuplicates && exactSearch) {
+                                /* Lock LN, check if deleted. */
+                                LN ln = (LN) n;
+                                LockResult lockResult = lockLN(ln, lockType);
+                                ln = lockResult.getLN();
+
+                                if (ln == null) {
+                                    foundSomething = false;
+                                }
+
+                                /*
+                                 * Note that we must not set the abort LSN for
+                                 * a read operation, lest false obsoletes are
+                                 * set. [13158]
+                                 */
+                            }
+                        }
+                    }
+
+                    /*
+                     * Determine whether the last record was found.  This is
+                     * only possible when we don't lock the record, and when
+                     * there are no duplicates.
+                     */
+                    foundLast = (searchMode == SearchMode.SET_RANGE &&
+                                 foundSomething &&
+                                 !containsDuplicates &&
+                                 binBoundary.isLastBin &&
+                                 index == bin.getNEntries() - 1);
+                }
+            }
+            status = CURSOR_INITIALIZED;
+
+            /* Return a multi-part status value */
+            return (foundSomething ? FOUND : 0) |
+                (foundExactKey ? EXACT_KEY : 0) |
+                (foundExactData ? EXACT_DATA : 0) |
+                (foundLast ? FOUND_LAST : 0);
+        } catch (DatabaseException e) {
+            /* Release latch on error. */
+            releaseBIN();
+            throw e;
+        }
+    }
+
+    /**
+     * For this type of search, we need to match both key and data.  This
+     * method is called after the key is matched to perform the data portion of
+     * the match. We may be matching just against an LN, or doing further
+     * searching into the dup tree.  See searchAndPosition for more details.
+     */
+    private int searchAndPositionBoth(boolean containsDuplicates,
+                                      Node n,
+                                      DatabaseEntry matchData,
+                                      boolean exactSearch,
+                                      LockType lockType)
+        throws DatabaseException {
+
+        assert assertCursorState(false) : dumpToString(true);
+
+        boolean found = false;
+        boolean exact = false;
+        assert (matchData != null);
+        byte[] data = Key.makeKey(matchData);
+
+        if (containsDuplicates) {
+            /* It's a duplicate tree. */
+            DIN duplicateRoot = (DIN) n;
+            duplicateRoot.latch(cacheMode);
+            releaseBIN();
+            dupBin = (DBIN) databaseImpl.getTree().searchSubTree
+                (duplicateRoot, data, Tree.SearchType.NORMAL,
+                 -1, null, cacheMode);
+            if (dupBin != null) {
+                /* Find an exact match. */
+                addCursor(dupBin);
+                dupIndex = dupBin.findEntry(data, true, exactSearch);
+                if (dupIndex >= 0) {
+                    if ((dupIndex & IN.EXACT_MATCH) != 0) {
+                        exact = true;
+                    }
+                    dupIndex &= ~IN.EXACT_MATCH;
+                    found = true;
+                } else {
+
+                    /*
+                     * The first duplicate is greater than the target data.
+                     * Set index so that a 'next' operation moves to the first
+                     * duplicate.
+                     */
+                    dupIndex = -1;
+                    found = !exactSearch;
+                }
+            }
+        } else {
+            /* Not a duplicate, but checking for both key and data match. */
+            LN ln = (LN) n;
+
+            /* Lock LN, check if deleted. */
+            LockResult lockResult = lockLN(ln, lockType);
+
+            /*
+             * Note that during the lockLN call, this cursor may have been
+             * adjusted to refer to an LN in a duplicate tree.  This happens in
+             * the case where we entered with a non-duplicate tree LN and
+             * during the lock call it was mutated to a duplicate tree.  The LN
+             * is still the correct LN, but our cursor is now down in a
+             * duplicate tree. [#14230].
+             */
+            ln = lockResult.getLN();
+
+            if (ln == null) {
+                found = !exactSearch;
+            } else {
+
+                /* Don't set abort LSN for read operation. [#13158] */
+
+                /*
+                 * The comparison logic below mimics IN.findEntry as used above
+                 * for duplicates.
+                 */
+                int cmp = Key.compareKeys
+                    (ln.getData(), data, databaseImpl.getDuplicateComparator());
+                if (cmp == 0 || (cmp <= 0 && !exactSearch)) {
+                    if (cmp == 0) {
+                        exact = true;
+                    }
+                    found = true;
+                } else {
+
+                    /*
+                     * The current record's data is greater than the target
+                     * data.  Set index so that a 'next' operation moves to the
+                     * current record.
+                     */
+                    if (dupBin == null) {
+                        index--;
+                    } else {
+                        /* We may now be pointing at a dup tree. [#14230]. */
+                        dupIndex--;
+                    }
+                    found = !exactSearch;
+                }
+            }
+        }
+
+        return (found ? FOUND : 0) |
+            (exact ? EXACT_DATA : 0);
+    }
+
+    /*
+     * Lock and copy the current record into the key and data entries. Enter
+     * with the BIN/DBIN latched.
+     */
+    private OperationStatus fetchCurrent(DatabaseEntry foundKey,
+                                         DatabaseEntry foundData,
+                                         LockType lockType,
+                                         boolean first)
+        throws DatabaseException {
+
+        TreeWalkerStatsAccumulator treeStatsAccumulator =
+            getTreeStatsAccumulator();
+
+        boolean duplicateFetch = setTargetBin();
+        if (targetBin == null) {
+            return OperationStatus.NOTFOUND;
+        }
+
+        assert targetBin.isLatchOwnerForWrite();
+
+        /*
+         * Check the deleted flag in the BIN and make sure this isn't an empty
+         * BIN.  The BIN could be empty because the compressor has reduced its
+         * size to 0 but has not yet deleted it from the tree.
+         *
+         * The index may be negative if we're at an intermediate stage in a
+         * higher-level operation, and we expect a higher-level method to do a
+         * next or prev operation after this returns KEYEMPTY. [#11700]
+         */
+        Node n = null;
+
+        if (targetIndex < 0 ||
+            targetIndex >= targetBin.getNEntries() ||
+            targetBin.isEntryKnownDeleted(targetIndex)) {
+            /* Node is no longer present. */
+        } else {
+
+            /*
+             * If we encounter a pendingDeleted entry, add it to the compressor
+             * queue.
+             */
+            if (targetBin.isEntryPendingDeleted(targetIndex)) {
+                EnvironmentImpl envImpl = databaseImpl.getDbEnvironment();
+                envImpl.addToCompressorQueue
+                    (targetBin, new Key(targetBin.getKey(targetIndex)), false);
+            }
+
+            /* If fetchTarget returns null, a deleted LN was cleaned. */
+            try {
+                n = targetBin.fetchTarget(targetIndex);
+            } catch (DatabaseException DE) {
+                targetBin.releaseLatch();
+                throw DE;
+            }
+        }
+
+        if (n == null) {
+            if (treeStatsAccumulator != null) {
+                treeStatsAccumulator.incrementDeletedLNCount();
+            }
+            targetBin.releaseLatch();
+            return OperationStatus.KEYEMPTY;
+        }
+
+        /*
+         * Note that since we have the BIN/DBIN latched, we can safely check
+         * the node type. Any conversions from an LN to a dup tree must have
+         * the bin latched.
+         */
+        addCursor(targetBin);
+        if (n.containsDuplicates()) {
+            assert !duplicateFetch;
+            /* Descend down duplicate tree, doing latch coupling. */
+            DIN duplicateRoot = (DIN) n;
+            duplicateRoot.latch(cacheMode);
+            targetBin.releaseLatch();
+            if (positionFirstOrLast(first, duplicateRoot)) {
+                try {
+                    return fetchCurrent(foundKey, foundData, lockType, first);
+                } catch (DatabaseException DE) {
+                    releaseBINs();
+                    throw DE;
+                }
+            } else {
+                return OperationStatus.NOTFOUND;
+            }
+        }
+
+        LN ln = (LN) n;
+
+        assert TestHookExecute.doHookIfSet(testHook);
+
+        /*
+         * Lock the LN.  For dirty-read, the data of the LN can be set to null
+         * at any time.  Cache the data in a local variable so its state does
+         * not change before calling setDbt further below.
+         */
+        LockResult lockResult = lockLN(ln, lockType);
+        try {
+            ln = lockResult.getLN();
+            byte[] lnData = (ln != null) ? ln.getData() : null;
+            if (ln == null || lnData == null) {
+                if (treeStatsAccumulator != null) {
+                    treeStatsAccumulator.incrementDeletedLNCount();
+                }
+                return OperationStatus.KEYEMPTY;
+            }
+
+            /*
+             * Don't set the abort LSN here since we are not logging yet, even
+             * if this is a write lock.  Tree.insert depends on the fact that
+             * the abortLSN is not already set for deleted items.
+             */
+
+            /*
+             * Return the key from the targetBin because only the targetBin is
+             * guaranteed to be latched by lockLN above, and the key is not
+             * available as part of the LN.  [#15704]
+             */
+            if (foundKey != null) {
+                duplicateFetch = setTargetBin();
+                setDbt(foundKey, duplicateFetch ? dupKey :
+                       targetBin.getKey(targetIndex));
+            }
+
+            /*
+             * With a duplicate comparator configured, data values may also be
+             * non-identical but compare as equal.  For the data parameter, we
+             * return the LN data.  Although DBIN.getKey is guaranteed to be
+             * transactionally correct, we return the LN data instead because
+             * that works for duplicates and non-duplicates, and because the LN
+             * is the source of truth.  [#15704]
+             */
+            if (foundData != null) {
+                setDbt(foundData, lnData);
+            }
+
+            return OperationStatus.SUCCESS;
+        } finally {
+            releaseBINs();
+        }
+    }
+
+    /**
+     * Locks the given LN's node ID; a deleted LN will not be locked or
+     * returned.  Attempts to use a non-blocking lock to avoid
+     * unlatching/relatching.  Retries if necessary, to handle the case where
+     * the LN is changed while the BIN is unlatched.
+     *
+     * Preconditions: The target BIN must be latched.  When positioned in a dup
+     * tree, the BIN may be latched on entry also and if so it will be latched
+     * on exit.
+     *
+     * Postconditions: The target BIN is latched.  When positioned in a dup
+     * tree, the DBIN will be latched if it was latched on entry or a blocking
+     * lock was needed.  Therefore, when positioned in a dup tree, releaseDBIN
+     * should be called.
+     *
+     * @param ln the LN to be locked.
+     * @param lockType the type of lock requested.
+     * @return the LockResult containing the LN that was locked, or containing
+     * a null LN if the LN was deleted or cleaned.  If the LN is deleted, a
+     * lock will not be held.
+     */
+    private LockResult lockLN(LN ln, LockType lockType)
+        throws DatabaseException {
+
+        LockResult lockResult = lockLNDeletedAllowed(ln, lockType);
+        ln = lockResult.getLN();
+        if (ln != null) {
+            setTargetBin();
+            if (targetBin.isEntryKnownDeleted(targetIndex) ||
+                ln.isDeleted()) {
+                revertLock(ln.getNodeId(), lockResult.getLockGrant());
+                lockResult.setLN(null);
+            }
+        }
+        return lockResult;
+    }
+
+    /**
+     * Locks the given LN's node ID; a deleted LN will be locked and returned.
+     * Attempts to use a non-blocking lock to avoid unlatching/relatching.
+     * Retries if necessary, to handle the case where the LN is changed while
+     * the BIN is unlatched.
+     *
+     * Preconditions: The target BIN must be latched.  When positioned in a dup
+     * tree, the BIN may be latched on entry also and if so it will be latched
+     * on exit.
+     *
+     * Postconditions: The target BIN is latched.  When positioned in a dup
+     * tree, the DBIN will be latched if it was latched on entry or a blocking
+     * lock was needed.  Therefore, when positioned in a dup tree, releaseDBIN
+     * should be called.
+     *
+     * @param ln the LN to be locked.
+     * @param lockType the type of lock requested.
+     * @return the LockResult containing the LN that was locked, or containing
+     * a null LN if the LN was cleaned.
+     */
+    public LockResult lockLNDeletedAllowed(LN ln, LockType lockType)
+        throws DatabaseException {
+
+        LockResult lockResult;
+
+        /* For dirty-read, there is no need to fetch the node. */
+        if (lockType == LockType.NONE) {
+            lockResult = new LockResult(LockGrantType.NONE_NEEDED, null);
+            lockResult.setLN(ln);
+            return lockResult;
+        }
+
+        /*
+         * Try a non-blocking lock first, to avoid unlatching.  If the default
+         * is no-wait, use the standard lock method so LockNotGrantedException
+         * is thrown; there is no need to try a non-blocking lock twice.
+         */
+        if (locker.getDefaultNoWait()) {
+            try {
+                lockResult = locker.lock
+                    (ln.getNodeId(), lockType, true /*noWait*/, databaseImpl);
+            } catch (LockNotGrantedException e) {
+                /* Release all latches. */
+                releaseBINs();
+                throw e;
+            }
+        } else {
+            lockResult = locker.nonBlockingLock
+                (ln.getNodeId(), lockType, databaseImpl);
+        }
+        if (lockResult.getLockGrant() != LockGrantType.DENIED) {
+            lockResult.setLN(ln);
+            return lockResult;
+        }
+
+        /*
+         * Unlatch, get a blocking lock, latch, and get the current node from
+         * the slot.  If the node ID changed while unlatched, revert the lock
+         * and repeat.
+         */
+        while (true) {
+
+            /* Save the node ID we're locking and request a lock. */
+            long nodeId = ln.getNodeId();
+            releaseBINs();
+            lockResult = locker.lock
+                (nodeId, lockType, false /*noWait*/, databaseImpl);
+
+            /* Fetch the current node after locking. */
+            latchBINs();
+            setTargetBin();
+            ln = (LN) targetBin.fetchTarget(targetIndex);
+
+            if (ln != null && nodeId != ln.getNodeId()) {
+                /* If the node ID changed, revert the lock and try again. */
+                revertLock(nodeId, lockResult.getLockGrant());
+                continue;
+            } else {
+                /* If null (cleaned) or locked correctly, return the LN. */
+                lockResult.setLN(ln);
+                return lockResult;
+            }
+        }
+    }
+
+    /**
+     * Locks the DupCountLN for the given duplicate root.  Attempts to use a
+     * non-blocking lock to avoid unlatching/relatching.
+     *
+     * Preconditions: The dupRoot, BIN and DBIN are latched.
+     * Postconditions: The dupRoot, BIN and DBIN are latched.
+     *
+     * Note that the dupRoot may change during locking and should be refetched
+     * if needed.
+     *
+     * @param dupRoot the duplicate root containing the DupCountLN to be
+     * locked.
+     * @param lockType the type of lock requested.
+     * @return the LockResult containing the LN that was locked.
+     */
+    public LockResult lockDupCountLN(DIN dupRoot, LockType lockType)
+        throws DatabaseException {
+
+        DupCountLN ln = dupRoot.getDupCountLN();
+        LockResult lockResult;
+
+        /*
+         * Try a non-blocking lock first, to avoid unlatching.  If the default
+         * is no-wait, use the standard lock method so LockNotGrantedException
+         * is thrown; there is no need to try a non-blocking lock twice.
+         */
+        if (locker.getDefaultNoWait()) {
+            try {
+                lockResult = locker.lock
+                    (ln.getNodeId(), lockType, true /*noWait*/, databaseImpl);
+            } catch (LockNotGrantedException e) {
+                /* Release all latches. */
+                dupRoot.releaseLatch();
+                releaseBINs();
+                throw e;
+            }
+        } else {
+            lockResult = locker.nonBlockingLock
+                (ln.getNodeId(), lockType, databaseImpl);
+        }
+
+        if (lockResult.getLockGrant() == LockGrantType.DENIED) {
+            /* Release all latches. */
+            dupRoot.releaseLatch();
+            releaseBINs();
+            /* Request a blocking lock. */
+            lockResult = locker.lock
+                (ln.getNodeId(), lockType, false /*noWait*/, databaseImpl);
+            /* Reacquire all latches. */
+            latchBIN();
+            dupRoot = (DIN) bin.fetchTarget(index);
+            dupRoot.latch(cacheMode);
+            latchDBIN();
+            ln = dupRoot.getDupCountLN();
+        }
+        lockResult.setLN(ln);
+        return lockResult;
+    }
+
+    /**
+     * Fetch, latch and return the DIN root of the duplicate tree at the cursor
+     * position.
+     *
+     * Preconditions: The BIN must be latched and the current BIN entry must
+     * contain a DIN.
+     *
+     * Postconditions: The BIN and DIN will be latched.  The DBIN will remain
+     * latched if isDBINLatched is true.
+     *
+     * @param isDBINLatched is true if the DBIN is currently latched.
+     */
+    public DIN getLatchedDupRoot(boolean isDBINLatched)
+        throws DatabaseException {
+
+        assert bin != null;
+        assert bin.isLatchOwnerForWrite();
+        assert index >= 0;
+
+        DIN dupRoot = (DIN) bin.fetchTarget(index);
+
+        if (isDBINLatched) {
+
+            /*
+             * The BIN and DBIN are currently latched and we need to latch the
+             * dupRoot, which is between the BIN and DBIN in the tree.  First
+             * try latching the dupRoot no-wait; if this works, we have latched
+             * out of order, but in a way that does not cause deadlocks.  If we
+             * don't get the no-wait latch, then release the DBIN latch and
+             * latch in the proper order down the tree.
+             */
+            if (!dupRoot.latchNoWait()) {
+                releaseDBIN();
+                dupRoot.latch(cacheMode);
+                latchDBIN();
+            }
+        } else {
+            dupRoot.latch(cacheMode);
+        }
+
+        return dupRoot;
+    }
+
+    /**
+     * Helper to copy the given bytes into a data DatabaseEntry (DBT),
+     * honoring any partial settings on the entry.
+     */
+    public static void setDbt(DatabaseEntry data, byte[] bytes) {
+
+        if (bytes != null) {
+            boolean partial = data.getPartial();
+            int off = partial ? data.getPartialOffset() : 0;
+            int len = partial ? data.getPartialLength() : bytes.length;
+            if (off + len > bytes.length) {
+                len = (off > bytes.length) ? 0 : bytes.length - off;
+            }
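+            /*
+             * For example (hypothetical values): with bytes.length=4 and a
+             * partial request of off=2, len=3, len is truncated to 2 and
+             * bytes[2..3] are copied below.
+             */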
+
+            byte[] newdata = null;
+            if (len == 0) {
+                newdata = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+            } else {
+                newdata = new byte[len];
+                System.arraycopy(bytes, off, newdata, 0, len);
+            }
+            data.setData(newdata);
+            data.setOffset(0);
+            data.setSize(len);
+        } else {
+            data.setData(null);
+            data.setOffset(0);
+            data.setSize(0);
+        }
+    }
+
+    /*
+     * For debugging. Verify that the BIN's cursor set contains this cursor.
+     */
+    private void verifyCursor(BIN bin)
+        throws DatabaseException {
+
+        if (!bin.getCursorSet().contains(this)) {
+            throw new DatabaseException("BIN cursorSet is inconsistent.");
+        }
+    }
+
+    /**
+     * Calls checkCursorState and returns false if an exception is thrown.
+     */
+    private boolean assertCursorState(boolean mustBeInitialized) {
+        try {
+            checkCursorState(mustBeInitialized);
+            return true;
+        } catch (DatabaseException e) {
+            return false;
+        }
+    }
+
+    /**
+     * Check that the cursor is open and optionally if it is initialized.
+     */
+    public void checkCursorState(boolean mustBeInitialized)
+        throws DatabaseException {
+
+        if (status == CURSOR_INITIALIZED) {
+
+            if (DEBUG) {
+                if (bin != null) {
+                    verifyCursor(bin);
+                }
+                if (dupBin != null) {
+                    verifyCursor(dupBin);
+                }
+            }
+
+            return;
+        } else if (status == CURSOR_NOT_INITIALIZED) {
+            if (mustBeInitialized) {
+                throw new DatabaseException
+                    ("Cursor Not Initialized.");
+            }
+        } else if (status == CURSOR_CLOSED) {
+            throw new DatabaseException
+                ("Cursor has been closed.");
+        } else {
+            throw new DatabaseException
+                ("Unknown cursor status: " + status);
+        }
+    }
+
+    /**
+     * Return this lock to its prior status. If the lock was just obtained,
+     * release it. If it was promoted, demote it.
+     */
+    private void revertLock(LN ln, LockResult lockResult)
+        throws DatabaseException {
+
+        revertLock(ln.getNodeId(), lockResult.getLockGrant());
+    }
+
+    /**
+     * Return this lock to its prior status. If the lock was just obtained,
+     * release it. If it was promoted, demote it.
+     */
+    private void revertLock(long nodeId, LockGrantType lockStatus)
+        throws DatabaseException {
+
+        if ((lockStatus == LockGrantType.NEW) ||
+            (lockStatus == LockGrantType.WAIT_NEW)) {
+            locker.releaseLock(nodeId);
+        } else if ((lockStatus == LockGrantType.PROMOTION) ||
+                   (lockStatus == LockGrantType.WAIT_PROMOTION)){
+            locker.demoteLock(nodeId);
+        }
+    }
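+
+    /*
+     * Concrete illustration (added for clarity): if a read lock was promoted
+     * to a write lock for an operation that subsequently fails, revertLock
+     * demotes it back to a read lock (PROMOTION/WAIT_PROMOTION); if the lock
+     * was newly acquired for the operation (NEW/WAIT_NEW), it is simply
+     * released.
+     */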
+
+    /**
+     * Locks the logical EOF node for the databaseImpl.
+     */
+    public void lockEofNode(LockType lockType)
+        throws DatabaseException {
+
+        locker.lock(databaseImpl.getEofNodeId(), lockType,
+                    false /*noWait*/, databaseImpl);
+    }
+
+    /**
+     * @throws RunRecoveryException if the underlying environment is invalid.
+     */
+    public void checkEnv()
+        throws RunRecoveryException {
+
+        databaseImpl.getDbEnvironment().checkIfInvalid();
+    }
+
+    /*
+     * Support for linking cursors onto lockers.
+     */
+    public CursorImpl getLockerPrev() {
+        return lockerPrev;
+    }
+
+    public CursorImpl getLockerNext() {
+        return lockerNext;
+    }
+
+    public void setLockerPrev(CursorImpl p) {
+        lockerPrev = p;
+    }
+
+    public void setLockerNext(CursorImpl n) {
+        lockerNext = n;
+    }
+
+    /**
+     * Callback object for traverseDbWithCursor.
+     */
+    public interface WithCursor {
+
+        /**
+         * Called for each record in the databaseImpl.
+         * @return true to continue or false to stop the enumeration.
+         */
+        boolean withCursor(CursorImpl cursor,
+                           DatabaseEntry key,
+                           DatabaseEntry data)
+            throws DatabaseException;
+    }
+
+    /**
+     * Enumerates all records in a databaseImpl non-transactionally and calls
+     * the withCursor method for each record.  Stops the enumeration if the
+     * callback returns false.
+     *
+     * @param db DatabaseImpl to traverse.
+     *
+     * @param lockType non-null LockType for reading records.
+     *
+     * @param allowEviction should normally be true to evict when performing
+     * multiple operations, but may be false if eviction is disallowed in a
+     * particular context.
+     *
+     * @param withCursor callback object.
+     */
+    public static void traverseDbWithCursor(DatabaseImpl db,
+                                            LockType lockType,
+                                            boolean allowEviction,
+                                            WithCursor withCursor)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Locker locker = null;
+        CursorImpl cursor = null;
+        try {
+            locker = BasicLocker.createBasicLocker(db.getDbEnvironment());
+            cursor = new CursorImpl(db, locker);
+            cursor.setAllowEviction(allowEviction);
+            if (cursor.positionFirstOrLast(true,    // first
+                                           null)) { // duplicateRoot
+                OperationStatus status = cursor.getCurrentAlreadyLatched
+                    (key, data, lockType, true); // first
+                boolean done = false;
+                while (!done) {
+
+                    /*
+                     * getCurrentAlreadyLatched may have returned non-SUCCESS
+                     * if the first record is deleted, but we can call getNext
+                     * below to move forward.
+                     */
+                    if (status == OperationStatus.SUCCESS) {
+                        if (!withCursor.withCursor(cursor, key, data)) {
+                            done = true;
+                        }
+                    }
+                    if (!done) {
+                        status = cursor.getNext(key, data, lockType,
+                                                true,   // forward
+                                                false); // alreadyLatched
+                        if (status != OperationStatus.SUCCESS) {
+                            done = true;
+                        }
+                    }
+                }
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.releaseBINs();
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+    }
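+
+    /*
+     * Usage sketch (illustrative only; assumes a DatabaseImpl "db" obtained
+     * elsewhere): enumerate every record non-transactionally with read
+     * locks, printing each record and continuing until the end.
+     *
+     *   CursorImpl.traverseDbWithCursor(
+     *       db, LockType.READ, true,       // allowEviction
+     *       new WithCursor() {
+     *           public boolean withCursor(CursorImpl cursor,
+     *                                     DatabaseEntry key,
+     *                                     DatabaseEntry data)
+     *               throws DatabaseException {
+     *               System.out.println(key + " -> " + data);
+     *               return true;           // false stops the enumeration
+     *           }
+     *       });
+     */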
+
+    /**
+     * Dump the cursor for debugging purposes.  Dump the bin and dbin that the
+     * cursor refers to if verbose is true.
+     */
+    public void dump(boolean verbose) {
+        System.out.println(dumpToString(verbose));
+    }
+
+    /**
+     * Dump the cursor for debugging purposes.
+     */
+    public void dump() {
+        System.out.println(dumpToString(true));
+    }
+
+    /*
+     * dumper
+     */
+    private String statusToString(byte status) {
+        switch(status) {
+        case CURSOR_NOT_INITIALIZED:
+            return "CURSOR_NOT_INITIALIZED";
+        case CURSOR_INITIALIZED:
+            return "CURSOR_INITIALIZED";
+        case CURSOR_CLOSED:
+            return "CURSOR_CLOSED";
+        default:
+            return "UNKNOWN (" + Byte.toString(status) + ")";
+        }
+    }
+
+    /*
+     * dumper
+     */
+    public String dumpToString(boolean verbose) {
+        StringBuffer sb = new StringBuffer();
+
+        sb.append("<Cursor idx=\"").append(index).append("\"");
+        if (dupBin != null) {
+            sb.append(" dupIdx=\"").append(dupIndex).append("\"");
+        }
+        sb.append(" status=\"").append(statusToString(status)).append("\"");
+        sb.append(">\n");
+        if (verbose) {
+            sb.append((bin == null) ? "" : bin.dumpString(2, true));
+            sb.append((dupBin == null) ? "" : dupBin.dumpString(2, true));
+        }
+        sb.append("\n</Cursor>");
+
+        return sb.toString();
+    }
+
+    /*
+     * For unit tests
+     */
+    public LockStats getLockStats()
+        throws DatabaseException {
+
+        return locker.collectStats(new LockStats());
+    }
+
+    /**
+     * Send trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to decide whether to send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    private void trace(Level level,
+                       String changeType,
+                       BIN theBin,
+                       LN ln,
+                       int lnIndex,
+                       long oldLsn,
+                       long newLsn) {
+        Logger logger = databaseImpl.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(changeType);
+            sb.append(" bin=");
+            sb.append(theBin.getNodeId());
+            sb.append(" ln=");
+            sb.append(ln.getNodeId());
+            sb.append(" lnIdx=");
+            sb.append(lnIndex);
+            sb.append(" oldLnLsn=");
+            sb.append(DbLsn.getNoFormatString(oldLsn));
+            sb.append(" newLnLsn=");
+            sb.append(DbLsn.getNoFormatString(newLsn));
+
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /* For unit testing only. */
+    public void setTestHook(TestHook hook) {
+        testHook = hook;
+    }
+
+    /* Check that the target bin is latched. For use in assertions. */
+    private boolean checkAlreadyLatched(boolean alreadyLatched) {
+        if (alreadyLatched) {
+            if (dupBin != null) {
+                return dupBin.isLatchOwnerForWrite();
+            } else if (bin != null) {
+                return bin.isLatchOwnerForWrite();
+            }
+        }
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DatabaseId.java b/src/com/sleepycat/je/dbi/DatabaseId.java
new file mode 100644
index 0000000000000000000000000000000000000000..cd429cc21b1e1cebcde4ef1a254a5869cb1a106b
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DatabaseId.java
@@ -0,0 +1,152 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseId.java,v 1.42.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * DatabaseImpl Ids are wrapped in a class so they can be logged.
+ */
+public class DatabaseId implements Comparable<DatabaseId>, Loggable {
+
+    /**
+     * The unique id of this database.
+     */
+    private int id;
+
+    /**
+     * Create a DatabaseId with the given id value.
+     */
+    public DatabaseId(int id) {
+        this.id = id;
+    }
+
+    /**
+     * Uninitialized database id, for logging.
+     */
+    public DatabaseId() {
+    }
+
+    /**
+     * @return id value
+     */
+    public int getId() {
+        return id;
+    }
+
+    /**
+     * @return id as bytes, for use as a key
+     */
+    public byte[] getBytes()
+        throws DatabaseException {
+
+        try {
+            return toString().getBytes("UTF-8");
+        } catch (UnsupportedEncodingException UEE) {
+            throw new DatabaseException(UEE);
+        }
+    }
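+
+    /*
+     * Illustrative example (added for clarity): new DatabaseId(42).getBytes()
+     * returns the UTF-8 bytes of the string "42", i.e. { 0x34, 0x32 }.
+     */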
+
+    /**
+     * Compare two DatabaseImpl IDs.
+     */
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (!(obj instanceof DatabaseId)) {
+            return false;
+        }
+
+        return ((DatabaseId) obj).id == id;
+    }
+
+    public int hashCode() {
+        return id;
+    }
+
+    public String toString() {
+        return Integer.toString(id);
+    }
+
+    /**
+     * see Comparable#compareTo
+     */
+    public int compareTo(DatabaseId o) {
+        if (o == null) {
+            throw new NullPointerException();
+        }
+
+        if (id == o.id) {
+            return 0;
+        } else if (id > o.id) {
+            return 1;
+        } else {
+            return -1;
+        }
+    }
+
+    /*
+     * Logging support.
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedIntLogSize(id);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedInt(logBuffer, id);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) {
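+        /*
+         * Note (added for clarity): entries written before log version 6
+         * stored the id as an unpacked int; version 6 and later use the
+         * packed format written by writeToLog above.
+         */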
+        id = LogUtils.readInt(itemBuffer, (entryVersion < 6));
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<dbId id=\"");
+        sb.append(id);
+        sb.append("\"/>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof DatabaseId))
+            return false;
+
+        return id == ((DatabaseId) other).id;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DatabaseImpl.java b/src/com/sleepycat/je/dbi/DatabaseImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..c3afc600dea38a21e7209bbcbcfecd3d78699d74
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DatabaseImpl.java
@@ -0,0 +1,2481 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseImpl.java,v 1.205.2.7 2010/03/23 15:02:07 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.PrintStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.LockNotGrantedException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.PreloadConfig;
+import com.sleepycat.je.PreloadStats;
+import com.sleepycat.je.PreloadStatus;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.cleaner.BaseUtilizationTracker;
+import com.sleepycat.je.cleaner.DbFileSummary;
+import com.sleepycat.je.cleaner.DbFileSummaryMap;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker.ExceptionPredicate;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.DbOpReplicationContext;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogFileNotFoundException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.DbOperationType;
+import com.sleepycat.je.recovery.Checkpointer;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.DupCountLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeUtils;
+import com.sleepycat.je.tree.TreeWalkerStatsAccumulator;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * The underlying object for a given database.
+ */
+public class DatabaseImpl implements Loggable, Cloneable {
+
+    /*
+     * Delete processing states. See design note on database deletion and
+     * truncation
+     */
+    private static final short NOT_DELETED = 1;
+    private static final short DELETED_CLEANUP_INLIST_HARVEST = 2;
+    private static final short DELETED_CLEANUP_LOG_HARVEST = 3;
+    private static final short DELETED = 4;
+
+    /*
+     * Flag bits are the persistent representation of boolean properties
+     * for this database.  The DUPS_ALLOWED_BIT value is 1 for compatibility
+     * with earlier log entry versions where it was stored as a boolean.
+     *
+     * Two bits are used to indicate whether this database is replicated or
+     * not.
+     * isReplicated = 0, notReplicated = 0 means replication status is
+     *   unknown, because the db was created in a standalone environment.
+     * isReplicated = 1, notReplicated = 0 means the db is replicated.
+     * isReplicated = 0, notReplicated = 1 means the db is not replicated.
+     * isReplicated = 1, notReplicated = 1 is an illegal combination.
+     */
+    private byte flags;
+    private static final byte DUPS_ALLOWED_BIT = 0x1; // getSortedDuplicates()
+    private static final byte TEMPORARY_BIT = 0x2;     // isTemporary()
+    private static final byte IS_REPLICATED_BIT = 0x4; // isReplicated()
+    private static final byte NOT_REPLICATED_BIT = 0x8;// notReplicated()
+    private static final byte PREFIXING_ENABLED = 0x10;// getKeyPrefixing()
+    /* getUtilizationRepairDone() */
+    private static final byte UTILIZATION_REPAIR_DONE = 0x20;
+
+    private DatabaseId id;             // unique id
+    private Tree tree;
+    private EnvironmentImpl envImpl;   // Tree operations find the env this way
+    private boolean transactional;     // All open handles are transactional
+    private boolean durableDeferredWrite;  // Durable deferred write mode set
+    private boolean dirtyUtilization;  // Utilization changed since logging
+    private Set<Database> referringHandles; // Set of open Database handles
+    private BtreeStats stats;     // most recent btree stats w/ !DB_FAST_STAT
+    private long eofNodeId;       // Logical EOF node for range locking
+    private volatile short deleteState;    // one of four delete states.
+    private AtomicInteger useCount = new AtomicInteger();
+                                  // If non-zero, eviction is prohibited
+    private DbFileSummaryMap dbFileSummaries;
+
+    /**
+     * Log version when DB was created, or 0 if created prior to log version 6.
+     */
+    private byte createdAtLogVersion;
+
+    /**
+     * For unit testing, setting this field to true will force a walk of the
+     * tree to count utilization during truncate/remove, rather than using the
+     * per-database info.  This is used to test the "old technique" for
+     * counting utilization, which is now used only if the database was created
+     * prior to log version 6.
+     */
+    public static boolean forceTreeWalkForTruncateAndRemove;
+
+    /*
+     * The user defined Btree and duplicate comparison functions, if specified.
+     */
+    private Comparator<byte[]> btreeComparator = null;
+    private Comparator<byte[]> duplicateComparator = null;
+    private byte[] btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+    private byte[] duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+    private boolean btreeComparatorByClassName = false;
+    private boolean duplicateComparatorByClassName = false;
+
+    /*
+     * Cache some configuration values.
+     */
+    private int binDeltaPercent;
+    private int binMaxDeltas;
+    private int maxMainTreeEntriesPerNode;
+    private int maxDupTreeEntriesPerNode;
+
+    /*
+     * The debugDatabaseName is used for error messages only, to avoid
+     * accessing the db mapping tree in error situations. Currently it's not
+     * guaranteed to be transactionally correct, nor is it updated by rename.
+     */
+    private String debugDatabaseName;
+
+    /* For unit tests */
+    private TestHook pendingDeletedHook;
+
+    /*
+     * For debugging -- this gives the ability to force all non-internal
+     * databases to use key prefixing.
+     *
+     * Note that doing
+     *     ant -Dje.forceKeyPrefixing=true test
+     * does not work because ant does not pass the parameter down to JE.
+     */
+    private static final boolean forceKeyPrefixing;
+    static {
+        String forceKeyPrefixingProp =
+            System.getProperty("je.forceKeyPrefixing");
+        if ("true".equals(forceKeyPrefixingProp)) {
+            forceKeyPrefixing = true;
+        } else {
+            forceKeyPrefixing = false;
+        }
+    }
+
+    /**
+     * Create a database object for a new database.
+     */
+    public DatabaseImpl(String dbName,
+                        DatabaseId id,
+                        EnvironmentImpl envImpl,
+                        DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        this.id = id;
+        this.envImpl = envImpl;
+        setBtreeComparator(dbConfig.getBtreeComparator(),
+                           dbConfig.getBtreeComparatorByClassName());
+        setDuplicateComparator(dbConfig.getDuplicateComparator(),
+                               dbConfig.getDuplicateComparatorByClassName());
+        if (dbConfig.getSortedDuplicates()) {
+            setSortedDuplicates();
+        }
+
+        if (dbConfig.getKeyPrefixing() ||
+            forceKeyPrefixing) {
+            setKeyPrefixing();
+        }
+
+        if (dbConfig.getTemporary()) {
+            setTemporary();
+        }
+
+        if (envImpl.isReplicated()) {
+            if (DbInternal.getDbConfigReplicated(dbConfig)) {
+                setIsReplicatedBit();
+            } else {
+                setNotReplicatedBit();
+            }
+        }
+
+        transactional = dbConfig.getTransactional();
+        durableDeferredWrite = dbConfig.getDeferredWrite();
+        maxMainTreeEntriesPerNode = dbConfig.getNodeMaxEntries();
+        maxDupTreeEntriesPerNode = dbConfig.getNodeMaxDupTreeEntries();
+
+        createdAtLogVersion = LogEntryType.LOG_VERSION;
+
+        /*
+         * New DB records do not need utilization repair.  Set this before
+         * calling initWithEnvironment to avoid repair overhead.
+         */
+        setUtilizationRepairDone();
+
+        commonInit();
+
+        initWithEnvironment();
+
+        /*
+         * The tree needs the env, make sure we assign it before
+         * allocating the tree.
+         */
+        tree = new Tree(this);
+
+        /* For error messages only. */
+        debugDatabaseName = dbName;
+    }
+
+    /**
+     * Create an empty database object for initialization from the log.  Note
+     * that the rest of the initialization comes from readFromLog(), except
+     * for the debugDatabaseName, which is set by the caller.
+     */
+    public DatabaseImpl()
+        throws DatabaseException {
+
+        id = new DatabaseId();
+        envImpl = null;
+
+        tree = new Tree();
+
+        commonInit();
+
+        /* initWithEnvironment is called after reading and envImpl is set.  */
+    }
+
+    private void commonInit() {
+
+        deleteState = NOT_DELETED;
+        referringHandles =
+            Collections.synchronizedSet(new HashSet<Database>());
+        dbFileSummaries = new DbFileSummaryMap
+            (false /* countParentMapEntry */);
+    }
+
+    public void setDebugDatabaseName(String debugName) {
+        debugDatabaseName = debugName;
+    }
+
+    public String getDebugName() {
+        return debugDatabaseName;
+    }
+
+    /* For unit testing only. */
+    public void setPendingDeletedHook(TestHook hook) {
+        pendingDeletedHook = hook;
+    }
+
+    /**
+     * Initialize configuration settings when creating a new instance or after
+     * reading an instance from the log.  The envImpl field must be set before
+     * calling this method.
+     */
+    private void initWithEnvironment()
+        throws DatabaseException {
+
+        /* The eof node id must be unique for each database in memory. */
+        eofNodeId = envImpl.getNodeSequence().getNextTransientNodeId();
+
+        assert !(replicatedBitSet() && notReplicatedBitSet()) :
+            "The replicated AND notReplicated bits should never be set "+
+            " together";
+
+        /*
+         * We'd like to assert that neither replication bit is set if
+         * the environmentImpl is not replicated, but can't do that.
+         * EnvironmentImpl.isReplicated() is not yet initialized if this
+         * environment is undergoing recovery during replication setup.
+
+        assert !((!envImpl.isReplicated() &&
+                 (replicatedBitSet() || notReplicatedBitSet()))) :
+            "Neither the replicated nor notReplicated bits should be set " +
+            " in a non-replicated environment" +
+            " replicatedBitSet=" + replicatedBitSet() +
+            " notRepBitSet=" + notReplicatedBitSet();
+        */
+
+        DbConfigManager configMgr = envImpl.getConfigManager();
+
+        binDeltaPercent =
+            configMgr.getInt(EnvironmentParams.BIN_DELTA_PERCENT);
+        binMaxDeltas =
+            configMgr.getInt(EnvironmentParams.BIN_MAX_DELTAS);
+
+        if (maxMainTreeEntriesPerNode == 0) {
+            maxMainTreeEntriesPerNode =
+                configMgr.getInt(EnvironmentParams.NODE_MAX);
+        }
+
+        if (maxDupTreeEntriesPerNode == 0) {
+            maxDupTreeEntriesPerNode =
+                configMgr.getInt(EnvironmentParams.NODE_MAX_DUPTREE);
+        }
+
+        /* Budgets memory for the utilization info. */
+        dbFileSummaries.init(envImpl);
+
+        /*
+         * Repair utilization info if necessary.  The repair flag will not be
+         * set for MapLNs written by JE 3.3.74 and earlier, and will be set for
+         * all MapLNs written thereafter.  Make the utilization dirty to force
+         * the MapLN to be flushed.  Even if no repair is performed, we want to
+         * write the updated flag.  [#16610]
+         */
+        if (!getUtilizationRepairDone()) {
+            dbFileSummaries.repair(envImpl);
+            setDirtyUtilization();
+            setUtilizationRepairDone();
+        }
+    }
+
+    /**
+     * Create a clone of this database that can be used as the new empty
+     * database when truncating this database.  setId and setTree must be
+     * called on the returned database.
+     */
+    public DatabaseImpl cloneDatabase() {
+        DatabaseImpl newDb;
+        try {
+            newDb = (DatabaseImpl) super.clone();
+        } catch (CloneNotSupportedException e) {
+            assert false : e;
+            return null;
+        }
+
+        /* Re-initialize fields that should not be shared by the new DB. */
+        newDb.id = null;
+        newDb.tree = null;
+        newDb.createdAtLogVersion = LogEntryType.LOG_VERSION;
+        newDb.dbFileSummaries = new DbFileSummaryMap
+            (false /*countParentMapEntry*/);
+        newDb.dbFileSummaries.init(envImpl);
+        newDb.useCount = new AtomicInteger();
+        return newDb;
+    }
+
+    /**
+     * @return the database tree.
+     */
+    public Tree getTree() {
+        return tree;
+    }
+
+    void setTree(Tree tree) {
+        this.tree = tree;
+    }
+
+    /**
+     * @return the database id.
+     */
+    public DatabaseId getId() {
+        return id;
+    }
+
+    void setId(DatabaseId id) {
+        this.id = id;
+    }
+
+    public long getEofNodeId() {
+        return eofNodeId;
+    }
+
+    /**
+     * @return true if this database is transactional.
+     */
+    public boolean isTransactional() {
+        return transactional;
+    }
+
+    /**
+     * Sets the transactional property for the first opened handle.
+     */
+    public void setTransactional(boolean transactional) {
+        this.transactional = transactional;
+    }
+
+    /**
+     * @return true if this database is temporary.
+     */
+    public boolean isTemporary() {
+        return ((flags & TEMPORARY_BIT) != 0);
+    }
+
+    public static boolean isTemporary(byte flagVal) {
+        return ((flagVal & TEMPORARY_BIT) != 0);
+    }
+
+    private void setTemporary() {
+        flags |= TEMPORARY_BIT;
+    }
+
+    /**
+     * @return true if this database was user configured for durable deferred
+     * write mode.
+     */
+    public boolean isDurableDeferredWrite() {
+        return durableDeferredWrite;
+    }
+
+    /**
+     * @return true if write operations are not logged immediately.  This is
+     * true if the user configured a durable DW database or a temporary
+     * database.
+     */
+    public boolean isDeferredWriteMode() {
+        return isDurableDeferredWrite() || isTemporary();
+    }
+
+    /**
+     * Sets the deferred write property for the first opened handle.
+     */
+    public void setDeferredWrite(boolean durableDeferredWrite) {
+        this.durableDeferredWrite = durableDeferredWrite;
+    }
+
+    /**
+     * @return true if duplicates are allowed in this database.
+     */
+    public boolean getSortedDuplicates() {
+        return (flags & DUPS_ALLOWED_BIT) != 0;
+    }
+
+    public static boolean getSortedDuplicates(byte flagVal) {
+        return (flagVal & DUPS_ALLOWED_BIT) != 0;
+    }
+
+    public void setSortedDuplicates() {
+        flags |= DUPS_ALLOWED_BIT;
+    }
+
+    /**
+     * @return true if key prefixing is enabled in this database.
+     */
+    public boolean getKeyPrefixing() {
+        return (flags & PREFIXING_ENABLED) != 0;
+    }
+
+    public void setKeyPrefixing() {
+        flags |= PREFIXING_ENABLED;
+    }
+
+    public void clearKeyPrefixing() {
+        if (forceKeyPrefixing) {
+            return;
+        }
+        flags &= ~PREFIXING_ENABLED;
+    }
+
+    /**
+     * @return true if this database is replicated. Note that
+     * we only need to check the IS_REPLICATED_BIT, because we require that
+     * we never have both IS_REPLICATED and NOT_REPLICATED set at the same
+     * time.
+     */
+    public boolean isReplicated() {
+        return replicatedBitSet();
+    }
+
+    /**
+     * @return true if this database is replicated.
+     */
+    public boolean unknownReplicated() {
+        return ((flags & IS_REPLICATED_BIT) == 0) &&
+            ((flags & NOT_REPLICATED_BIT) == 0);
+    }
+
+    private boolean replicatedBitSet() {
+        return (flags & IS_REPLICATED_BIT) != 0;
+    }
+
+    private void setIsReplicatedBit() {
+        flags |= IS_REPLICATED_BIT;
+    }
+
+    /**
+     * @return true if this database's not replicated bit is set.
+     */
+    private boolean notReplicatedBitSet() {
+        return (flags & NOT_REPLICATED_BIT) != 0;
+    }
+
+    private void setNotReplicatedBit() {
+        flags |= NOT_REPLICATED_BIT;
+    }
+
+    /**
+     * Is public for unit testing.
+     */
+    public boolean getUtilizationRepairDone() {
+        return (flags & UTILIZATION_REPAIR_DONE) != 0;
+    }
+
+    private void setUtilizationRepairDone() {
+        flags |= UTILIZATION_REPAIR_DONE;
+    }
+
+    /**
+     * Is public for unit testing.
+     */
+    public void clearUtilizationRepairDone() {
+        flags &= ~UTILIZATION_REPAIR_DONE;
+    }
+
+    public int getNodeMaxEntries() {
+        return maxMainTreeEntriesPerNode;
+    }
+
+    public int getNodeMaxDupTreeEntries() {
+        return maxDupTreeEntriesPerNode;
+    }
+
+    /**
+     * Returns the tree memory size that should be added to MAPLN_OVERHEAD.
+     *
+     * This is a start at budgeting per-Database memory.  For future reference,
+     * other things that could be budgeted are:
+     * - debugDatabaseName as it is set
+     * - Database handles as they are added/removed in referringHandles
+     */
+    public int getAdditionalTreeMemorySize() {
+
+        int val = 0;
+
+        /*
+         * If the comparator object is non-null we double the size of the
+         * serialized form to account for the approximate size of the user's
+         * comparator object.  This is only an approximation of course, and is
+         * not a very good one if we have serialized the class name, but we
+         * have no way to know the size of the user's object.
+         */
+        if (btreeComparator != null) {
+            val += 2 * MemoryBudget.byteArraySize
+                (btreeComparatorBytes.length);
+        }
+        if (duplicateComparator != null) {
+            val += 2 * MemoryBudget.byteArraySize
+                (duplicateComparatorBytes.length);
+        }
+
+        return val;
+    }
+
+    /**
+     * Set the duplicate comparison function for this database.
+     *
+     * @return true if the comparator was actually changed
+     *
+     * @param comparator the duplicate comparison function.
+     *
+     * @param byClassName whether the comparator is stored by class name
+     * rather than as a serialized object.
+     */
+    public boolean setDuplicateComparator(Comparator<byte[]> comparator,
+                                          boolean byClassName)
+        throws DatabaseException {
+
+        duplicateComparator = comparator;
+        byte[] newDuplicateComparatorBytes =
+            comparatorToBytes(comparator, byClassName, "Duplicate");
+        boolean ret = Arrays.equals(newDuplicateComparatorBytes,
+                                    duplicateComparatorBytes);
+        duplicateComparatorBytes = newDuplicateComparatorBytes;
+        duplicateComparatorByClassName = byClassName;
+        return !ret;
+    }
+
+    /**
+     * Set the btree comparison function for this database.
+     *
+     * @return true if the comparator was actually changed
+     *
+     * @param comparator the btree comparison function.
+     *
+     * @param byClassName whether the comparator is stored by class name
+     * rather than as a serialized object.
+     */
+    public boolean setBtreeComparator(Comparator<byte[]> comparator,
+                                      boolean byClassName)
+        throws DatabaseException {
+
+        btreeComparator = comparator;
+        byte[] newBtreeComparatorBytes =
+            comparatorToBytes(comparator, byClassName, "Btree");
+        boolean ret =
+            Arrays.equals(newBtreeComparatorBytes, btreeComparatorBytes);
+        btreeComparatorBytes = newBtreeComparatorBytes;
+        btreeComparatorByClassName = byClassName;
+        return !ret;
+    }
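+
+    /*
+     * Illustrative note (added for clarity): both setters return true only
+     * when the serialized form of the comparator changes.  For example,
+     * calling setBtreeComparator(cmp, false) on a new database returns true
+     * (the bytes change from the empty default), while an immediate second
+     * call with the same comparator returns false, assuming the serialized
+     * form is deterministic.
+     */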
+
+    /**
+     * @return the btree Comparator object.
+     */
+    public Comparator<byte[]> getBtreeComparator() {
+        return btreeComparator;
+    }
+
+    /**
+     * @return the duplicate Comparator object.
+     */
+    public Comparator<byte[]> getDuplicateComparator() {
+        return duplicateComparator;
+    }
+
+    /**
+     * @return whether Comparator is set by class name, not by serializable
+     * Comparator object.
+     */
+    public boolean getBtreeComparatorByClass() {
+        return btreeComparatorByClassName;
+    }
+
+    /**
+     * @return whether Comparator is set by class name, not by serializable
+     * Comparator object.
+     */
+    public boolean getDuplicateComparatorByClass() {
+        return duplicateComparatorByClassName;
+    }
+
+    /**
+     * Set the db environment after reading in the DatabaseImpl from the log.
+     */
+    public void setEnvironmentImpl(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        this.envImpl = envImpl;
+        initWithEnvironment();
+        tree.setDatabase(this);
+    }
+
+    /**
+     * @return the database environment.
+     */
+    public EnvironmentImpl getDbEnvironment() {
+        return envImpl;
+    }
+
+    /**
+     * Returns whether one or more handles are open.
+     */
+    public boolean hasOpenHandles() {
+        return referringHandles.size() > 0;
+    }
+
+    /**
+     * Add a referring handle
+     */
+    public void addReferringHandle(Database db) {
+        referringHandles.add(db);
+    }
+
+    /**
+     * Remove a referring handle.
+     */
+    public void removeReferringHandle(Database db) {
+        referringHandles.remove(db);
+    }
+
+    /**
+     * Called after a handle onto this DB is closed.
+     */
+    public void handleClosed(boolean doSyncDw)
+        throws DatabaseException {
+
+        if (referringHandles.isEmpty()) {
+
+            /*
+             * Remove a temporary database with no handles open.
+             *
+             * We are not synchronized here in any way that would prevent
+             * another thread from opening a handle during this process, before
+             * the NameLN is locked.  So we use noWait locking.  If a lock is
+             * not granted, then another handle was opened and we cannot remove
+             * the database until later.
+             *
+             * We pass the database ID to dbRemove in order to remove the
+             * database only if the name matches the ID.  This accounts for the
+             * remote possibility that the database is renamed or another
+             * database is created with the same name during this process,
+             * before the NameLN is locked.
+             *
+             * We can use a BasicLocker because temporary databases are always
+             * non-transactional.
+             */
+            if (isTemporary()) {
+                Locker locker =
+                    BasicLocker.createBasicLocker(envImpl, true /* noWait */);
+                boolean operationOk = false;
+                try {
+                    envImpl.getDbTree().dbRemove(locker, getName(), getId());
+                    operationOk = true;
+                } catch (LockNotGrantedException e) {
+                    /* We will have to remove this database later. */
+                } catch (Error E) {
+                    envImpl.invalidate(E);
+                    throw E;
+                } finally {
+                    locker.operationEnd(operationOk);
+                }
+            }
+
+            /*
+             * Sync a durable deferred write database with no handles open.  If
+             * a handle is opened during this process, then the sync may be
+             * unnecessary but it will not cause a problem.
+             */
+            if (doSyncDw && isDurableDeferredWrite()) {
+                sync(true);
+            }
+        }
+    }
+
+    /**
+     * Figure out how much memory is used by the DbFileSummaryMap.  Usually
+     * this number is built up over time by the DbFileSummaryMap itself and
+     * added to the memory budget, but in this case we need to reinitialize it
+     * after recovery, when DbFileSummaryMaps may be cut adrift by the process
+     * of overlaying new portions of the btree.
+     */
+    public long getTreeAdminMemory() {
+        return dbFileSummaries.getMemorySize();
+    }
+
+    /**
+     * Update memory budgets when this databaseImpl is closed and will never be
+     * accessed again or when it is still open when its owning MapLN will be
+     * garbage collected, due to eviction or recovery.
+     */
+    public void releaseTreeAdminMemory() {
+        /*
+         * There's no need to account for INs which belong to this database,
+         * because those are closed by the EnvironmentImpl when clearing
+         * the INList.  Do adjust memory budget for utilization info.
+         */
+        dbFileSummaries.subtractFromMemoryBudget();
+    }
+
+    /**
+     * @return the referring handle count.
+     */
+    synchronized int getReferringHandleCount() {
+        return referringHandles.size();
+    }
+
+    /**
+     * Increments the use count of this DB to prevent it from being
+     * evicted.  Called by the DbTree.createDb/getDb methods that return a
+     * DatabaseImpl.  Must be called while holding a lock on the MapLN. See
+     * isInUse. [#13415]
+     */
+    void incrementUseCount() {
+        useCount.incrementAndGet();
+    }
+
+    /**
+     * Decrements the use count of this DB, allowing it to be evicted if the
+     * use count reaches zero.  Called via DbTree.releaseDb to release a
+     * DatabaseImpl that was returned by a DbTree.createDb/getDb method. See
+     * isInUse. [#13415]
+     */
+    void decrementUseCount() {
+        assert useCount.get() > 0;
+        useCount.decrementAndGet();
+    }
+
+    /**
+     * Returns whether this DB is in use and cannot be evicted.  Called by
+     * MapLN.isEvictable while holding a write-lock on the MapLN and a latch on
+     * its parent BIN. [#13415]
+     *
+     * When isInUse returns false (while holding a write-lock on the MapLN and
+     * a latch on the parent BIN), it guarantees that the database object
+     * is not in use and cannot be acquired by another thread (via
+     * DbTree.createDb/getDb) until both the MapLN lock and BIN latch are
+     * released.  This guarantee is due to the fact that DbTree.createDb/getDb
+     * only increment the use count while holding a read-lock on the MapLN.
+     * Therefore, it is safe to evict the MapLN when isInUse returns false.
+     *
+     * When isInUse returns true, it is possible that another thread may
+     * decrement the use count at any time, since no locking or latching is
+     * performed when calling DbTree.releaseDb (which calls decrementUseCount).
+     * Therefore, it is not guaranteed that the MapLN is in use when isInUse
+     * returns true.  A true result means: the DB may be in use, so it is not
+     * safe to evict it.
+     */
+    public boolean isInUse() {
+        return (useCount.get() > 0);
+    }
+
+    /**
+     * Checks whether a database is in use during a remove or truncate database
+     * operation.
+     */
+    boolean isInUseDuringDbRemove() {
+
+        /*
+         * The use count is at least one here, because remove/truncate has
+         * called getDb but releaseDb has not yet been called.  Normally the
+         * database must be closed in order to remove or truncate it and
+         * referringHandles will be empty.  But when the deprecated
+         * Database.truncate is called, the database is open and the use count
+         * includes the number of open handles.  [#15805]
+         */
+        return useCount.get() > 1 + referringHandles.size();
+    }
+
+    /**
+     * Flush all dirty nodes for this database to disk.
+     */
+    public synchronized void sync(boolean flushLog)
+        throws DatabaseException {
+
+        if (!isDurableDeferredWrite()) {
+            throw new UnsupportedOperationException
+                ("Database.sync() is only supported " +
+                 "for deferred-write databases");
+        }
+
+        if (tree.rootExists()) {
+            Checkpointer.syncDatabase(envImpl, this, flushLog);
+        }
+    }
+
+    /**
+     * For this secondary database return the primary that it is associated
+     * with, or null if not associated with any primary.  Note that not all
+     * handles need be associated with a primary.
+     */
+    public Database findPrimaryDatabase()
+        throws DatabaseException {
+
+        for (Iterator<Database> i = referringHandles.iterator();
+             i.hasNext();) {
+            Database obj = i.next();
+            if (obj instanceof SecondaryDatabase) {
+                return ((SecondaryDatabase) obj).getPrimaryDatabase();
+            }
+        }
+        return null;
+    }
+
+    public String getName()
+        throws DatabaseException {
+
+        return envImpl.getDbTree().getDbName(id);
+    }
+
+    /**
+     * Returns the DbFileSummary for the given file, allocating it if
+     * necessary and budgeting memory for any changes.
+     *
+     * <p>Must be called under the log write latch.</p>
+     *
+     * @param willModify if true, the caller will modify the utilization info.
+     */
+    public DbFileSummary getDbFileSummary(Long fileNum, boolean willModify) {
+        if (willModify) {
+            dirtyUtilization = true;
+        }
+        assert dbFileSummaries != null;
+        return dbFileSummaries.get(fileNum, true /*adjustMemBudget*/,
+                                   true /*checkResurrected*/,
+                                   envImpl.getFileManager());
+    }
+
+    /**
+     * Removes the DbFileSummary for the given file.
+     *
+     * <p>Must be called under the log write latch.</p>
+     *
+     * @return whether a DbFileSummary for the given file was present and was
+     * removed.
+     */
+    public boolean removeDbFileSummary(Long fileNum) {
+        assert dbFileSummaries != null;
+        boolean removed = dbFileSummaries.remove(fileNum);
+        return removed;
+    }
+
+    /**
+     * For unit testing.
+     */
+    public DbFileSummaryMap getDbFileSummaries() {
+        return dbFileSummaries;
+    }
+
+    /**
+     * Returns whether this database has new (unflushed) utilization info.
+     */
+    public boolean isDirtyUtilization() {
+        return dirtyUtilization;
+    }
+
+    /**
+     * Sets utilization dirty in order to force the MapLN to be flushed later.
+     */
+    public void setDirtyUtilization() {
+        dirtyUtilization = true;
+    }
+
+    /**
+     * Returns whether this database's MapLN must be flushed during a
+     * checkpoint.
+     */
+    public boolean isCheckpointNeeded() {
+        return !isDeleted() && (isDirtyUtilization() || isTemporary());
+    }
+
+    /**
+     * @return true if this database is deleted. Delete cleanup
+     * may still be in progress.
+     */
+    public boolean isDeleted() {
+        return !(deleteState == NOT_DELETED);
+    }
+
+    /**
+     * @return true if this database is deleted and all cleanup is finished.
+     */
+    public boolean isDeleteFinished() {
+        return (deleteState == DELETED);
+    }
+
+    /**
+     * The delete cleanup is starting. Set this before releasing any
+     * write locks held for a db operation.
+     */
+    public void startDeleteProcessing() {
+        assert (deleteState == NOT_DELETED);
+
+        deleteState = DELETED_CLEANUP_INLIST_HARVEST;
+    }
+
+    /**
+     * Should be called by the SortedLSNTreeWalker when it is finished with
+     * the INList.
+     */
+    void finishedINListHarvest() {
+        assert (deleteState == DELETED_CLEANUP_INLIST_HARVEST);
+
+        deleteState = DELETED_CLEANUP_LOG_HARVEST;
+    }
+
+    /**
+     * Perform the entire two-step database deletion.  This method is used at
+     * non-transactional operation end.  When a transaction is used (see Txn),
+     * startDeleteProcessing is called at commit before releasing write locks
+     * and finishDeleteProcessing is called after releasing write locks.
+     */
+    public void startAndFinishDelete()
+        throws DatabaseException {
+
+        startDeleteProcessing();
+        finishDeleteProcessing();
+    }
+
+    /**
+     * Release the INs for the deleted database, count all log entries for this
+     * database as obsolete, delete the MapLN, and set the state to DELETED.
+     *
+     * Used at transaction end or non-transactional operation end in these
+     * cases:
+     *  - purge the deleted database after a commit of
+     *           Environment.removeDatabase
+     *  - purge the deleted database after a commit of
+     *           Environment.truncateDatabase
+     *  - purge the newly created database after an abort of
+     *           Environment.truncateDatabase
+     *
+     * Note that the processing of the naming tree means the MapLN is never
+     * actually accessible from the current tree, but deleting the MapLN will
+     * do two things:
+     * (a) mark it properly obsolete
+     * (b) null out the database tree, leaving the INList the only
+     * reference to the INs.
+     */
+    public void finishDeleteProcessing()
+        throws DatabaseException {
+
+        assert TestHookExecute.doHookIfSet(pendingDeletedHook);
+
+        try {
+            /* Fetch utilization info if it was evicted. */
+            if (dbFileSummaries == null) {
+                assert false; // Fetch evicted info when we implement eviction
+            }
+
+            /*
+             * Delete MapLN before the walk.  Get the root LSN before deleting
+             * the MapLN, as that will null out the root.
+             */
+            long rootLsn = tree.getRootLsn();
+
+            /*
+             * Grab the in-cache root IN before we call deleteMapLN so that it
+             * gives us a starting point for the SortedLSNTreeWalk below.  The
+             * on-disk version is obsolete at this point.
+             */
+            IN rootIN = tree.getResidentRootIN(false);
+            envImpl.getDbTree().deleteMapLN(id);
+
+            if (createdAtLogVersion >= 6 &&
+                !forceTreeWalkForTruncateAndRemove) {
+
+                /*
+                 * For databases created at log version 6 or after, the
+                 * per-database utilization info is complete and can be counted
+                 * as obsolete without walking the database.
+                 *
+                 * We do not need to flush modified file summaries because the
+                 * obsolete amounts are logged along with the deleted MapLN and
+                 * will be re-counted by recovery if necessary.
+                 */
+                envImpl.getLogManager().countObsoleteDb(this);
+            } else {
+
+                /*
+                 * For databases created prior to log version 6, the
+                 * per-database utilization info is incomplete.  Use the old
+                 * method of counting utilization via SortedLSNTreeWalker.
+                 *
+                 * Use a local tracker that is accumulated under the log write
+                 * latch when we're done counting.  Start by recording the LSN
+                 * of the root IN as obsolete.
+                 */
+                LocalUtilizationTracker localTracker =
+                    new LocalUtilizationTracker(envImpl);
+                if (rootLsn != DbLsn.NULL_LSN) {
+                    localTracker.countObsoleteNodeInexact
+                        (rootLsn, LogEntryType.LOG_IN, 0, this);
+                }
+
+                /* Fetch LNs to count LN sizes only if so configured. */
+                boolean fetchLNSize =
+                    envImpl.getCleaner().getFetchObsoleteSize();
+
+                /* Use the tree walker to visit every child LSN in the tree. */
+                ObsoleteProcessor obsoleteProcessor =
+                    new ObsoleteProcessor(this, localTracker);
+                SortedLSNTreeWalker walker = new ObsoleteTreeWalker
+                    (this, rootLsn, fetchLNSize, obsoleteProcessor, rootIN);
+
+                /*
+                 * At this point, it's possible for the evictor to find an IN
+                 * for this database on the INList. It should be ignored.
+                 */
+                walker.walk();
+
+                /*
+                 * Count obsolete nodes for a deleted database at transaction
+                 * end time.  Write out the modified file summaries for
+                 * recovery.
+                 */
+                envImpl.getUtilizationProfile().flushLocalTracker
+                    (localTracker);
+            }
+
+            /* Remove all INs for this database from the INList. */
+            MemoryBudget mb = envImpl.getMemoryBudget();
+            INList inList = envImpl.getInMemoryINs();
+            long memoryChange = 0;
+            try {
+                Iterator<IN> iter = inList.iterator();
+                while (iter.hasNext()) {
+                    IN thisIN = iter.next();
+                    if (thisIN.getDatabase() == this) {
+                        iter.remove();
+                        memoryChange +=
+                            (0 - thisIN.getBudgetedMemorySize());
+                        thisIN.setInListResident(false);
+                    }
+                }
+            } finally {
+                mb.updateTreeMemoryUsage(memoryChange);
+            }
+        } finally {
+            /* Adjust memory budget for utilization info. */
+            dbFileSummaries.subtractFromMemoryBudget();
+
+            deleteState = DELETED;
+            /* releaseDb to balance getDb called by truncate/remove. */
+            envImpl.getDbTree().releaseDb(this);
+        }
+    }
+
+    public void checkIsDeleted(String operation)
+        throws DatabaseException {
+
+        if (isDeleted()) {
+            throw new DatabaseException
+                ("Attempt to " + operation + " a deleted database");
+        }
+    }
+
+    /**
+     * Counts all active LSNs in a database as obsolete.
+     *
+     * @param mapLnLsn is the LSN of the MapLN when called via recovery,
+     * otherwise is NULL_LSN.
+     *
+     * <p>Must be called under the log write latch or during recovery.</p>
+     */
+    public void countObsoleteDb(BaseUtilizationTracker tracker,
+                                long mapLnLsn) {
+        /*
+         * Even though the check for createdAtLogVersion and
+         * forceTreeWalkForTruncateAndRemove is made in finishDeleteProcessing
+         * before calling this method, we must repeat the check here because
+         * this method is also called by recovery.
+         */
+        if (createdAtLogVersion >= 6 && !forceTreeWalkForTruncateAndRemove) {
+            tracker.countObsoleteDb(dbFileSummaries, mapLnLsn);
+        }
+    }
+
+    private static class ObsoleteTreeWalker extends SortedLSNTreeWalker {
+
+        private IN rootIN;
+
+        private ObsoleteTreeWalker(DatabaseImpl dbImpl,
+                                   long rootLsn,
+                                   boolean fetchLNSize,
+                                   TreeNodeProcessor callback,
+                                   IN rootIN)
+            throws DatabaseException {
+
+            super(dbImpl,
+                  true,  // set INList finish harvest
+                  rootLsn,
+                  callback,
+                  null,  /* savedException */
+                  null); /* exception predicate */
+
+            accumulateLNs = fetchLNSize;
+            this.rootIN = rootIN;
+        }
+
+        @Override
+        protected IN getResidentRootIN()
+            throws DatabaseException {
+
+            return rootIN;
+        }
+    }
+
+    /* Mark each LSN obsolete in the utilization tracker. */
+    private static class ObsoleteProcessor implements TreeNodeProcessor {
+
+        private LocalUtilizationTracker localTracker;
+        private DatabaseImpl db;
+
+        ObsoleteProcessor(DatabaseImpl db,
+                          LocalUtilizationTracker localTracker) {
+            this.db = db;
+            this.localTracker = localTracker;
+        }
+
+        public void processLSN(long childLsn,
+                               LogEntryType childType,
+                               Node node,
+                               byte[] lnKey)
+            throws DatabaseException {
+
+            assert childLsn != DbLsn.NULL_LSN;
+
+            /*
+             * Count the LN log size if an LN node and key are available.  But
+             * do not count the size if the LN is dirty, since the logged LN is
+             * not available. [#15365]
+             */
+            int size = 0;
+            if (lnKey != null && node instanceof LN) {
+                LN ln = (LN) node;
+                size = ln.getLastLoggedSize();
+            }
+
+            localTracker.countObsoleteNodeInexact
+                (childLsn, childType, size, db);
+        }
+
+        public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey)
+            throws DatabaseException {
+
+            assert ln != null;
+
+            /*
+             * Do not count the size (pass zero) because the LN is dirty and
+             * the logged LN is not available.
+             */
+            localTracker.countObsoleteNodeInexact
+                (childLsn, ln.getLogType(), 0, db);
+        }
+
+        public void processDupCount(int ignore) {
+        }
+    }
+
+    public DatabaseStats stat(StatsConfig config)
+        throws DatabaseException {
+
+        if (stats == null) {
+
+            /*
+             * Called first time w/ FAST_STATS so just give them an
+             * empty one.
+             */
+            stats = new BtreeStats();
+        }
+
+        if (!config.getFast()) {
+            if (tree == null) {
+                return new BtreeStats();
+            }
+
+            PrintStream out = config.getShowProgressStream();
+            if (out == null) {
+                out = System.err;
+            }
+
+            StatsAccumulator statsAcc =
+                new StatsAccumulator(out,
+                                     config.getShowProgressInterval(),
+                                     getEmptyStats());
+            walkDatabaseTree(statsAcc, out, true);
+            statsAcc.copyToStats(stats);
+        }
+
+        return stats;
+    }
+
+    /*
+     * @param config verify configuration
+     * @param emptyStats empty database stats, to be filled by this method
+     * @return true if the verify saw no errors.
+     */
+    public boolean verify(VerifyConfig config, DatabaseStats emptyStats)
+        throws DatabaseException {
+
+        if (tree == null) {
+            return true;
+        }
+
+        PrintStream out = config.getShowProgressStream();
+        if (out == null) {
+            out = System.err;
+        }
+
+        StatsAccumulator statsAcc =
+            new StatsAccumulator(out,
+                                 config.getShowProgressInterval(),
+                                 emptyStats) {
+                @Override
+                void verifyNode(Node node) {
+
+                    try {
+                        node.verify(null);
+                    } catch (DatabaseException INE) {
+                        progressStream.println(INE);
+                    }
+                }
+                };
+        boolean ok = walkDatabaseTree(statsAcc, out, config.getPrintInfo());
+        statsAcc.copyToStats(emptyStats);
+        return ok;
+    }
+
+    /** @return the right kind of stats object for this database. */
+    public DatabaseStats getEmptyStats() {
+        return new BtreeStats();
+    }
+
+    /**
+     * Walk the database tree, passing each node to the given accumulator.
+     *
+     * @return true if no errors were encountered.
+     */
+    private boolean walkDatabaseTree(TreeWalkerStatsAccumulator statsAcc,
+                                     PrintStream out,
+                                     boolean verbose)
+        throws DatabaseException {
+
+        boolean ok = true;
+        Locker locker = BasicLocker.createBasicLocker(envImpl);
+        CursorImpl cursor = null;
+
+        try {
+            EnvironmentImpl.incThreadLocalReferenceCount();
+            cursor = new CursorImpl(this, locker);
+            tree.setTreeStatsAccumulator(statsAcc);
+
+            /*
+             * The cursor's accumulator is only used for the initial
+             * position() call; subsequent operations use the tree-level
+             * accumulator set above.
+             */
+            cursor.setTreeStatsAccumulator(statsAcc);
+            DatabaseEntry foundData = new DatabaseEntry();
+            DatabaseEntry key = new DatabaseEntry();
+
+            if (cursor.positionFirstOrLast
+                    (true /*first*/, null /*duplicateRoot*/)) {
+                OperationStatus status = cursor.getCurrentAlreadyLatched
+                    (key, foundData, LockType.NONE, true /*first*/);
+                if (status == OperationStatus.SUCCESS) {
+                    if (cursor.getDupBIN() != null) {
+                        cursor.incrementLNCount();
+                    }
+                }
+                boolean done = false;
+                while (!done) {
+
+                    /* Perform eviction before each cursor operation. */
+                    envImpl.getEvictor().doCriticalEviction
+                        (false /*backgroundIO*/);
+
+                    try {
+                        status = cursor.getNext
+                            (key, foundData, LockType.NONE, true /*forward*/,
+                             false /*alreadyLatched*/);
+                    } catch (DatabaseException e) {
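+
+                        /*
+                         * A record could not be read.  Try to advance the
+                         * cursor past the damaged entry and keep verifying;
+                         * if that also fails, give up and rethrow.
+                         */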
+                        ok = false;
+                        if (cursor.advanceCursor(key, foundData)) {
+                            if (verbose) {
+                                out.println("Error encountered (continuing):");
+                                out.println(e);
+                                printErrorRecord(out, key, foundData);
+                            }
+                        } else {
+                            throw e;
+                        }
+                    }
+                    if (status != OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.setTreeStatsAccumulator(null);
+            }
+            tree.setTreeStatsAccumulator(null);
+            EnvironmentImpl.decThreadLocalReferenceCount();
+
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return ok;
+    }
+
+    /**
+     * Prints the key and data, if available, for a BIN entry that could not be
+     * read/verified.  Uses the same format as DbDump and prints both the hex
+     * and printable versions of the entries.
+     */
+    private void printErrorRecord(PrintStream out,
+                                  DatabaseEntry key,
+                                  DatabaseEntry data) {
+
+        byte[] bytes = key.getData();
+        StringBuffer sb = new StringBuffer("Error Key ");
+        if (bytes == null) {
+            sb.append("UNKNOWN");
+        } else {
+            CmdUtil.formatEntry(sb, bytes, false);
+            sb.append(' ');
+            CmdUtil.formatEntry(sb, bytes, true);
+        }
+        out.println(sb);
+
+        bytes = data.getData();
+        sb = new StringBuffer("Error Data ");
+        if (bytes == null) {
+            sb.append("UNKNOWN");
+        } else {
+            CmdUtil.formatEntry(sb, bytes, false);
+            sb.append(' ');
+            CmdUtil.formatEntry(sb, bytes, true);
+        }
+        out.println(sb);
+    }
+
+    static class StatsAccumulator implements TreeWalkerStatsAccumulator {
+        private Set<Long> inNodeIdsSeen = new HashSet<Long>();
+        private Set<Long> binNodeIdsSeen = new HashSet<Long>();
+        private Set<Long> dinNodeIdsSeen = new HashSet<Long>();
+        private Set<Long> dbinNodeIdsSeen = new HashSet<Long>();
+        private Set<Long> dupCountLNsSeen = new HashSet<Long>();
+        private long[] insSeenByLevel = null;
+        private long[] binsSeenByLevel = null;
+        private long[] dinsSeenByLevel = null;
+        private long[] dbinsSeenByLevel = null;
+        private long lnCount = 0;
+        private long deletedLNCount = 0;
+        private int mainTreeMaxDepth = 0;
+        private int duplicateTreeMaxDepth = 0;
+        private DatabaseStats useStats;
+
+        PrintStream progressStream;
+        int progressInterval;
+
+        /* The max levels we ever expect to see in a tree. */
+        private static final int MAX_LEVELS = 100;
+
+        StatsAccumulator(PrintStream progressStream,
+                         int progressInterval,
+                         DatabaseStats useStats) {
+
+            this.progressStream = progressStream;
+            this.progressInterval = progressInterval;
+
+            insSeenByLevel = new long[MAX_LEVELS];
+            binsSeenByLevel = new long[MAX_LEVELS];
+            dinsSeenByLevel = new long[MAX_LEVELS];
+            dbinsSeenByLevel = new long[MAX_LEVELS];
+
+            this.useStats = useStats;
+        }
+
+        void verifyNode(Node node) {
+
+        }
+
+        public void processIN(IN node, Long nid, int level) {
+            if (inNodeIdsSeen.add(nid)) {
+                tallyLevel(level, insSeenByLevel);
+                verifyNode(node);
+            }
+        }
+
+        public void processBIN(BIN node, Long nid, int level) {
+            if (binNodeIdsSeen.add(nid)) {
+                tallyLevel(level, binsSeenByLevel);
+                verifyNode(node);
+            }
+        }
+
+        public void processDIN(DIN node, Long nid, int level) {
+            if (dinNodeIdsSeen.add(nid)) {
+                tallyLevel(level, dinsSeenByLevel);
+                verifyNode(node);
+            }
+        }
+
+        public void processDBIN(DBIN node, Long nid, int level) {
+            if (dbinNodeIdsSeen.add(nid)) {
+                tallyLevel(level, dbinsSeenByLevel);
+                verifyNode(node);
+            }
+        }
+
+        public void processDupCountLN(DupCountLN node, Long nid) {
+            dupCountLNsSeen.add(nid);
+            verifyNode(node);
+        }
+
+        private void tallyLevel(int levelArg, long[] nodesSeenByLevel) {
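+
+            /*
+             * IN levels encode the tree type in their high bits: nodes at
+             * DBMAP_LEVEL or above belong to the db mapping tree and are not
+             * tallied, and the MAIN_LEVEL bit is masked off to recover the
+             * depth within the main tree.  All other levels are duplicate
+             * tree depths.
+             */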
+            int level = levelArg;
+            if (level >= IN.DBMAP_LEVEL) {
+                return;
+            }
+            if (level >= IN.MAIN_LEVEL) {
+                level &= ~IN.MAIN_LEVEL;
+                if (level > mainTreeMaxDepth) {
+                    mainTreeMaxDepth = level;
+                }
+            } else {
+                if (level > duplicateTreeMaxDepth) {
+                    duplicateTreeMaxDepth = level;
+                }
+            }
+
+            nodesSeenByLevel[level]++;
+        }
+
+        public void incrementLNCount() {
+            lnCount++;
+            if (progressInterval != 0) {
+                if ((lnCount % progressInterval) == 0) {
+                    copyToStats(useStats);
+                    progressStream.println(useStats);
+                }
+            }
+        }
+
+        public void incrementDeletedLNCount() {
+            deletedLNCount++;
+        }
+
+        Set<Long> getINNodeIdsSeen() {
+            return inNodeIdsSeen;
+        }
+
+        Set<Long> getBINNodeIdsSeen() {
+            return binNodeIdsSeen;
+        }
+
+        Set<Long> getDINNodeIdsSeen() {
+            return dinNodeIdsSeen;
+        }
+
+        Set<Long> getDBINNodeIdsSeen() {
+            return dbinNodeIdsSeen;
+        }
+
+        long[] getINsByLevel() {
+            return insSeenByLevel;
+        }
+
+        long[] getBINsByLevel() {
+            return binsSeenByLevel;
+        }
+
+        long[] getDINsByLevel() {
+            return dinsSeenByLevel;
+        }
+
+        long[] getDBINsByLevel() {
+            return dbinsSeenByLevel;
+        }
+
+        long getLNCount() {
+            return lnCount;
+        }
+
+        Set<Long> getDupCountLNCount() {
+            return dupCountLNsSeen;
+        }
+
+        long getDeletedLNCount() {
+            return deletedLNCount;
+        }
+
+        int getMainTreeMaxDepth() {
+            return mainTreeMaxDepth;
+        }
+
+        int getDuplicateTreeMaxDepth() {
+            return duplicateTreeMaxDepth;
+        }
+
+        private void copyToStats(DatabaseStats stats) {
+            BtreeStats bStats = (BtreeStats) stats;
+            bStats.setInternalNodeCount(getINNodeIdsSeen().size());
+            bStats.setBottomInternalNodeCount
+                (getBINNodeIdsSeen().size());
+            bStats.setDuplicateInternalNodeCount
+                (getDINNodeIdsSeen().size());
+            bStats.setDuplicateBottomInternalNodeCount
+                (getDBINNodeIdsSeen().size());
+            bStats.setLeafNodeCount(getLNCount());
+            bStats.setDeletedLeafNodeCount(getDeletedLNCount());
+            bStats.setDupCountLeafNodeCount
+                (getDupCountLNCount().size());
+            bStats.setMainTreeMaxDepth(getMainTreeMaxDepth());
+            bStats.setDuplicateTreeMaxDepth(getDuplicateTreeMaxDepth());
+            bStats.setINsByLevel(getINsByLevel());
+            bStats.setBINsByLevel(getBINsByLevel());
+            bStats.setDINsByLevel(getDINsByLevel());
+            bStats.setDBINsByLevel(getDBINsByLevel());
+        }
+    }
+
+    /*
+     * Preload support: exceptions, classes, callbacks.
+     */
+
+    /**
+     * Undeclared exception used to throw through SortedLSNTreeWalker code
+     * when preload has either filled the user's max byte or time request.
+     */
+    @SuppressWarnings("serial")
+    private static class HaltPreloadException extends RuntimeException {
+
+        private PreloadStatus status;
+
+        HaltPreloadException(PreloadStatus status) {
+            super(status.toString());
+            this.status = status;
+        }
+
+        PreloadStatus getStatus() {
+            return status;
+        }
+    }
+
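+    /*
+     * Reusable singleton instances, thrown purely for control flow when a
+     * preload limit is reached; allocating a new exception per throw is
+     * unnecessary.
+     */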
+    private static final HaltPreloadException
+        TIME_EXCEEDED_PRELOAD_EXCEPTION =
+        new HaltPreloadException(PreloadStatus.EXCEEDED_TIME);
+
+    private static final HaltPreloadException
+        MEMORY_EXCEEDED_PRELOAD_EXCEPTION =
+        new HaltPreloadException(PreloadStatus.FILLED_CACHE);
+
+    /**
+     * The processLSN() code for PreloadLSNTreeWalker.
+     */
+    private static class PreloadProcessor implements TreeNodeProcessor {
+
+        private EnvironmentImpl envImpl;
+        private long maxBytes;
+        private long targetTime;
+        private PreloadStats stats;
+
+        PreloadProcessor(EnvironmentImpl envImpl,
+                         long maxBytes,
+                         long targetTime,
+                         PreloadStats stats) {
+            this.envImpl = envImpl;
+            this.maxBytes = maxBytes;
+            this.targetTime = targetTime;
+            this.stats = stats;
+        }
+
+        /**
+         * Called for each LSN that the SortedLSNTreeWalker encounters.
+         */
+        public void processLSN(long childLsn,
+                               LogEntryType childType,
+                               Node ignore,
+                               byte[] ignore2)
+            throws DatabaseException {
+
+            /*
+             * Check if we've exceeded either the max time or max bytes
+             * allowed for this preload() call.
+             */
+            if (System.currentTimeMillis() > targetTime) {
+                throw TIME_EXCEEDED_PRELOAD_EXCEPTION;
+            }
+
+            if (envImpl.getMemoryBudget().getCacheMemoryUsage() > maxBytes) {
+                throw MEMORY_EXCEEDED_PRELOAD_EXCEPTION;
+            }
+
+            /* Count entry types to return in the PreloadStats. */
+            if (childType.equals(LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL) ||
+                childType.equals(LogEntryType.LOG_DUPCOUNTLN)) {
+                stats.incDupCountLNsLoaded();
+            } else if (childType.equals(LogEntryType.LOG_LN_TRANSACTIONAL) ||
+                       childType.equals(LogEntryType.LOG_LN)) {
+                stats.incLNsLoaded();
+            } else if (childType.equals(LogEntryType.LOG_DBIN)) {
+                stats.incDBINsLoaded();
+            } else if (childType.equals(LogEntryType.LOG_BIN)) {
+                stats.incBINsLoaded();
+            } else if (childType.equals(LogEntryType.LOG_DIN)) {
+                stats.incDINsLoaded();
+            } else if (childType.equals(LogEntryType.LOG_IN)) {
+                stats.incINsLoaded();
+            }
+        }
+
+        public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey)
+            throws DatabaseException {
+        }
+
+        public void processDupCount(int ignore) {
+        }
+    }
+
+    /*
+     * An extension of SortedLSNTreeWalker that provides an LSN to IN/index
+     * map.  When the tree walker processes an LSN, the map is used to look
+     * up the parent IN and the child entry index for that LSN.  Since
+     * fetchLSN is called with an arbitrary LSN, and since a preload fetch
+     * must set up the parent to refer to the node being prefetched, the
+     * parent must be in hand at the time of the fetch.  This map makes the
+     * parent available so that fetchTarget can be called on it.
+     */
+    private static class PreloadLSNTreeWalker extends SortedLSNTreeWalker {
+
+        /* LSN -> INEntry */
+        private Map<Long,INEntry> lsnINMap = new HashMap<Long,INEntry>();
+
+        /* struct to hold IN/entry-index pair. */
+        private static class INEntry {
+            INEntry(IN in, int index) {
+                this.in = in;
+                this.index = index;
+            }
+
+            IN in;
+            int index;
+        }
+
+        PreloadLSNTreeWalker(DatabaseImpl db,
+                             TreeNodeProcessor callback,
+                             PreloadConfig conf)
+            throws DatabaseException {
+
+            super(db, false /* setDbState */, db.tree.getRootLsn(), callback,
+                  null, null); /* savedException, exception predicate */
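+            /* Accumulate and fetch LNs only if the user asked to load them. */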
+            accumulateLNs = conf.getLoadLNs();
+        }
+
+        private final class PreloadWithRootLatched
+            implements WithRootLatched {
+
+            public IN doWork(ChildReference root)
+                throws DatabaseException {
+
+                walkInternal();
+                return null;
+            }
+        }
+
+        @Override
+        public void walk()
+            throws DatabaseException {
+
+            WithRootLatched preloadWRL = new PreloadWithRootLatched();
+            dbImpl.getTree().withRootLatchedExclusive(preloadWRL);
+        }
+
+        /*
+         * Method to get the Root IN for this DatabaseImpl's tree.  Latches
+         * the root IN.
+         */
+        @Override
+        protected IN getRootIN(long rootLsn)
+            throws DatabaseException {
+
+            return dbImpl.getTree().getRootIN(CacheMode.UNCHANGED);
+        }
+
+        @Override
+        protected IN getResidentRootIN()
+            throws DatabaseException {
+
+            return dbImpl.getTree().getResidentRootIN(true);
+        }
+
+        /*
+         * Release the latch on the root IN.
+         */
+        @Override
+        protected void releaseRootIN(IN root)
+            throws DatabaseException {
+
+            root.releaseLatch();
+        }
+
+        /*
+         * Add an LSN -> IN/index entry to the map.
+         */
+        @Override
+        protected void addToLsnINMap(Long lsn, IN in, int index) {
+            assert in.getDatabase() != null;
+            lsnINMap.put(lsn, new INEntry(in, index));
+        }
+
+        /*
+         * Process an LSN.  Get and remove its INEntry from the map, then
+         * fetch the target at the INEntry's IN/index pair.  This method is
+         * called in sorted LSN order.
+         *
+         * We do not bother to set the lnKeyEntry because the lnKey parameter
+         * is never used in the processLSN method.
+         */
+        @Override
+        protected Node fetchLSN(long lsn, DatabaseEntry lnKeyEntry)
+            throws DatabaseException {
+
+            INEntry inEntry = (INEntry) lsnINMap.remove(Long.valueOf(lsn));
+            assert (inEntry != null) : DbLsn.getNoFormatString(lsn);
+            IN in = inEntry.in;
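+
+            /*
+             * Latch the parent IN only if this thread does not already hold
+             * its write latch; the release in the finally block below is
+             * conditional in the same way.
+             */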
+            boolean isLatchedAlready = in.isLatchOwnerForWrite();
+            if (!isLatchedAlready) {
+                in.latch();
+            }
+
+            try {
+                int index = inEntry.index;
+                if (index < 0) {
+                    /* Negative index signifies a DupCountLN. */
+                    DIN din = (DIN) in;
+                    return din.getDupCountLN();
+                } else {
+                    if (in.isEntryKnownDeleted(index) ||
+                        in.getLsn(index) != lsn) {
+                        return null;
+                    }
+                    return in.fetchTarget(index);
+                }
+            } finally {
+                if (!isLatchedAlready) {
+                    in.releaseLatch();
+                }
+            }
+        }
+    }
+
+    /**
+     * Preload the cache, using up to maxBytes bytes or maxMillisecs msec.
+     */
+    public PreloadStats preload(PreloadConfig config)
+        throws DatabaseException {
+
+        try {
+            long maxBytes = config.getMaxBytes();
+            long maxMillisecs = config.getMaxMillisecs();
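+
+            /* A targetTime of Long.MAX_VALUE means no time limit. */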
+            long targetTime = Long.MAX_VALUE;
+            if (maxMillisecs > 0) {
+                targetTime = System.currentTimeMillis() + maxMillisecs;
+            }
+
+            long cacheBudget = envImpl.getMemoryBudget().getMaxMemory();
+            if (maxBytes == 0) {
+                maxBytes = cacheBudget;
+            } else if (maxBytes > cacheBudget) {
+                throw new IllegalArgumentException
+                    ("maxBytes parameter to Database.preload() was " +
+                     "specified as " +
+                     maxBytes + " bytes \nbut the cache is only " +
+                     cacheBudget + " bytes.");
+            }
+
+            PreloadStats pstats = new PreloadStats();
+            PreloadProcessor callback =
+                new PreloadProcessor(envImpl, maxBytes, targetTime, pstats);
+            SortedLSNTreeWalker walker =
+                new PreloadLSNTreeWalker(this, callback, config);
+            walker.setPassNullLSNNodes(true);
+            try {
+                walker.walk();
+            } catch (HaltPreloadException HPE) {
+                pstats.setStatus(HPE.getStatus());
+            }
+
+            assert LatchSupport.countLatchesHeld() == 0;
+            return pstats;
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * The processLSN() code for the SortedLSNTreeWalker used by count().
+     */
+    private static class CountProcessor implements TreeNodeProcessor {
+
+        private EnvironmentImpl envImpl;
+        /* Use PreloadStats in case we ever want to count more than LNs. */
+        private PreloadStats stats;
+
+        CountProcessor(EnvironmentImpl envImpl,
+                       PreloadStats stats) {
+            this.envImpl = envImpl;
+            this.stats = stats;
+        }
+
+        /**
+         * Called for each LSN that the SortedLSNTreeWalker encounters.
+         */
+        public void processLSN(long childLsn,
+                               LogEntryType childType,
+                               Node ignore,
+                               byte[] ignore2)
+            throws DatabaseException {
+
+            /* Count entry types to return in the PreloadStats. */
+            if (childType.equals(LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL) ||
+                childType.equals(LogEntryType.LOG_DUPCOUNTLN)) {
+                /* Don't descend into the dup tree -- just use the DCL. */
+                DupCountLN dcl = (DupCountLN)
+                    envImpl.getLogManager().get(childLsn);
+                int dupCount = dcl.getDupCount();
+                stats.addLNsLoaded(dupCount);
+            } else if (childType.equals(LogEntryType.LOG_LN_TRANSACTIONAL) ||
+                       childType.equals(LogEntryType.LOG_LN)) {
+                stats.incLNsLoaded();
+            }
+        }
+
+        public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey)
+            throws DatabaseException {
+        }
+
+        /* Used when processing Deferred Write dbs and there are no LSNs. */
+        public void processDupCount(int count) {
+            stats.addLNsLoaded(count);
+        }
+    }
+
+    private static class CountExceptionPredicate
+        implements ExceptionPredicate {
+
+        /*
+         * Return true if the exception can be ignored.
+         * LogFileNotFoundException is the only one so far.
+         */
+        public boolean ignoreException(Exception e) {
+            return (e instanceof LogFileNotFoundException);
+        }
+    }
+
+    /**
+     * Count entries in the database including dups, but don't dirty the cache.
+     */
+    public long count()
+        throws DatabaseException {
+
+        try {
+            PreloadStats pstats = new PreloadStats();
+
+            CountProcessor callback = new CountProcessor(envImpl, pstats);
+            ExceptionPredicate excPredicate = new CountExceptionPredicate();
+            SortedLSNTreeWalker walker =
+                new SortedLSNTreeWalker(this, false /* setDbState */,
+                                        tree.getRootLsn(), callback, null,
+                                        excPredicate);
+            /* Don't descend into the dup tree. Use the DupCountLN. */
+            walker.setProcessDupTree(false);
+            walker.setPassNullLSNNodes(true);
+            walker.walk();
+
+            assert LatchSupport.countLatchesHeld() == 0;
+            return pstats.getNLNsLoaded();
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /*
+     * Dumping
+     */
+    public String dumpString(int nSpaces) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("<database id=\"" );
+        sb.append(id.toString());
+        sb.append("\"");
+        sb.append(" dupsort=\"");
+        sb.append(getSortedDuplicates());
+        sb.append("\"");
+        sb.append(" temporary=\"");
+        sb.append(isTemporary());
+        sb.append("\"");
+        sb.append(" deferredWrite=\"");
+        sb.append(isDurableDeferredWrite());
+        sb.append("\"");
+        sb.append(" keyPrefixing=\"");
+        sb.append(getKeyPrefixing());
+        sb.append("\"");
+        if (btreeComparator != null) {
+            sb.append(" btc=\"");
+            sb.append(getComparatorClassName(btreeComparator));
+            sb.append("\"");
+        }
+        if (duplicateComparator != null) {
+            sb.append(" dupc=\"");
+            sb.append(getComparatorClassName(duplicateComparator));
+            sb.append("\"");
+        }
+        sb.append(">");
+        if (dbFileSummaries != null) {
+            Iterator<Map.Entry<Long,DbFileSummary>> entries =
+                (Iterator<Map.Entry<Long,DbFileSummary>>)
+                dbFileSummaries.entrySet().iterator();
+            while (entries.hasNext()) {
+                Map.Entry<Long,DbFileSummary> entry = entries.next();
+                Long fileNum = (Long) entry.getKey();
+                DbFileSummary summary = entry.getValue();
+                sb.append("<file file=\"").append(fileNum);
+                sb.append("\">");
+                sb.append(summary);
+                sb.append("/file>");
+            }
+        }
+        sb.append("</database>");
+        return sb.toString();
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * This log entry type is configured to perform marshaling (getLogSize and
+     * writeToLog) under the write log mutex.  Otherwise, the size could change
+     * in between calls to these two methods as the result of utilization
+     * tracking.
+     *
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+
+        int size =
+            id.getLogSize() +
+            tree.getLogSize() +
+            1 + // flags, 1 byte
+            LogUtils.getByteArrayLogSize(btreeComparatorBytes) +
+            LogUtils.getByteArrayLogSize(duplicateComparatorBytes) +
+            LogUtils.getPackedIntLogSize(maxMainTreeEntriesPerNode) +
+            LogUtils.getPackedIntLogSize(maxDupTreeEntriesPerNode) +
+            1;  // createdAtLogVersion
+
+        size += LogUtils.getPackedIntLogSize(dbFileSummaries.size());
+
+        Iterator<Map.Entry<Long,DbFileSummary>> i =
+            (Iterator<Map.Entry<Long,DbFileSummary>>)
+            dbFileSummaries.entrySet().iterator();
+        while (i.hasNext()) {
+            Map.Entry<Long,DbFileSummary> entry = i.next();
+            Long fileNum = entry.getKey();
+            DbFileSummary summary = entry.getValue();
+            size +=
+                LogUtils.getPackedLongLogSize(fileNum.longValue()) +
+                summary.getLogSize();
+        }
+        return size;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        id.writeToLog(logBuffer);
+        tree.writeToLog(logBuffer);
+        logBuffer.put(flags);
+        LogUtils.writeByteArray(logBuffer, btreeComparatorBytes);
+        LogUtils.writeByteArray(logBuffer, duplicateComparatorBytes);
+        LogUtils.writePackedInt(logBuffer, maxMainTreeEntriesPerNode);
+        LogUtils.writePackedInt(logBuffer, maxDupTreeEntriesPerNode);
+        logBuffer.put(createdAtLogVersion);
+        LogUtils.writePackedInt(logBuffer, dbFileSummaries.size());
+        Iterator<Map.Entry<Long,DbFileSummary>> i =
+            (Iterator<Map.Entry<Long,DbFileSummary>>)
+            dbFileSummaries.entrySet().iterator();
+
+        while (i.hasNext()) {
+            Map.Entry<Long,DbFileSummary> entry = i.next();
+            Long fileNum = entry.getKey();
+            DbFileSummary summary = entry.getValue();
+            LogUtils.writePackedLong(logBuffer, fileNum.longValue());
+            summary.writeToLog(logBuffer);
+        }
+        dirtyUtilization = false;
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean version6OrLater = (entryVersion >= 6);
+
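+        /*
+         * Entries written before log version 6 use the older unpacked
+         * integer encodings, so the read calls below pass !version6OrLater
+         * as the unpacked flag.
+         */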
+        id.readFromLog(itemBuffer, entryVersion);
+        tree.readFromLog(itemBuffer, entryVersion);
+
+        /*
+         * Versions < 6 have the duplicatesAllowed boolean rather than a
+         * flags byte here, but we don't need a special case because the
+         * old boolean value is 1 and the replacement flag value is also 1.
+         */
+        flags = itemBuffer.get();
+
+        if (forceKeyPrefixing) {
+            setKeyPrefixing();
+        }
+
+        if (entryVersion >= 2) {
+            btreeComparatorBytes =
+                LogUtils.readByteArray(itemBuffer, !version6OrLater);
+            duplicateComparatorBytes =
+                LogUtils.readByteArray(itemBuffer, !version6OrLater);
+        } else {
+            String btreeClassName =
+                LogUtils.readString(itemBuffer, !version6OrLater);
+            String dupClassName =
+                LogUtils.readString(itemBuffer, !version6OrLater);
+            if (btreeClassName.length() == 0) {
+                btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+            } else {
+                btreeComparatorBytes =
+                    objectToBytes(btreeClassName, "Btree");
+            }
+            if (dupClassName.length() == 0) {
+                duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+            } else {
+                duplicateComparatorBytes =
+                    objectToBytes(dupClassName, "Duplicate");
+            }
+        }
+
+        /* Don't instantiate if comparators are unnecessary (DbPrintLog). */
+        if (!EnvironmentImpl.getNoComparators()) {
+            try {
+                if (btreeComparatorBytes.length != 0) {
+                    Object obj = bytesToObject(btreeComparatorBytes, "Btree");
+                    if (obj instanceof String) {
+                        String className = (String)obj;
+
+                        Class<? extends Comparator<byte[]>> cls =
+                            (Class<? extends Comparator<byte[]>>)
+                            Class.forName(className);
+
+                        btreeComparator = instantiateComparator(cls, "Btree");
+                        btreeComparatorByClassName = true;
+                    } else if (obj instanceof Comparator) {
+                        btreeComparator = (Comparator<byte[]>) obj;
+                        btreeComparatorByClassName = false;
+                    } else {
+                        assert false : obj.getClass().getName();
+                    }
+                } else {
+                    btreeComparator = null;
+                    btreeComparatorByClassName = false;
+                }
+                if (duplicateComparatorBytes.length != 0) {
+                    Object obj = bytesToObject
+                        (duplicateComparatorBytes, "Duplicate");
+                    if (obj instanceof String) {
+
+                        Class<? extends Comparator<byte[]>> cls =
+                            (Class<? extends Comparator<byte[]>>)
+                            Class.forName((String) obj);
+
+                        duplicateComparator =
+                            instantiateComparator(cls, "Duplicate");
+                        duplicateComparatorByClassName = true;
+                    } else if (obj instanceof Comparator) {
+                        duplicateComparator = (Comparator<byte[]>) obj;
+                        duplicateComparatorByClassName = false;
+                    } else {
+                        assert false : obj.getClass().getName();
+                    }
+                } else {
+                    duplicateComparator = null;
+                    duplicateComparatorByClassName = false;
+                }
+            } catch (ClassNotFoundException CNFE) {
+                throw new LogException("couldn't instantiate class comparator",
+                                       CNFE);
+            }
+        }
+
+        if (entryVersion >= 1) {
+            maxMainTreeEntriesPerNode =
+                LogUtils.readInt(itemBuffer, !version6OrLater);
+            maxDupTreeEntriesPerNode =
+                LogUtils.readInt(itemBuffer, !version6OrLater);
+        }
+
+        if (version6OrLater) {
+            createdAtLogVersion = itemBuffer.get();
+            int nFiles = LogUtils.readPackedInt(itemBuffer);
+            for (int i = 0; i < nFiles; i += 1) {
+                long fileNum = LogUtils.readPackedLong(itemBuffer);
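+
+                /*
+                 * get() creates the summary entry for this file if it is not
+                 * already present, so it can be populated from the log.
+                 */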
+                DbFileSummary summary = dbFileSummaries.get
+                    (Long.valueOf(fileNum), false /*adjustMemBudget*/,
+                     false /*checkResurrected*/, null /*fileManager*/);
+                summary.readFromLog(itemBuffer, entryVersion);
+            }
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<database ");
+        dumpFlags(sb, verbose, flags);
+        sb.append(" btcmp=\"");
+        sb.append(getComparatorClassName(btreeComparator));
+        sb.append("\"");
+        sb.append(" dupcmp=\"");
+        sb.append(getComparatorClassName(duplicateComparator));
+        sb.append("\" > ");
+        id.dumpLog(sb, verbose);
+        tree.dumpLog(sb, verbose);
+        if (dbFileSummaries != null) {
+            Iterator<Map.Entry<Long,DbFileSummary>> entries =
+                (Iterator<Map.Entry<Long,DbFileSummary>>)
+                dbFileSummaries.entrySet().iterator();
+
+            while (entries.hasNext()) {
+                Map.Entry<Long,DbFileSummary> entry = entries.next();
+                Long fileNum = entry.getKey();
+                DbFileSummary summary = entry.getValue();
+                sb.append("<file file=\"").append(fileNum);
+                sb.append("\">");
+                sb.append(summary);
+                sb.append("</file>");
+            }
+        }
+        sb.append("</database>");
+    }
+
+    static void dumpFlags(StringBuffer sb, boolean verbose, byte flags) {
+        sb.append(" dupsort=\"").append((flags & DUPS_ALLOWED_BIT) != 0);
+        sb.append("\" replicated=\"").append((flags & IS_REPLICATED_BIT) != 0);
+        sb.append("\" temp=\"").append((flags & TEMPORARY_BIT)
+                                       != 0).append("\" ");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * Always returns false; this item should never be compared.
+     *
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * Used for log dumping.
+     */
+    private static String
+        getComparatorClassName(Comparator<byte[]> comparator) {
+
+        if (comparator != null) {
+            return comparator.getClass().getName();
+        } else {
+            return "";
+        }
+    }
+
+    /**
+     * Used both to read from the log and to validate a comparator when set in
+     * DatabaseConfig.
+     */
+    public static Comparator<byte[]>
+        instantiateComparator(Class<? extends Comparator<byte[]>>
+                              comparatorClass,
+                              String comparatorType)
+        throws LogException {
+
+        if (comparatorClass == null) {
+            return null;
+        }
+
+        try {
+            return comparatorClass.newInstance();
+        } catch (InstantiationException IE) {
+            throw new LogException
+                ("Exception while trying to load " + comparatorType +
+                 " Comparator class: " + IE);
+        } catch (IllegalAccessException IAE) {
+            throw new LogException
+                ("Exception while trying to load " + comparatorType +
+                 " Comparator class: " + IAE);
+        }
+    }
+
+    /**
+     * Used to validate a comparator when set in DatabaseConfig.
+     */
+    public static Comparator<byte[]>
+        instantiateComparator(Comparator<byte[]> comparator,
+                              String comparatorType)
+        throws DatabaseException {
+
+        if (comparator == null) {
+            return null;
+        }
+
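+        /*
+         * Round-trip the comparator through serialization to verify that it
+         * is serializable, since the comparator bytes are written to the
+         * database log.
+         */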
+        return (Comparator<byte[]>) bytesToObject
+            (objectToBytes(comparator, comparatorType), comparatorType);
+    }
+
+    /**
+     * Converts a comparator object to a serialized byte array, converting to
+     * a class name String object if byClassName is true.
+     *
+     * @throws LogException if the object cannot be serialized.
+     */
+    private static byte[] comparatorToBytes(Comparator<byte[]> comparator,
+                                            boolean byClassName,
+                                            String comparatorType)
+        throws DatabaseException {
+
+        if (comparator == null) {
+            return LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+        } else {
+            Object obj;
+            if (byClassName) {
+                obj = comparator.getClass().getName();
+            } else {
+                obj = comparator;
+            }
+            return objectToBytes(obj, comparatorType);
+        }
+    }
+
+    /**
+     * Converts an arbitrary object to a serialized byte array.  Assumes that
+     * the object given is non-null.
+     */
+    public static byte[] objectToBytes(Object obj,
+                                       String comparatorType)
+        throws LogException {
+
+        try {
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            ObjectOutputStream oos = new ObjectOutputStream(baos);
+            oos.writeObject(obj);
+            return baos.toByteArray();
+        } catch (IOException e) {
+            throw new LogException
+                ("Exception while trying to load " + comparatorType +
+                 ": " + e);
+        }
+    }
+
+    /**
+     * Converts an arbitrary serialized byte array to an object.  Assumes that
+     * the byte array given is non-null and has a non-zero length.
+     */
+    private static Object bytesToObject(byte[] bytes,
+                                        String comparatorType)
+        throws LogException {
+
+        try {
+            ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+            ObjectInputStream ois = new ObjectInputStream(bais);
+            return ois.readObject();
+        } catch (IOException e) {
+            throw new LogException
+                ("Exception while trying to load " + comparatorType +
+                 ": " + e);
+        } catch (ClassNotFoundException e) {
+            throw new LogException
+                ("Exception while trying to load " + comparatorType +
+                 ": " + e);
+        }
+    }
+
+    /**
+     * Converts a serialized byte array to a comparator.  Used to
+     * instantiate a comparator on a client node.
+     */
+    public static Comparator<byte[]> bytesToComparator(byte[] comparatorBytes,
+                                                       String comparatorType)
+        throws LogException, ClassNotFoundException {
+
+        Comparator<byte[]> comparator = null;
+        if (comparatorBytes.length != 0) {
+            Object obj = bytesToObject(comparatorBytes, comparatorType);
+            if (obj instanceof String) {
+                String className = (String)obj;
+                Class<? extends Comparator<byte[]>> cls =
+                    (Class<? extends Comparator<byte[]>>)
+                    Class.forName(className);
+                comparator = instantiateComparator(cls, comparatorType);
+            } else if (obj instanceof Comparator) {
+                comparator = (Comparator<byte[]>) obj;
+            } else {
+                assert false : obj.getClass().getName();
+            }
+        }
+        return comparator;
+    }
+
+    public int getBinDeltaPercent() {
+        return binDeltaPercent;
+    }
+
+    public int getBinMaxDeltas() {
+        return binMaxDeltas;
+    }
+
+    /**
+     * Return a ReplicationContext that will indicate if this operation
+     * should broadcast data records for this database as part the replication
+     * stream.
+     */
+    public ReplicationContext getRepContext() {
+
+        /*
+         * It's sufficient to base the decision on what to return solely on the
+         * isReplicated() value. We're guaranteed that the environment is
+         * currently opened w/replication. That's because we refuse to open
+         * rep'ed environments in standalone mode and we couldn't have created
+         * this db w/replication specified in a standalone environment.
+         *
+         * We also don't have to check if this is a client or master. If this
+         * method is called, we're executing a write operation that was
+         * instigated by an API call on this node (as opposed to a write
+         * operation that was instigated by an incoming replication message).
+         * We enforce elsewhere that write operations are only conducted by
+         * the master.
+         *
+         * Writes provoked by incoming replication messages are executed
+         * through the putReplicatedLN and deleteReplicatedLN methods.
+         */
+        if (isReplicated()) {
+            return ReplicationContext.MASTER;
+        } else {
+            return ReplicationContext.NO_REPLICATE;
+        }
+    }
+
+    /**
+     * Return a ReplicationContext that includes information on how to
+     * logically replicate database operations. This kind of replication
+     * context must be used for any API call that logs a NameLN representing
+     * a database operation. However, NameLNs that are logged for
+     * other reasons, such as cleaner migration, don't need this special
+     * replication context.
+     */
+    DbOpReplicationContext
+        getOperationRepContext(DbOperationType operationType) {
+
+        /*
+         * If this method is called, we're executing a write operation that was
+         * instigated by an API call on this node (as opposed to a write
+         * operation that was instigated by an incoming replication
+         * message). We enforce elsewhere that write operations are only
+         * conducted by the master.
+         */
+        DbOpReplicationContext context =
+            new DbOpReplicationContext(isReplicated(), operationType);
+
+        if (operationType == DbOperationType.CREATE) {
+            context.setCreateConfig
+                (new ReplicatedDatabaseConfig(flags,
+                                              maxMainTreeEntriesPerNode,
+                                              maxDupTreeEntriesPerNode,
+                                              btreeComparatorBytes,
+                                              duplicateComparatorBytes));
+        } else if (operationType == DbOperationType.TRUNCATE) {
+            context.setTruncateOldDbId(id);
+        }
+
+        return context;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbConfigException.java b/src/com/sleepycat/je/dbi/DbConfigException.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2c1fc0a5ea96feac7e638b7c5b4aa8690ceb27c
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbConfigException.java
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbConfigException.java,v 1.18.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Configuration-related exceptions.
+ */
+public class DbConfigException extends DatabaseException {
+
+    public DbConfigException(Throwable t) {
+        super(t);
+    }
+
+    public DbConfigException(String message) {
+        super(message);
+    }
+
+    public DbConfigException(String message, Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbConfigManager.java b/src/com/sleepycat/je/dbi/DbConfigManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..86e25938c4df3bf543cb6d173fddc3ded4d29102
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbConfigManager.java
@@ -0,0 +1,491 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DbConfigManager.java,v 1.48 2008/06/30 20:54:46 linda Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.BooleanConfigParam;
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.config.IntConfigParam;
+import com.sleepycat.je.config.LongConfigParam;
+
+/**
+ * DbConfigManager holds the configuration parameters for an environment.
+ *
+ * In general, all configuration parameters are represented by a ConfigParam
+ * defined in com.sleepycat.je.config.EnvironmentParams and can be represented
+ * by a property described by the EnvironmentConfig String constants.
+ * Environment parameters have some interesting twists because there are some
+ * attributes that are scoped by handle, such as the commit durability
+ * (txnSync, txnNoSync, etc) parameters.
+ *
+ * DbConfigManager is instantiated first by the EnvironmentImpl, and is
+ * loaded with the base configuration parameters. If replication is enabled,
+ * additional properties are added when the Replicator is instantiated.
+ * In order to keep replication code out of the base code, replication
+ * parameters are loaded by way of the addConfigurations method.
+ */
+public class DbConfigManager {
+
+    /*
+     * The name of the JE properties file, to be found in the environment
+     * directory.
+     */
+    private static final String PROPFILE_NAME = "je.properties";
+
+    /*
+     * All properties in effect for this JE instance, both environment
+     * and replicator scoped, are stored in this Properties field.
+     */
+    private Properties props;
+
+    /*
+     * Save a reference to the environment config to access debug properties
+     * that are fields in EnvironmentConfig, must be set before the
+     * environment is created, and are not represented as JE properties.
+     */
+    private EnvironmentConfig environmentConfig;
+
+    public DbConfigManager(EnvironmentConfig config)
+        throws DbConfigException {
+
+        environmentConfig = config;
+        if (config == null) {
+            props = new Properties();
+        } else {
+            props = DbInternal.getProps(config);
+        }
+    }
+
+    /**
+     * Add all configuration properties in the specified property bag
+     * to this environment's configuration. Used to add replication
+     * specific configurations from ReplicatorConfig without referring
+     * to replication classes.
+     */
+    public void addConfigurations(Properties additionalProps) {
+        props.putAll(additionalProps);
+    }
+
+    public EnvironmentConfig getEnvironmentConfig() {
+        return environmentConfig;
+    }
+
+    /*
+     * Parameter Access
+     */
+
+    /**
+     * Get this parameter from the environment wide configuration settings.
+     * @param configParam
+     *
+     * @return default for param if param wasn't explicitly set
+     */
+    public synchronized String get(ConfigParam configParam)
+        throws IllegalArgumentException {
+
+        return getConfigParam(props, configParam.getName());
+    }
+
+    /**
+     * Get this parameter from the environment wide configuration settings.
+     *
+     * @param configParamName
+     *
+     * @return default for param if param wasn't explicitly set
+     */
+    public synchronized String get(String configParamName)
+        throws IllegalArgumentException {
+
+        return getConfigParam(props, configParamName);
+    }
+
+    /**
+     * Get this parameter from the environment wide configuration settings.
+     *
+     * @param configParam
+     *
+     * @return default for param if it wasn't explicitly set.
+     */
+    public boolean getBoolean(BooleanConfigParam configParam)
+        throws DatabaseException {
+
+        /* See if it's specified. */
+        String val = get(configParam);
+        return Boolean.valueOf(val).booleanValue();
+    }
+
+    /**
+     * Get this parameter from the environment wide configuration settings.
+     *
+     * @param configParam
+     * @return default for param if it wasn't explicitly set.
+     */
+    public int getInt(IntConfigParam configParam)
+        throws DatabaseException {
+
+        /* See if it's specified. */
+        String val = get(configParam);
+        int intValue = 0;
+        if (val != null) {
+            try {
+                intValue = Integer.parseInt(val);
+            } catch (NumberFormatException e) {
+
+                /*
+                 * This should never happen if we put error checking into
+                 * the loading of config values.
+                 */
+                assert false: e.getMessage();
+            }
+        }
+        return intValue;
+    }
+
+    /**
+     * Get this parameter from the environment wide configuration settings.
+     *
+     * @param configParam
+     * @return default for param if it wasn't explicitly set
+     */
+    public long getLong(LongConfigParam configParam)
+        throws DatabaseException {
+
+        /* See if it's specified. */
+        String val = get(configParam);
+        long longValue = 0;
+        if (val != null) {
+            try {
+                longValue = Long.parseLong(val);
+            } catch (NumberFormatException e) {
+                /*
+                 * This should never happen if we put error checking
+                 * into the loading of config values.
+                 */
+                assert false : e.getMessage();
+            }
+        }
+        return longValue;
+    }
+
+    /*
+     * Helper methods used by EnvironmentConfig and ReplicatorConfig.
+     */
+
+    /**
+     * Validate a collection of configurations at Environment and Replicator
+     * startup time. Check for valid configuration names and values.
+     * SuppressWarnings here because Enumeration doesn't work well with 
+     * Properties in Java 1.5
+     */
+    @SuppressWarnings("unchecked")
+    public static void validateProperties(Properties props,
+                                          boolean forReplication,
+                                          String configClassName,
+                                          boolean verifyForReplication)
+        throws IllegalArgumentException {
+
+        /* Check that the properties have valid names and values. */
+        Enumeration propNames = props.propertyNames();
+        while (propNames.hasMoreElements()) {
+            String name = (String) propNames.nextElement();
+            /* Is this a valid property name? */
+            ConfigParam param =
+                (ConfigParam) EnvironmentParams.SUPPORTED_PARAMS.get(name);
+
+            if (param == null) {
+
+                /* See if the parameter is a multi-value parameter. */
+                String mvParamName = ConfigParam.multiValueParamName(name);
+                param = (ConfigParam)
+                    EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName);
+                if (param == null) {
+                    throw new IllegalArgumentException
+                        (name +
+                         " is not a valid BDBJE environment configuration");
+                }
+            }
+
+            /*
+             * Only verify that the parameter is "for replication" if this is
+             * being validated on behalf of a FooConfig class, not a
+             * je.properties file.
+             */
+            if (verifyForReplication) {
+                if (forReplication) {
+                    if (!param.isForReplication()) {
+                        throw new IllegalArgumentException
+                            (name +
+                             " is not a replication environment " +
+                             "configuration and cannot be used in " +
+                             configClassName);
+                    }
+                } else {
+                    if (param.isForReplication()) {
+                        throw new IllegalArgumentException
+                            (name +
+                             " is a replication environment configuration" +
+                             " and cannot be used in " + configClassName);
+                    }
+                }
+            }
+
+            /* Is this a valid property value? */
+            param.validateValue(props.getProperty(name));
+        }
+    }
+
+    /**
+     * Apply the configurations specified in the je.properties file to override
+     * the programmatically set configuration values held in the property bag.
+     */
+    @SuppressWarnings("unchecked")
+    public static void applyFileConfig(File envHome,
+                                       Properties props,
+                                       boolean forReplication,
+                                       String errorClassName)
+        throws IllegalArgumentException {
+
+        File paramFile = null;
+        try {
+            Properties fileProps = new Properties();
+            if (envHome != null) {
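+                /*
+                 * envHome may name either the environment directory or a
+                 * properties file directly.
+                 */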
+                if (envHome.isFile()) {
+                    paramFile = envHome;
+                } else {
+                    paramFile = new File(envHome, PROPFILE_NAME);
+                }
+                FileInputStream fis = new FileInputStream(paramFile);
+                fileProps.load(fis);
+                fis.close();
+            }
+
+            /* Validate the existing file. */
+            validateProperties(fileProps,
+                               forReplication,
+                               errorClassName,
+                               false);  // verifyForReplication
+
+            /* Add them to the configuration object. */
+            Iterator iter = fileProps.entrySet().iterator();
+            while (iter.hasNext()) {
+                Map.Entry propPair = (Map.Entry) iter.next();
+                String name = (String) propPair.getKey();
+                String value = (String) propPair.getValue();
+                setConfigParam(props,
+                               name,
+                               value,
+                               false, /* don't need mutability, we're
+                                         initializing */
+                               false, /* value already validated when set in
+                                         config object */
+                               forReplication,
+                               false); /* verifyForReplication */
+            }
+        } catch (FileNotFoundException e) {
+
+            /*
+             * Klocwork - ok
+             * Eat the exception; it is okay if the file doesn't exist.
+             */
+        } catch (IOException e) {
+            IllegalArgumentException e2 = new IllegalArgumentException
+                ("An error occurred when reading " + paramFile);
+            e2.initCause(e);
+            throw e2;
+        }
+    }
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Set a configuration parameter. Check that the name is valid.
+     * If specified, also check that the value is valid. Value checking
+     * may be disabled for unit testing.
+     *
+     * @param props Property bag held within the configuration object.
+     */
+    public static void setConfigParam(Properties props,
+                                      String paramName,
+                                      String value,
+                                      boolean requireMutability,
+                                      boolean validateValue,
+                                      boolean forReplication,
+                                      boolean verifyForReplication)
+        throws IllegalArgumentException {
+
+        boolean isMVParam = false;
+
+        /* Is this a valid property name? */
+        ConfigParam param =
+            (ConfigParam) EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+
+        if (param == null) {
+            /* See if the parameter is a multi-value parameter. */
+            String mvParamName = ConfigParam.multiValueParamName(paramName);
+            param = (ConfigParam)
+                EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName);
+            if (param == null ||
+                !param.isMultiValueParam()) {
+                throw new IllegalArgumentException
+                    (paramName +
+                     " is not a valid BDBJE environment parameter");
+            }
+            isMVParam = true;
+            assert param.isMultiValueParam();
+        }
+
+        /*
+         * Only verify that the parameter is "for replication" if this is
+         * being validated on behalf of a FooConfig class, not a
+         * je.properties file.
+         */
+        if (verifyForReplication) {
+            if (forReplication) {
+                if (!param.isForReplication()) {
+                    throw new IllegalArgumentException
+                        (paramName +
+                         " is not a BDBJE replication configuration.");
+                }
+            } else {
+                if (param.isForReplication()) {
+                    throw new IllegalArgumentException
+                        (paramName +
+                         " is only available for BDBJE replication.");
+                }
+            }
+        }
+
+        /* Is this a mutable property? */
+        if (requireMutability && !param.isMutable()) {
+            throw new IllegalArgumentException
+                (paramName +
+                 " is not a mutable BDBJE environment configuration");
+        }
+
+        if (isMVParam) {
+            setVal(props, param, paramName, value, validateValue);
+        } else {
+            setVal(props, param, value, validateValue);
+        }
+    }
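+
+    /*
+     * Usage sketch (illustrative only; the parameter value is an arbitrary
+     * example): a configuration class such as EnvironmentConfig would
+     * typically route a user-supplied name/value pair through these helpers
+     * roughly as follows.
+     *
+     *   Properties props = new Properties();
+     *   DbConfigManager.setConfigParam(props,
+     *                                  "je.maxMemory",
+     *                                  "10000000",
+     *                                  false,  // requireMutability
+     *                                  true,   // validateValue
+     *                                  false,  // forReplication
+     *                                  false); // verifyForReplication
+     *   String stored = DbConfigManager.getConfigParam(props, "je.maxMemory");
+     */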
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Get the configuration value for the specified parameter, checking
+     * that the parameter name is valid.
+     * @param props Property bag held within the configuration object.
+     */
+    public static String getConfigParam(Properties props, String paramName)
+        throws IllegalArgumentException {
+
+        boolean isMVParam = false;
+
+        /* Is this a valid property name? */
+        ConfigParam param =
+            (ConfigParam) EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+
+        if (param == null) {
+
+            /* See if the parameter is a multi-value parameter. */
+            String mvParamName = ConfigParam.multiValueParamName(paramName);
+            param = (ConfigParam)
+                EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName);
+            if (param == null) {
+                throw new IllegalArgumentException
+                    (paramName +
+                     " is not a valid BDBJE environment configuration");
+            }
+            isMVParam = true;
+            assert param.isMultiValueParam();
+        } else if (param.isMultiValueParam()) {
+            throw new IllegalArgumentException
+                ("Use getMultiValueValues() to retrieve Multi-Value " +
+                 "parameter values.");
+        }
+
+        if (isMVParam) {
+            return DbConfigManager.getVal(props, param, paramName);
+        } else {
+            return DbConfigManager.getVal(props, param);
+        }
+    }
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Gets either the value stored in this configuration or the
+     * default value for this param.
+     */
+    public static String getVal(Properties props,
+                                ConfigParam param) {
+        String val = props.getProperty(param.getName());
+        if (val == null) {
+            val = param.getDefault();
+        }
+        return val;
+    }
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Gets either the value stored in this configuration or the
+     * default value for this param.
+     */
+    public static String getVal(Properties props,
+                                ConfigParam param,
+                                String paramName) {
+        String val = props.getProperty(paramName);
+        if (val == null) {
+            val = param.getDefault();
+        }
+        return val;
+    }
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Set and validate the value for the specified parameter.
+     */
+    public static void setVal(Properties props,
+                              ConfigParam param,
+                              String val,
+                              boolean validateValue)
+        throws IllegalArgumentException {
+
+        if (validateValue) {
+            param.validateValue(val);
+        }
+        props.setProperty(param.getName(), val);
+    }
+
+    /**
+     * Helper method for environment and replicator configuration classes.
+     * Set and validate the value for the specified parameter.
+     */
+    public static void setVal(Properties props,
+                              ConfigParam param,
+                              String paramName,
+                              String val,
+                              boolean validateValue)
+        throws IllegalArgumentException {
+
+        if (validateValue) {
+            param.validateValue(val);
+        }
+        props.setProperty(paramName, val);
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbEnvPool.java b/src/com/sleepycat/je/dbi/DbEnvPool.java
new file mode 100644
index 0000000000000000000000000000000000000000..0a526f5898be7d10204138d54fe68fbd9031ad57
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbEnvPool.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DbEnvPool.java,v 1.45 2008/01/07 14:28:48 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.latch.LatchSupport;
+
+/**
+ * Singleton collection of environments.  Responsible for environment open and
+ * close, supporting this from multiple threads by synchronizing on the pool.
+ *
+ * When synchronizing on two or more of the following objects the
+ * synchronization order must be as follows.  Synchronization is not performed
+ * in constructors, of course, because no other thread can access the object.
+ *
+ * Synchronization order:  Environment, DbEnvPool, EnvironmentImpl, Evictor
+ *
+ * Environment ctor                                 NOT synchronized
+ *   calls DbEnvPool.getEnvironment                 synchronized
+ *     creates new EnvironmentImpl                  NOT synchronized
+ *       calls RecoveryManager.recover,buildTree    NOT synchronized
+ *         calls Evictor.addEnvironment             synchronized
+ *
+ * EnvironmentImpl.reinit                           NOT synchronized
+ *   calls DbEnvPool.reinitEnvironment              synchronized
+ *     calls EnvironmentImpl.doReinit               synchronized
+ *       calls RecoveryManager.recover,buildTree    NOT synchronized
+ *         calls Evictor.addEnvironment             synchronized
+ *
+ * Environment.close                                synchronized
+ *   calls EnvironmentImpl.close                    NOT synchronized
+ *     calls DbEnvPool.closeEnvironment             synchronized
+ *       calls EnvironmentImpl.doClose              synchronized
+ *         calls Evictor.removeEnvironment          synchronized
+ *
+ * Environment.setMutableConfig                     synchronized
+ *   calls EnvironmentImpl.setMutableConfig         NOT synchronized
+ *     calls DbEnvPool.setMutableConfig             synchronized
+ *       calls EnvironmentImpl.doSetMutableConfig   synchronized
+ */
+public class DbEnvPool {
+    /* Singleton instance. */
+    private static DbEnvPool pool = new DbEnvPool();
+
+    /*
+     * Collection of environment handles, mapped by canonical directory
+     * name->EnvironmentImpl object.
+     */
+    private Map<String,EnvironmentImpl> envs;
+
+    /* Environments (subset of envs) that share the global cache. */
+    private Set<EnvironmentImpl> sharedCacheEnvs;
+
+    /**
+     * Enforce singleton behavior.
+     */
+    private DbEnvPool() {
+        envs = new HashMap<String,EnvironmentImpl>();
+        sharedCacheEnvs = new HashSet<EnvironmentImpl>();
+    }
+
+    /**
+     * Access the singleton instance.
+     */
+    public static DbEnvPool getInstance() {
+        return pool;
+    }
+
+    public synchronized int getNSharedCacheEnvironments() {
+        return sharedCacheEnvs.size();
+    }
+
+    private EnvironmentImpl getAnySharedCacheEnv() {
+        Iterator<EnvironmentImpl> iter = sharedCacheEnvs.iterator();
+        return iter.hasNext() ? iter.next() : null;
+    }
+
+    /**
+     * Find a single environment, used by Environment handles and by command
+     * line utilities.
+     */
+    public synchronized
+        EnvironmentImpl getEnvironment(File envHome,
+                                       EnvironmentConfig config,
+                                       boolean checkImmutableParams,
+                                       boolean openIfNeeded,
+                                       boolean replicationIntended)
+        throws DatabaseException {
+
+        String environmentKey = getEnvironmentMapKey(envHome);
+        EnvironmentImpl envImpl = envs.get(environmentKey);
+        if (envImpl != null) {
+            envImpl.checkIfInvalid();
+            assert envImpl.isOpen();
+            if (checkImmutableParams) {
+
+                /*
+                 * If a non-null configuration parameter was passed to the
+                 * Environment ctor and the underlying EnvironmentImpl already
+                 * exists, check that the configuration parameters specified
+                 * match those of the currently open environment. An exception
+                 * is thrown if the check fails.
+                 *
+                 * Don't do this check if we create the environment here
+                 * because the creation might modify the parameters, which
+                 * would create a Catch-22 in terms of validation.  For
+                 * example, je.maxMemory will be overridden if the JVM's -mx
+                 * flag is less than that setting, so the new resolved config
+                 * parameters won't be the same as the passed in config.
+                 */
+                envImpl.checkImmutablePropsForEquality(config);
+            }
+            /* Successful, increment reference count */
+            envImpl.incReferenceCount();
+        } else {
+            if (openIfNeeded) {
+
+                /*
+                 * If a shared cache is used, get another (any other, doesn't
+                 * matter which) environment that is sharing the global cache.
+                 */
+                EnvironmentImpl sharedCacheEnv = config.getSharedCache() ?
+                    getAnySharedCacheEnv() : null;
+
+                /*
+                 * Environment must be instantiated. If it can be created, the
+                 * configuration must have allowCreate set.  Note that the
+                 * environment is added to the SharedEvictor before the
+                 * EnvironmentImpl ctor returns, by RecoveryManager.buildTree.
+                 */
+                envImpl = new EnvironmentImpl
+                    (envHome, config, sharedCacheEnv, replicationIntended);
+
+                assert config.getSharedCache() == envImpl.getSharedCache();
+
+                /* Successful */
+                addEnvironment(envImpl);
+            }
+        }
+
+        return envImpl;
+    }
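+
+    /*
+     * Usage sketch (illustrative; see the class comment for the actual
+     * calling pattern): an Environment handle obtains its underlying
+     * EnvironmentImpl from this pool, and a second handle opened on the
+     * same directory shares that EnvironmentImpl, with only its reference
+     * count incremented.
+     *
+     *   EnvironmentImpl impl = DbEnvPool.getInstance().getEnvironment(
+     *       envHome, config,
+     *       true,    // checkImmutableParams: config was non-null
+     *       true,    // openIfNeeded
+     *       false);  // replicationIntended
+     *
+     *   // Environment.close eventually calls closeEnvironment, which
+     *   // decrements the reference count and removes the EnvironmentImpl
+     *   // from the pool only when the count drops to zero.
+     */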
+
+    /**
+     * Called by EnvironmentImpl.reinit to perform the reinit operation while
+     * synchronized on the DbEnvPool.
+     */
+    synchronized void reinitEnvironment(EnvironmentImpl envImpl,
+                                        boolean replicationIntended)
+        throws DatabaseException {
+
+        assert !envs.containsKey
+            (getEnvironmentMapKey(envImpl.getEnvironmentHome()));
+        assert !sharedCacheEnvs.contains(envImpl);
+
+        /*
+         * If a shared cache is used, get another (any other, doesn't
+         * matter which) environment that is sharing the global cache.
+         */
+        EnvironmentImpl sharedCacheEnv = envImpl.getSharedCache() ?
+            getAnySharedCacheEnv() : null;
+
+        envImpl.doReinit(replicationIntended, sharedCacheEnv);
+
+        /* Successful */
+        addEnvironment(envImpl);
+    }
+
+    /**
+     * Called by EnvironmentImpl.setMutableConfig to perform the
+     * setMutableConfig operation while synchronized on the DbEnvPool.
+     *
+     * In theory we shouldn't need to synchronize here when
+     * envImpl.getSharedCache() is false; however, we synchronize
+     * unconditionally to standardize the synchronization order and avoid
+     * accidental deadlocks.
+     */
+    synchronized void setMutableConfig(EnvironmentImpl envImpl,
+                                       EnvironmentMutableConfig mutableConfig)
+        throws DatabaseException {
+
+        envImpl.doSetMutableConfig(mutableConfig);
+        if (envImpl.getSharedCache()) {
+            resetSharedCache(envImpl.getMemoryBudget().getMaxMemory(),
+                             envImpl);
+        }
+    }
+
+    /**
+     * Called by EnvironmentImpl.close to perform the close operation while
+     * synchronized on the DbEnvPool.
+     */
+    synchronized void closeEnvironment(EnvironmentImpl envImpl,
+                                       boolean doCheckpoint,
+                                       boolean doCheckLeaks)
+        throws DatabaseException {
+
+        if (envImpl.decReferenceCount()) {
+            try {
+                envImpl.doClose(doCheckpoint, doCheckLeaks);
+            } finally {
+                removeEnvironment(envImpl);
+            }
+        }
+    }
+
+    /**
+     * Called by EnvironmentImpl.closeAfterRunRecovery to perform the close
+     * operation while synchronized on the DbEnvPool.
+     */
+    synchronized void closeEnvironmentAfterRunRecovery(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        try {
+            envImpl.doCloseAfterRunRecovery();
+        } finally {
+            removeEnvironment(envImpl);
+        }
+    }
+
+    /**
+     * Adds an EnvironmentImpl to the pool after it has been opened.  This
+     * method is called while synchronized.
+     */
+    private void addEnvironment(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        envImpl.incReferenceCount();
+        envs.put(getEnvironmentMapKey(envImpl.getEnvironmentHome()), envImpl);
+        if (envImpl.getSharedCache()) {
+            sharedCacheEnvs.add(envImpl);
+            assert envImpl.getEvictor().checkEnvs(sharedCacheEnvs);
+            resetSharedCache(-1, envImpl);
+        }
+    }
+
+    /**
+     * Removes an EnvironmentImpl from the pool after it has been closed.  This
+     * method is called while synchronized.  Note that the environment was
+     * removed from the SharedEvictor by EnvironmentImpl.shutdownEvictor.
+     */
+    private void removeEnvironment(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        String environmentKey =
+            getEnvironmentMapKey(envImpl.getEnvironmentHome());
+        boolean found = envs.remove(environmentKey) != null;
+
+        if (sharedCacheEnvs.remove(envImpl)) {
+            assert found && envImpl.getSharedCache();
+            assert envImpl.getEvictor().checkEnvs(sharedCacheEnvs);
+            if (sharedCacheEnvs.isEmpty()) {
+                envImpl.getEvictor().shutdown();
+            } else {
+                envImpl.getMemoryBudget().subtractCacheUsage();
+                resetSharedCache(-1, null);
+            }
+        } else {
+            assert !found || !envImpl.getSharedCache();
+        }
+
+        /*
+         * Latch notes may only be cleared when there is no possibility that
+         * any environment is open.
+         */
+        if (envs.isEmpty()) {
+            LatchSupport.clearNotes();
+        }
+    }
+
+    /**
+     * For unit testing only.
+     */
+    public synchronized void clear() {
+        envs.clear();
+    }
+
+    /* Use the canonical path name for a normalized environment key. */
+    private String getEnvironmentMapKey(File file)
+        throws DatabaseException {
+        try {
+            return file.getCanonicalPath();
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+    }
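+
+    /*
+     * For example (paths are illustrative), "/a/./envHome" and "/a/envHome/"
+     * both canonicalize to "/a/envHome", so Environment handles opened on
+     * either spelling resolve to the same map key and share one
+     * EnvironmentImpl.
+     */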
+
+    /**
+     * Resets the memory budget for all environments with a shared cache.
+     *
+     * @param newMaxMemory is the new total cache budget or is less than 0 if
+     * the total should remain unchanged.  A total greater than zero is given
+     * when it has changed via setMutableConfig.
+     *
+     * @param skipEnv is an environment that should not be reset, or null.
+     * Non-null is passed when an environment has already been reset because
+     * it was just created or the target of setMutableConfig.
+     */
+    private void resetSharedCache(long newMaxMemory, EnvironmentImpl skipEnv)
+        throws DatabaseException {
+
+        for (EnvironmentImpl envImpl : sharedCacheEnvs) {
+            if (envImpl != skipEnv) {
+                envImpl.getMemoryBudget().reset(newMaxMemory,
+                                                false /*newEnv*/,
+                                                envImpl.getConfigManager());
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbEnvState.java b/src/com/sleepycat/je/dbi/DbEnvState.java
new file mode 100644
index 0000000000000000000000000000000000000000..95273433a3bc5d7b6cd09aca90523e2c70c55df0
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbEnvState.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbEnvState.java,v 1.22.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * DbEnvState implements a typesafe enumeration of environment states
+ * and does state change validation.
+ */
+class DbEnvState {
+    private static final boolean DEBUG = false;
+
+    private String name;
+
+    /* Valid environment states. */
+    public static final DbEnvState INIT = new DbEnvState("initialized");
+    public static final DbEnvState OPEN = new DbEnvState("open");
+    public static final DbEnvState CLOSED = new DbEnvState("closed");
+    public static final DbEnvState INVALID = new DbEnvState("invalid");
+
+    /* Valid previous states, for state transition checking. */
+    public static final DbEnvState[] VALID_FOR_OPEN =   {INIT, CLOSED};
+    public static final DbEnvState[] VALID_FOR_CLOSE =  {INIT, OPEN, INVALID};
+    public static final DbEnvState[] VALID_FOR_REMOVE = {INIT, CLOSED};
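+
+    /*
+     * Transition-check sketch (an assumed calling pattern, for illustration
+     * only): the holder of the current state validates a transition by
+     * asking that state whether it appears in the valid-previous-state list
+     * for the operation.  For example, before opening:
+     *
+     *   DbEnvState current = DbEnvState.INIT;
+     *   current.checkState(DbEnvState.VALID_FOR_OPEN, DbEnvState.OPEN);
+     *   // succeeds for INIT or CLOSED; throws DatabaseException otherwise
+     */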
+
+    DbEnvState(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    /* Check for valid state transitions. */
+    void checkState(DbEnvState[] validPrevStates,
+                    DbEnvState newState)
+        throws DatabaseException {
+
+        if (DEBUG) {
+            System.out.println("newState = " + newState +
+                               " currentState = " + name);
+        }
+        boolean transitionOk = false;
+        for (int i = 0; i < validPrevStates.length; i++) {
+            if (this == validPrevStates[i]) {
+                transitionOk = true;
+                break;
+            }
+        }
+        if (!transitionOk) {
+            throw new DatabaseException("Can't go from environment state " +
+                                        toString() +
+                                        " to " +
+                                        newState.toString());
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbTree.java b/src/com/sleepycat/je/dbi/DbTree.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e67e49e1254253cccabb49aa99e072ad82cdaf8
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbTree.java
@@ -0,0 +1,1641 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbTree.java,v 1.206.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import static com.sleepycat.je.log.entry.DbOperationType.CREATE;
+import static com.sleepycat.je.log.entry.DbOperationType.RENAME;
+import static com.sleepycat.je.log.entry.DbOperationType.REMOVE;
+import static com.sleepycat.je.log.entry.DbOperationType.TRUNCATE;
+
+import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseNotFoundException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.dbi.CursorImpl.SearchMode;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.NameLN;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeUtils;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * DbTree represents the database directory for this environment. DbTree is
+ * itself implemented through two databases. The nameDatabase maps
+ * databaseName-> an internal databaseId. The idDatabase maps
+ * databaseId->DatabaseImpl.
+ *
+ * For example, suppose we have two databases, foo and bar. We have the
+ * following structure:
+ *
+ *           nameDatabase                          idDatabase
+ *               IN                                    IN
+ *                |                                     |
+ *               BIN                                   BIN
+ *    +-------------+--------+            +---------------+--------+
+ *  .               |        |            .               |        |
+ * NameLNs         NameLN    NameLN      MapLNs for   MapLN        MapLN
+ * for internal    key=bar   key=foo     internal dbs key=53       key=79
+ * dbs             data=     data=                    data=        data=
+ *                 dbId79    dbId53                   DatabaseImpl DatabaseImpl
+ *                                                        |            |
+ *                                                   Tree for foo  Tree for bar
+ *                                                        |            |
+ *                                                     root IN       root IN
+ *
+ * Databases, Cursors, the cleaner, compressor, and other entities have
+ * references to DatabaseImpls. It's important that object identity is properly
+ * maintained, and that all constituents reference the same DatabaseImpl for
+ * the same db, lest they develop disparate views of the in-memory database;
+ * corruption would ensue. To ensure that, all entities must obtain their
+ * DatabaseImpl by going through the idDatabase.
+ *
+ * DDL type operations such as create, rename, remove and truncate get their
+ * transactional semantics by transactionally locking the NameLN appropriately.
+ * A read-lock on the NameLN, called a handle lock, is maintained for all DBs
+ * opened via the public API (openDatabase).  This prevents them from being
+ * renamed or removed while open.
+ *
+ * However, for internal database operations, no handle lock on the NameLN is
+ * acquired and MapLNs are locked with short-lived non-transactional Lockers.
+ * An entity that is trying to get a reference to the DatabaseImpl gets a short
+ * lived read lock just for the fetch of the MapLN. A write lock on the MapLN
+ * is taken when the database is created, deleted, or when the MapLN is
+ * evicted. (see DatabaseImpl.isInUse())
+ *
+ * The nameDatabase operates pretty much as a regular application database in
+ * terms of eviction and recovery. The idDatabase requires special treatment
+ * for both eviction and recovery.
+ *
+ * The issues around eviction of the idDatabase center on the need to ensure
+ * that there are no other current references to the DatabaseImpl other than
+ * that held by the mapLN. The presence of a current reference would both make
+ * the DatabaseImpl not GC'able, and more importantly, would lead to object
+ * identity confusion later on. For example, if the MapLN is evicted while
+ * there is a current reference to its DatabaseImpl, and then refetched, there
+ * will be two in-memory versions of the DatabaseImpl. Since locks on the
+ * idDatabase are short lived, DatabaseImpl.useCount acts as a reference count
+ * of active current references. DatabaseImpl.useCount must be modified and
+ * read in conjunction with appropriate locking on the MapLN. See
+ * DatabaseImpl.isInUse() for details.
+ *
+ * This reference count checking is only needed when the entire MapLN is
+ * evicted. It's possible to evict only the root IN of the database in
+ * question, since that doesn't interfere with the DatabaseImpl object
+ * identity.
+ */
+public class DbTree implements Loggable {
+
+    /* The id->DatabaseImpl tree is always id 0 */
+    public static final DatabaseId ID_DB_ID = new DatabaseId(0);
+    /* The name->id tree is always id 1 */
+    public static final DatabaseId NAME_DB_ID = new DatabaseId(1);
+
+    /* Internal databases - the database mapping tree and utilization info. */
+    private static final String ID_DB_NAME = "_jeIdMap";
+    private static final String NAME_DB_NAME = "_jeNameMap";
+    public static final String UTILIZATION_DB_NAME = "_jeUtilization";
+    public static final String REP_OPERATIONS_NAME = "_jeRepOp";
+    public static final String VLSN_MAP_DB_NAME = "_vlsnMapDb";
+
+    /* Reserved database names. */
+    private static final String[] RESERVED_DB_NAMES = {
+        ID_DB_NAME,
+        NAME_DB_NAME,
+        UTILIZATION_DB_NAME,
+        REP_OPERATIONS_NAME,
+        VLSN_MAP_DB_NAME
+    };
+
+    /*
+     * Database Ids:
+     * We need to ensure that local and replicated databases use different
+     * number spaces for their ids, so there can't be any possible conflicts.
+     * Local, non replicated databases use positive values, replicated
+     * databases use negative values.  -1 thru -5 are reserved for future
+     * special use.
+     */
+    public static final int NEG_DB_ID_START = -256;
+    private AtomicInteger lastAllocatedLocalDbId;
+    private AtomicInteger lastAllocatedReplicatedDbId;
+
+    private DatabaseImpl idDatabase;          // map db ids -> databases
+    private DatabaseImpl nameDatabase;        // map names -> dbIds
+
+    /* The flags byte holds a variety of attributes. */
+    private byte flags;
+
+    /*
+     * The replicated bit is set for environments that are opened with
+     * replication. The behavior is as follows:
+     *
+     * Env is     Env is     Persistent          Follow-on action
+     * replicated brand new  value of
+     *                       DbTree.isReplicated
+     *
+     * 0             1         n/a               replicated bit = 0;
+     * 0             0           0               none
+     * 0             0           1               illegal, exception thrown
+     * 1             1          n/a              replicated bit = 1
+     * 1             0           0               require config of all dbs
+     * 1             0           1               none
+     */
+    private static final byte REPLICATED_BIT = 0x1;
+
+    private EnvironmentImpl envImpl;
+
+    /**
+     * Create a dbTree from the log.
+     */
+    public DbTree()
+        throws DatabaseException {
+                
+        this.envImpl = null;
+        idDatabase = new DatabaseImpl();
+        idDatabase.setDebugDatabaseName(ID_DB_NAME);
+
+        /* 
+         * The default is false, but just in case we ever turn it on globally
+         * for testing, this forces it off.
+         */
+        idDatabase.clearKeyPrefixing();
+        nameDatabase = new DatabaseImpl();
+        nameDatabase.clearKeyPrefixing();
+        nameDatabase.setDebugDatabaseName(NAME_DB_NAME);
+
+        /* These sequences are initialized by readFromLog. */
+        lastAllocatedLocalDbId = new AtomicInteger();
+        lastAllocatedReplicatedDbId = new AtomicInteger();
+    }
+
+    /**
+     * Create a new dbTree for a new environment.
+     */
+    public DbTree(EnvironmentImpl env, boolean replicationIntended)
+        throws DatabaseException {
+
+        this.envImpl = env;
+
+        /*
+         * Sequences must be initialized before any databases are created.  0
+         * and 1 are reserved, so we start at 2. We've set aside -1 through
+         * NEG_DB_ID_START for future use.
+         */
+        lastAllocatedLocalDbId = new AtomicInteger(1);
+        lastAllocatedReplicatedDbId = new AtomicInteger(NEG_DB_ID_START);
+
+        /* The id database is local */
+        DatabaseConfig idConfig = new DatabaseConfig();
+        DbInternal.setDbConfigReplicated(idConfig, false /* replicated */);
+
+        /* 
+         * The default is false, but just in case we ever turn it on globally
+         * for testing, this forces it off.
+         */
+        idConfig.setKeyPrefixing(false);
+        idDatabase = new DatabaseImpl(ID_DB_NAME,
+                                      new DatabaseId(0),
+                                      env,
+                                      idConfig);
+        /* Force a reset if enabled globally. */
+        idDatabase.clearKeyPrefixing();
+
+        DatabaseConfig nameConfig = new DatabaseConfig();
+        nameConfig.setKeyPrefixing(false);
+        nameDatabase = new DatabaseImpl(NAME_DB_NAME,
+                                        new DatabaseId(1),
+                                        env,
+                                        nameConfig);
+        /* Force a reset if enabled globally. */
+        nameDatabase.clearKeyPrefixing();
+
+        if (replicationIntended) {
+            setIsReplicated();
+        }
+    }
+
+    /**
+     * The last allocated local and replicated db ids are used for ckpts.
+     */
+    public int getLastLocalDbId() {
+        return lastAllocatedLocalDbId.get();
+    }
+
+    public int getLastReplicatedDbId() {
+        return lastAllocatedReplicatedDbId.get();
+    }
+
+    /**
+     * We get a new database id of the appropriate kind when creating a new
+     * database.
+     */
+    private int getNextLocalDbId() {
+        return lastAllocatedLocalDbId.incrementAndGet();
+    }
+
+    private int getNextReplicatedDbId() {
+        return lastAllocatedReplicatedDbId.decrementAndGet();
+    }
+
+    /**
+     * Initialize the db ids, from recovery.
+     */
+    public void setLastDbId(int lastReplicatedDbId, int lastLocalDbId) {
+        lastAllocatedReplicatedDbId.set(lastReplicatedDbId);
+        lastAllocatedLocalDbId.set(lastLocalDbId);
+    }
+
+    /**
+     * @return true if this id is for a replicated db.
+     */
+    private boolean isReplicatedId(int id) {
+        return id < NEG_DB_ID_START;
+    }
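+
+    /*
+     * Sketch of the two id number spaces (values shown are what the
+     * sequences above produce for a newly created environment):
+     *
+     *   getNextLocalDbId()       ->    2,    3,    4, ...
+     *   getNextReplicatedDbId()  -> -257, -258, -259, ...
+     *   isReplicatedId(-257)     -> true
+     *   isReplicatedId(3)        -> false
+     *
+     * Ids between -1 and NEG_DB_ID_START are never handed out by either
+     * sequence.
+     */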
+
+    /* 
+     * Only set the replicated db id if the replayDbId represents a
+     * newer, later value in the replication stream. If the replayDbId is
+     * earlier than this node's lastAllocatedReplicatedDbId, don't bother
+     * updating the sequence.
+     */
+    public void updateFromReplay(DatabaseId replayDbId) {
+
+        int replayVal = replayDbId.getId();
+
+        assert replayVal < 0 :
+            "replay db id is unexpectedly positive " + replayDbId;
+
+        while (true) {
+            int currentVal = lastAllocatedReplicatedDbId.get();
+            if (replayVal < currentVal) {
+                /* 
+                 * This replayDbId is newer than any other replicated db id
+                 * known by this node.
+                 */
+                boolean ok = lastAllocatedReplicatedDbId.weakCompareAndSet
+                    (currentVal, replayVal);
+                if (ok) {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+    }
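+
+    /*
+     * Example trace (values are illustrative): if this node's
+     * lastAllocatedReplicatedDbId is -300 and a replayDbId of -310 arrives,
+     * the loop above advances the sequence to -310; a replayDbId of -290 is
+     * ignored because it is not newer (more negative) than -300.
+     */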
+
+    /**
+     * Initialize the db tree during recovery, after instantiating the tree
+     * from the log.
+     * a. set up references to the environment impl
+     * b. check for replication rules.
+     */
+    void initExistingEnvironment(EnvironmentImpl envImpl,
+                                 boolean replicationIntended)
+        throws DatabaseException {
+
+        if (replicationIntended) {
+            if (!isReplicated()) {
+                throw new UnsupportedOperationException
+                    ("This environment must be converted for replication. " +
+                     "Conversion isn't supported yet.");
+            }
+        } else {
+            if (isReplicated() && (!envImpl.isReadOnly())) {
+                throw new DatabaseException
+                    ("This environment was previously opened for replication."+
+                     " It cannot be re-opened in read/write mode for" +
+                     " standalone operation.");
+            }
+        }
+
+        this.envImpl = envImpl;
+        idDatabase.setEnvironmentImpl(envImpl);
+        nameDatabase.setEnvironmentImpl(envImpl);
+    }
+
+    /**
+     * Creates a new database object given a database name.
+     *
+     * Increments the use count of the new DB to prevent it from being evicted.
+     * releaseDb should be called when the returned object is no longer used,
+     * to allow it to be evicted.  See DatabaseImpl.isInUse.  [#13415]
+     */
+    public DatabaseImpl createDb(Locker locker,
+                                 String databaseName,
+                                 DatabaseConfig dbConfig,
+                                 Database databaseHandle)
+        throws DatabaseException {
+
+        return doCreateDb(locker,
+                          databaseName,
+                          dbConfig,
+                          databaseHandle,
+                          null,  // replicatedLN
+                          null); // repContext, to be decided by new db
+    }
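+
+    /*
+     * Usage sketch (an assumed calling pattern; the database name is an
+     * arbitrary example): callers pair the use-count increment performed by
+     * createDb with releaseDb once they are done with the returned
+     * DatabaseImpl, so its MapLN remains evictable.
+     *
+     *   DatabaseImpl dbImpl = dbTree.createDb(locker, "fooDb", dbConfig,
+     *                                         null);  // no Database handle
+     *   try {
+     *       // ... operate on dbImpl ...
+     *   } finally {
+     *       dbTree.releaseDb(dbImpl);
+     *   }
+     */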
+
+    /**
+     * Create a database for internal use that will never be replicated.
+     */
+    public DatabaseImpl createInternalDb(Locker locker,
+                                         String databaseName,
+                                         DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        DbInternal.setDbConfigReplicated(dbConfig, false);
+        /* Force all internal databases to not use key prefixing. */
+        dbConfig.setKeyPrefixing(false);
+        DatabaseImpl ret = doCreateDb(locker,
+                                      databaseName,
+                                      dbConfig,
+                                      null,  // databaseHandle,
+                                      null,  // replicatedLN
+                                      ReplicationContext.NO_REPLICATE);
+        /* Force a reset if enabled globally. */
+        ret.clearKeyPrefixing();
+        return ret;
+    }
+
+    /**
+     * Create a replicated database on this client node.
+     */
+    public DatabaseImpl createClientDb(Locker locker,
+                                       String databaseName,
+                                       DatabaseConfig dbConfig,
+                                       NameLN replicatedLN,
+                                       ReplicationContext repContext)
+        throws DatabaseException {
+
+        return doCreateDb(locker,
+                          databaseName,
+                          dbConfig,
+                          null, // databaseHandle
+                          replicatedLN,
+                          repContext);
+    }
+
+    /**
+     * Create a database.
+     *
+     * Increments the use count of the new DB to prevent it from being evicted.
+     * releaseDb should be called when the returned object is no longer used,
+     * to allow it to be evicted.  See DatabaseImpl.isInUse.  [#13415]
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    private synchronized DatabaseImpl doCreateDb(Locker nameLocker,
+                                                 String databaseName,
+                                                 DatabaseConfig dbConfig,
+                                                 Database databaseHandle,
+                                                 NameLN replicatedLN,
+                                                 ReplicationContext repContext)
+        throws DatabaseException {
+
+        /* Create a new database object. */
+        DatabaseId newId = null;
+        if (replicatedLN != null) {
+
+            /*
+             * This database was created on a master node and is being
+             * propagated to this client node.
+             */
+            newId = replicatedLN.getId();
+        } else {
+
+            /*
+             * This database has been created locally, either because this is
+             * a non-replicated node or this is the replicated group master.
+             */
+            if (envImpl.isReplicated() &&
+                DbInternal.getDbConfigReplicated(dbConfig)) {
+                newId = new DatabaseId(getNextReplicatedDbId());
+            } else {
+                newId = new DatabaseId(getNextLocalDbId());
+            }
+        }
+
+        DatabaseImpl newDb =
+            new DatabaseImpl(databaseName, newId, envImpl, dbConfig);
+        CursorImpl idCursor = null;
+        CursorImpl nameCursor = null;
+        boolean operationOk = false;
+        Locker idDbLocker = null;
+        try {
+            /* Insert it into name -> id db. */
+            nameCursor = new CursorImpl(nameDatabase, nameLocker);
+            LN nameLN = null;
+            if (replicatedLN != null) {
+                nameLN = replicatedLN;
+            } else {
+                nameLN = new NameLN(newId, envImpl, newDb.isReplicated());
+            }
+
+            ReplicationContext useRepContext = repContext;
+            if (repContext == null) {
+                useRepContext = newDb.getOperationRepContext(CREATE);
+            }
+            nameCursor.putLN(databaseName.getBytes("UTF-8"),// key
+                             nameLN,
+                             false,                         // allowDuplicates
+                             useRepContext);
+
+            /*
+             * If this is a non-handle use, no need to record any handle locks.
+             */
+            if (databaseHandle != null) {
+                nameLocker.addToHandleMaps(Long.valueOf(nameLN.getNodeId()),
+                                           databaseHandle);
+            }
+
+            /* Insert it into id -> name db, in auto commit mode. */
+            idDbLocker = BasicLocker.createBasicLocker(envImpl);
+            idCursor = new CursorImpl(idDatabase, idDbLocker);
+            idCursor.putLN(newId.getBytes(), // key
+                           new MapLN(newDb), // ln
+                           false,            // allowDuplicates
+                           ReplicationContext.NO_REPLICATE);
+            /* Increment DB use count with lock held. */
+            newDb.incrementUseCount();
+            operationOk = true;
+        } catch (UnsupportedEncodingException UEE) {
+            throw new DatabaseException(UEE);
+        } finally {
+            if (idCursor != null) {
+                idCursor.close();
+            }
+
+            if (nameCursor != null) {
+                nameCursor.close();
+            }
+
+            if (idDbLocker != null) {
+                idDbLocker.operationEnd(operationOk);
+            }
+        }
+
+        return newDb;
+    }
+
+    /**
+     * Check deferred write settings before writing the MapLN.
+     * @param db the database represented by this MapLN
+     */
+    public void optionalModifyDbRoot(DatabaseImpl db)
+        throws DatabaseException {
+        
+        if (db.isDeferredWriteMode()) {
+            return;
+        }
+
+        modifyDbRoot(db);
+    }
+
+    /**
+     * Write the MapLN to disk.
+     * @param db the database represented by this MapLN
+     */
+    public void modifyDbRoot(DatabaseImpl db)
+        throws DatabaseException {
+
+        modifyDbRoot(db, DbLsn.NULL_LSN /*ifBeforeLsn*/, true /*mustExist*/);
+    }
+
+    /**
+     * Write a MapLN to the log in order to:
+     *  - propagate a root change
+     *  - save per-db utilization information
+     *  - save database config information.
+     * Any MapLN writes must be done through this method, in order to ensure
+     * that the root latch is taken, and updates to the rootIN are properly
+     * safeguarded. See MapLN.java for more detail.
+     *
+     * @param db the database whose root is held by this MapLN
+     *
+     * @param ifBeforeLsn if argument is not NULL_LSN, only do the write if
+     * this MapLN's current LSN is before ifBeforeLsn.
+     *
+     * @param mustExist if true, throw DatabaseException if the DB does not
+     * exist; if false, silently do nothing.
+     */
+    public void modifyDbRoot(DatabaseImpl db,
+                             long ifBeforeLsn,
+                             boolean mustExist)
+        throws DatabaseException {
+
+        if (db.getId().equals(ID_DB_ID) ||
+            db.getId().equals(NAME_DB_ID)) {
+            envImpl.logMapTreeRoot();
+        } else {
+            DatabaseEntry keyDbt = new DatabaseEntry(db.getId().getBytes());
+
+            /*
+             * Retry indefinitely in the face of lock timeouts since the
+             * lock on the MapLN is only supposed to be held for short
+             * periods.
+             */
+            while (true) {
+                Locker idDbLocker = null;
+                CursorImpl cursor = null;
+                boolean operationOk = false;
+                try {
+                    idDbLocker = BasicLocker.createBasicLocker(envImpl);
+                    cursor = new CursorImpl(idDatabase, idDbLocker);
+                    boolean searchOk = (cursor.searchAndPosition
+                                        (keyDbt, new DatabaseEntry(),
+                                         SearchMode.SET, LockType.WRITE) &
+                                        CursorImpl.FOUND) != 0;
+                    if (!searchOk) {
+                        if (mustExist) {
+                            throw new DatabaseException(
+                                "can't find database " + db.getId());
+                        } else {
+                            /* Do nothing silently. */
+                            break;
+                        }
+                    }
+                    /* Check BIN LSN while latched. */
+                    if (ifBeforeLsn == DbLsn.NULL_LSN ||
+                        DbLsn.compareTo
+                            (cursor.getBIN().getLsn(cursor.getIndex()),
+                             ifBeforeLsn) < 0) {
+                        MapLN mapLN = (MapLN) cursor.getCurrentLNAlreadyLatched
+                            (LockType.WRITE);
+                        assert mapLN != null; /* Should be locked. */
+                        /* Perform rewrite. */
+                        RewriteMapLN writeMapLN = new RewriteMapLN(cursor);
+                        mapLN.getDatabase().getTree().
+                            withRootLatchedExclusive(writeMapLN);
+                        operationOk = true;
+                    }
+                    break;
+                } catch (DeadlockException DE) {
+                    /* Continue loop and retry. */
+                } finally {
+                    if (cursor != null) {
+                        cursor.releaseBIN();
+                        cursor.close();
+                    }
+                    if (idDbLocker != null) {
+                        idDbLocker.operationEnd(operationOk);
+                    }
+                }
+            }
+        }
+    }
+
+    private static class RewriteMapLN implements WithRootLatched {
+        private CursorImpl cursor;
+
+        RewriteMapLN(CursorImpl cursor) {
+            this.cursor = cursor;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            DatabaseEntry dataDbt = new DatabaseEntry(new byte[0]);
+            cursor.putCurrent(dataDbt,
+                              null,  // foundKey
+                              null,  // foundData
+                              ReplicationContext.NO_REPLICATE);
+            return null;
+        }
+    }
+
+    /*
+     * Helper for database operations. This method positions a cursor
+     * on the NameLN that represents this database and write locks it.
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    private NameLockResult lockNameLN(Locker locker,
+                                      String databaseName,
+                                      String action)
+        throws DatabaseException {
+
+        /*
+         * We have to return both a cursor on the naming tree and a
+         * reference to the found DatabaseImpl.
+         */
+        NameLockResult result = new NameLockResult();
+
+        /* Find the existing DatabaseImpl and establish a cursor. */
+        result.dbImpl = getDb(locker, databaseName, null);
+        if (result.dbImpl == null) {
+            throw new DatabaseNotFoundException
+                ("Attempted to " + action + " non-existent database " +
+                 databaseName);
+        }
+        boolean success = false;
+        try {
+            result.nameCursor = new CursorImpl(nameDatabase, locker);
+
+            /* Position the cursor at the specified NameLN. */
+            DatabaseEntry key =
+                new DatabaseEntry(databaseName.getBytes("UTF-8"));
+            /* See [#16210]. */
+            boolean found =
+                (result.nameCursor.searchAndPosition(key, null, SearchMode.SET,
+                                                     LockType.WRITE) &
+                 CursorImpl.FOUND) != 0;
+            if (!found) {
+                result.nameCursor.releaseBIN();
+                result.nameCursor.close();
+                result.nameCursor = null;
+                return result;
+            }
+
+            /* Call getCurrentLN to write lock the nameLN. */
+            result.nameLN = (NameLN)
+                result.nameCursor.getCurrentLNAlreadyLatched(LockType.WRITE);
+            assert result.nameLN != null; /* Should be locked. */
+
+            /*
+             * Check the open handle count after we have the write lock and no
+             * other transactions can open.
+             */
+            int handleCount = result.dbImpl.getReferringHandleCount();
+            if (handleCount > 0) {
+                throw new DatabaseException("Can't " + action + " database " +
+                                            databaseName + ", " + handleCount +
+                                            " open Dbs exist");
+            }
+            success = true;
+        } catch (UnsupportedEncodingException UEE) {
+            throw new DatabaseException(UEE);
+        } finally {
+            if (!success) {
+                releaseDb(result.dbImpl);
+                if (result.nameCursor != null) {
+                    result.nameCursor.releaseBIN();
+                    result.nameCursor.close();
+                }
+            }
+        }
+
+        return result;
+    }
+
+    private static class NameLockResult {
+        CursorImpl nameCursor;
+        DatabaseImpl dbImpl;
+        NameLN nameLN;
+    }
+
+    /**
+     * Return true if the operation succeeded, false otherwise.
+     */
+    public boolean dbRename(Locker locker,
+                            String databaseName,
+                            String newName)
+        throws DatabaseException {
+
+        CursorImpl nameCursor = null;
+        NameLockResult result = lockNameLN(locker, databaseName, "rename");
+        try {
+            nameCursor = result.nameCursor;
+            if (nameCursor == null) {
+                return false;
+            } else {
+
+                /*
+                 * Rename simply deletes the one entry in the naming
+                 * tree and replaces it with a new one. Remove the
+                 * oldName->dbId entry and insert newName->dbId.
+                 */
+                nameCursor.latchBIN();
+                nameCursor.delete
+                    (result.dbImpl.getOperationRepContext(RENAME));
+                nameCursor.putLN(newName.getBytes("UTF-8"),
+                                 new NameLN(result.dbImpl.getId(),
+                                            envImpl,
+                                            result.dbImpl.isReplicated()),
+                                 false,  // allowDuplicates
+                                 result.dbImpl.getOperationRepContext(RENAME));
+                result.dbImpl.setDebugDatabaseName(newName);
+                return true;
+            }
+        } catch (UnsupportedEncodingException UEE) {
+            throw new DatabaseException(UEE);
+        } finally {
+            releaseDb(result.dbImpl);
+            if (nameCursor != null) {
+                nameCursor.releaseBIN();
+                nameCursor.close();
+            }
+        }
+    }
+
+    /**
+     * Remove the database by deleting the nameLN.  Does nothing if the
+     * non-null checkId argument does not match the database identified by
+     * databaseName.  Does nothing if the database name does not exist.
+     */
+    public void dbRemove(Locker locker,
+                         String databaseName,
+                         DatabaseId checkId)
+        throws DatabaseException {
+
+        CursorImpl nameCursor = null;
+        NameLockResult result = lockNameLN(locker, databaseName, "remove");
+        try {
+            nameCursor = result.nameCursor;
+            if ((nameCursor == null) ||
+                (checkId != null &&
+                 !checkId.equals(result.nameLN.getId()))) {
+                return;
+            } else {
+
+                /*
+                 * Delete the NameLN. There's no need to mark any Database
+                 * handle invalid, because the handle must be closed when we
+                 * take action and any further use of the handle will re-look
+                 * up the database.
+                 */
+                nameCursor.latchBIN();
+                nameCursor.delete
+                    (result.dbImpl.getOperationRepContext(REMOVE));
+
+                /*
+                 * Schedule database for final deletion during commit. This
+                 * should be the last action taken, since this will take
+                 * effect immediately for non-txnal lockers.
+                 *
+                 * Do not call releaseDb here on result.dbImpl, since that is
+                 * taken care of by markDeleteAtTxnEnd.
+                 */
+                locker.markDeleteAtTxnEnd(result.dbImpl, true);
+            }
+        } finally {
+            if (nameCursor != null) {
+                nameCursor.releaseBIN();
+                nameCursor.close();
+            }
+        }
+    }
+
+    /**
+     * To truncate, remove the database named by databaseName and
+     * create a new database in its place.
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     *
+     * @param returnCount if true, must return the count of records in the
+     * database, which can be an expensive option.
+     */
+    public long truncate(Locker locker,
+                         String databaseName,
+                         boolean returnCount)
+        throws DatabaseException {
+
+        CursorImpl nameCursor = null;
+        Locker idDbLocker = null;
+        NameLockResult result = lockNameLN(locker, databaseName, "truncate");
+        try {
+            nameCursor = result.nameCursor;
+            if (nameCursor == null) {
+                return 0;
+            } else {
+
+                /*
+                 * Make a new database with an empty tree. Make the nameLN
+                 * refer to the id of the new database. If this database is
+                 * replicated, the new one should also be replicated, and vice
+                 * versa.
+                 */
+                DatabaseImpl oldDb = result.dbImpl;
+                DatabaseId newId = null;
+                if (isReplicatedId(oldDb.getId().getId())) {
+                    newId = new DatabaseId(getNextReplicatedDbId());
+                } else {
+                    newId = new DatabaseId(getNextLocalDbId());
+                }
+
+                DatabaseImpl newDb = oldDb.cloneDatabase();
+                newDb.incrementUseCount();
+                newDb.setId(newId);
+                newDb.setTree(new Tree(newDb));
+
+                /*
+                 * Insert the new MapLN into the id tree. Do not use
+                 * a transaction on the id database, because we cannot
+                 * hold long term locks on the mapLN.
+                 */
+                CursorImpl idCursor = null;
+                boolean operationOk = false;
+                try {
+                    idDbLocker = BasicLocker.createBasicLocker(envImpl);
+                    idCursor = new CursorImpl(idDatabase, idDbLocker);
+                    idCursor.putLN(newId.getBytes(), // key
+                                   new MapLN(newDb), // ln
+                                   false,            // allowDuplicates
+                                   ReplicationContext.NO_REPLICATE);
+                    operationOk = true;
+                } finally {
+                    if (idCursor != null) {
+                        idCursor.close();
+                    }
+
+                    if (idDbLocker != null) {
+                        idDbLocker.operationEnd(operationOk);
+                    }
+                }
+                result.nameLN.setId(newDb.getId());
+
+                /* If required, count the number of records in the database. */
+                long recordCount = 0;
+                if (returnCount) {
+                    recordCount = oldDb.count();
+                }
+
+                /* log the nameLN. */
+                DatabaseEntry dataDbt = new DatabaseEntry(new byte[0]);
+                nameCursor.putCurrent(dataDbt,
+                                      null,  // foundKey
+                                      null,  // foundData
+                                      oldDb.getOperationRepContext(TRUNCATE));
+
+                /*
+                 * Marking the lockers should be the last action, since it
+                 * takes effect immediately for non-txnal lockers.
+                 *
+                 * Do not call releaseDb here on oldDb or newDb, since that is
+                 * taken care of by markDeleteAtTxnEnd.
+                 */
+
+                /* Schedule old database for deletion if txn commits. */
+                locker.markDeleteAtTxnEnd(oldDb, true);
+
+                /* Schedule new database for deletion if txn aborts. */
+                locker.markDeleteAtTxnEnd(newDb, false);
+
+                return recordCount;
+            }
+        } finally {
+            if (nameCursor != null) {
+                nameCursor.releaseBIN();
+                nameCursor.close();
+            }
+        }
+    }
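+
+    /*
+     * Outcome sketch for the truncate above (the database name is an
+     * arbitrary example):
+     *
+     *   long count = dbTree.truncate(txnLocker, "fooDb", true); // returnCount
+     *   // On commit, the old database (and its MapLN/tree) is deleted and
+     *   // the NameLN points at the new, empty database.
+     *   // On abort, the new database is deleted and the original data
+     *   // remains intact under the old id.
+     */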
+
+    /*
+     * Remove the mapLN that refers to this database.
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    void deleteMapLN(DatabaseId id)
+        throws DatabaseException {
+
+        /*
+         * Retry indefinitely in the face of lock timeouts since the lock on
+         * the MapLN is only supposed to be held for short periods.
+         */
+        boolean done = false;
+        while (!done) {
+            Locker idDbLocker = null;
+            CursorImpl idCursor = null;
+            boolean operationOk = false;
+            try {
+                idDbLocker = BasicLocker.createBasicLocker(envImpl);
+                idCursor = new CursorImpl(idDatabase, idDbLocker);
+                boolean found =
+                    (idCursor.searchAndPosition
+                        (new DatabaseEntry(id.getBytes()), null,
+                         SearchMode.SET, LockType.WRITE) &
+                     CursorImpl.FOUND) != 0;
+                if (found) {
+
+                    /*
+                     * If the database is in use by an internal JE operation
+                     * (checkpointing, cleaning, etc), release the lock (done
+                     * in the finally block) and retry.  [#15805]
+                     */
+                    MapLN mapLN = (MapLN)
+                        idCursor.getCurrentLNAlreadyLatched(LockType.WRITE);
+                    assert mapLN != null;
+                    DatabaseImpl dbImpl = mapLN.getDatabase();
+                    if (!dbImpl.isInUseDuringDbRemove()) {
+                        idCursor.latchBIN();
+                        idCursor.delete(ReplicationContext.NO_REPLICATE);
+                        done = true;
+                    }
+                } else {
+                    /* MapLN does not exist. */
+                    done = true;
+                }
+                operationOk = true;
+            } catch (DeadlockException DE) {
+                /* Continue loop and retry. */
+            } finally {
+                if (idCursor != null) {
+                    idCursor.releaseBIN();
+                    idCursor.close();
+                }
+                if (idDbLocker != null) {
+                    idDbLocker.operationEnd(operationOk);
+                }
+            }
+        }
+    }
+
+    /**
+     * Get a database object given a database name.  Increments the use count
+     * of the given DB to prevent it from being evicted.  releaseDb should be
+     * called when the returned object is no longer used, to allow it to be
+     * evicted.  See DatabaseImpl.isInUse.
+     * [#13415]
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     *
+     * @param nameLocker is used to access the NameLN. As always, a NullTxn
+     *  is used to access the MapLN.
+     * @param databaseName target database
+     * @return null if database doesn't exist
+     */
+    public DatabaseImpl getDb(Locker nameLocker,
+                              String databaseName,
+                              Database databaseHandle)
+        throws DatabaseException {
+
+        try {
+            /* Use count is not incremented for idDatabase and nameDatabase. */
+            if (databaseName.equals(ID_DB_NAME)) {
+                return idDatabase;
+            } else if (databaseName.equals(NAME_DB_NAME)) {
+                return nameDatabase;
+            }
+
+            /*
+             * Search the nameDatabase tree for the NameLN for this name.
+             */
+            CursorImpl nameCursor = null;
+            DatabaseId id = null;
+            try {
+                nameCursor = new CursorImpl(nameDatabase, nameLocker);
+                DatabaseEntry keyDbt =
+                    new DatabaseEntry(databaseName.getBytes("UTF-8"));
+                boolean found =
+                    (nameCursor.searchAndPosition(keyDbt, null,
+                                                  SearchMode.SET,
+                                                  LockType.READ) &
+                     CursorImpl.FOUND) != 0;
+
+                if (found) {
+                    NameLN nameLN = (NameLN)
+                        nameCursor.getCurrentLNAlreadyLatched(LockType.READ);
+                    assert nameLN != null; /* Should be locked. */
+                    id = nameLN.getId();
+
+                    /*
+                     * If this is a non-handle use, no need to record any
+                     * handle locks.
+                     */
+                    if (databaseHandle != null) {
+                        nameLocker.addToHandleMaps
+                            (Long.valueOf(nameLN.getNodeId()), databaseHandle);
+                    }
+                }
+            } finally {
+                if (nameCursor != null) {
+                    nameCursor.releaseBIN();
+                    nameCursor.close();
+                }
+            }
+
+            /*
+             * Now search the id tree.
+             */
+            if (id == null) {
+                return null;
+            } else {
+                return getDb(id, -1, databaseName);
+            }
+        } catch (UnsupportedEncodingException UEE) {
+            throw new DatabaseException(UEE);
+        }
+    }
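+
+    /*
+     * Illustrative calling pattern (a sketch only; the dbTree variable and
+     * database name below are hypothetical): any non-reserved DatabaseImpl
+     * returned by a getDb method holds an incremented use count, so callers
+     * are expected to pair the call with releaseDb.
+     *
+     *   DatabaseImpl dbImpl = dbTree.getDb(nameLocker, "someDbName", null);
+     *   if (dbImpl != null) {
+     *       try {
+     *           // ... operate on dbImpl ...
+     *       } finally {
+     *           dbTree.releaseDb(dbImpl);
+     *       }
+     *   }
+     */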
+
+    /**
+     * Get a database object based on an id only.  Used by recovery, cleaning
+     * and other clients who have an id in hand, and don't have a resident
+     * node, to find the matching database for a given log entry.
+     */
+    public DatabaseImpl getDb(DatabaseId dbId)
+        throws DatabaseException {
+
+        return getDb(dbId, -1);
+    }
+
+    /**
+     * Get a database object based on an id only. Specify the lock timeout to
+     * use, or -1 to use the default timeout.  A timeout should normally only
+     * be specified by daemons with their own timeout configuration.  Public
+     * for unit tests.
+     */
+    public DatabaseImpl getDb(DatabaseId dbId, long lockTimeout)
+        throws DatabaseException {
+
+        return getDb(dbId, lockTimeout, (String) null);
+    }
+
+    /**
+     * Get a database object based on an id only, caching the id-db mapping in
+     * the given map.
+     */
+    public DatabaseImpl getDb(DatabaseId dbId, 
+                              long lockTimeout, 
+                              Map<DatabaseId,DatabaseImpl> dbCache)
+        throws DatabaseException {
+
+        if (dbCache.containsKey(dbId)) {
+            return dbCache.get(dbId);
+        } else {
+            DatabaseImpl db = getDb(dbId, lockTimeout, (String) null);
+            dbCache.put(dbId, db);
+            return db;
+        }
+    }
+
+    /**
+     * Get a database object based on an id only. Specify the lock timeout to
+     * use, or -1 to use the default timeout.  A timeout should normally only
+     * be specified by daemons with their own timeout configuration.  Public
+     * for unit tests.
+     *
+     * Increments the use count of the given DB to prevent it from being
+     * evicted.  releaseDb should be called when the returned object is no
+     * longer used, to allow it to be evicted.  See DatabaseImpl.isInUse.
+     * [#13415]
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    public DatabaseImpl getDb(DatabaseId dbId,
+                              long lockTimeout,
+                              String dbNameIfAvailable)
+        throws DatabaseException {
+
+        if (dbId.equals(idDatabase.getId())) {
+            /* We're looking for the id database itself. */
+            return idDatabase;
+        } else if (dbId.equals(nameDatabase.getId())) {
+            /* We're looking for the name database itself. */
+            return nameDatabase;
+        } else {
+            /* Scan the tree for this db. */
+            DatabaseImpl foundDbImpl = null;
+
+            /*
+             * Retry indefinitely in the face of lock timeouts.  Deadlocks may
+             * be due to conflicts with modifyDbRoot.
+             */
+            while (true) {
+                Locker locker = null;
+                CursorImpl idCursor = null;
+                boolean operationOk = false;
+                try {
+                    locker = BasicLocker.createBasicLocker(envImpl);
+                    if (lockTimeout != -1) {
+                        locker.setLockTimeout(lockTimeout);
+                    }
+                    idCursor = new CursorImpl(idDatabase, locker);
+                    DatabaseEntry keyDbt = new DatabaseEntry(dbId.getBytes());
+                    boolean found =
+                        (idCursor.searchAndPosition
+                         (keyDbt, new DatabaseEntry(), SearchMode.SET,
+                          LockType.READ) &
+                         CursorImpl.FOUND) != 0;
+                    if (found) {
+                        MapLN mapLN = (MapLN)
+                            idCursor.getCurrentLNAlreadyLatched(LockType.READ);
+                        assert mapLN != null; /* Should be locked. */
+                        foundDbImpl =  mapLN.getDatabase();
+                        /* Increment DB use count with lock held. */
+                        foundDbImpl.incrementUseCount();
+                    }
+                    operationOk = true;
+                    break;
+                } catch (DeadlockException DE) {
+                    /* Continue loop and retry. */
+                } finally {
+                    if (idCursor != null) {
+                        idCursor.releaseBIN();
+                        idCursor.close();
+                    }
+                    if (locker != null) {
+                        locker.operationEnd(operationOk);
+                    }
+                }
+            }
+
+            /*
+             * Set the debugging name in the databaseImpl, but only after
+             * recovery has finished setting up the tree.
+             */
+            if (envImpl.isOpen()) {
+                setDebugNameForDatabaseImpl(foundDbImpl, dbNameIfAvailable);
+            }
+
+            return foundDbImpl;
+        }
+    }
+
+    /**
+     * Decrements the use count of the given DB, allowing it to be evicted if
+     * the use count reaches zero.  Must be called to release a DatabaseImpl
+     * that was returned by a method in this class.  See DatabaseImpl.isInUse.
+     * [#13415]
+     */
+    public void releaseDb(DatabaseImpl db) {
+        /* Use count is not incremented for idDatabase and nameDatabase. */
+        if (db != null &&
+            db != idDatabase &&
+            db != nameDatabase) {
+            db.decrementUseCount();
+        }
+    }
+
+    /**
+     * Calls releaseDb for all DBs in the given map of DatabaseId to
+     * DatabaseImpl.  See getDb(DatabaseId, long, Map). [#13415]
+     */
+    public void releaseDbs(Map<DatabaseId,DatabaseImpl> dbCache) {
+        if (dbCache != null) {
+            for (Iterator<DatabaseImpl> i = dbCache.values().iterator(); 
+                 i.hasNext();) {
+                releaseDb(i.next());
+            }
+        }
+    }
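+
+    /*
+     * Illustrative pattern (hypothetical, for clarity) for the cache-based
+     * getDb variant: a caller that resolves many ids can reuse one map and
+     * release every cached DatabaseImpl in a single call when finished.
+     *
+     *   Map<DatabaseId,DatabaseImpl> dbCache =
+     *       new HashMap<DatabaseId,DatabaseImpl>();
+     *   try {
+     *       DatabaseImpl dbImpl = dbTree.getDb(dbId, -1, dbCache);
+     *       // ... process entries belonging to dbImpl ...
+     *   } finally {
+     *       dbTree.releaseDbs(dbCache);
+     *   }
+     */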
+
+    /*
+     * We need to cache a database name in the dbImpl for later use in error
+     * messages, when it may be unsafe to walk the mapping tree.  Finding a
+     * name by id is slow, so minimize the number of times we must set the
+     * debug name.  The debug name will only be uninitialized when an existing
+     * databaseImpl is faulted in.
+     */
+    private void setDebugNameForDatabaseImpl(DatabaseImpl dbImpl,
+                                             String dbName)
+        throws DatabaseException {
+        
+        if (dbImpl != null) {
+            if (dbName != null) {
+                /* If a name was provided, use that. */
+                dbImpl.setDebugDatabaseName(dbName);
+            } else if (dbImpl.getDebugName() == null) {
+                /*
+                 * Only worry about searching for a name if the name
+                 * is uninitialized.
+                 */
+                dbImpl.setDebugDatabaseName(getDbName(dbImpl.getId()));
+            }
+        }
+    }
+
+    /**
+     * Rebuild the IN list after recovery.
+     */
+    public void rebuildINListMapDb()
+        throws DatabaseException {
+
+        idDatabase.getTree().rebuildINList();
+    }
+
+    /*
+     * Verification, must be run while system is quiescent.
+     */
+    public boolean verify(final VerifyConfig config, PrintStream out)
+        throws DatabaseException {
+
+        boolean ret = true;
+        try {
+            /* For now, verify all databases. */
+            boolean ok = idDatabase.verify(config,
+                                           idDatabase.getEmptyStats());
+            if (!ok) {
+                ret = false;
+            }
+
+            ok = nameDatabase.verify(config,
+                                     nameDatabase.getEmptyStats());
+            if (!ok) {
+                ret = false;
+            }
+        } catch (DatabaseException DE) {
+            ret = false;
+        }
+
+        synchronized (envImpl.getINCompressor()) {
+
+            /*
+             * Get a cursor on the id tree. Use objects at the dbi layer rather
+             * than at the public api, in order to retrieve objects rather than
+             * Dbts. Note that we don't do cursor cloning here, so any failures
+             * from each db verify invalidate the cursor.  Use dirty read
+             * (LockMode.NONE) because locks on the MapLN should never be held
+             * for long, as that will cause deadlocks with splits and
+             * checkpointing.
+             */
+            final LockType lockType = LockType.NONE;
+            class Traversal implements CursorImpl.WithCursor {
+                boolean allOk = true;
+
+                public boolean withCursor(CursorImpl cursor,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data)
+                    throws DatabaseException {
+
+                    MapLN mapLN = (MapLN) cursor.getCurrentLN(lockType);
+                    if (mapLN != null && !mapLN.isDeleted()) {
+                        DatabaseImpl dbImpl = mapLN.getDatabase();
+                        boolean ok = dbImpl.verify(config,
+                                                   dbImpl.getEmptyStats());
+                        if (!ok) {
+                            allOk = false;
+                        }
+                    }
+                    return true;
+                }
+            }
+            Traversal traversal = new Traversal();
+            CursorImpl.traverseDbWithCursor
+                (idDatabase, lockType, true /*allowEviction*/, traversal);
+            if (!traversal.allOk) {
+                ret = false;
+            }
+        }
+
+        return ret;
+    }
+
+    /**
+     * Return the database name for a given db. Slow, must traverse. Called by
+     * Database.getName.
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    public String getDbName(final DatabaseId id)
+        throws DatabaseException {
+
+        if (id.equals(ID_DB_ID)) {
+            return ID_DB_NAME;
+        } else if (id.equals(NAME_DB_ID)) {
+            return NAME_DB_NAME;
+        }
+        class Traversal implements CursorImpl.WithCursor {
+            String name = null;
+
+            public boolean withCursor(CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                NameLN nameLN = (NameLN) cursor.getCurrentLN(LockType.NONE);
+                if (nameLN != null && nameLN.getId().equals(id)) {
+                    try {
+                        name = new String(key.getData(), "UTF-8");
+                    } catch (UnsupportedEncodingException e) {
+                        throw new DatabaseException(e);
+                    }
+                    return false;
+                }
+                return true;
+            }
+        }
+        Traversal traversal = new Traversal();
+        CursorImpl.traverseDbWithCursor
+            (nameDatabase, LockType.NONE, false /*allowEviction*/, traversal);
+        return traversal.name;
+    }
+
+    /**
+     * @return a map of database ids to database names (Strings).
+     */
+    public Map<DatabaseId,String> getDbNamesAndIds()
+        throws DatabaseException {
+
+        final Map<DatabaseId,String> nameMap =
+            new HashMap<DatabaseId,String>();
+
+        class Traversal implements CursorImpl.WithCursor {
+            public boolean withCursor(CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                NameLN nameLN = (NameLN) cursor.getCurrentLN(LockType.NONE);
+                DatabaseId id = nameLN.getId();
+                try {
+                    nameMap.put(id, new String(key.getData(), "UTF-8"));
+                } catch (UnsupportedEncodingException e) {
+                    throw new DatabaseException(e);
+                }
+                return true;
+            }
+        }
+        Traversal traversal = new Traversal();
+        CursorImpl.traverseDbWithCursor
+            (nameDatabase, LockType.NONE, false /*allowEviction*/, traversal);
+        return nameMap;
+    }
+
+    /**
+     * @return a list of database names held in the environment, as strings.
+     */
+    public List<String> getDbNames()
+        throws DatabaseException {
+
+        final List<String> nameList = new ArrayList<String>();
+
+        CursorImpl.traverseDbWithCursor(nameDatabase,
+                                        LockType.NONE,
+                                        true /*allowEviction*/,
+                                        new CursorImpl.WithCursor() {
+            public boolean withCursor(CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                try {
+                    String name = new String(key.getData(), "UTF-8");
+                    if (!isReservedDbName(name)) {
+                        nameList.add(name);
+                    }
+                    return true;
+                } catch (UnsupportedEncodingException e) {
+                    throw new DatabaseException(e);
+                }
+            }
+        });
+
+        return nameList;
+    }
+
+    /**
+     * Return a list of the names of internally used databases that
+     * don't get looked up through the naming tree.
+     */
+    public List<String> getInternalNoLookupDbNames() {
+        List<String> names = new ArrayList<String>();
+        names.add(ID_DB_NAME);
+        names.add(NAME_DB_NAME);
+        return names;
+    }
+
+    /**
+     * Return a list of the names of internally used databases.
+     * TODO: The internal replication DBs are not added here and therefore not
+     * verified by DbVerify. Reassess for HA release.
+     */
+    public List<String> getInternalDbNames() {
+        List<String> names = new ArrayList<String>();
+        names.add(UTILIZATION_DB_NAME);
+        return names;
+    }
+
+    /**
+     * Returns true if the name is a reserved JE database name.
+     */
+    public static boolean isReservedDbName(String name) {
+        for (int i = 0; i < RESERVED_DB_NAMES.length; i += 1) {
+            if (RESERVED_DB_NAMES[i].equals(name)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * @return the highest level node in the environment.
+     */
+    public int getHighestLevel()
+        throws DatabaseException {
+
+        /* The highest level in the map side */
+        int idHighLevel = getHighestLevel(idDatabase);
+
+        /* The highest level in the name side */
+        int nameHighLevel = getHighestLevel(nameDatabase);
+
+        return (nameHighLevel > idHighLevel) ? nameHighLevel : idHighLevel;
+    }
+
+    /**
+     * @return the highest level node for this database.
+     */
+    public int getHighestLevel(DatabaseImpl dbImpl)
+        throws DatabaseException {
+
+        /* The highest level in the map side */
+        RootLevel getLevel = new RootLevel(dbImpl);
+        dbImpl.getTree().withRootLatchedShared(getLevel);
+        return getLevel.getRootLevel();
+    }
+
+    private boolean isReplicated() {
+        return (flags & REPLICATED_BIT) != 0;
+    }
+
+    private void setIsReplicated() {
+        flags |= REPLICATED_BIT;
+    }
+
+    /**
+     * Release resources and update memory budget. Should only be called
+     * when this dbtree is closed and will never be accessed again.
+     */
+    public void close() {
+        idDatabase.releaseTreeAdminMemory();
+        nameDatabase.releaseTreeAdminMemory();
+    }
+
+    long getTreeAdminMemory() {
+        return idDatabase.getTreeAdminMemory() +
+            nameDatabase.getTreeAdminMemory();
+    }
+
+    /*
+     * RootLevel lets us fetch the root IN within the root latch.
+     */
+    private static class RootLevel implements WithRootLatched {
+        private DatabaseImpl db;
+        private int rootLevel;
+
+        RootLevel(DatabaseImpl db) {
+            this.db = db;
+            rootLevel = 0;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            IN rootIN = (IN) root.fetchTarget(db, null);
+            rootLevel = rootIN.getLevel();
+            return null;
+        }
+
+        int getRootLevel() {
+            return rootLevel;
+        }
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getIntLogSize() +  // last allocated local db id
+            LogUtils.getIntLogSize() +  // last allocated replicated db id
+            idDatabase.getLogSize() +   // id db
+            nameDatabase.getLogSize() + // name db
+            1;                          // 1 byte of flags
+    }
+
+    /**
+     * This log entry type is configured to perform marshaling (getLogSize and
+     * writeToLog) under the write log mutex.  Otherwise, the size could change
+     * in between calls to these two methods as the result of utilization
+     * tracking.
+     *
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeInt(logBuffer,
+                          lastAllocatedLocalDbId.get());      // last id
+        LogUtils.writeInt(logBuffer,
+                          lastAllocatedReplicatedDbId.get()); // last rep id
+        idDatabase.writeToLog(logBuffer);                // id db
+        nameDatabase.writeToLog(logBuffer);              // name db
+        logBuffer.put(flags);
+    }
+
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        lastAllocatedLocalDbId.set(LogUtils.readInt(itemBuffer));
+        if (entryVersion >= 6) {
+            lastAllocatedReplicatedDbId.set(LogUtils.readInt(itemBuffer));
+        }
+
+        idDatabase.readFromLog(itemBuffer, entryVersion); // id db
+        nameDatabase.readFromLog(itemBuffer, entryVersion); // name db
+
+        if (entryVersion >= 6) {
+            flags = itemBuffer.get();
+        } else {
+            flags = 0;
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<dbtree lastLocalDbId = \"");
+        sb.append(lastAllocatedLocalDbId);
+        sb.append("\" lastReplicatedDbId = \"");
+        sb.append(lastAllocatedReplicatedDbId);
+        sb.append("\">");
+        sb.append("<idDb>");
+        idDatabase.dumpLog(sb, verbose);
+        sb.append("</idDb><nameDb>");
+        nameDatabase.dumpLog(sb, verbose);
+        sb.append("</nameDb>");
+        sb.append("</dbtree>");
+    }
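+
+    /*
+     * The fragment appended above looks roughly like this (values are
+     * illustrative only):
+     *
+     *   <dbtree lastLocalDbId = "12" lastReplicatedDbId = "0">
+     *   <idDb>...</idDb><nameDb>...</nameDb></dbtree>
+     */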
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /*
+     * For unit test support
+     */
+
+    String dumpString(int nSpaces) {
+        StringBuffer self = new StringBuffer();
+        self.append(TreeUtils.indent(nSpaces));
+        self.append("<dbTree lastDbId =\"");
+        self.append(lastAllocatedLocalDbId);
+        self.append("\">");
+        self.append('\n');
+        self.append(idDatabase.dumpString(nSpaces + 1));
+        self.append('\n');
+        self.append(nameDatabase.dumpString(nSpaces + 1));
+        self.append('\n');
+        self.append("</dbtree>");
+        return self.toString();
+    }
+
+    @Override
+    public String toString() {
+        return dumpString(0);
+    }
+
+    /**
+     * For debugging.
+     */
+    public void dump()
+        throws DatabaseException {
+
+        idDatabase.getTree().dump();
+        nameDatabase.getTree().dump();
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/EnvConfigObserver.java b/src/com/sleepycat/je/dbi/EnvConfigObserver.java
new file mode 100644
index 0000000000000000000000000000000000000000..44cd76a46e5c7f55bbaf18fb6b5660445b77c1c5
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/EnvConfigObserver.java
@@ -0,0 +1,26 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EnvConfigObserver.java,v 1.9 2008/01/07 14:28:48 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+
+/**
+ * Implemented by observers of mutable config changes.
+ */
+public interface EnvConfigObserver {
+
+    /**
+     * Notifies the observer that one or more mutable properties have been
+     * changed.
+     */
+    void envConfigUpdate(DbConfigManager configMgr,
+			 EnvironmentMutableConfig newConfig)
+        throws DatabaseException;
+}
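+
+/*
+ * Minimal implementation sketch (the class name below is hypothetical):
+ *
+ *   class MyConfigObserver implements EnvConfigObserver {
+ *       public void envConfigUpdate(DbConfigManager configMgr,
+ *                                   EnvironmentMutableConfig newConfig)
+ *           throws DatabaseException {
+ *           // Re-read whatever mutable parameters this component caches.
+ *       }
+ *   }
+ *
+ * Observers register via EnvironmentImpl.addConfigObserver and are notified
+ * each time mutable properties change.
+ */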
diff --git a/src/com/sleepycat/je/dbi/EnvironmentImpl.java b/src/com/sleepycat/je/dbi/EnvironmentImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..f45af70bfcd949ce6b885d4ea43ff4a99d894ea2
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/EnvironmentImpl.java
@@ -0,0 +1,2175 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentImpl.java,v 1.301.2.8 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.FileHandler;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.APILockedException;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.ExceptionListener;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.LogScanConfig;
+import com.sleepycat.je.LogScanner;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.TransactionStats;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.cleaner.Cleaner;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.cleaner.UtilizationProfile;
+import com.sleepycat.je.cleaner.UtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.evictor.Evictor;
+import com.sleepycat.je.evictor.PrivateEvictor;
+import com.sleepycat.je.evictor.SharedEvictor;
+import com.sleepycat.je.incomp.INCompressor;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.latch.SharedLatch;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LNFileReader;
+import com.sleepycat.je.log.LatchedLogManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.SyncedLogManager;
+import com.sleepycat.je.log.TraceLogHandler;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.recovery.Checkpointer;
+import com.sleepycat.je.recovery.RecoveryInfo;
+import com.sleepycat.je.recovery.RecoveryManager;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.BINReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.txn.TxnManager;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.PropUtil;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.Tracer;
+import com.sleepycat.je.utilint.TracerFormatter;
+
+/**
+ * Underlying Environment implementation. There is a single instance for any
+ * database environment opened by the application.
+ */
+public class EnvironmentImpl implements EnvConfigObserver {
+
+    /*
+     * Set true and run unit tests for NO_LOCKING_MODE test.
+     * EnvironmentConfigTest.testInconsistentParams will fail. [#13788]
+     */
+    private static final boolean TEST_NO_LOCKING_MODE = false;
+
+    /* Attributes of the entire environment */
+    private volatile DbEnvState envState;
+    private volatile boolean closing;    // true if close has begun
+    private File envHome;
+    private int referenceCount; // count of opened Database and DbTxns
+    private boolean isTransactional; // true if env opened with DB_INIT_TRANS
+    private boolean isNoLocking;   // true if env has no locking
+    private boolean isReadOnly; // true if env opened with the read only flag.
+    private boolean isMemOnly;  // true if je.log.memOnly=true
+    private boolean directNIO;  // true to use direct NIO buffers
+    private boolean sharedCache; // true if je.sharedCache=true
+    private static boolean fairLatches;// true if user wants fair latches
+    private static boolean useSharedLatchesForINs;
+    /* true if eviction of database metadata (MapLNs) is enabled. */
+    private boolean dbEviction;
+
+    private MemoryBudget memoryBudget;
+    private static int adler32ChunkSize;
+
+    /* Save so we don't have to look it up in the config manager frequently. */
+    private long lockTimeout;
+    private long txnTimeout;
+
+    /* Directory of databases */
+    private DbTree dbMapTree;
+    private long mapTreeRootLsn = DbLsn.NULL_LSN;
+    private Latch mapTreeRootLatch;
+
+    private INList inMemoryINs;
+
+    /* Services */
+    private DbConfigManager configManager;
+    private List<EnvConfigObserver> configObservers;
+    private Logger envLogger;
+    protected LogManager logManager;
+    private FileManager fileManager;
+    private TxnManager txnManager;
+
+    /* Daemons */
+    private Evictor evictor;
+    private INCompressor inCompressor;
+    private Checkpointer checkpointer;
+    private Cleaner cleaner;
+
+    /* Replication */
+    private ReplicatorInstance repInstance;
+
+    /* Stats, debug information */
+    private RecoveryInfo lastRecoveryInfo;
+    private RunRecoveryException savedInvalidatingException;
+
+    /* If true, call Thread.yield() at strategic points (stress test aid) */
+    private static boolean forcedYield = false;
+
+    /*
+     * Used by Database to protect access to the trigger list.  A single latch
+     * for all databases is used to prevent deadlocks.
+     */
+    private SharedLatch triggerLatch;
+
+    /**
+     * The exception listener for this envimpl, if any has been specified.
+     */
+    private ExceptionListener exceptionListener = null;
+
+    /*
+     * Configuration and tracking of background IO limits.  Managed by the
+     * updateBackgroundReads, updateBackgroundWrites and sleepAfterBackgroundIO
+     * methods.  The limits and the backlog are volatile because we check them
+     * outside the synchronized block.  Other fields are updated and checked
+     * while synchronized on the tracking mutex object.  The sleep mutex is
+     * used to block multiple background threads while sleeping.
+     */
+    private volatile int backgroundSleepBacklog;
+    private volatile int backgroundReadLimit;
+    private volatile int backgroundWriteLimit;
+    private long backgroundSleepInterval;
+    private int backgroundReadCount;
+    private long backgroundWriteBytes;
+    private TestHook backgroundSleepHook;
+    private Object backgroundTrackingMutex = new Object();
+    private Object backgroundSleepMutex = new Object();
+
+    /*
+     * ThreadLocal.get() is not cheap so we want to minimize calls to it.  We
+     * only use ThreadLocals for the TreeStatsAccumulator, which is used only
+     * in limited circumstances.  Use this reference count to indicate that a
+     * thread has set a TreeStatsAccumulator.  When it's done, it decrements
+     * the counter.  It's static so that we don't have to pass around the
+     * EnvironmentImpl.
+     */
+    private static int threadLocalReferenceCount = 0;
+
+    /**
+     * DbPrintLog doesn't need btree and dup comparators to function properly,
+     * so it doesn't require their instantiation.  This flag, if true,
+     * indicates that we've been called from DbPrintLog.
+     */
+    private static boolean noComparators = false;
+
+    /*
+     * A preallocated RunRecoveryException that is used in OOME and other
+     * java.lang.Error situations so that allocation does not need to be done
+     * in the OOME context.
+     */
+    public final RunRecoveryException SAVED_RRE = DbInternal.makeNoArgsRRE();
+
+    public static final boolean USE_JAVA5_ADLER32;
+
+    private static final String DISABLE_JAVA_ADLER32_NAME =
+        "je.disable.java.adler32";
+
+    static {
+        USE_JAVA5_ADLER32 =
+            System.getProperty(DISABLE_JAVA_ADLER32_NAME) == null;
+    }
+
+    public static final boolean IS_DALVIK;
+
+    static {
+        IS_DALVIK = "Dalvik".equals(System.getProperty("java.vm.name"));
+    }
+
+    public static final boolean IS_WINDOWS_7;
+
+    static {
+        String osName = System.getProperty("os.name");
+        IS_WINDOWS_7 = (osName != null && osName.startsWith("Windows 7"));
+    }
+
+    /*
+     * Timeout for waiting for API lockout to finish.
+     */
+    @SuppressWarnings("unused")
+    private int lockoutTimeout;
+
+    /*
+     * The NodeId used for the apiLock.
+     */
+    private Long apiLockNodeId;
+
+    /*
+     * The Locker used to hold the apiLock for write.
+     */
+    private Locker apiWriteLocker;
+
+    /* NodeId sequence counters */
+    private NodeSequence nodeSequence;
+
+    /**
+     * Create a database environment to represent the data in envHome.
+     * Properties from the je.properties file in that directory are
+     * used to initialize the system wide property bag. Properties passed to
+     * this method are used to influence the open itself.
+     *
+     * @param envHome absolute path of the database environment
+     *                          home directory
+     *
+     * @param envConfig
+     *
+     * @param sharedCacheEnv if non-null, is another environment that is
+     * sharing the cache with this environment; if null, this environment is
+     * not sharing the cache or is the first environment to share the cache.
+     *
+     * @throws DatabaseException on all other failures
+     */
+    public EnvironmentImpl(File envHome,
+                           EnvironmentConfig envConfig,
+                           EnvironmentImpl sharedCacheEnv,
+                           boolean replicationIntended)
+        throws DatabaseException {
+
+        boolean success = false;
+        try {
+            this.envHome = envHome;
+            envState = DbEnvState.INIT;
+            mapTreeRootLatch = new Latch("MapTreeRoot");
+
+            /* Set up configuration parameters */
+            configManager = new DbConfigManager(envConfig);
+            configObservers = new ArrayList<EnvConfigObserver>();
+            addConfigObserver(this);
+
+            /*
+             * Set up debug logging. Depending on configuration, add handlers,
+             * set logging level.
+             */
+            envLogger = initLogger(envHome);
+
+            /*
+             * Essential services. These must exist before recovery.
+             */
+            forcedYield =
+                configManager.getBoolean(EnvironmentParams.ENV_FORCED_YIELD);
+            isTransactional =
+                configManager.getBoolean(EnvironmentParams.ENV_INIT_TXN);
+            isNoLocking = !(configManager.getBoolean
+                            (EnvironmentParams.ENV_INIT_LOCKING));
+            if (isTransactional && isNoLocking) {
+                if (TEST_NO_LOCKING_MODE) {
+                    isNoLocking = !isTransactional;
+                } else {
+                    throw new IllegalArgumentException
+                        ("Can't set 'je.env.isNoLocking' and " +
+                         "'je.env.isTransactional';");
+                }
+            }
+
+            directNIO = false;
+            fairLatches =
+                configManager.getBoolean(EnvironmentParams.ENV_FAIR_LATCHES);
+            isReadOnly =
+                configManager.getBoolean(EnvironmentParams.ENV_RDONLY);
+            isMemOnly =
+                configManager.getBoolean(EnvironmentParams.LOG_MEMORY_ONLY);
+            useSharedLatchesForINs =
+                configManager.getBoolean(EnvironmentParams.ENV_SHARED_LATCHES);
+            dbEviction =
+                configManager.getBoolean(EnvironmentParams.ENV_DB_EVICTION);
+            adler32ChunkSize =
+                configManager.getInt(EnvironmentParams.ADLER32_CHUNK_SIZE);
+            sharedCache =
+                configManager.getBoolean(EnvironmentParams.ENV_SHARED_CACHE);
+
+            /*
+             * Decide on memory budgets based on environment config params and
+             * memory available to this process.
+             */
+            memoryBudget =
+                new MemoryBudget(this, sharedCacheEnv, configManager);
+
+            fileManager = new FileManager(this, envHome, isReadOnly);
+            if (!envConfig.getAllowCreate() && !fileManager.filesExist()) {
+                throw new DatabaseException
+                    ("Environment.setAllowCreate is false so environment " +
+                     " creation is not permitted, but there is no " +
+                     " pre-existing environment in " + envHome);
+            }
+
+            if (fairLatches) {
+                logManager = new LatchedLogManager(this, isReadOnly);
+            } else {
+                logManager = new SyncedLogManager(this, isReadOnly);
+            }
+
+            inMemoryINs = new INList(this);
+            txnManager = new TxnManager(this);
+
+            /*
+             * Daemons are always made here, but only started after recovery.
+             * We want them to exist so we can call them programmatically even
+             * if the daemon thread is not started.
+             */
+            createDaemons(sharedCacheEnv);
+
+            /*
+             * The transient node id sequence, but not the real node id
+             * sequence, must be initialized before the DbTree is created,
+             * because transient node ids are used during the creation of the
+             * dbtree.
+             */
+            nodeSequence = new NodeSequence();
+            nodeSequence.initTransientNodeId();
+
+            /*
+             * Instantiate a new, blank dbtree. If the environment already
+             * exists, recovery will recreate the dbMapTree from the log and
+             * overwrite this instance.
+             */
+            dbMapTree = new DbTree(this, replicationIntended);
+
+            referenceCount = 0;
+
+            triggerLatch = new SharedLatch("TriggerLatch");
+
+            /*
+             * Allocate the real node id sequence before recovery. We
+             * expressly wait until after the DbTree is created, because it
+             * should not be used by the DbTree before recovery has run.
+             * Waiting until now makes errors more evident, since misuse will
+             * surface as a NullPointerException.
+             */
+            nodeSequence.initRealNodeId();
+
+            /* Do not do recovery if this environment is for a utility. */
+            if (configManager.getBoolean(EnvironmentParams.ENV_RECOVERY)) {
+
+                /*
+                 * Run recovery.  Note that debug logging to the database log
+                 * is disabled until recovery is finished.
+                 */
+                try {
+                    RecoveryManager recoveryManager =
+                        new RecoveryManager(this);
+                    lastRecoveryInfo =
+                        recoveryManager.recover(isReadOnly,
+                                                replicationIntended);
+                } finally {
+                    try {
+                        /* Flush to get all exception tracing out to the log.*/
+                        logManager.flush();
+                        fileManager.clear();
+                    } catch (IOException e) {
+                        throw new DatabaseException(e.getMessage());
+                    }
+                }
+            } else {
+                isReadOnly = true;
+                noComparators = true;
+            }
+
+            /*
+             * Cache a few critical values. We keep our timeout in millis
+             * instead of microseconds because Object.wait takes millis.
+             */
+            lockTimeout =
+                PropUtil.microsToMillis(configManager.getLong
+                                        (EnvironmentParams.LOCK_TIMEOUT));
+            txnTimeout =
+                PropUtil.microsToMillis(configManager.getLong
+                                        (EnvironmentParams.TXN_TIMEOUT));
+
+            /*
+             * Initialize the environment memory usage number. Must be called
+             * after recovery, because recovery determines the starting size
+             * of the in-memory tree.
+             */
+            memoryBudget.initCacheMemoryUsage(dbMapTree.getTreeAdminMemory());
+
+            /* Mark as open before starting daemons. */
+            open();
+
+            /*
+             * Call config observer and start daemons last after everything
+             * else is initialized. Note that all config parameters, both
+             * mutable and non-mutable, needed by the memoryBudget have already
+             * been initialized when the configManager was instantiated.
+             */
+            envConfigUpdate(configManager, envConfig);
+
+            success = true;
+        } catch (DatabaseException e) {
+
+            /* Release any environment locks if there was a problem. */
+            if (fileManager != null) {
+                try {
+
+                    /*
+                     * Clear again, in case an exception in logManager.flush()
+                     * caused us to skip the earlier call to clear().
+                     */
+                    fileManager.clear();
+                    fileManager.close();
+                } catch (IOException IOE) {
+
+                    /*
+                     * Klockwork - ok
+                     * Eat it, we want to throw the original exception.
+                     */
+                }
+            }
+            throw e;
+        } finally {
+
+            /*
+             * DbEnvPool.addEnvironment is called by RecoveryManager.buildTree
+             * during recovery above, to enable eviction during recovery.  If
+             * we fail to create the environment, we must remove it.
+             */
+            if (!success && sharedCache && evictor != null) {
+                evictor.removeEnvironment(this);
+            }
+        }
+    }
+
+    /**
+     * Reinitialize after an Internal Init copies new *.jdb files into envhome.
+     */
+    public void reinit(boolean replicationIntended)
+        throws DatabaseException {
+
+        /* Calls doReinit while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().reinitEnvironment(this, replicationIntended);
+    }
+
+    /**
+     * This method must be called while synchronized on DbEnvPool.
+     */
+    synchronized void doReinit(boolean replicationIntended,
+                               EnvironmentImpl sharedCacheEnv)
+        throws DatabaseException {
+
+        try {
+            closing = false;
+            envState = DbEnvState.INIT;
+            SAVED_RRE.setAlreadyThrown(false);
+
+            /*
+             * Daemons are always made here, but only started after recovery.
+             * We want them to exist so we can call them programmatically even
+             * if the daemon thread is not started.
+             */
+            createDaemons(sharedCacheEnv);
+
+            /*
+             * Instantiate a new, blank dbtree. If the environment already
+             * exists, recovery will recreate the dbMapTree from the log and
+             * overwrite this instance.
+             */
+            if (dbMapTree != null) {
+                dbMapTree.close();
+            }
+
+            dbMapTree = new DbTree(this, replicationIntended);
+            mapTreeRootLsn = DbLsn.NULL_LSN;
+            referenceCount = 0;
+            threadLocalReferenceCount = 0;
+
+            /*
+             * Run recovery.  Note that debug logging to the database log is
+             * disabled until recovery is finished.
+             */
+            try {
+                RecoveryManager recoveryManager = new RecoveryManager(this);
+                lastRecoveryInfo =
+                    recoveryManager.recover(isReadOnly, replicationIntended);
+            } finally {
+                try {
+                    /* Flush to get all exception tracing out to the log.*/
+                    logManager.flush();
+                    fileManager.clear();
+                } catch (IOException e) {
+                    throw new DatabaseException(e.getMessage());
+                }
+            }
+
+            /*
+             * Initialize the environment memory usage number. Must be called
+             * after recovery, because recovery determines the starting size of
+             * the in-memory tree.
+             */
+            memoryBudget.initCacheMemoryUsage(dbMapTree.getTreeAdminMemory());
+
+            /*
+             * Call config observer and start daemons after the memory budget
+             * is initialized. Note that all config parameters, both mutable
+             * and non-mutable, needed by the memoryBudget have already been
+             * initialized when the configManager was instantiated.
+             */
+            envConfigUpdate(configManager,
+                            configManager.getEnvironmentConfig());
+
+            /* Mark as open. */
+            open();
+        } catch (DatabaseException e) {
+
+            /* Release any environment locks if there was a problem. */
+            if (fileManager != null) {
+                try {
+
+                    /*
+                     * Clear again, in case an exception in logManager.flush()
+                     * caused us to skip the earlier call to clear().
+                     */
+                    fileManager.clear();
+                    fileManager.close();
+                } catch (IOException IOE) {
+
+                    /*
+                     * Klockwork - ok
+                     * Eat it, we want to throw the original exception.
+                     */
+                }
+            }
+            throw e;
+        }
+    }
+
+    /**
+     * Respond to config updates.
+     */
+    public void envConfigUpdate(DbConfigManager mgr,
+                                EnvironmentMutableConfig newConfig)
+        throws DatabaseException {
+
+        backgroundReadLimit = mgr.getInt
+            (EnvironmentParams.ENV_BACKGROUND_READ_LIMIT);
+        backgroundWriteLimit = mgr.getInt
+            (EnvironmentParams.ENV_BACKGROUND_WRITE_LIMIT);
+        backgroundSleepInterval = PropUtil.microsToMillis(mgr.getLong
+            (EnvironmentParams.ENV_BACKGROUND_SLEEP_INTERVAL));
+        lockoutTimeout = mgr.getInt
+            (EnvironmentParams.ENV_LOCKOUT_TIMEOUT);
+
+        exceptionListener = newConfig.getExceptionListener();
+        inCompressor.setExceptionListener(exceptionListener);
+        cleaner.setExceptionListener(exceptionListener);
+        checkpointer.setExceptionListener(exceptionListener);
+        evictor.setExceptionListener(exceptionListener);
+
+        /* Start daemons last, after all other parameters are set. */
+        runOrPauseDaemons(mgr);
+    }
+
+    /**
+     * Read configurations for daemons, instantiate.
+     */
+    private void createDaemons(EnvironmentImpl sharedCacheEnv)
+        throws DatabaseException  {
+
+        /* Evictor */
+        if (sharedCacheEnv != null) {
+            assert sharedCache;
+            evictor = sharedCacheEnv.evictor;
+        } else if (sharedCache) {
+            evictor = new SharedEvictor(this, "SharedEvictor");
+        } else {
+            evictor = new PrivateEvictor(this, "Evictor");
+        }
+
+        /* Checkpointer */
+
+        /*
+         * Make sure that either log-size-based or time-based checkpointing
+         * is enabled.
+         */
+        long checkpointerWakeupTime =
+            Checkpointer.getWakeupPeriod(configManager);
+        checkpointer = new Checkpointer(this,
+                                        checkpointerWakeupTime,
+                                        Environment.CHECKPOINTER_NAME);
+
+        /* INCompressor */
+        long compressorWakeupInterval =
+            PropUtil.microsToMillis
+            (configManager.getLong
+             (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL));
+        inCompressor = new INCompressor(this, compressorWakeupInterval,
+                                        Environment.INCOMP_NAME);
+
+        /* The cleaner is not time-based so no wakeup interval is used. */
+        cleaner = new Cleaner(this, Environment.CLEANER_NAME);
+    }
+
+    /**
+     * Run or pause daemons, depending on config properties.
+     */
+    private void runOrPauseDaemons(DbConfigManager mgr)
+        throws DatabaseException {
+
+        if (!isReadOnly) {
+            /* INCompressor */
+            inCompressor.runOrPause
+                (mgr.getBoolean(EnvironmentParams.ENV_RUN_INCOMPRESSOR));
+
+            /* Cleaner. Do not start it if running in-memory  */
+            cleaner.runOrPause
+                (mgr.getBoolean(EnvironmentParams.ENV_RUN_CLEANER) &&
+                 !isMemOnly);
+
+            /*
+             * Checkpointer. Run in both transactional and non-transactional
+             * environments to guarantee recovery time.
+             */
+            checkpointer.runOrPause
+                (mgr.getBoolean(EnvironmentParams.ENV_RUN_CHECKPOINTER));
+        }
+
+        /* Evictor */
+        evictor.runOrPause
+            (mgr.getBoolean(EnvironmentParams.ENV_RUN_EVICTOR));
+    }
+
+    /**
+     * Return the incompressor. In general, don't use this directly because
+     * it's easy to forget that the incompressor can be null at times (i.e.,
+     * during the shutdown procedure). Instead, wrap the functionality within
+     * this class, as lazyCompress does.
+     */
+    public INCompressor getINCompressor() {
+        return inCompressor;
+    }
+
+    /**
+     * Returns the UtilizationTracker.
+     */
+    public UtilizationTracker getUtilizationTracker() {
+        return cleaner.getUtilizationTracker();
+    }
+
+    /**
+     * Returns the UtilizationProfile.
+     */
+    public UtilizationProfile getUtilizationProfile() {
+        return cleaner.getUtilizationProfile();
+    }
+
+    /**
+     * If a background read limit has been configured and that limit is
+     * exceeded when the cumulative total is incremented by the given number of
+     * reads, increment the sleep backlog to cause a sleep to occur.  Called by
+     * background activities such as the cleaner after performing a file read
+     * operation.
+     *
+     * @see #sleepAfterBackgroundIO
+     */
+    public void updateBackgroundReads(int nReads) {
+
+        /*
+         * Make a copy of the volatile limit field since it could change
+         * between the time we check it and the time we use it below.
+         */
+        int limit = backgroundReadLimit;
+        if (limit > 0) {
+            synchronized (backgroundTrackingMutex) {
+                backgroundReadCount += nReads;
+                if (backgroundReadCount >= limit) {
+                    backgroundSleepBacklog += 1;
+                    /* Remainder is rolled forward. */
+                    backgroundReadCount -= limit;
+                    assert backgroundReadCount >= 0;
+                }
+            }
+        }
+    }
+
+    /**
+     * If a background write limit has been configured and that limit is
+     * exceeded when the given amount written is added to the cumulative total,
+     * increment the sleep backlog to cause a sleep to occur.  Called by
+     * background activities such as the checkpointer and evictor after
+     * performing a file write operation.
+     *
+     * <p>The number of writes is estimated by dividing the bytes written by
+     * the log buffer size.  Since the log write buffer is shared by all
+     * writers, this is the best approximation possible.</p>
+     *
+     * @see #sleepAfterBackgroundIO
+     */
+    public void updateBackgroundWrites(int writeSize, int logBufferSize) {
+
+        /*
+         * Make a copy of the volatile limit field since it could change
+         * between the time we check it and the time we use it below.
+         */
+        int limit = backgroundWriteLimit;
+        if (limit > 0) {
+            synchronized (backgroundTrackingMutex) {
+                backgroundWriteBytes += writeSize;
+                int writeCount = (int) (backgroundWriteBytes / logBufferSize);
+                if (writeCount >= limit) {
+                    backgroundSleepBacklog += 1;
+                    /* Remainder is rolled forward. */
+                    backgroundWriteBytes -= (limit * logBufferSize);
+                    assert backgroundWriteBytes >= 0;
+                }
+            }
+        }
+    }
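+
+    /*
+     * Worked example with assumed, illustrative numbers: with a 1 MB log
+     * buffer and backgroundWriteLimit == 10, the backlog is incremented
+     * roughly once for every 10 MB written by background threads, and any
+     * remainder of the byte count is carried forward to the next check.
+     */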
+
+    /**
+     * If the sleep backlog is non-zero (set by updateBackgroundReads or
+     * updateBackgroundWrites), sleep for the configured interval and decrement
+     * the backlog.
+     *
+     * <p>If two threads call this method and the first call causes a sleep,
+     * the call by the second thread will block until the first thread's sleep
+     * interval is over.  When the call by the second thread is unblocked, if
+     * another sleep is needed then the second thread will sleep again.  In
+     * other words, when lots of sleeps are needed, background threads may
+     * backup.  This is intended to give foreground threads a chance to "catch
+     * up" when background threads are doing a lot of IO.</p>
+     */
+    public void sleepAfterBackgroundIO() {
+        if (backgroundSleepBacklog > 0) {
+            synchronized (backgroundSleepMutex) {
+                /* Sleep. Rethrow interrupts if they occur. */
+                try {
+                    /* FindBugs: OK that we're sleeping with a mutex held. */
+                    Thread.sleep(backgroundSleepInterval);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                }
+                /* Assert has intentional side effect for unit testing. */
+                assert TestHookExecute.doHookIfSet(backgroundSleepHook);
+            }
+            synchronized (backgroundTrackingMutex) {
+                /* Decrement backlog last to make other threads wait. */
+                if (backgroundSleepBacklog > 0) {
+                    backgroundSleepBacklog -= 1;
+                }
+            }
+        }
+    }
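+
+    /*
+     * Typical call sequence from a background thread (a hypothetical sketch;
+     * bytesWritten and logBufferSize are placeholders):
+     *
+     *   envImpl.updateBackgroundWrites(bytesWritten, logBufferSize);
+     *   envImpl.sleepAfterBackgroundIO(); // sleeps only if a backlog exists
+     */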
+
+    /* For unit testing only. */
+    public void setBackgroundSleepHook(TestHook hook) {
+        backgroundSleepHook = hook;
+    }
+
+    /*
+     * Initialize the API Lock.
+     */
+    public void setupAPILock()
+        throws DatabaseException {
+
+        if (isNoLocking()) {
+            throw new UnsupportedOperationException
+                ("Attempting to init the apiLock with locking disabled.");
+        }
+
+        apiLockNodeId = Long.valueOf(nodeSequence.getNextTransientNodeId());
+        apiWriteLocker =
+            BasicLocker.createBasicLocker(this, false /*noWait*/,
+                                          true /*noAPIReadLock*/);
+    }
+
+    /*
+     * Lock the API by grabbing the apiLock's write lock.
+     *
+     * @param time the amount of time to wait while attempting to acquire
+     * the api write lock.
+     *
+     * @param units the units of the time param.
+     *
+     * @throws DatabaseException if the lock is not acquired within the timeout
+     * period.
+     */
+    public void acquireAPIWriteLock(int time, TimeUnit units)
+        throws DatabaseException {
+
+        /* Shouldn't be calling this if the api lock is not init'd yet. */
+        if (apiLockNodeId == null) {
+            throw new UnsupportedOperationException
+                ("Attempting to acquireAPIWriteLock, but the API Lock is " +
+                 "not initialized yet.  Call EnvironmentImpl.setupAPILock().");
+        }
+
+        try {
+            LockResult lr = apiWriteLocker.lock(apiLockNodeId,
+                                                LockType.WRITE,
+                                                false,
+                                                null);
+            LockGrantType grant = lr.getLockGrant();
+
+            if (grant == LockGrantType.DENIED) {
+                throw new APILockedException("API Lock timeout");
+            }
+
+            if (grant != LockGrantType.NEW) {
+                throw new IllegalMonitorStateException
+                    ("Write lock was granted, but grant type was " +
+                     grant);
+            }
+        } catch (DeadlockException DE) {
+            throw new APILockedException("API Lock timeout");
+        }
+    }
+
+    /*
+     * Unlock the API by releasing the apiLock's write lock.
+     */
+    public void releaseAPIWriteLock()
+        throws DatabaseException {
+
+        /* Shouldn't be calling this if the api lock is not init'd yet. */
+        if (apiLockNodeId == null) {
+            throw new UnsupportedOperationException
+                ("Attempting to releaseAPIWriteLock, but the API Lock is " +
+                 "not initialized yet.  Call EnvironmentImpl.setupAPILock().");
+        }
+
+        boolean ret = apiWriteLocker.releaseLock(apiLockNodeId);
+        if (!ret) {
+            throw new IllegalMonitorStateException
+                ("Couldn't release API write lock.");
+        }
+    }
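+
+    /*
+     * Expected usage of the API lock (a sketch, assuming the caller handles
+     * DatabaseException): acquire the write lock, do the exclusive work, and
+     * release in a finally block so the API is never left locked.
+     *
+     *   envImpl.setupAPILock();
+     *   envImpl.acquireAPIWriteLock(5, TimeUnit.SECONDS);
+     *   try {
+     *       // work that must exclude all API readers
+     *   } finally {
+     *       envImpl.releaseAPIWriteLock();
+     *   }
+     */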
+
+    /*
+     * Acquire a read lock on the apiLock.  Used to indicate that some
+     * transaction or cursor is open.
+     *
+     * @throws DatabaseException if the lock can't be acquired in
+     * lockoutTimeout milliseconds.
+     */
+    public void acquireAPIReadLock(Locker who)
+        throws DatabaseException {
+
+        /* Just return if API Locking is not enabled. */
+        if (apiLockNodeId == null) {
+            return;
+        }
+
+        try {
+            LockResult lr = who.lock(apiLockNodeId,
+                                     LockType.READ,
+                                     false,
+                                     null);
+            LockGrantType grant = lr.getLockGrant();
+
+            if (grant == LockGrantType.DENIED) {
+                throw new APILockedException("API Lock timeout");
+            }
+
+            if (grant != LockGrantType.NEW) {
+                throw new IllegalMonitorStateException
+                    ("Read lock was granted, but grant type was " + grant);
+            }
+        } catch (DeadlockException DE) {
+            throw new APILockedException("API Lock timeout");
+        }
+    }
+
+    public boolean releaseAPIReadLock(Locker who)
+        throws DatabaseException {
+
+        /* Just return if API Locking not enabled. */
+        if (apiLockNodeId == null) {
+            return false;
+        }
+
+        return who.releaseLock(apiLockNodeId);
+    }
+
+    public boolean scanLog(long startPosition,
+                           long endPosition,
+                           LogScanConfig config,
+                           LogScanner scanner)
+        throws DatabaseException {
+
+        try {
+            DbConfigManager cm = getConfigManager();
+            int readBufferSize =
+                cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+            long endOfLogLsn = fileManager.getNextLsn();
+            boolean forwards = config.getForwards();
+            LNFileReader reader = null;
+            if (forwards) {
+                if (endPosition > endOfLogLsn) {
+                    throw new IllegalArgumentException
+                        ("endPosition (" + endPosition +
+                         ") is past the end of the log on a forewards scan.");
+                }
+                reader = new LNFileReader(this,
+                                          readBufferSize,
+                                          startPosition,  /*startLsn*/
+                                          true,           /*forwards*/
+                                          endPosition,    /*endOfFileLsn*/
+                                          DbLsn.NULL_LSN, /*finishLsn*/
+                                          null,           /*singleFileNum*/
+                                          DbLsn.NULL_LSN);/*ckptEnd*/
+            } else {
+                if (startPosition > endOfLogLsn) {
+                    throw new IllegalArgumentException
+                        ("startPosition (" + startPosition +
+                         ") is past the end of the log on a backwards scan.");
+                }
+                reader = new LNFileReader(this,
+                                          readBufferSize,
+                                          startPosition,  /*startLsn*/
+                                          false,          /*forwards*/
+                                          endOfLogLsn,    /*endOfFileLsn*/
+                                          endPosition,    /*finishLsn*/
+                                          null,           /*singleFileNum*/
+                                          DbLsn.NULL_LSN);/*ckptEnd*/
+            }
+            reader.addTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+            reader.addTargetType(LogEntryType.LOG_LN);
+            reader.addTargetType(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+            reader.addTargetType(LogEntryType.LOG_DEL_DUPLN);
+
+            Map<DatabaseId,String> dbNameMap = dbMapTree.getDbNamesAndIds();
+
+            while (reader.readNextEntry()) {
+                if (reader.isLN()) {
+                    LN theLN = reader.getLN();
+                    byte[] theKey = reader.getKey();
+
+                    DatabaseId dbId = reader.getDatabaseId();
+                    String dbName = dbNameMap.get(dbId);
+                    if (DbTree.isReservedDbName(dbName)) {
+                        continue;
+                    }
+
+                    boolean continueScanning =
+                        scanner.scanRecord(new DatabaseEntry(theKey),
+                                           new DatabaseEntry(theLN.getData()),
+                                           theLN.isDeleted(),
+                                           dbName);
+                    if (!continueScanning) {
+                        break;
+                    }
+                }
+            }
+
+            return true;
+        } catch (IOException IOE) {
+            throw new DatabaseException(IOE);
+        }
+    }
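+
+    /*
+     * Example caller (a sketch): scan the whole log forwards and count live
+     * records.  The scanRecord signature is taken from the call above;
+     * LogScanConfig.setForwards is assumed to be the setter matching
+     * getForwards, and firstLsn/lastLsn are the caller's scan bounds.
+     *
+     *   LogScanConfig conf = new LogScanConfig();
+     *   conf.setForwards(true);
+     *   final int[] count = { 0 };
+     *   envImpl.scanLog(firstLsn, lastLsn, conf, new LogScanner() {
+     *       public boolean scanRecord(DatabaseEntry key,
+     *                                 DatabaseEntry data,
+     *                                 boolean deleted,
+     *                                 String dbName) {
+     *           if (!deleted) {
+     *               count[0] += 1;
+     *           }
+     *           return true; // keep scanning
+     *       }
+     *   });
+     */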
+
+    /**
+     * Logs the map tree root and saves the LSN.
+     */
+    public void logMapTreeRoot()
+        throws DatabaseException {
+
+        logMapTreeRoot(DbLsn.NULL_LSN);
+    }
+
+    /**
+     * Logs the map tree root, but only if its current LSN is before the
+     * ifBeforeLsn parameter or ifBeforeLsn is NULL_LSN.
+     */
+    public void logMapTreeRoot(long ifBeforeLsn)
+        throws DatabaseException {
+
+        mapTreeRootLatch.acquire();
+        try {
+            if (ifBeforeLsn == DbLsn.NULL_LSN ||
+                DbLsn.compareTo(mapTreeRootLsn, ifBeforeLsn) < 0) {
+                mapTreeRootLsn = logManager.log
+                    (new SingleItemEntry(LogEntryType.LOG_ROOT,
+                                         dbMapTree),
+                     ReplicationContext.NO_REPLICATE);
+            }
+        } finally {
+            mapTreeRootLatch.release();
+        }
+    }
+
+    /**
+     * Force a rewrite of the map tree root if required.
+     */
+    public void rewriteMapTreeRoot(long cleanerTargetLsn)
+        throws DatabaseException {
+
+        mapTreeRootLatch.acquire();
+        try {
+            if (DbLsn.compareTo(cleanerTargetLsn, mapTreeRootLsn) == 0) {
+
+                /*
                 * The root entry targeted for cleaning is in use.  Write a
+                 * new copy.
+                 */
+                mapTreeRootLsn = logManager.log
+                    (new SingleItemEntry(LogEntryType.LOG_ROOT,
+                                         dbMapTree),
+                     ReplicationContext.NO_REPLICATE);
+            }
+        } finally {
+            mapTreeRootLatch.release();
+        }
+    }
+
+    /**
+     * @return the mapping tree root LSN.
+     */
+    public long getRootLsn() {
+        return mapTreeRootLsn;
+    }
+
+    /**
+     * Set the mapping tree from the log. Called during recovery.
+     */
+    public void readMapTreeFromLog(long rootLsn, boolean replicationIntended)
+        throws DatabaseException {
+
+        if (dbMapTree != null) {
+            dbMapTree.close();
+        }
+        dbMapTree = (DbTree) logManager.get(rootLsn);
+        dbMapTree.initExistingEnvironment(this, replicationIntended);
+
+        /* Set the map tree root */
+        mapTreeRootLatch.acquire();
+        try {
+            mapTreeRootLsn = rootLsn;
+        } finally {
+            mapTreeRootLatch.release();
+        }
+    }
+
+    /**
+     * Tells the asynchronous IN compressor thread about a BIN with a deleted
+     * entry.
+     */
+    public void addToCompressorQueue(BIN bin,
+                                     Key deletedKey,
+                                     boolean doWakeup)
+        throws DatabaseException {
+
+        /*
+         * May be called by the cleaner on its last cycle, after the compressor
+         * is shut down.
+         */
+        if (inCompressor != null) {
+            inCompressor.addBinKeyToQueue(bin, deletedKey, doWakeup);
+        }
+    }
+
+    /**
+     * Tells the asynchronous IN compressor thread about a BINReference with a
+     * deleted entry.
+     */
+    public void addToCompressorQueue(BINReference binRef,
+                                     boolean doWakeup)
+        throws DatabaseException {
+
+        /*
+         * May be called by the cleaner on its last cycle, after the compressor
+         * is shut down.
+         */
+        if (inCompressor != null) {
+            inCompressor.addBinRefToQueue(binRef, doWakeup);
+        }
+    }
+
+    /**
+     * Tells the asynchronous IN compressor thread about a collection of
+     * BINReferences with deleted entries.
+     */
+    public void addToCompressorQueue(Collection<BINReference> binRefs,
+                                     boolean doWakeup)
+        throws DatabaseException {
+
+        /*
+         * May be called by the cleaner on its last cycle, after the compressor
+         * is shut down.
+         */
+        if (inCompressor != null) {
+            inCompressor.addMultipleBinRefsToQueue(binRefs, doWakeup);
+        }
+    }
+
+    /**
+     * Do lazy compression at opportune moments.
+     */
+    public void lazyCompress(IN in, LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        /*
+         * May be called by the cleaner on its last cycle, after the compressor
+         * is shut down.
+         */
+        if (inCompressor != null) {
+            inCompressor.lazyCompress(in, localTracker);
+        }
+    }
+
+    /**
+     * Initialize the debugging logging system. Note that publishing to the
+     * database log is not permitted until we've initialized the file manager
+     * in recovery. We can't log safely before that.
+     */
+    private Logger initLogger(File envHome)
+        throws DatabaseException {
+
+        Logger logger = Logger.getAnonymousLogger();
+
+        /*
+         * Disable handlers inherited from parents; we want JE to control its
+         * own behavior.  Add our handlers based on configuration.
+         */
+        logger.setUseParentHandlers(false);
+
+        /* Set the logging level. */
+        Level level =
+            Tracer.parseLevel(this, EnvironmentParams.JE_LOGGING_LEVEL);
+        logger.setLevel(level);
+
+        /* Log to console. */
+        if (configManager.getBoolean(EnvironmentParams.JE_LOGGING_CONSOLE)) {
+            Handler consoleHandler = new ConsoleHandler();
+            consoleHandler.setFormatter(new TracerFormatter());
+            consoleHandler.setLevel(level);
+            logger.addHandler(consoleHandler);
+        }
+
+        /* Log to text file. */
+        Handler fileHandler = null;
+        try {
+            if (configManager.getBoolean(EnvironmentParams.JE_LOGGING_FILE)) {
+
+                /* Log with a rotating set of files, use append mode. */
+                int limit =
+                    configManager.getInt(EnvironmentParams.
+                                         JE_LOGGING_FILE_LIMIT);
+                int count =
+                    configManager.getInt(EnvironmentParams.
+                                         JE_LOGGING_FILE_COUNT);
+                String logFilePattern = envHome + "/" + Tracer.INFO_FILES;
+
+                fileHandler = new FileHandler(logFilePattern,
+                                              limit, count, true);
+                fileHandler.setFormatter(new TracerFormatter());
+                fileHandler.setLevel(level);
+                logger.addHandler(fileHandler);
+            }
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+
+        return logger;
+    }
+
+    /**
+     * Add the database log as one of the debug logging destinations when the
+     * logging system is sufficiently initialized.
+     */
+    public void enableDebugLoggingToDbLog()
+        throws DatabaseException {
+
+        if (configManager.getBoolean(EnvironmentParams.JE_LOGGING_DBLOG)) {
+            Handler dbLogHandler = new TraceLogHandler(this);
+            Level level =
+                Level.parse(configManager.get(EnvironmentParams.
+                                              JE_LOGGING_LEVEL));
+            dbLogHandler.setLevel(level);
+            dbLogHandler.setFormatter(new TracerFormatter());
+            envLogger.addHandler(dbLogHandler);
+        }
+    }
+
+    /**
+     * Close down the logger.
+     */
+    public void closeLogger() {
+        Handler[] handlers = envLogger.getHandlers();
+        for (int i = 0; i < handlers.length; i++) {
+            handlers[i].close();
+        }
+    }
+
+    /**
+     * Not much to do, mark state.
+     */
+    public void open() {
+        envState = DbEnvState.OPEN;
+    }
+
+    /**
+     * Invalidate the environment. Done when a fatal exception
+     * (RunRecoveryException) is thrown.
+     */
+    public void invalidate(RunRecoveryException e) {
+
+        /*
+         * Remember the fatal exception so we can redisplay it if the
+         * environment is called by the application again. Set some state in
+         * the exception so the exception message makes it clear that this was
+         * an earlier exception.
+         */
+        savedInvalidatingException = e;
+        envState = DbEnvState.INVALID;
+        requestShutdownDaemons();
+    }
+
+    public void invalidate(Error e) {
+        if (SAVED_RRE.getCause() == null) {
+            savedInvalidatingException = (RunRecoveryException)
+                SAVED_RRE.initCause(e);
+            envState = DbEnvState.INVALID;
+            requestShutdownDaemons();
+        }
+    }
+
+    /**
+     * @return true if environment is open.
+     */
+    public boolean isOpen() {
+        return (envState == DbEnvState.OPEN);
+    }
+
+    /**
+     * @return true if close has begun, although the state may still be open.
+     */
+    public boolean isClosing() {
+        return closing;
+    }
+
+    public boolean isClosed() {
+        return (envState == DbEnvState.CLOSED);
+    }
+
+    /**
+     * When a RunRecoveryException occurs or the environment is closed, further
+     * writing can cause log corruption.
+     */
+    public boolean mayNotWrite() {
+        return (envState == DbEnvState.INVALID) ||
+               (envState == DbEnvState.CLOSED);
+    }
+
+    public void checkIfInvalid()
+        throws RunRecoveryException {
+
+        if (envState == DbEnvState.INVALID) {
+            savedInvalidatingException.setAlreadyThrown(true);
+            if (savedInvalidatingException == SAVED_RRE) {
+                savedInvalidatingException.fillInStackTrace();
+            }
+            throw savedInvalidatingException;
+        }
+    }
+
+    public void checkNotClosed()
+        throws DatabaseException {
+
+        if (envState == DbEnvState.CLOSED) {
+            throw new DatabaseException
+                ("Attempt to use a Environment that has been closed.");
+        }
+    }
+
+    /**
+     * Decrements the reference count and closes the environment when it
+     * reaches zero.  A checkpoint is always performed when closing.
+     */
+    public void close()
+        throws DatabaseException {
+
+        /* Calls doClose while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().closeEnvironment
+            (this, true /* doCheckpoint */, true /* doCheckLeaks */);
+    }
+
+    /**
+     * Decrements the reference count and closes the environment when it
+     * reaches zero.  A checkpoint when closing is optional.
+     */
+    public void close(boolean doCheckpoint)
+        throws DatabaseException {
+
+        /* Calls doClose while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().closeEnvironment
+            (this, doCheckpoint, true /* doCheckLeaks */);
+    }
+
+    /**
+     * Used by tests to close an environment to simulate a crash.  Database
+     * handles do not have to be closed before calling this method.  A
+     * checkpoint is not performed.
+     */
+    public void abnormalClose()
+        throws DatabaseException {
+
+        /* Calls doClose while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().closeEnvironment
+            (this, false /* doCheckpoint */, false /* doCheckLeaks */);
+    }
+
+    /**
+     * Closes the environment, optionally performing a checkpoint and checking
+     * for resource leaks.  This method must be called while synchronized on
+     * DbEnvPool.
+     */
+    synchronized void doClose(boolean doCheckpoint, boolean doCheckLeaks)
+        throws DatabaseException {
+
+        StringWriter errorStringWriter = new StringWriter();
+        PrintWriter errors = new PrintWriter(errorStringWriter);
+
+        try {
+            Tracer.trace(Level.FINE, this,
+                         "Close of environment " +
+                         envHome + " started");
+
+            envState.checkState(DbEnvState.VALID_FOR_CLOSE,
+                                DbEnvState.CLOSED);
+
+            /*
+             * Begin shutdown of the daemons before checkpointing.  Cleaning
+             * during the checkpoint is wasted and slows down the checkpoint.
+             */
+            requestShutdownDaemons();
+
+            /* Checkpoint to bound recovery time. */
+            if (doCheckpoint &&
+                !isReadOnly &&
+                (envState != DbEnvState.INVALID) &&
+                logManager.getLastLsnAtRecovery() !=
+                fileManager.getLastUsedLsn()) {
+
+                /*
+                 * Force a checkpoint. Don't allow deltas (minimize recovery
+                 * time) because they cause inefficiencies for two reasons: (1)
+                 * recovering BINDeltas causes extra random I/O in order to
+                 * reconstitute BINs, which can greatly increase recovery time,
+                 * and (2) logging deltas during close causes redundant logging
+                 * by the full checkpoint after recovery.
+                 */
+                CheckpointConfig ckptConfig = new CheckpointConfig();
+                ckptConfig.setForce(true);
+                ckptConfig.setMinimizeRecoveryTime(true);
+                try {
+                    invokeCheckpoint
+                        (ckptConfig,
+                         false, // flushAll
+                         "close");
+                } catch (DatabaseException e) {
+                    errors.append("\nException performing checkpoint: ");
+                    e.printStackTrace(errors);
+                    errors.println();
+                }
+            }
+
+            /* Flush log. */
+            Tracer.trace(Level.FINE, this,
+                         "About to shutdown daemons for Env " + envHome);
+            try {
+                shutdownDaemons();
+            } catch (InterruptedException e) {
+                errors.append("\nException shutting down daemon threads: ");
+                e.printStackTrace(errors);
+                errors.println();
+            }
+
+            try {
+                logManager.flush();
+            } catch (DatabaseException e) {
+                errors.append("\nException flushing log manager: ");
+                e.printStackTrace(errors);
+                errors.println();
+            }
+
+            try {
+                fileManager.clear();
+            } catch (IOException e) {
+                errors.append("\nException clearing file manager: ");
+                e.printStackTrace(errors);
+                errors.println();
+            } catch (DatabaseException e) {
+                errors.append("\nException clearing file manager: ");
+                e.printStackTrace(errors);
+                errors.println();
+            }
+
+            try {
+                fileManager.close();
+            } catch (IOException e) {
+                errors.append("\nException closing file manager: ");
+                e.printStackTrace(errors);
+                errors.println();
+            } catch (DatabaseException e) {
+                errors.append("\nException closing file manager: ");
+                e.printStackTrace(errors);
+                errors.println();
+            }
+
+            /*
+             * Close the memory budgets on these components before the
+             * INList is forcibly released and the treeAdmin budget is
+             * cleared.
+             */
+            dbMapTree.close();
+            cleaner.close();
+
+            try {
+                inMemoryINs.clear();
+            } catch (DatabaseException e) {
+                errors.append("\nException clearing the INList: ");
+                e.printStackTrace(errors);
+                errors.println();
+            }
+
+            closeLogger();
+
+            if (doCheckLeaks &&
+                (envState != DbEnvState.INVALID)) {
+
+                try {
+                    checkLeaks();
+                } catch (DatabaseException e) {
+                    errors.append("\nException performing validity checks: ");
+                    e.printStackTrace(errors);
+                    errors.println();
+                }
+            }
+        } finally {
+            envState = DbEnvState.CLOSED;
+        }
+
+        if (errorStringWriter.getBuffer().length() > 0 &&
+            savedInvalidatingException == null) {
+
+            /* Don't whine again if we've already whined. */
+            throw new RunRecoveryException(this, errorStringWriter.toString());
+        }
+    }
+
+    /*
+     * Clear as many resources as possible, even in the face of an environment
+     * that has received a fatal error, in order to support reopening the
+     * environment in the same JVM.
+     */
+    public void closeAfterRunRecovery()
+        throws DatabaseException {
+
+        /* Calls doCloseAfterRunRecovery while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().closeEnvironmentAfterRunRecovery(this);
+    }
+
+    /**
+     * This method must be called while synchronized on DbEnvPool.
+     */
+    synchronized void doCloseAfterRunRecovery() {
+        try {
+            shutdownDaemons();
+        } catch (InterruptedException IE) {
+            /* Klocwork - ok */
+        }
+
+        try {
+            fileManager.clear();
+        } catch (Exception e) {
+            /* Klocwork - ok */
+        }
+
+        try {
+            fileManager.close();
+        } catch (Exception e) {
+            /* Klocwork - ok */
+        }
+    }
+
+    synchronized void incReferenceCount() {
+        referenceCount++;
+    }
+
+    /**
+     * Returns true if the environment should be closed.
+     */
+    synchronized boolean decReferenceCount() {
+        return (--referenceCount <= 0);
+    }
+
+    public static int getThreadLocalReferenceCount() {
+        return threadLocalReferenceCount;
+    }
+
+    static synchronized void incThreadLocalReferenceCount() {
+        threadLocalReferenceCount++;
+    }
+
+    static synchronized void decThreadLocalReferenceCount() {
+        threadLocalReferenceCount--;
+    }
+
+    public static boolean getNoComparators() {
+        return noComparators;
+    }
+
+    /**
+     * Debugging support. Check for leaked locks and transactions.
+     */
+    private void checkLeaks()
+        throws DatabaseException {
+
+        /* Only enabled if this check leak flag is true. */
+        if (!configManager.getBoolean(EnvironmentParams.ENV_CHECK_LEAKS)) {
+            return;
+        }
+
+        boolean clean = true;
+        StatsConfig statsConfig = new StatsConfig();
+
+        /* Fast stats will not return NTotalLocks below. */
+        statsConfig.setFast(false);
+
+        LockStats lockStat = lockStat(statsConfig);
+        if (lockStat.getNTotalLocks() != 0) {
+            clean = false;
+            System.err.println("Problem: " + lockStat.getNTotalLocks() +
+                               " locks left");
+            txnManager.getLockManager().dump();
+        }
+
+        TransactionStats txnStat = txnStat(statsConfig);
+        if (txnStat.getNActive() != 0) {
+            clean = false;
+            System.err.println("Problem: " + txnStat.getNActive() +
+                               " txns left");
+            TransactionStats.Active[] active = txnStat.getActiveTxns();
+            if (active != null) {
+                for (int i = 0; i < active.length; i += 1) {
+                    System.err.println(active[i]);
+                }
+            }
+        }
+
+        if (LatchSupport.countLatchesHeld() > 0) {
+            clean = false;
+            System.err.println("Some latches held at env close.");
+            LatchSupport.dumpLatchesHeld();
+        }
+
+        long memoryUsage = memoryBudget.getVariableCacheUsage();
+        if (memoryUsage != 0) {
+            clean = false;
+            System.err.println("Local Cache Usage = " +  memoryUsage);
+            System.err.println("Tree Memory Usage = " +
+                               memoryBudget.getTreeMemoryUsage());
+            System.err.println("Admin Memory Usage = " +
+                               memoryBudget.getAdminMemoryUsage());
+            System.err.println("Tree Admin Memory Usage = " +
+                               memoryBudget.getTreeAdminMemoryUsage());
+            System.err.println("Lock Memory Usage = " +
+                               memoryBudget.getLockMemoryUsage());
+            EnvironmentStats memoryStats = new EnvironmentStats();
+            memoryBudget.loadStats(new StatsConfig(),
+                                   memoryStats);
+            System.err.println(memoryStats);
+        }
+
+        boolean assertionsEnabled = false;
+        assert assertionsEnabled = true; // Intentional side effect.
+        if (!clean && assertionsEnabled) {
+            throw new DatabaseException("Lock, transaction, latch or memory " +
+                                        "left behind at environment close");
+        }
+    }
+
+    /**
+     * Invoke a checkpoint programmatically. Note that only one checkpoint may
+     * run at a time.
+     */
+    public boolean invokeCheckpoint(CheckpointConfig config,
+                                    boolean flushAll,
+                                    String invokingSource)
+        throws DatabaseException {
+
+        if (checkpointer != null) {
+            checkpointer.doCheckpoint(config, flushAll, invokingSource);
+            return true;
+        } else {
+            return false;
+        }
+    }
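+
+    /*
+     * Example (a sketch): force a checkpoint from calling code, mirroring
+     * what doClose does above; the invokingSource string is arbitrary.
+     *
+     *   CheckpointConfig config = new CheckpointConfig();
+     *   config.setForce(true);
+     *   boolean ran = envImpl.invokeCheckpoint(config,
+     *                                          false /*flushAll*/,
+     *                                          "example");
+     */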
+
+    /**
+     * Flip the log to a new file, forcing an fsync.  Return the LSN of the
+     * trace record in the new file.
+     */
+    public long forceLogFileFlip()
+        throws DatabaseException {
+
+        return logManager.logForceFlip(
+                      new SingleItemEntry(LogEntryType.LOG_TRACE,
+                                          new Tracer("File Flip")));
+    }
+
+    /**
+     * Invoke a compress programmatically. Note that only one compress may run
+     * at a time.
+     */
+    public boolean invokeCompressor()
+        throws DatabaseException {
+
+        if (inCompressor != null) {
+            inCompressor.doCompress();
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    public void invokeEvictor()
+        throws DatabaseException {
+
+        if (evictor != null) {
+            evictor.doEvict(Evictor.SOURCE_MANUAL);
+        }
+    }
+
+    public int invokeCleaner()
+        throws DatabaseException {
+
+        if (isReadOnly || isMemOnly) {
+            throw new IllegalStateException
+                ("Log cleaning not allowed in a read-only or memory-only " +
+                 "environment");
+        }
+        if (cleaner != null) {
+            return cleaner.doClean(true,   // cleanMultipleFiles
+                                   false); // forceCleaning
+        } else {
+            return 0;
+        }
+    }
+
+    private void requestShutdownDaemons() {
+
+        closing = true;
+
+        if (inCompressor != null) {
+            inCompressor.requestShutdown();
+        }
+
+        /*
+         * Don't shut down the shared cache evictor here.  It is shut down
+         * when
+         * the last shared cache environment is removed in DbEnvPool.
+         */
+        if (evictor != null && !sharedCache) {
+            evictor.requestShutdown();
+        }
+
+        if (checkpointer != null) {
+            checkpointer.requestShutdown();
+        }
+
+        if (cleaner != null) {
+            cleaner.requestShutdown();
+        }
+    }
+
+    /**
+     * For unit testing -- shuts down daemons completely but leaves environment
+     * usable since environment references are not nulled out.
+     */
+    public void stopDaemons()
+        throws InterruptedException {
+
+        if (inCompressor != null) {
+            inCompressor.shutdown();
+        }
+        if (evictor != null) {
+            evictor.shutdown();
+        }
+        if (checkpointer != null) {
+            checkpointer.shutdown();
+        }
+        if (cleaner != null) {
+            cleaner.shutdown();
+        }
+    }
+
+    /**
+     * Ask all daemon threads to shut down.
+     */
+    private void shutdownDaemons()
+        throws InterruptedException {
+
+        shutdownINCompressor();
+
+        /*
+         * The cleaner has to be shut down before the checkpointer because the
+         * former calls the latter.
+         */
+        shutdownCleaner();
+        shutdownCheckpointer();
+
+        /*
+         * The evictor has to be shut down last because the other daemons may
+         * change the memory usage, which can result in a notification to the
+         * evictor.
+         */
+        shutdownEvictor();
+    }
+
+    void shutdownINCompressor()
+        throws InterruptedException {
+
+        if (inCompressor != null) {
+            inCompressor.shutdown();
+
+            /*
+             * If the daemon thread doesn't shut down for any reason, at least clear
+             * the reference to the environment so it can be GC'd.
+             */
+            inCompressor.clearEnv();
+            inCompressor = null;
+        }
+    }
+
+    void shutdownEvictor()
+        throws InterruptedException {
+
+        if (evictor != null) {
+            if (sharedCache) {
+
+                /*
+                 * Don't shut down the SharedEvictor here.  It is shut down when
+                 * the last shared cache environment is removed in DbEnvPool.
+                 * Instead, remove this environment from the SharedEvictor's
+                 * list so we won't try to evict from a closing/closed
+                 * environment.  Note that we do this after the final checkpoint
+                 * so that eviction is possible during the checkpoint, and just
+                 * before deconstructing the environment.  Leave the evictor
+                 * field intact so DbEnvPool can get it.
+                 */
+                evictor.removeEnvironment(this);
+            } else {
+                evictor.shutdown();
+
+                /*
+                 * If the daemon thread doesn't shut down for any reason, at least
+                 * clear the reference to the environment so it can be GC'd.
+                 */
+                evictor.clearEnv();
+                evictor = null;
+            }
+        }
+    }
+
+    void shutdownCheckpointer()
+        throws InterruptedException {
+
+        if (checkpointer != null) {
+            checkpointer.shutdown();
+
+            /*
+             * If the daemon thread doesn't shut down for any reason, at least clear
+             * the reference to the environment so it can be GC'd.
+             */
+            checkpointer.clearEnv();
+            checkpointer = null;
+        }
+    }
+
+    /**
+     * public for unit tests.
+     */
+    public void shutdownCleaner()
+        throws InterruptedException {
+
+        if (cleaner != null) {
+            cleaner.shutdown();
+
+            /*
+             * Don't call clearEnv -- Cleaner.shutdown does this for each
+             * cleaner thread.  Don't set the cleaner field to null because we
+             * use it to get the utilization profile and tracker.
+             */
+        }
+    }
+
+    public boolean isNoLocking() {
+        return isNoLocking;
+    }
+
+    public boolean isTransactional() {
+        return isTransactional;
+    }
+
+    public boolean isReadOnly() {
+        return isReadOnly;
+    }
+
+    public boolean isMemOnly() {
+        return isMemOnly;
+    }
+
+    /*
+     * FUTURE: change this to be non-static. It's static now just to avoid
+     * passing down parameters in various places.
+     */
+    public static boolean getFairLatches() {
+        return fairLatches;
+    }
+
+    public static boolean getSharedLatches() {
+        return useSharedLatchesForINs;
+    }
+
+    /**
+     * Returns whether DB/MapLN eviction is enabled.
+     */
+    public boolean getDbEviction() {
+        return dbEviction;
+    }
+
+    public static int getAdler32ChunkSize() {
+        return adler32ChunkSize;
+    }
+
+    public boolean getSharedCache() {
+        return sharedCache;
+    }
+
+    /**
+     * Transactional services.
+     */
+    public Txn txnBegin(Transaction parent, TransactionConfig txnConfig)
+        throws DatabaseException {
+
+        if (!isTransactional) {
+            throw new UnsupportedOperationException
+                ("beginTransaction called, " +
+                                        " but Environment was not opened "+
+                 "with transactional capabilities");
+        }
+
+        return txnManager.txnBegin(parent, txnConfig);
+    }
+
+    /* Services. */
+    public LogManager getLogManager() {
+        return logManager;
+    }
+
+    public FileManager getFileManager() {
+        return fileManager;
+    }
+
+    public DbTree getDbTree() {
+        return dbMapTree;
+    }
+
+    /**
+     * Returns the config manager for the current base configuration.
+     *
+     * <p>The configuration can change, but changes are made by replacing the
+     * config manager object with a new one.  To use a consistent set of
+     * properties, call this method once and query the returned manager
+     * repeatedly for each property, rather than getting the config manager via
+     * this method for each property individually.</p>
+     */
+    public DbConfigManager getConfigManager() {
+        return configManager;
+    }
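+
+    /*
+     * Per the note above, take one snapshot and query it repeatedly so all
+     * values come from the same configuration (sketch):
+     *
+     *   DbConfigManager cm = envImpl.getConfigManager();
+     *   int readSize = cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+     *   boolean checkLeaks = cm.getBoolean(EnvironmentParams.ENV_CHECK_LEAKS);
+     */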
+
+    public NodeSequence getNodeSequence() {
+        return nodeSequence;
+    }
+
+    /**
+     * Clones the current configuration.
+     */
+    public EnvironmentConfig cloneConfig() {
+        return DbInternal.cloneConfig(configManager.getEnvironmentConfig());
+    }
+
+    /**
+     * Clones the current mutable configuration.
+     */
+    public EnvironmentMutableConfig cloneMutableConfig() {
+        return DbInternal.cloneMutableConfig
+            (configManager.getEnvironmentConfig());
+    }
+
+    /**
+     * Throws an exception if an immutable property is changed.
+     */
+    public void checkImmutablePropsForEquality(EnvironmentConfig config)
+        throws IllegalArgumentException {
+
+        DbInternal.checkImmutablePropsForEquality
+            (configManager.getEnvironmentConfig(), config);
+    }
+
+    /**
+     * Changes the mutable config properties that are present in the given
+     * config, and notifies all config observers.
+     */
+    public void setMutableConfig(EnvironmentMutableConfig config)
+        throws DatabaseException {
+
+        /* Calls doSetMutableConfig while synchronized on DbEnvPool. */
+        DbEnvPool.getInstance().setMutableConfig(this, config);
+    }
+
+    /**
+     * This method must be called while synchronized on DbEnvPool.
+     */
+    synchronized void doSetMutableConfig(EnvironmentMutableConfig config)
+        throws DatabaseException {
+
+        /* Clone the current config. */
+        EnvironmentConfig newConfig =
+            DbInternal.cloneConfig(configManager.getEnvironmentConfig());
+
+        /* Copy in the mutable props. */
+        DbInternal.copyMutablePropsTo(config, newConfig);
+
+        /*
+         * Update the current config and notify observers.  The config manager
+         * is replaced with a new instance that uses the new configuration.
+         * This avoids synchronization issues: other threads that hold a
+         * reference to the old configuration object are not affected.
+         *
+         * Notify listeners in reverse order of registration so that the
+         * environment listener is notified last and it can start daemon
+         * threads after they are configured.
+         */
+        configManager = new DbConfigManager(newConfig);
+        for (int i = configObservers.size() - 1; i >= 0; i -= 1) {
+            EnvConfigObserver o = configObservers.get(i);
+            o.envConfigUpdate(configManager, newConfig);
+        }
+    }
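+
+    /*
+     * A minimal observer (a sketch; the envConfigUpdate signature is
+     * inferred from the call in doSetMutableConfig above):
+     *
+     *   class MyComponent implements EnvConfigObserver {
+     *       public void envConfigUpdate(DbConfigManager mgr,
+     *                                   EnvironmentMutableConfig newConfig)
+     *           throws DatabaseException {
+     *           // re-read any mutable settings this component caches
+     *       }
+     *   }
+     *   envImpl.addConfigObserver(new MyComponent());
+     */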
+
+    public void setExceptionListener(ExceptionListener exceptionListener) {
+
+        this.exceptionListener = exceptionListener;
+    }
+
+    public ExceptionListener getExceptionListener() {
+        return exceptionListener;
+    }
+
+    /**
+     * Adds an observer of mutable config changes.
+     */
+    public synchronized void addConfigObserver(EnvConfigObserver o) {
+        configObservers.add(o);
+    }
+
+    /**
+     * Removes an observer of mutable config changes.
+     */
+    public synchronized void removeConfigObserver(EnvConfigObserver o) {
+        configObservers.remove(o);
+    }
+
+    public INList getInMemoryINs() {
+        return inMemoryINs;
+    }
+
+    public TxnManager getTxnManager() {
+        return txnManager;
+    }
+
+    public Checkpointer getCheckpointer() {
+        return checkpointer;
+    }
+
+    public Cleaner getCleaner() {
+        return cleaner;
+    }
+
+    public MemoryBudget getMemoryBudget() {
+        return memoryBudget;
+    }
+
+    /**
+     * @return environment Logger, for use in debugging output.
+     */
+    public Logger getLogger() {
+        return envLogger;
+    }
+
+    /*
+     * Verification; must be run while the system is quiescent.
+     */
+    public boolean verify(VerifyConfig config, PrintStream out)
+        throws DatabaseException {
+
+        /* For now, verify all databases */
+        return dbMapTree.verify(config, out);
+    }
+
+    public void verifyCursors()
+        throws DatabaseException {
+
+        inCompressor.verifyCursors();
+    }
+
+    /*
+     * Statistics
+     */
+
+    /**
+     * Retrieve and return stat information.
+     */
+    public synchronized EnvironmentStats loadStats(StatsConfig config)
+        throws DatabaseException {
+
+        EnvironmentStats stats = new EnvironmentStats();
+        inCompressor.loadStats(config, stats);
+        evictor.loadStats(config, stats);
+        checkpointer.loadStats(config, stats);
+        cleaner.loadStats(config, stats);
+        logManager.loadStats(config, stats);
+        memoryBudget.loadStats(config, stats);
+        return stats;
+    }
+
+    /**
+     * Retrieve lock statistics
+     */
+    public synchronized LockStats lockStat(StatsConfig config)
+        throws DatabaseException {
+
+        return txnManager.lockStat(config);
+    }
+
+    /**
+     * Retrieve txn statistics
+     */
+    public synchronized TransactionStats txnStat(StatsConfig config)
+        throws DatabaseException {
+
+        return txnManager.txnStat(config);
+    }
+
+    public int getINCompressorQueueSize()
+        throws DatabaseException {
+
+        return inCompressor.getBinRefQueueSize();
+    }
+
+    /**
+     * Info about the last recovery
+     */
+    public RecoveryInfo getLastRecoveryInfo() {
+        return lastRecoveryInfo;
+    }
+
+    /**
+     * Get the environment home directory.
+     */
+    public File getEnvironmentHome() {
+        return envHome;
+    }
+
+    public long getTxnTimeout() {
+        return txnTimeout;
+    }
+
+    public long getLockTimeout() {
+        return lockTimeout;
+    }
+
+    /**
+     * Returns the shared trigger latch.
+     */
+    public SharedLatch getTriggerLatch() {
+        return triggerLatch;
+    }
+
+    public Evictor getEvictor() {
+        return evictor;
+    }
+
+    void alertEvictor() {
+        if (evictor != null) {
+            evictor.alert();
+        }
+    }
+
+    /**
+     * Return true if this environment is part of a replication group.
+     */
+    public boolean isReplicated() {
+        return repInstance != null;
+    }
+
+    public ReplicatorInstance getReplicator() {
+        return repInstance;
+    }
+
+    public void setReplicator(ReplicatorInstance repInstance) {
+        this.repInstance = repInstance;
+    }
+
+    /**
+     * For stress testing.  Should only ever be called from an assert.
+     */
+    public static boolean maybeForceYield() {
+        if (forcedYield) {
+            Thread.yield();
+        }
+        return true;      // so assert doesn't fire
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/GetMode.java b/src/com/sleepycat/je/dbi/GetMode.java
new file mode 100644
index 0000000000000000000000000000000000000000..b0434502c8d7a45798b88578d71f3ab620353e5f
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/GetMode.java
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: GetMode.java,v 1.11.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+/**
+ * Internal class used to distinguish which variety of getXXX()
+ * Cursor.retrieveNext should use.
+ */
+public class GetMode {
+    private String name;
+    private boolean forward;
+
+    private GetMode(String name, boolean forward) {
+        this.name = name;
+        this.forward = forward;
+    }
+
+    public static final GetMode NEXT =       new GetMode("NEXT", true);
+    public static final GetMode PREV =       new GetMode("PREV", false);
+    public static final GetMode NEXT_DUP =   new GetMode("NEXT_DUP", true);
+    public static final GetMode PREV_DUP =   new GetMode("PREV_DUP", false);
+    public static final GetMode NEXT_NODUP = new GetMode("NEXT_NODUP", true);
+    public static final GetMode PREV_NODUP = new GetMode("PREV_NODUP", false);
+
+    public final boolean isForward() {
+        return forward;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/INList.java b/src/com/sleepycat/je/dbi/INList.java
new file mode 100644
index 0000000000000000000000000000000000000000..e985fa06157a1cad7b0a2c10e48e015cd9eeb78c
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/INList.java
@@ -0,0 +1,380 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INList.java,v 1.59.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * The INList is a list of in-memory INs for a given environment.
+ */
+public class INList implements Iterable<IN> {
+    private EnvironmentImpl envImpl;
+    private boolean updateMemoryUsage;
+    private boolean enabled;
+    private volatile boolean recalcInProgress;
+    private volatile boolean recalcToggle;
+    private boolean recalcConsistent;
+    private AtomicLong recalcTotal;
+
+    /**
+     * We use a Map of INs because there is no ConcurrentHashSet, only a
+     * ConcurrentHashMap.  But this map is treated as a set of INs with the
+     * same object as the key and the value.
+     */
+    private Map<IN,IN> ins = null;
+
+    INList(EnvironmentImpl envImpl) {
+        init(envImpl);
+        ins = new ConcurrentHashMap<IN,IN>();
+        updateMemoryUsage = true;
+        enabled = false;
+    }
+
+    /**
+     * Used only by tree verifier when validating INList.
+     */
+    public INList(INList orig, EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        init(envImpl);
+        ins = new ConcurrentHashMap<IN,IN>(orig.ins);
+        updateMemoryUsage = false;
+        enabled = true;
+    }
+
+    private void init(EnvironmentImpl envImpl) {
+        this.envImpl = envImpl;
+        recalcInProgress = false;
+        recalcToggle = false;
+        recalcConsistent = true;
+        recalcTotal = new AtomicLong();
+    }
+
+    /*
+     * Ok to be imprecise.
+     */
+    public int getSize() {
+        return ins.size();
+    }
+
+    public boolean contains(IN in) {
+        return ins.containsKey(in);
+    }
+
+    /**
+     * Enable the INList during recovery.
+     */
+    public void enable() {
+        assert ins.isEmpty();
+        assert !enabled;
+        enabled = true;
+    }
+
+    /**
+     * An IN has just come into memory; add it to the list.
+     */
+    public void add(IN in)
+        throws DatabaseException {
+
+        /* Ignore additions until the INList is enabled. */
+        if (!enabled) {
+            return;
+        }
+
+        envImpl.getEvictor().noteINListChange(1 /*nINs*/);
+
+        IN oldValue = ins.put(in, in);
+
+        assert oldValue == null : "failed adding IN " + in.getNodeId();
+
+        if (updateMemoryUsage) {
+            long size = in.getBudgetedMemorySize();
+            memRecalcAdd(in, size);
+            envImpl.getMemoryBudget().updateTreeMemoryUsage(size);
+            in.setInListResident(true);
+        }
+    }
+
+    /**
+     * An IN is being evicted.
+     */
+    public void remove(IN in)
+        throws DatabaseException {
+
+        envImpl.getEvictor().noteINListChange(1 /*nINs*/);
+
+        IN oldValue = ins.remove(in);
+        assert oldValue != null;
+
+        if (updateMemoryUsage) {
+            long delta = 0 - in.getBudgetedMemorySize();
+            memRecalcRemove(in, delta);
+            envImpl.getMemoryBudget().updateTreeMemoryUsage(delta);
+            in.setInListResident(false);
+        }
+    }
+
+    /**
+     * Return an iterator over the main 'ins' set.  The returned iterator may
+     * or may not show elements added or removed after the iterator is
+     * created.
+     *
+     * @return an iterator over the main 'ins' set.
+     */
+    public Iterator<IN> iterator() {
+        return new Iter();
+    }
+
+    /**
+     * A direct Iterator on the INList may return INs that have been removed,
+     * since the underlying ConcurrentHashMap doesn't block changes to the list
+     * during the iteration.  This Iterator implementation wraps a direct
+     * Iterator and returns only those INs that are on the INList.
+     *
+     * Note that this doesn't guarantee that an IN will not be removed from the
+     * INList after being returned by this iterator.  But filtering out the INs
+     * already removed will avoid wasting effort in the evictor, checkpointer,
+     * and other places where INs are iterated and processed.
+     */
+    private class Iter implements Iterator<IN> {
+
+        private Iterator<IN> baseIter;
+        private IN next;
+        private IN lastReturned;
+
+        private Iter() {
+            baseIter = ins.keySet().iterator();
+        }
+
+        public boolean hasNext() {
+            if (next != null) {
+                return true;
+            } else {
+                return advance();
+            }
+        }
+
+        public IN next() {
+            if (next == null) {
+                if (!advance()) {
+                    throw new NoSuchElementException();
+                }
+            }
+            lastReturned = next;
+            next = null;
+            return lastReturned;
+        }
+
+        private boolean advance() {
+            while (baseIter.hasNext()) {
+                IN in = baseIter.next();
+                if (in.getInListResident()) {
+                    next = in;
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        public void remove() {
+            if (lastReturned != null) {
+                envImpl.getEvictor().noteINListChange(1 /*nINs*/);
+                ins.remove(lastReturned);
+                lastReturned = null;
+            } else {
+                throw new IllegalStateException();
+            }
+        }
+    }
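+
+    /*
+     * Usage sketch: because INList implements Iterable<IN>, callers such as
+     * the evictor can use a plain for-each loop; INs already removed from
+     * the list are filtered out by the Iter class above.
+     *
+     *   for (IN in : inList) {
+     *       // process a resident IN; it may still be removed concurrently
+     *   }
+     */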
+
+    /**
+     * Clear the entire list at shutdown and release its portion of the memory
+     * budget.
+     */
+    public void clear() 
+        throws DatabaseException {
+
+        if (envImpl.getEvictor() != null) {
+            envImpl.getEvictor().noteINListChange(getSize() /*nINs*/);
+        }
+
+        ins.clear();
+
+        if (updateMemoryUsage) {
+            MemoryBudget mb = envImpl.getMemoryBudget();
+            mb.refreshTreeMemoryUsage(0);
+            mb.refreshTreeAdminMemoryUsage(0);            
+        }
+    }
+
+    public void dump() {
+        System.out.println("size=" + getSize());
+        for (IN theIN : ins.keySet()) {
+            System.out.println("db=" + theIN.getDatabase().getId() +
+                               " nid=: " + theIN.getNodeId() + "/" +
+                               theIN.getLevel());
+        }
+    }
+
+    /*
+     * The following set of memRecalc methods allow an iteration over the
+     * INList to recalculate the tree memory budget.  This is done during a
+     * checkpoint by the DirtyINMap class.
+     *
+     * We flip the INList toggle, recalcToggle, at the beginning of the recalc.
+     * At that point, if recalcConsistent is true, all IN toggles have the
+     * opposite value of recalcToggle.  As we process INs we flip their
+     * toggles.  We can tell whether we have already processed an IN by
+     * comparing its toggle to recalcToggle.  If they are equal, we have
+     * already processed the IN.
+     *
+     * The scenarios below describe how the recalcTotal is updated for a
+     * particular IN.
+     *
+     * Scenario #1: IN size is unchanged during the iteration
+     *  begin
+     *   iterate -- add total IN size, mark processed
+     *  end
+     *
+     * Scenario #2: IN size is updated during the iteration
+     *  begin
+     *   update  -- do not add delta because IN is not yet processed
+     *   iterate -- add total IN size, mark processed
+     *   update  -- do add delta because IN was already processed
+     *  end
+     *
+     * Scenario #3: IN is added during the iteration but not iterated
+     *  begin
+     *   add -- add IN size, mark processed
+     *  end
+     *
+     * Scenario #4: IN is added during the iteration and is iterated
+     *  begin
+     *   add     -- add IN size, mark processed
+     *   iterate -- do not add size because IN was already processed
+     *  end
+     *
+     * Scenario #5: IN is removed during the iteration but not iterated
+     *  begin
+     *   remove  -- do not add delta because IN is not yet processed
+     *  end
+     *
+     * Scenario #6: IN is removed during the iteration and is iterated
+     *  begin
+     *   iterate -- add total IN size, mark processed
+     *   remove  -- add delta because IN was already processed
+     *  end
+     *
+     * If recalcConsistent is false, the last attempted recalc was not
+     * completed.  In that case the next reset pass will simply set the toggle
+     * in every IN so that they are consistent.  The pass following that will
+     * then do a normal recalc.  At the end of any pass, we only update the
+     * memory budget if the last recalc was consistent (or this is the first
+     * recalc), and the current recalc is completed.
+     *
+     * We do not synchronize when changing state variables.  In memRecalcBegin
+     * and memRecalcEnd it is possible for an IN to be added or removed by
+     * another thread in the window between setting recalcInProgress and
+     * setting or getting the recalcTotal.  In memRecalcUpdate a similar thing
+     * can happen in the window between checking the IN toggle and adding to
+     * recalcTotal, if memRecalcIterate is called by the checkpointer in that
+     * window.  If this occurs, the recalc total can be inaccurate by the amount
+     * that was changed in the window.  We have chosen to live with this
+     * possible inaccuracy rather than synchronize these methods.  We would
+     * have to synchronize every time we add/remove INs and update the size of
+     * an IN, which could introduce a new point of contention.
+     */
+
+    /**
+     * We are starting the iteration of the INList.  Flip the INList toggle
+     * and set the total amount to zero.
+     *
+     * After calling this method, memRecalcEnd must be called in a finally
+     * block.  If it is not called, internal state will be invalid.
+     */
+    public void memRecalcBegin() {
+        recalcTotal.set(0);
+        recalcInProgress = true;
+        recalcToggle = !recalcToggle;
+    }
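+
+    /*
+     * Shape of a full recalc pass, per the javadoc above (a sketch; the
+     * real driver is DirtyINMap during a checkpoint):
+     *
+     *   boolean completed = false;
+     *   inList.memRecalcBegin();
+     *   try {
+     *       for (IN in : inList) {
+     *           inList.memRecalcIterate(in);
+     *       }
+     *       completed = true;
+     *   } finally {
+     *       inList.memRecalcEnd(completed);
+     *   }
+     */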
+
+    /**
+     * An IN was encountered during the iteration through the entire INList.
+     * Add its size to the recalc total if we have not already processed it,
+     * and mark it as processed.  If it was already processed, memRecalcAdd
+     * must have been called for the IN when it was added to the INList during
+     * the iteration.
+     */
+    public void memRecalcIterate(IN in) {
+        assert recalcInProgress;
+        if (recalcConsistent &&
+            recalcToggle != in.getRecalcToggle()) {
+            long delta = in.getBudgetedMemorySize();
+            recalcTotal.addAndGet(delta);
+        }
+        in.setRecalcToggle(recalcToggle);
+    }
+
+    /**
+     * An IN is being added to the INList.  Add its size to the recalc total
+     * and mark it as processed.  It cannot have already been processed since
+     * it is a new IN.
+     */
+    private void memRecalcAdd(IN in, long size) {
+        if (recalcInProgress &&
+            recalcConsistent) {
+            recalcTotal.addAndGet(size);
+        }
+        in.setRecalcToggle(recalcToggle);
+    }
+
+    /**
+     * An IN is being removed from the INList.  Add the delta to the recalc
+     * total if it was already processed.  If we have not yet processed it,
+     * it is not included in the total.
+     */
+    private void memRecalcRemove(IN in, long delta) {
+        memRecalcUpdate(in, delta); // Remove and update are the same
+    }
+
+    /**
+     * The size of an IN is changing.  Add the delta to the recalc total if
+     * we have already processed the IN.  If we have not yet processed it, its
+     * total size will be added by memRecalcIterate.
+     */
+    public void memRecalcUpdate(IN in, long delta) {
+        if (recalcInProgress &&
+            recalcConsistent &&
+            recalcToggle == in.getRecalcToggle()) {
+            recalcTotal.addAndGet(delta);
+        }
+    }
+
+    /**
+     * The reset operation is over.  Only update the tree budget if the
+     * iteration was completed and the state was consistent prior to this reset
+     * operation.
+     */
+    public void memRecalcEnd(boolean completed) {
+        assert recalcInProgress;
+        if (completed &&
+            recalcConsistent) {
+            envImpl.getMemoryBudget().refreshTreeMemoryUsage
+                (recalcTotal.get());
+        }
+        recalcInProgress = false;
+        recalcConsistent = completed;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/MemoryBudget.java b/src/com/sleepycat/je/dbi/MemoryBudget.java
new file mode 100644
index 0000000000000000000000000000000000000000..34fa9e237670c08342e2cb6f26e060f1fbe6316a
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/MemoryBudget.java
@@ -0,0 +1,1200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MemoryBudget.java,v 1.86.2.4 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * MemoryBudget calculates the available memory for JE and how to apportion
+ * it between cache and log buffers. It is meant to centralize all memory
+ * calculations. Objects that ask for memory budgets should get settings from
+ * this class, rather than using the configuration parameter values directly.
+ */
+public class MemoryBudget implements EnvConfigObserver {
+
+    /*
+     * CLEANUP_DONE can be set to true to enable the negative-size assertions
+     * while unit test debugging of the memory accounting is still in
+     * progress. When we do the final regression, this flag should be removed
+     * so that the assertions are always enforced.
+     */
+    public static boolean CLEANUP_DONE = false;
+
+    /*
+     * These DEBUG variables are public so unit tests can easily turn them
+     * on and off for different sections of code.
+     */
+    public static boolean DEBUG_ADMIN = Boolean.getBoolean("memAdmin");
+    public static boolean DEBUG_LOCK = Boolean.getBoolean("memLock");
+    public static boolean DEBUG_TXN = Boolean.getBoolean("memTxn");
+    public static boolean DEBUG_TREEADMIN = Boolean.getBoolean("memTreeAdmin");
+    public static boolean DEBUG_TREE = Boolean.getBoolean("memTree");
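+
+    /*
+     * For example, a test run can enable these at startup via the
+     * corresponding system properties, e.g.:
+     *
+     *   java -DmemTree=true -DmemTreeAdmin=true ...
+     */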
+
+    /*
+     * Object overheads. These are set statically with advance measurements.
+     * Java doesn't provide a way of assessing object size dynamically. These
+     * overheads will not be precise, but are close enough to let the system
+     * behave predictably.
+     *
+     * _32 values are the same on Windows and Solaris.
+     * _64 values are from 1.5.0_05 on Solaris.
+     * _Dalvik values are from running the Sizeof test on Android.
+     *
+     * Specifically:
+     *
+     * java.vm.version=1.5.0_05_b05 os.name=SunOS
+     * java.vm.version=1.4.2_05_b04 os.name=SunOS
+     * java.vm.version=1.5.0_04_b05, os.name=Windows XP
+     * java.vm.version=1.4.2_06-b03, os.name=Windows XP
+     *
+     * The integer following the // below is the Sizeof argument used to
+     * compute the value.
+     */
+
+    // 7
+    private final static int LONG_OVERHEAD_32 = 16;
+    private final static int LONG_OVERHEAD_64 = 24;
+    private final static int LONG_OVERHEAD_DALVIK = 24;
+
+    // 8
+    private final static int ARRAY_OVERHEAD_32 = 16;
+    private final static int ARRAY_OVERHEAD_64 = 24;
+    private final static int ARRAY_OVERHEAD_DALVIK = 24;
+
+    private final static int ARRAY_SIZE_INCLUDED_32 = 4;
+    private final static int ARRAY_SIZE_INCLUDED_64 = 0;
+    private final static int ARRAY_SIZE_INCLUDED_DALVIK = 4;
+
+    // 2
+    private final static int OBJECT_OVERHEAD_32 = 8;
+    private final static int OBJECT_OVERHEAD_64 = 16;
+    private final static int OBJECT_OVERHEAD_DALVIK = 16;
+
+    // (4 - ARRAY_OVERHEAD) / 256
+    // 64b: 4 is 2072
+    // Dalvik: 4 is 1048
+    private final static int OBJECT_ARRAY_ITEM_OVERHEAD_32 = 4;
+    private final static int OBJECT_ARRAY_ITEM_OVERHEAD_64 = 8;
+    private final static int OBJECT_ARRAY_ITEM_OVERHEAD_DALVIK = 4;
+
+    // 20
+    private final static int HASHMAP_OVERHEAD_32 = 120;
+    private final static int HASHMAP_OVERHEAD_64_15 = 216;
+    private final static int HASHMAP_OVERHEAD_64_16 = 218;
+    private final static int HASHMAP_OVERHEAD_DALVIK = 128;
+
+    // 21 - OBJECT_OVERHEAD - HASHMAP_OVERHEAD
+    // 64b: 21 is max(280,...,287) on Linux/Solaris 1.5/1.6
+    // Dalvik: 21 is 176
+    private final static int HASHMAP_ENTRY_OVERHEAD_32 = 24;
+    private final static int HASHMAP_ENTRY_OVERHEAD_64 = 55;
+    private final static int HASHMAP_ENTRY_OVERHEAD_DALVIK = 32;
+
+    // 22
+    private final static int HASHSET_OVERHEAD_32 = 136;
+    private final static int HASHSET_OVERHEAD_64 = 240;
+    private final static int HASHSET_OVERHEAD_DALVIK = 144;
+
+    // 23 - OBJECT_OVERHEAD - HASHSET_OVERHEAD
+    // 64b: 23 is max(304,...,311) on Linux/Solaris
+    // Dalvik: 23 is 192
+    private final static int HASHSET_ENTRY_OVERHEAD_32 = 24;
+    private final static int HASHSET_ENTRY_OVERHEAD_64 = 55;
+    private final static int HASHSET_ENTRY_OVERHEAD_DALVIK = 32;
+
+    // HASHMAP_OVERHEAD * 2
+    private final static int TWOHASHMAPS_OVERHEAD_32 = 240;
+    private final static int TWOHASHMAPS_OVERHEAD_64_15 = 432;
+    private final static int TWOHASHMAPS_OVERHEAD_64_16 = 436;
+    private final static int TWOHASHMAPS_OVERHEAD_DALVIK = 256;
+
+    // 34
+    private final static int TREEMAP_OVERHEAD_32_15 = 40;
+    private final static int TREEMAP_OVERHEAD_32_16 = 48;
+    private final static int TREEMAP_OVERHEAD_64_15 = 64;
+    private final static int TREEMAP_OVERHEAD_64_16 = 80;
+    private final static int TREEMAP_OVERHEAD_DALVIK = 40;
+
+    // 35 - OBJECT_OVERHEAD - TREEMAP_OVERHEAD
+    // 64b: 35 is 144 on 1.5 and 160 on 1.6, result is 64 for both
+    // Dalvik: 35 is 96
+    private final static int TREEMAP_ENTRY_OVERHEAD_32 = 32;
+    private final static int TREEMAP_ENTRY_OVERHEAD_64 = 64;
+    private final static int TREEMAP_ENTRY_OVERHEAD_DALVIK = 40;
+
+    // 36
+    private final static int MAPLN_OVERHEAD_32_15 = 640;
+    private final static int MAPLN_OVERHEAD_32_16 = 664;
+    private final static int MAPLN_OVERHEAD_64_15 = 1096;
+    private final static int MAPLN_OVERHEAD_64_16 = 1136;
+    private final static int MAPLN_OVERHEAD_DALVIK = 744;
+
+    // 9
+    private final static int LN_OVERHEAD_32 = 24;
+    private final static int LN_OVERHEAD_64 = 40;
+    private final static int LN_OVERHEAD_DALVIK = 32;
+
+    // 19
+    private final static int DUPCOUNTLN_OVERHEAD_32 = 24;
+    private final static int DUPCOUNTLN_OVERHEAD_64 = 48;
+    private final static int DUPCOUNTLN_OVERHEAD_DALVIK = 32;
+
+    // 12
+    // 64b: 12 is max(536, 539) on Linux/Solaris on 1.5
+    // 64b: 12 is max(578, 576) on Linux/Solaris on 1.6
+    private final static int BIN_FIXED_OVERHEAD_32 = 370; // 344 in 1.5
+    private final static int BIN_FIXED_OVERHEAD_64_15 = 544;
+    private final static int BIN_FIXED_OVERHEAD_64_16 = 584;
+    private final static int BIN_FIXED_OVERHEAD_DALVIK = 458;
+
+    // 18
+    private final static int DIN_FIXED_OVERHEAD_32 = 377; // 352 in 1.5
+    private final static int DIN_FIXED_OVERHEAD_64_15 = 552;
+    private final static int DIN_FIXED_OVERHEAD_64_16 = 596;
+    private final static int DIN_FIXED_OVERHEAD_DALVIK = 451;
+
+    // 17
+    // 64b: 17 is max(592,593) on Linux/Solaris on 1.6
+    private final static int DBIN_FIXED_OVERHEAD_32 = 377; // 352 in 1.5
+    private final static int DBIN_FIXED_OVERHEAD_64_15 = 560;
+    private final static int DBIN_FIXED_OVERHEAD_64_16 = 600;
+    private final static int DBIN_FIXED_OVERHEAD_DALVIK = 458;
+
+    // 13
+    // 339 is max(312,339) on Solaris 1.5 vs 1.6
+    private final static int IN_FIXED_OVERHEAD_32 = 339; // 312 in 1.5
+    private final static int IN_FIXED_OVERHEAD_64_15 = 488;
+    private final static int IN_FIXED_OVERHEAD_64_16 = 528;
+    private final static int IN_FIXED_OVERHEAD_DALVIK = 402;
+
+    // 6
+    private final static int KEY_OVERHEAD_32 = 16;
+    private final static int KEY_OVERHEAD_64 = 24;
+    private final static int KEY_OVERHEAD_DALVIK = 16;
+
+    // 24
+    private final static int LOCKIMPL_OVERHEAD_32 = 24;
+    private final static int LOCKIMPL_OVERHEAD_64 = 48;
+    private final static int LOCKIMPL_OVERHEAD_DALVIK = 32;
+
+    // 42
+    private final static int THINLOCKIMPL_OVERHEAD_32 = 16;
+    private final static int THINLOCKIMPL_OVERHEAD_64 = 32;
+    private final static int THINLOCKIMPL_OVERHEAD_DALVIK = 24;
+
+    // 25
+    private final static int LOCKINFO_OVERHEAD_32 = 16;
+    private final static int LOCKINFO_OVERHEAD_64 = 32;
+    private final static int LOCKINFO_OVERHEAD_DALVIK = 24;
+
+    // 37
+    private final static int WRITE_LOCKINFO_OVERHEAD_32 = 32;
+    private final static int WRITE_LOCKINFO_OVERHEAD_64 = 40;
+    private final static int WRITE_LOCKINFO_OVERHEAD_DALVIK = 40;
+
+    /*
+     * Txn memory is the size for the Txn + a hashmap entry
+     * overhead for being part of the transaction table.
+     */
+    // 15
+    private final static int TXN_OVERHEAD_32 = 186;
+    private final static int TXN_OVERHEAD_64 = 281;
+    private final static int TXN_OVERHEAD_DALVIK = 218;
+
+    // 26
+    private final static int CHECKPOINT_REFERENCE_SIZE_32 = 40 +
+        HASHSET_ENTRY_OVERHEAD_32;
+    private final static int CHECKPOINT_REFERENCE_SIZE_64 = 56 +
+        HASHSET_ENTRY_OVERHEAD_64;
+    private final static int CHECKPOINT_REFERENCE_SIZE_DALVIK = 40;
+
+    /* The per-log-file bytes used in UtilizationProfile. */
+    // 29 / 10
+    private final static int UTILIZATION_PROFILE_ENTRY_32 = 101;
+    private final static int UTILIZATION_PROFILE_ENTRY_64 = 153;
+    private final static int UTILIZATION_PROFILE_ENTRY_DALVIK = 124;
+
+    //  38
+    private final static int DBFILESUMMARY_OVERHEAD_32 = 40;
+    private final static int DBFILESUMMARY_OVERHEAD_64 = 48;
+    private final static int DBFILESUMMARY_OVERHEAD_DALVIK = 48;
+
+    /* Tracked File Summary overheads. */
+    // 31
+    private final static int TFS_LIST_INITIAL_OVERHEAD_32 = 464;
+    private final static int TFS_LIST_INITIAL_OVERHEAD_64 = 504;
+    private final static int TFS_LIST_INITIAL_OVERHEAD_DALVIK = 472;
+
+    // 30
+    // 64b: 30 is max(464,464,464,465) on Linux/Solaris on 1.5/1.6
+    private final static int TFS_LIST_SEGMENT_OVERHEAD_32 = 440;
+    private final static int TFS_LIST_SEGMENT_OVERHEAD_64 = 465;
+    private final static int TFS_LIST_SEGMENT_OVERHEAD_DALVIK = 448;
+
+    // 33
+    private final static int LN_INFO_OVERHEAD_32 = 24;
+    private final static int LN_INFO_OVERHEAD_64 = 48;
+    private final static int LN_INFO_OVERHEAD_DALVIK = 32;
+
+    // 43
+    private final static int FILESUMMARYLN_OVERHEAD_32 = 112;
+    private final static int FILESUMMARYLN_OVERHEAD_64 = 168;
+    private final static int FILESUMMARYLN_OVERHEAD_DALVIK = 136;
+
+    /* Approximate element size in an ArrayList of Long. */
+    // (28 - 27) / 10
+    // 32b: 28 and 27 are 240 and 40, resp.
+    // 64b: 28 and 27 are 384 and 64, resp.
+    // Dalvik: 28 and 27 are 336 and 32, resp.
+    private final static int LONG_LIST_PER_ITEM_OVERHEAD_32 = 20;
+    private final static int LONG_LIST_PER_ITEM_OVERHEAD_64 = 32;
+    private final static int LONG_LIST_PER_ITEM_OVERHEAD_DALVIK = 30;
+
+    public final static int LONG_OVERHEAD;
+    public final static int ARRAY_OVERHEAD;
+    public final static int ARRAY_SIZE_INCLUDED;
+    public final static int OBJECT_OVERHEAD;
+    public final static int OBJECT_ARRAY_ITEM_OVERHEAD;
+    public final static int HASHMAP_OVERHEAD;
+    public final static int HASHMAP_ENTRY_OVERHEAD;
+    public final static int HASHSET_OVERHEAD;
+    public final static int HASHSET_ENTRY_OVERHEAD;
+    public final static int TWOHASHMAPS_OVERHEAD;
+    public final static int TREEMAP_OVERHEAD;
+    public final static int TREEMAP_ENTRY_OVERHEAD;
+    public final static int MAPLN_OVERHEAD;
+    public final static int LN_OVERHEAD;
+    public final static int DUPCOUNTLN_OVERHEAD;
+    public final static int BIN_FIXED_OVERHEAD;
+    public final static int DIN_FIXED_OVERHEAD;
+    public final static int DBIN_FIXED_OVERHEAD;
+    public final static int IN_FIXED_OVERHEAD;
+    public final static int KEY_OVERHEAD;
+    public final static int LOCKIMPL_OVERHEAD;
+    public final static int THINLOCKIMPL_OVERHEAD;
+    public final static int LOCKINFO_OVERHEAD;
+    public final static int WRITE_LOCKINFO_OVERHEAD;
+    public final static int TXN_OVERHEAD;
+    public final static int CHECKPOINT_REFERENCE_SIZE;
+    public final static int UTILIZATION_PROFILE_ENTRY;
+    public final static int DBFILESUMMARY_OVERHEAD;
+    public final static int TFS_LIST_INITIAL_OVERHEAD;
+    public final static int TFS_LIST_SEGMENT_OVERHEAD;
+    public final static int LN_INFO_OVERHEAD;
+    public final static int FILESUMMARYLN_OVERHEAD;
+    public final static int LONG_LIST_PER_ITEM_OVERHEAD;
+
+    /* Primitive long array item size is the same on all platforms. */
+    public final static int PRIMITIVE_LONG_ARRAY_ITEM_OVERHEAD = 8;
+
+    private final static String JVM_ARCH_PROPERTY = "sun.arch.data.model";
+    private final static String FORCE_JVM_ARCH = "je.forceJVMArch";
+
+    static {
+
+        /*
+         * On the Dalvik VM (Android), there is no difference in sizes between
+         * any platforms.
+         */
+        if (EnvironmentImpl.IS_DALVIK) {
+            LONG_OVERHEAD = LONG_OVERHEAD_DALVIK;
+            ARRAY_OVERHEAD = ARRAY_OVERHEAD_DALVIK;
+            ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_DALVIK;
+            OBJECT_OVERHEAD = OBJECT_OVERHEAD_DALVIK;
+            OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_DALVIK;
+            HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_DALVIK;
+            HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_DALVIK;
+            HASHSET_OVERHEAD = HASHSET_OVERHEAD_DALVIK;
+            HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_DALVIK;
+            TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_DALVIK;
+            TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_DALVIK;
+            TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_DALVIK;
+            MAPLN_OVERHEAD = MAPLN_OVERHEAD_DALVIK;
+            LN_OVERHEAD = LN_OVERHEAD_DALVIK;
+            DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_DALVIK;
+            BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_DALVIK;
+            DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_DALVIK;
+            DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_DALVIK;
+            IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_DALVIK;
+            KEY_OVERHEAD = KEY_OVERHEAD_DALVIK;
+            LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_DALVIK;
+            THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_DALVIK;
+            LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_DALVIK;
+            WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_DALVIK;
+            TXN_OVERHEAD = TXN_OVERHEAD_DALVIK;
+            CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_DALVIK;
+            UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_DALVIK;
+            DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_DALVIK;
+            TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_DALVIK;
+            TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_DALVIK;
+            LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_DALVIK;
+            FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_DALVIK;
+            LONG_LIST_PER_ITEM_OVERHEAD = LONG_LIST_PER_ITEM_OVERHEAD_DALVIK;
+        } else {
+            String javaVersion = System.getProperty("java.version");
+            boolean isJVM15 = javaVersion != null &&
+                javaVersion.startsWith("1.5.");
+
+            boolean is64 = false;
+            String overrideArch = System.getProperty(FORCE_JVM_ARCH);
+            try {
+                if (overrideArch == null) {
+                    String arch = System.getProperty(JVM_ARCH_PROPERTY);
+                    if (arch != null) {
+                        is64 = Integer.parseInt(arch) == 64;
+                    }
+                } else {
+                    is64 = Integer.parseInt(overrideArch) == 64;
+                }
+            } catch (NumberFormatException NFE) {
+                NFE.printStackTrace(System.err);
+            }
+
+            if (is64) {
+                LONG_OVERHEAD = LONG_OVERHEAD_64;
+                ARRAY_OVERHEAD = ARRAY_OVERHEAD_64;
+                ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_64;
+                OBJECT_OVERHEAD = OBJECT_OVERHEAD_64;
+                OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_64;
+                HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_64;
+                HASHSET_OVERHEAD = HASHSET_OVERHEAD_64;
+                HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_64;
+                if (isJVM15) {
+                    TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_64_15;
+                    MAPLN_OVERHEAD = MAPLN_OVERHEAD_64_15;
+                    BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_64_15;
+                    DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_64_15;
+                    DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_64_15;
+                    IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_64_15;
+                    HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_64_15;
+                    TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_64_15;
+                } else {
+                    TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_64_16;
+                    MAPLN_OVERHEAD = MAPLN_OVERHEAD_64_16;
+                    BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_64_16;
+                    DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_64_16;
+                    DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_64_16;
+                    IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_64_16;
+                    HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_64_16;
+                    TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_64_16;
+                }
+                TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_64;
+                LN_OVERHEAD = LN_OVERHEAD_64;
+                DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_64;
+                TXN_OVERHEAD = TXN_OVERHEAD_64;
+                CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_64;
+                KEY_OVERHEAD = KEY_OVERHEAD_64;
+                LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_64;
+                THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_64;
+                LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_64;
+                WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_64;
+                UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_64;
+                DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_64;
+                TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_64;
+                TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_64;
+                LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_64;
+                FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_64;
+                LONG_LIST_PER_ITEM_OVERHEAD = LONG_LIST_PER_ITEM_OVERHEAD_64;
+            } else {
+                LONG_OVERHEAD = LONG_OVERHEAD_32;
+                ARRAY_OVERHEAD = ARRAY_OVERHEAD_32;
+                ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_32;
+                OBJECT_OVERHEAD = OBJECT_OVERHEAD_32;
+                OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_32;
+                HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_32;
+                HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_32;
+                HASHSET_OVERHEAD = HASHSET_OVERHEAD_32;
+                HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_32;
+                TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_32;
+                if (isJVM15) {
+                    TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_32_15;
+                    MAPLN_OVERHEAD = MAPLN_OVERHEAD_32_15;
+                } else {
+                    TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_32_16;
+                    MAPLN_OVERHEAD = MAPLN_OVERHEAD_32_16;
+                }
+                TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_32;
+                LN_OVERHEAD = LN_OVERHEAD_32;
+                DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_32;
+                BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_32;
+                DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_32;
+                DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_32;
+                IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_32;
+                TXN_OVERHEAD = TXN_OVERHEAD_32;
+                CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_32;
+                KEY_OVERHEAD = KEY_OVERHEAD_32;
+                LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_32;
+                THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_32;
+                LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_32;
+                WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_32;
+                UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_32;
+                DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_32;
+                TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_32;
+                TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_32;
+                LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_32;
+                FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_32;
+                LONG_LIST_PER_ITEM_OVERHEAD = LONG_LIST_PER_ITEM_OVERHEAD_32;
+            }
+        }
+    }
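+
+    /*
+     * The detected architecture can be overridden for testing, for example:
+     *
+     *   java -Dje.forceJVMArch=64 ...
+     *
+     * which forces the 64-bit overhead values regardless of the JVM.
+     */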
+
+    /* public for unit tests. */
+    public final static long MIN_MAX_MEMORY_SIZE = 96 * 1024;
+    public final static String MIN_MAX_MEMORY_SIZE_STRING =
+        Long.toString(MIN_MAX_MEMORY_SIZE);
+
+    /* This value prevents cache churn for apps with a high write rate. */
+    @SuppressWarnings("unused")
+    private final static int DEFAULT_MIN_BTREE_CACHE_SIZE = 500 * 1024;
+
+    private final static long N_64MB = (1 << 26);
+
+    /*
+     * Note that this class contains long fields that are accessed by multiple
+     * threads.  Access to these fields is synchronized when changing them but
+     * not when reading them to detect cache overflow or get stats.  Although
+     * inaccuracies may occur when reading the values, correcting this is not
+     * worth the cost of synchronizing every time we access them.  The worst
+     * that can happen is that we may invoke eviction unnecessarily.
+     */
+
+    /*
+     * Amount of memory cached for tree objects.
+     */
+    private AtomicLong treeMemoryUsage = new AtomicLong(0);
+
+    /*
+     * Amount of memory cached for txn usage.
+     */
+    private AtomicLong txnMemoryUsage = new AtomicLong(0);
+
+    /*
+     * Amount of memory cached for log cleaning, dirty IN list, and other admin
+     * functions.
+     */
+    private AtomicLong adminMemoryUsage = new AtomicLong(0);
+
+    /*
+     * Amount of memory cached for administrative structures that are
+     * sometimes housed within tree nodes. Right now, that's
+     * DbFileSummaryMap, which is sometimes referenced by a MapLN by
+     * way of a DatabaseImpl, and sometimes is just referenced by
+     * a DatabaseImpl without a MapLN (the id and name databases).
+     */
+    private AtomicLong treeAdminMemoryUsage = new AtomicLong(0);
+
+    /*
+     * Number of lock tables (cache of EnvironmentParams.N_LOCK_TABLES).
+     */
+    private int nLockTables;
+
+    /*
+     * Amount of memory cached for locks. Protected by the
+     * LockManager.lockTableLatches[lockTableIndex].
+     */
+    private AtomicLong[] lockMemoryUsage;
+
+    /*
+     * Memory available to JE, based on je.maxMemory and the memory available
+     * to this process.
+     */
+    private Totals totals;
+
+    /* Memory available to log buffers. */
+    private long logBufferBudget;
+
+    /* Maximum allowed use of the admin budget by the UtilizationTracker. */
+    private long trackerBudget;
+
+    /* Minimum to prevent cache churn. */
+    private long minTreeMemoryUsage;
+
+    /*
+     * Overheads that are a function of node capacity.
+     */
+    private long inOverhead;
+    private long binOverhead;
+    private long dinOverhead;
+    private long dbinOverhead;
+
+    private EnvironmentImpl envImpl;
+
+    MemoryBudget(EnvironmentImpl envImpl,
+                 EnvironmentImpl sharedCacheEnv,
+                 DbConfigManager configManager)
+        throws DatabaseException {
+
+        this.envImpl = envImpl;
+
+        /* Request notification of mutable property changes. */
+        envImpl.addConfigObserver(this);
+
+        /* Perform first-time budget initialization. */
+        long newMaxMemory;
+        if (envImpl.getSharedCache()) {
+            if (sharedCacheEnv != null) {
+                totals = sharedCacheEnv.getMemoryBudget().totals;
+                /* For a new environment, do not override existing budget. */
+                newMaxMemory = -1;
+            } else {
+                totals = new SharedTotals();
+                newMaxMemory = calcMaxMemory(configManager);
+            }
+        } else {
+            totals = new PrivateTotals(this);
+            newMaxMemory = calcMaxMemory(configManager);
+        }
+        reset(newMaxMemory, true /*newEnv*/, configManager);
+
+        /*
+         * Calculate IN and BIN overheads, which are a function of capacity.
+         * These values are stored in this class so that they can be calculated
+         * once per environment. The logic to do the calculations is left in
+         * the respective node classes so it can be done properly in the domain
+         * of those objects.
+         */
+        inOverhead = IN.computeOverhead(configManager);
+        binOverhead = BIN.computeOverhead(configManager);
+        dinOverhead = DIN.computeOverhead(configManager);
+        dbinOverhead = DBIN.computeOverhead(configManager);
+    }
+
+    /**
+     * Respond to config updates.
+     */
+    public void envConfigUpdate(DbConfigManager configManager,
+                                EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        /* Reinitialize the cache budget and the log buffer pool. */
+        reset(calcMaxMemory(configManager), false /*newEnv*/, configManager);
+    }
+
+    private long calcMaxMemory(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+         * Calculate the total memory allotted to JE.
+         * 1. If je.maxMemory is specified, use that. Check that it's not more
+         * than the JVM memory.
+         * 2. Otherwise, take je.maxMemoryPercent * JVM max memory.
+         */
+        long newMaxMemory =
+            configManager.getLong(EnvironmentParams.MAX_MEMORY);
+        long jvmMemory = getRuntimeMaxMemory();
+
+        if (newMaxMemory != 0) {
+            /* Application specified a cache size number, validate it. */
+            if (jvmMemory < newMaxMemory) {
+                throw new IllegalArgumentException
+                    (EnvironmentParams.MAX_MEMORY.getName() +
+                     " has a value of " + newMaxMemory +
+                     " but the JVM is only configured for " +
+                     jvmMemory +
+                     ". Consider using je.maxMemoryPercent.");
+            }
+            if (newMaxMemory < MIN_MAX_MEMORY_SIZE) {
+                throw new IllegalArgumentException
+                    (EnvironmentParams.MAX_MEMORY.getName() +
+                     " is " + newMaxMemory +
+                     " which is less than the minimum: " +
+                     MIN_MAX_MEMORY_SIZE);
+            }
+        } else {
+
+            /*
+             * When no explicit cache size is specified and the JVM memory size
+             * is unknown, assume a default-sized (64 MB) heap.  This produces
+             * a reasonable cache size when no heap size is known.
+             */
+            if (jvmMemory == Long.MAX_VALUE) {
+                jvmMemory = N_64MB;
+            }
+
+            /* Use the configured percentage of the JVM memory size. */
+            int maxMemoryPercent =
+                configManager.getInt(EnvironmentParams.MAX_MEMORY_PERCENT);
+            newMaxMemory = (maxMemoryPercent * jvmMemory) / 100;
+        }
+
+        return newMaxMemory;
+    }
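+
+    /*
+     * A worked example of the percentage path, assuming the default
+     * je.maxMemoryPercent of 60: with a 64 MB JVM heap, newMaxMemory =
+     * (60 * 67108864) / 100 = 40265318 bytes, roughly 38 MB.
+     */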
+
+    /**
+     * Initialize at construction time and when the cache is resized.
+     *
+     * @param newMaxMemory is the new total cache budget or is less than 0 if
+     * the total should remain unchanged.
+     *
+     * @param newEnv is true if this is the first time we are resetting the
+     * budget for a new environment.  Note that a new environment has not yet
+     * been added to the set of shared cache environments.
+     */
+    void reset(long newMaxMemory,
+               boolean newEnv,
+               DbConfigManager configManager)
+        throws DatabaseException {
+
+        long oldLogBufferBudget = logBufferBudget;
+
+        /*
+         * Update the new total cache budget.
+         */
+        if (newMaxMemory < 0) {
+            newMaxMemory = getMaxMemory();
+        } else {
+            totals.setMaxMemory(newMaxMemory);
+        }
+
+        /*
+         * This environment's portion is adjusted for a shared cache.  Further
+         * below we make buffer and tracker sizes a fixed percentage (about
+         * 6% and 2%, by default) of the total shared cache size.  The math
+         * for this starts by dividing the total size by the number of
+         * environments to get myCachePortion.  Then we take 6% or 2% of
+         * myCachePortion to get each environment's portion.  In other words,
+         * if there are 10 environments then each gets 6%/10 and 2%/10 of the
+         * total cache size, by default.
+         *
+         * Note that when we resize the shared cache, we resize the buffer
+         * pools and tracker budgets for all environments.  Resizing the
+         * tracker budget has no overhead, but resizing the buffer pools causes
+         * new buffers to be allocated.  If reallocation of the log buffers is
+         * not desirable, the user can configure a byte amount rather than a
+         * percentage.
+         */
+        long myCachePortion;
+        if (envImpl.getSharedCache()) {
+            int nEnvs = DbEnvPool.getInstance().getNSharedCacheEnvironments();
+            if (newEnv) {
+                nEnvs += 1;
+            }
+            myCachePortion = newMaxMemory / nEnvs;
+        } else {
+            myCachePortion = newMaxMemory;
+        }
+
+        /*
+         * Calculate the memory budget for log buffering.  If the LOG_MEM_SIZE
+         * parameter is not set, start by using 1/16th (about 6%) of the
+         * cache size. If it is set, use that explicit setting.
+         *
+         * No point in having more log buffers than the maximum size. If
+         * this starting point results in overly large log buffers,
+         * reduce the log buffer budget again.
+         */
+        long newLogBufferBudget =
+            configManager.getLong(EnvironmentParams.LOG_MEM_SIZE);
+        if (newLogBufferBudget == 0) {
+            if (EnvironmentImpl.IS_DALVIK) {
+                /* If Dalvik JVM, use 1/128th instead of 1/16th of cache. */
+                newLogBufferBudget = myCachePortion >> 7;
+            } else {
+                newLogBufferBudget = myCachePortion >> 4;
+            }
+        } else if (newLogBufferBudget > myCachePortion / 2) {
+            newLogBufferBudget = myCachePortion / 2;
+        }
+
+        /*
+         * We have a first pass at the log buffer budget. See what
+         * size log buffers result. Don't let them be too big; that would
+         * be a waste.
+         */
+        int numBuffers =
+            configManager.getInt(EnvironmentParams.NUM_LOG_BUFFERS);
+        long startingBufferSize = newLogBufferBudget / numBuffers;
+        int logBufferSize =
+            configManager.getInt(EnvironmentParams.LOG_BUFFER_MAX_SIZE);
+        if (startingBufferSize > logBufferSize) {
+            startingBufferSize = logBufferSize;
+            newLogBufferBudget = numBuffers * startingBufferSize;
+        } else if (startingBufferSize <
+                   EnvironmentParams.MIN_LOG_BUFFER_SIZE) {
+            startingBufferSize = EnvironmentParams.MIN_LOG_BUFFER_SIZE;
+            newLogBufferBudget = numBuffers * startingBufferSize;
+        }
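+
+        /*
+         * For example, assuming the default of 3 log buffers and a 1 MB
+         * LOG_BUFFER_MAX_SIZE: a 64 MB private cache yields a first-pass
+         * budget of 4 MB (1/16th), or about 1.4 MB per buffer; that exceeds
+         * the 1 MB maximum, so each buffer is capped at 1 MB and the budget
+         * is reduced to 3 MB.
+         */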
+
+        long newCriticalThreshold =
+            (newMaxMemory *
+             envImpl.getConfigManager().getInt
+                (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE))/100;
+
+        long newTrackerBudget =
+            (myCachePortion *
+             envImpl.getConfigManager().getInt
+                (EnvironmentParams.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE))/100;
+
+        long newMinTreeMemoryUsage = Math.min
+            (configManager.getLong(EnvironmentParams.MIN_TREE_MEMORY),
+             myCachePortion - newLogBufferBudget);
+
+        /*
+         * If all has gone well, update the budget fields.  Once the log buffer
+         * budget is determined, the remainder of the memory is left for tree
+         * nodes.
+         */
+        logBufferBudget = newLogBufferBudget;
+        totals.setCriticalThreshold(newCriticalThreshold);
+        trackerBudget = newTrackerBudget;
+        if (lockMemoryUsage == null) {
+            nLockTables =
+                configManager.getInt(EnvironmentParams.N_LOCK_TABLES);
+            lockMemoryUsage = new AtomicLong[nLockTables];
+            for (int i = 0; i < nLockTables; i++) {
+                lockMemoryUsage[i] = new AtomicLong(0);
+            }
+        }
+        minTreeMemoryUsage = newMinTreeMemoryUsage;
+
+        /* The log buffer budget is counted in the cache usage. */
+        totals.updateCacheUsage(logBufferBudget - oldLogBufferBudget);
+
+        /*
+         * Only reset the log buffer pool if the log buffer has already been
+         * initialized (we're updating an existing budget) and the log buffer
+         * budget has changed (resetting it is expensive and may cause I/O).
+         */
+        if (!newEnv && oldLogBufferBudget != logBufferBudget) {
+            envImpl.getLogManager().resetPool(configManager);
+        }
+    }
+
+    /**
+     * Returns Runtime.maxMemory(), accounting for a MacOS bug.
+     * May return Long.MAX_VALUE if there is no inherent limit.
+     * Used by unit tests as well as by this class.
+     */
+    public static long getRuntimeMaxMemory() {
+
+        /* Runtime.maxMemory is unreliable on MacOS Java 1.4.2. */
+        if ("Mac OS X".equals(System.getProperty("os.name"))) {
+            String jvmVersion = System.getProperty("java.version");
+            if (jvmVersion != null && jvmVersion.startsWith("1.4.2")) {
+                return Long.MAX_VALUE; /* Undetermined heap size. */
+            }
+        }
+
+        return Runtime.getRuntime().maxMemory();
+    }
+
+    /**
+     * Initialize the starting environment memory state. We really only need to
+     * recalibrate the tree and treeAdmin categories, since there are no locks
+     * and txns yet, and the items in the admin category are cleaner items and
+     * aren't affected by the recovery splicing process.
+     */
+    void initCacheMemoryUsage(long dbTreeAdminMemory)
+        throws DatabaseException {
+
+        long totalTree = 0;
+        long treeAdmin = 0;
+        for (IN in : envImpl.getInMemoryINs()) {
+            totalTree += in.getBudgetedMemorySize();
+            treeAdmin += in.getTreeAdminMemorySize();
+        }
+        refreshTreeMemoryUsage(totalTree);
+        refreshTreeAdminMemoryUsage(treeAdmin + dbTreeAdminMemory);
+    }
+
+    /**
+     * Called by INList when recalculating the tree admin memory usage.
+     */
+    void refreshTreeAdminMemoryUsage(long newSize) {
+        long oldSize = treeAdminMemoryUsage.getAndSet(newSize);
+        long diff = (newSize - oldSize);
+
+        if (DEBUG_TREEADMIN) {
+            System.err.println("RESET = " + newSize);
+        }
+        if (totals.updateCacheUsage(diff)) {
+            envImpl.alertEvictor();
+        }
+    }
+
+    /**
+     * Called by INList when recalculating tree memory usage.
+     */
+    void refreshTreeMemoryUsage(long newSize) {
+        long oldSize = treeMemoryUsage.getAndSet(newSize);
+        long diff = (newSize - oldSize);
+
+        if (totals.updateCacheUsage(diff)) {
+            envImpl.alertEvictor();
+        }
+    }
+
+    /**
+     * Returns whether eviction of INList information is allowed.
+     * To prevent extreme cache churn, eviction of Btree information is
+     * prohibited unless the tree memory usage is above this minimum value.
+     */
+    public boolean isTreeUsageAboveMinimum() {
+        return treeMemoryUsage.get() > minTreeMemoryUsage;
+    }
+
+    /**
+     * For unit tests.
+     */
+    public long getMinTreeMemoryUsage() {
+        return minTreeMemoryUsage;
+    }
+
+    /**
+     * Update the environment wide tree memory count, wake up the evictor if
+     * necessary.
+     * @param increment note that increment may be negative.
+     */
+    public void updateTreeMemoryUsage(long increment) {
+        updateCounter(increment, treeMemoryUsage, "tree", DEBUG_TREE);
+    }
+
+    /**
+     * Update the environment wide txn memory count, wake up the evictor if
+     * necessary.
+     * @param increment note that increment may be negative.
+     */
+    public void updateTxnMemoryUsage(long increment) {
+        updateCounter(increment, txnMemoryUsage, "txn", DEBUG_TXN);
+    }
+
+    /**
+     * Update the environment wide admin memory count, wake up the evictor if
+     * necessary.
+     * @param increment note that increment may be negative.
+     */
+    public void updateAdminMemoryUsage(long increment) {
+        updateCounter(increment, adminMemoryUsage, "admin", DEBUG_ADMIN);
+    }
+
+    /**
+     * Update the treeAdmin memory count, wake up the evictor if necessary.
+     * @param increment note that increment may be negative.
+     */
+    public void updateTreeAdminMemoryUsage(long increment) {
+        updateCounter(increment, treeAdminMemoryUsage, "treeAdmin",
+                      DEBUG_TREEADMIN);
+    }
+
+    private void updateCounter(long increment,
+                               AtomicLong counter,
+                               String debugName,
+                               boolean debug) {
+        if (increment != 0) {
+            long newSize = counter.addAndGet(increment);
+
+            assert (sizeNotNegative(newSize)) :
+                   makeErrorMessage(debugName, newSize, increment);
+
+            if (debug) {
+                if (increment > 0) {
+                    System.err.println("INC-------- =" + increment + " " +
+                                       debugName + " "  + newSize);
+                } else {
+                    System.err.println("-------DEC=" + increment + " " +
+                                       debugName + " "  + newSize);
+                }
+            }
+
+            if (totals.updateCacheUsage(increment)) {
+                envImpl.alertEvictor();
+            }
+        }
+    }
+
+    private boolean sizeNotNegative(long newSize) {
+
+        if (CLEANUP_DONE)  {
+            return (newSize >= 0);
+        } else {
+            return true;
+        }
+    }
+
+    public void updateLockMemoryUsage(long increment, int lockTableIndex) {
+        if (increment != 0) {
+            lockMemoryUsage[lockTableIndex].addAndGet(increment);
+
+            assert lockMemoryUsage[lockTableIndex].get() >= 0:
+                   makeErrorMessage("lockMem",
+                                    lockMemoryUsage[lockTableIndex].get(),
+                                    increment);
+            if (DEBUG_LOCK) {
+                if (increment > 0) {
+                    System.err.println("INC-------- =" + increment +
+                                       " lock[" +
+                                       lockTableIndex + "] " +
+                                       lockMemoryUsage[lockTableIndex].get());
+                } else {
+                    System.err.println("-------DEC=" + increment +
+                                       " lock[" + lockTableIndex + "] " +
+                                       lockMemoryUsage[lockTableIndex].get());
+                }
+            }
+
+            if (totals.updateCacheUsage(increment)) {
+                envImpl.alertEvictor();
+            }
+        }
+    }
+
+    private String makeErrorMessage(String memoryType,
+                                    long total,
+                                    long increment) {
+        return memoryType + "=" + total +
+            " increment=" + increment + " " +
+            Tracer.getStackTrace(new Throwable());
+    }
+
+    void subtractCacheUsage() {
+        totals.updateCacheUsage(0 - getLocalCacheUsage());
+    }
+
+    private long getLocalCacheUsage() {
+        return logBufferBudget +
+               treeMemoryUsage.get() +
+               adminMemoryUsage.get() +
+               treeAdminMemoryUsage.get() +
+               getLockMemoryUsage();
+    }
+
+    long getVariableCacheUsage() {
+        return treeMemoryUsage.get() +
+            adminMemoryUsage.get() +
+            treeAdminMemoryUsage.get() +
+            getLockMemoryUsage();
+    }
+
+    /**
+     * Public for unit testing.
+     */
+    public long getLockMemoryUsage() {
+        long accLockMemoryUsage = txnMemoryUsage.get();
+        if (nLockTables == 1) {
+            accLockMemoryUsage += lockMemoryUsage[0].get();
+        } else {
+            for (int i = 0; i < nLockTables; i++) {
+                accLockMemoryUsage += lockMemoryUsage[i].get();
+            }
+        }
+
+        return accLockMemoryUsage;
+    }
+
+    /*
+     * The following 2 methods are shorthand for getTotals().getXxx().
+     */
+
+    public long getCacheMemoryUsage() {
+        return totals.getCacheUsage();
+    }
+
+    public long getMaxMemory() {
+        return totals.getMaxMemory();
+    }
+
+    /**
+     * Used for unit testing.
+     */
+    public long getTreeMemoryUsage() {
+        return treeMemoryUsage.get();
+    }
+
+    /**
+     * Used for unit testing.
+     */
+    public long getAdminMemoryUsage() {
+        return adminMemoryUsage.get();
+    }
+
+    /**
+     * For unit testing.
+     */
+    public long getTreeAdminMemoryUsage() {
+        return treeAdminMemoryUsage.get();
+    }
+
+    public long getLogBufferBudget() {
+        return logBufferBudget;
+    }
+
+    public long getTrackerBudget() {
+        return trackerBudget;
+    }
+
+    public long getINOverhead() {
+        return inOverhead;
+    }
+
+    public long getBINOverhead() {
+        return binOverhead;
+    }
+
+    public long getDINOverhead() {
+        return dinOverhead;
+    }
+
+    public long getDBINOverhead() {
+        return dbinOverhead;
+    }
+
+    /**
+     * Returns the memory size occupied by a byte array of a given length.  All
+     * arrays (regardless of element type) have the same overhead for a zero
+     * length array.  On 32b Java, there are 4 bytes included in that fixed
+     * overhead that can be used for the first N elements -- however many fit
+     * in 4 bytes.  On 64b Java, there is no extra space included.  In all
+     * cases, space is allocated in 8 byte chunks.
+     */
+    public static int byteArraySize(int arrayLen) {
+
+        /*
+         * ARRAY_OVERHEAD accounts for N bytes of data, which is 4 bytes on 32b
+         * Java and 0 bytes on 64b Java.  Data larger than N bytes is allocated
+         * in 8 byte increments.
+         */
+        int size = ARRAY_OVERHEAD;
+        if (arrayLen > ARRAY_SIZE_INCLUDED) {
+            size += ((arrayLen - ARRAY_SIZE_INCLUDED + 7) / 8) * 8;
+        }
+
+        return size;
+    }
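+
+    /*
+     * Worked examples: on 32-bit Java, byteArraySize(5) is
+     * 16 + ((5 - 4 + 7) / 8) * 8 = 24 bytes; on 64-bit Java the same call
+     * is 24 + ((5 + 7) / 8) * 8 = 32 bytes, since no data bytes are
+     * included in the fixed overhead there.
+     */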
+
+    public static int shortArraySize(int arrayLen) {
+        return byteArraySize(arrayLen * 2);
+    }
+
+    public static int intArraySize(int arrayLen) {
+        return byteArraySize(arrayLen * 4);
+    }
+
+    public static int objectArraySize(int arrayLen) {
+        return byteArraySize(arrayLen * OBJECT_ARRAY_ITEM_OVERHEAD);
+    }
+
+    void loadStats(StatsConfig config, EnvironmentStats stats) {
+        stats.setSharedCacheTotalBytes
+            (totals.isSharedCache() ? totals.getCacheUsage() : 0);
+        stats.setCacheTotalBytes(getLocalCacheUsage());
+        stats.setDataBytes(treeMemoryUsage.get() +
+                           treeAdminMemoryUsage.get());
+        stats.setAdminBytes(adminMemoryUsage.get());
+        stats.setLockBytes(getLockMemoryUsage());
+    }
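+
+    /*
+     * These values surface through the public stats API; a minimal sketch,
+     * assuming an open Environment "env":
+     *
+     *   EnvironmentStats stats = env.getStats(new StatsConfig());
+     *   long cacheBytes = stats.getCacheTotalBytes();
+     *   long dataBytes = stats.getDataBytes();
+     */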
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("treeUsage = ").append(treeMemoryUsage.get());
+        sb.append("treeAdminUsage = ").append(treeAdminMemoryUsage.get());
+        sb.append("adminUsage = ").append(adminMemoryUsage.get());
+        sb.append("txnUsage = ").append(txnMemoryUsage.get());
+        sb.append("lockUsage = ").append(getLockMemoryUsage());
+        return sb.toString();
+    }
+
+    public Totals getTotals() {
+        return totals;
+    }
+
+    /**
+     * Common base class for shared and private totals.  This abstraction
+     * allows most other classes to be unaware of whether we're using a
+     * SharedEvictor or PrivateEvictor.
+     */
+    public abstract static class Totals {
+
+        long maxMemory;
+        private long criticalThreshold;
+
+        private Totals() {
+            maxMemory = 0;
+        }
+
+        private final void setMaxMemory(long maxMemory) {
+            this.maxMemory = maxMemory;
+        }
+
+        public final long getMaxMemory() {
+            return maxMemory;
+        }
+
+        private final void setCriticalThreshold(long criticalThreshold) {
+            this.criticalThreshold = criticalThreshold;
+        }
+
+        public final long getCriticalThreshold() {
+            return criticalThreshold;
+        }
+
+        public abstract long getCacheUsage();
+        abstract boolean updateCacheUsage(long increment);
+        abstract boolean isSharedCache();
+    }
+
+    /**
+     * Totals for a single environment's non-shared cache.  Used when
+     * EnvironmentConfig.setSharedCache(false) and a PrivateEvictor are used.
+     */
+    private static class PrivateTotals extends Totals {
+
+        private MemoryBudget parent;
+
+        private PrivateTotals(MemoryBudget parent) {
+            this.parent = parent;
+        }
+
+        public final long getCacheUsage() {
+            return parent.getLocalCacheUsage();
+        }
+
+        final boolean updateCacheUsage(long increment) {
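+            /*
+             * The increment argument is unused here: a private total is
+             * always derived from the parent's component counters, so this
+             * method only reports whether the budget has been exceeded.
+             */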
+            return (parent.getLocalCacheUsage() > maxMemory);
+        }
+
+        final boolean isSharedCache() {
+            return false;
+        }
+    }
+
+    /**
+     * Totals for the multi-environment shared cache.  Used when
+     * EnvironmentConfig.setSharedCache(true) and the SharedEvictor are used.
+     */
+    private static class SharedTotals extends Totals {
+
+        private AtomicLong usage;
+
+        private SharedTotals() {
+            usage = new AtomicLong();
+        }
+
+        public final long getCacheUsage() {
+            return usage.get();
+        }
+
+        final boolean updateCacheUsage(long increment) {
+            return (usage.addAndGet(increment) > maxMemory);
+        }
+
+        final boolean isSharedCache() {
+            return true;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/NodeSequence.java b/src/com/sleepycat/je/dbi/NodeSequence.java
new file mode 100644
index 0000000000000000000000000000000000000000..0eade724caf41acb9f180eb00f6ae37052377ed3
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/NodeSequence.java
@@ -0,0 +1,134 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NodeSequence.java,v 1.1.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * NodeSequence encapsulates the generation and maintenance of a sequence for
+ * generating node ids.
+ */
+public class NodeSequence {
+
+    /*
+     * Node Ids: We need to ensure that local and replicated nodes use
+     * different number spaces for their ids, so there can't be any possible
+     * conflicts.  Local, non-replicated nodes use positive values;
+     * replicated nodes use negative values. On top of that, there is the
+     * notion of transient node ids, which are used for cases like the eof
+     * node used for Serializable isolation and the lock used for API
+     * lockout. Transient node ids are used to provide unique locks, and are
+     * only used during the life of an environment, for non-persistent
+     * objects. We use the descending sequence of positive values, starting
+     * from Long.MAX_VALUE.
+     *
+     * The transient node sequence must be initialized before the DbTree
+     * uber-tree is created, because transient ids are used at DatabaseImpl
+     * construction.  The local and replicated node id sequences are
+     * initialized by the first pass of recovery, after the log has been
+     * scanned for the latest used node id.
+     */
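+
+    /*
+     * To summarize the id spaces (values are illustrative): local node ids
+     * count up 1, 2, 3, ... from zero; replicated node ids count down -1,
+     * -2, -3, ... from zero; and transient node ids count down from
+     * Long.MAX_VALUE.
+     */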
+    private AtomicLong lastAllocatedLocalNodeId = null;
+    private AtomicLong lastAllocatedReplicatedNodeId = null;
+    private AtomicLong lastAllocatedTransientNodeId = null;
+
+    /**
+     * Initialize the counters in these methods rather than a constructor
+     * so we can control the initialization more precisely.
+     */
+    void initTransientNodeId() {
+        lastAllocatedTransientNodeId = new AtomicLong(Long.MAX_VALUE);
+    }
+
+    /**
+     * Initialize the counters in these methods rather than a constructor
+     * so we can control the initialization more precisely.
+     */
+    void initRealNodeId() {
+        lastAllocatedLocalNodeId = new AtomicLong(0);
+        lastAllocatedReplicatedNodeId = new AtomicLong(0);
+    }
+
+    /**
+     * The last allocated local and replicated node ids are used for ckpts.
+     */
+    public long getLastLocalNodeId() {
+        return lastAllocatedLocalNodeId.get();
+    }
+
+    public long getLastReplicatedNodeId() {
+        return lastAllocatedReplicatedNodeId.get();
+    }
+
+    /**
+     * We get a new node id of the appropriate kind when creating a new node.
+     */
+    public long getNextLocalNodeId() {
+        return lastAllocatedLocalNodeId.incrementAndGet();
+    }
+
+    public long getNextReplicatedNodeId() {
+        return lastAllocatedReplicatedNodeId.decrementAndGet();
+    }
+
+    public long getNextTransientNodeId() {
+        /* Assert that the two sequences haven't overlapped. */
+        assert (noOverlap()) : "transient=" +
+            lastAllocatedTransientNodeId.get();
+        return lastAllocatedTransientNodeId.decrementAndGet();
+    }
+
+    private boolean noOverlap() {
+        if (lastAllocatedLocalNodeId != null) {
+            return (lastAllocatedTransientNodeId.get() - 1) >
+                lastAllocatedLocalNodeId.get();
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Initialize the node ids from recovery. No need to initialize
+     * the transient node ids, since those can be reused each time the
+     * environment is recreated.
+     */
+    public void setLastNodeId(long lastReplicatedNodeId,
+                              long lastLocalNodeId) {
+        lastAllocatedReplicatedNodeId.set(lastReplicatedNodeId);
+        lastAllocatedLocalNodeId.set(lastLocalNodeId);
+    }
+
+    /*
+     * Only set the replicated node id if the replayNodeId represents a
+     * newer, later value in the replication stream. If the replayNodeId is
+     * earlier than this node's lastAllocatedReplicatedNodeId, don't bother
+     * updating the sequence.
+     */
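+    /*
+     * For example, if lastAllocatedReplicatedNodeId is currently -5, a
+     * replayNodeId of -9 is newer and the sequence is set to -9, while a
+     * replayNodeId of -3 is older and the sequence is left unchanged.
+     */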
+    public void updateFromReplay(long replayNodeId) {
+
+        assert replayNodeId < 0 :
+            "replay node id is unexpectedly positive " + replayNodeId;
+
+        while (true) {
+            long currentVal = lastAllocatedReplicatedNodeId.get();
+            if (replayNodeId < currentVal) {
+                /*
+                 * This replayNodeId is newer than any other replicatedNodeId
+                 * known by this node.
+                 */
+                boolean ok = lastAllocatedReplicatedNodeId.weakCompareAndSet
+                    (currentVal, replayNodeId);
+                if (ok) {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/Operation.java b/src/com/sleepycat/je/dbi/Operation.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e03bde01f571a05267681c59005631fbe9703bc
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/Operation.java
@@ -0,0 +1,71 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Operation.java,v 1.10.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An enumeration of different API call sources for replication, currently for
+ * debugging. This is also intended to support the future possibility of
+ * providing application level visibility into the replication operation
+ * stream.
+ */
+public class Operation {
+
+    public static final Operation PUT =
+        new Operation((byte) 1, "PUT");
+    public static final Operation NO_OVERWRITE =
+        new Operation((byte) 2, "NO_OVERWRITE");
+    public static final Operation PLACEHOLDER =
+        new Operation((byte) 3, "PLACEHOLDER");
+
+    private static final Operation[] ALL_OPS =
+        { PUT, NO_OVERWRITE, PLACEHOLDER };
+
+    private static final byte MAX_OP = 3;
+    private static final byte MIN_OP = 1;
+
+    private byte op;
+    private String name;
+
+    public Operation() {
+    }
+
+    private Operation(byte op, String name) {
+        this.op = op;
+        this.name = name;
+    }
+
+    public int getContentSize() {
+        return 1;
+    }
+
+    /**
+     * Serialize this object into the buffer.
+     * @param buffer is the destination buffer
+     */
+    public void writeToBuffer(ByteBuffer buffer) {
+        buffer.put(op);
+    }
+
+    public static Operation readFromBuffer(ByteBuffer buffer) {
+        byte opNum = buffer.get();
+        if (opNum >= MIN_OP &&
+            opNum <= MAX_OP) {
+            return ALL_OPS[opNum - 1];
+        } else {
+            return new Operation(opNum, "UNKNOWN " + opNum);
+        }
+    }
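+
+    /*
+     * A minimal round-trip sketch using this class's own methods:
+     *
+     *   ByteBuffer buf =
+     *       ByteBuffer.allocate(Operation.PUT.getContentSize());
+     *   Operation.PUT.writeToBuffer(buf);
+     *   buf.flip();
+     *   Operation op = Operation.readFromBuffer(buf); // yields PUT
+     */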
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/PutMode.java b/src/com/sleepycat/je/dbi/PutMode.java
new file mode 100644
index 0000000000000000000000000000000000000000..eeb2d4295df0967504aa30dbddbb58c2477c027b
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/PutMode.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PutMode.java,v 1.9.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+/**
+ * Internal class used to distinguish which variety of putXXX()
+ * Cursor.putInternal() should perform.
+ */
+public class PutMode {
+    private String name;
+
+    private PutMode(String name) {
+	this.name = name;
+    }
+
+    public static final PutMode NODUP =       new PutMode("NODUP");
+    public static final PutMode CURRENT =     new PutMode("CURRENT");
+    public static final PutMode OVERWRITE =   new PutMode("OVERWRITE");
+    public static final PutMode NOOVERWRITE = new PutMode("NOOVERWRITE");
+
+    @Override
+    public String toString() {
+	return name;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/RangeRestartException.java b/src/com/sleepycat/je/dbi/RangeRestartException.java
new file mode 100644
index 0000000000000000000000000000000000000000..e198fb53c6686e37cdaa86801e6af51b14f0d86f
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/RangeRestartException.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RangeRestartException.java,v 1.8.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Thrown by the LockManager when requesting a RANGE_READ or RANGE_WRITE
+ * lock, and a RANGE_INSERT lock is held or is waiting.  This exception is
+ * caught by read operations and causes a restart of the operation.  It should
+ * never be seen by the user.
+ */
+@SuppressWarnings("serial")
+public class RangeRestartException extends DatabaseException {
+
+    public RangeRestartException() {
+        super();
+    }
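+
+    /*
+     * Handling sketch (illustrative only): internal read operations are
+     * expected to catch this exception and restart from the top, roughly:
+     *
+     *   while (true) {
+     *       try {
+     *           // request the RANGE_READ/RANGE_WRITE lock and do the read
+     *           break;
+     *       } catch (RangeRestartException e) {
+     *           // a RANGE_INSERT lock intervened; retry the operation
+     *       }
+     *   }
+     */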
+}
diff --git a/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java b/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..14abe4b281c2c6afec05a3f1a5169c160c7955d9
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java
@@ -0,0 +1,165 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReplicatedDatabaseConfig.java,v 1.8.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * This class contains all fields of the database configuration which are
+ * persistent. This class is logged as part of a nameLN so that databases can
+ * be created on replica nodes with the correct configuration.
+ */
+public class ReplicatedDatabaseConfig implements Loggable {
+
+    private byte flags;
+    private int maxMainTreeEntriesPerNode;
+    private int maxDupTreeEntriesPerNode;
+    private byte[] btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+    private byte[] duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+
+    /** For reading */
+    public ReplicatedDatabaseConfig() {
+    }
+
+    /** For writing */
+    ReplicatedDatabaseConfig(byte flags,
+                            int maxMainTreeEntriesPerNode,
+                            int maxDupTreeEntriesPerNode,
+                            byte[] btreeComparatorBytes,
+                            byte[] duplicateComparatorBytes) {
+
+        this.flags = flags;
+        this.maxMainTreeEntriesPerNode = maxMainTreeEntriesPerNode;
+        this.maxDupTreeEntriesPerNode = maxDupTreeEntriesPerNode;
+
+        if (btreeComparatorBytes != null) {
+            this.btreeComparatorBytes = btreeComparatorBytes;
+        }
+
+        if (duplicateComparatorBytes != null) {
+            this.duplicateComparatorBytes = duplicateComparatorBytes;
+        }
+    }
+
+    /**
+     * Create a database config for use on the replica which contains
+     * all the configuration options that were conveyed by way of this class.
+     */
+    public DatabaseConfig getReplicaConfig()
+        throws ClassNotFoundException, LogException {
+
+        DatabaseConfig replicaConfig = new DatabaseConfig();
+        replicaConfig.setTransactional(true);
+        replicaConfig.setSortedDuplicates
+            (DatabaseImpl.getSortedDuplicates(flags));
+        replicaConfig.setTemporary(DatabaseImpl.isTemporary(flags));
+        DbInternal.setDbConfigReplicated(replicaConfig, true);
+        replicaConfig.setNodeMaxEntries(maxMainTreeEntriesPerNode);
+        replicaConfig.setNodeMaxDupTreeEntries(maxDupTreeEntriesPerNode);
+        Comparator<byte[]> c =
+            DatabaseImpl.bytesToComparator(btreeComparatorBytes, "btree");
+        replicaConfig.setBtreeComparator(c);
+        c = DatabaseImpl.bytesToComparator(duplicateComparatorBytes,
+                                           "duplicate");
+        replicaConfig.setDuplicateComparator(c);
+        return replicaConfig;
+    }
+
+    /** @see Loggable#getLogSize */
+    public int getLogSize() {
+        return 1 + // flags, 1 byte
+            LogUtils.getPackedIntLogSize(maxMainTreeEntriesPerNode) +
+            LogUtils.getPackedIntLogSize(maxDupTreeEntriesPerNode) +
+            LogUtils.getByteArrayLogSize(btreeComparatorBytes) +
+            LogUtils.getByteArrayLogSize(duplicateComparatorBytes);
+    }
+
+    /** @see Loggable#writeToLog */
+    public void writeToLog(ByteBuffer logBuffer) {
+        logBuffer.put(flags);
+	LogUtils.writePackedInt(logBuffer, maxMainTreeEntriesPerNode);
+	LogUtils.writePackedInt(logBuffer, maxDupTreeEntriesPerNode);
+        LogUtils.writeByteArray(logBuffer, btreeComparatorBytes);
+        LogUtils.writeByteArray(logBuffer, duplicateComparatorBytes);
+    }
+
+    /** @see Loggable#readFromLog */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+	throws LogException {
+
+        /*
+         * ReplicatedDatabaseConfigs didn't exist before log version 6, so
+         * they are always packed.
+         */
+        flags = itemBuffer.get();
+        maxMainTreeEntriesPerNode =
+            LogUtils.readInt(itemBuffer, false/*unpacked*/);
+        maxDupTreeEntriesPerNode =
+            LogUtils.readInt(itemBuffer, false/*unpacked*/);
+        btreeComparatorBytes =
+            LogUtils.readByteArray(itemBuffer, false/*unpacked*/);
+        duplicateComparatorBytes =
+            LogUtils.readByteArray(itemBuffer, false/*unpacked*/);
+    }
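+
+    /*
+     * Serialization sketch (illustrative only): a config written with
+     * writeToLog can be re-read with readFromLog from a buffer sized by
+     * getLogSize(); entryVersion stands in for the current log version.
+     *
+     *   ByteBuffer buf = ByteBuffer.allocate(config.getLogSize());
+     *   config.writeToLog(buf);
+     *   buf.flip();
+     *   ReplicatedDatabaseConfig copy = new ReplicatedDatabaseConfig();
+     *   copy.readFromLog(buf, entryVersion);
+     *   assert copy.logicalEquals(config);
+     */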
+
+    /** @see Loggable#dumpLog */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<config ");
+        DatabaseImpl.dumpFlags(sb, verbose, flags);
+        sb.append(" btcmpSet=\"").append(btreeComparatorBytes !=
+                                         LogUtils.ZERO_LENGTH_BYTE_ARRAY);
+        sb.append("\" dupcmpSet=\"").append(duplicateComparatorBytes !=
+                                            LogUtils.ZERO_LENGTH_BYTE_ARRAY
+                                            ).append("\"");
+        sb.append(" />");
+    }
+
+    /** @see Loggable#getTransactionId */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /** @see Loggable#logicalEquals */
+    public boolean logicalEquals(Loggable other) {
+        if (!(other instanceof ReplicatedDatabaseConfig))
+            return false;
+
+        ReplicatedDatabaseConfig otherConfig =
+            (ReplicatedDatabaseConfig) other;
+
+        if (flags != otherConfig.flags)
+            return false;
+
+        if (maxMainTreeEntriesPerNode !=
+            otherConfig.maxMainTreeEntriesPerNode)
+            return false;
+
+        if (maxDupTreeEntriesPerNode !=
+            otherConfig.maxDupTreeEntriesPerNode)
+            return false;
+
+        if (!Arrays.equals(btreeComparatorBytes,
+                           otherConfig.btreeComparatorBytes))
+            return false;
+
+        if (!Arrays.equals(duplicateComparatorBytes,
+                           otherConfig.duplicateComparatorBytes))
+            return false;
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/ReplicatorInstance.java b/src/com/sleepycat/je/dbi/ReplicatorInstance.java
new file mode 100644
index 0000000000000000000000000000000000000000..90012ee9e9647ea315bfe2421c73a7296852bd79
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/ReplicatorInstance.java
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReplicatorInstance.java,v 1.21.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * The ReplicatorInstance is the sole conduit of replication functionality
+ * available to the core JE code. All references to any classes from
+ * com.sleepycat.je.rep* should be funnelled through this interface.
+ *
+ * Keeping a strict boundary serves to maintain the reliability of the
+ * standalone node. All ReplicatorInstance methods are prohibited from
+ * blocking, and should be examined carefully to determine whether they can
+ * throw exceptions or have any side effects which would diminish the
+ * reliability of the non-replication code paths.
+ *
+ * The ReplicatorInstance also allows us to package JE without the additional
+ * replication classes.
+ */
+public interface ReplicatorInstance {
+
+    /**
+     * Record the vlsn->lsn mapping for this just-logged log entry. This method
+     * is synchronized on the VLSNMap, and must be called outside the log write
+     * latch.
+     * @param lsn the LSN of the target log entry
+     * @param header the header of the target log entry, which contains the
+     * VLSN and log entry type.
+     */
+    public void registerVLSN(long lsn, LogEntryHeader header);
+
+    /**
+     * Increment and get the next VLSN.
+     */
+    public VLSN bumpVLSN();
+
+    /**
+     * Decrement the vlsn if there was a problem logging the entry
+     */
+    public void decrementVLSN();
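+
+    /*
+     * Usage sketch (illustrative only): bumpVLSN/decrementVLSN are meant to
+     * bracket a log write, so that a failed write does not leave a gap in
+     * the VLSN sequence.
+     *
+     *   VLSN vlsn = replicator.bumpVLSN();
+     *   try {
+     *       // write the log entry
+     *   } catch (DatabaseException e) {
+     *       replicator.decrementVLSN();
+     *       throw e;
+     *   }
+     */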
+
+    /**
+     * @return true if this node is the replication master.
+     */
+    public boolean isMaster();
+
+    /**
+     * Do any work that must be included as part of the checkpoint process.
+     * @throws DatabaseException if any activity fails
+     */
+    public void preCheckpointEndFlush() throws DatabaseException;
+
+    /**
+     * Create an appropriate type of replicated transaction. Specifically, it
+     * creates a MasterTxn if the node is currently a Master, and a
+     * ReadonlyTxn otherwise, that is, if the node is a Replica or is
+     * currently in a DETACHED state.
+     *
+     * Note that a ReplicaTxn, used for transaction replay on a Replica is not
+     * created on this path. It's created explicitly in the Replay loop by a
+     * Replica.
+     *
+     * @param envImpl the environment associated with the transaction
+     * @param config  the transaction configuration
+     *
+     * @return an instance of MasterTxn or ReadonlyTxn
+     * @throws DatabaseException
+     */
+    public Txn createRepTxn(EnvironmentImpl envImpl,
+                            TransactionConfig config)
+        throws DatabaseException;
+
+    /**
+     * A form used primarily for auto commit transactions.
+     *
+     * @see com.sleepycat.je.txn.Txn#Txn(com.sleepycat.je.dbi.EnvironmentImpl,
+     *      com.sleepycat.je.TransactionConfig, boolean, long)
+     *
+     */
+    public Txn createRepTxn(EnvironmentImpl envImpl,
+                            TransactionConfig config,
+                            boolean noAPIReadLock,
+                            long mandatedId)
+        throws DatabaseException;
+
+    /**
+     * A variation of the above, used for testing; it arranges for a
+     * specific ReplicationContext to be passed in.
+     */
+    public Txn createRepTxn(EnvironmentImpl envImpl,
+                            TransactionConfig config,
+                            ReplicationContext repContext)
+        throws DatabaseException;
+}
diff --git a/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java b/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java
new file mode 100644
index 0000000000000000000000000000000000000000..16402b17d7e109c6f45e1448234929c05340dccc
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java
@@ -0,0 +1,479 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SortedLSNTreeWalker.java,v 1.37.2.4 2010/03/23 15:02:07 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.Arrays;
+import java.util.List;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.OffsetList;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.DupCountLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * SortedLSNTreeWalker uses ordered disk access rather than random access to
+ * iterate over a database tree. Faulting in data records by on-disk order can
+ * provide much improved performance over faulting in by key order, since the
+ * latter may require random access.  SortedLSN walking does not obey cursor
+ * and locking constraints, and therefore can only be guaranteed consistent for
+ * a quiescent tree which is not being modified by user or daemon threads.
+ *
+ * The class walks over the tree using sorted LSN fetching for parts of the
+ * tree that are not in memory. It returns LSNs for each node in the tree
+ * <b>except</b> the root IN, but in an arbitrary order (i.e. not key
+ * order). The caller is responsible for getting the root IN's LSN explicitly.
+ * <p>
+ * A callback function specified in the constructor is executed for each LSN
+ * found.
+ * <p>
+ * The walker works in two phases.  The first phase is to gather and return all
+ * the INs from the INList that match the database being iterated over.  For
+ * each IN, all of the LSNs of the children are passed to the callback method
+ * (processLSN).  If the child was not in memory, it is added to a list of LSNs
+ * to read.  When all of the in-memory INs have been processed, the list of
+ * LSNs that were harvested is sorted.
+ * <p>
+ * Then for each of the sorted LSNs, the target is fetched, the type
+ * determined, and the LSN and type passed to the callback method for
+ * processing.  LSNs of the children of those nodes are retrieved and the
+ * process repeated until there are no more nodes to be fetched for this
+ * database's tree.
+ */
+public class SortedLSNTreeWalker {
+
+    /*
+     * The interface for calling back to the user with each LSN.
+     */
+    public interface TreeNodeProcessor {
+	void processLSN(long childLSN,
+                        LogEntryType childType,
+                        Node theNode,
+                        byte[] lnKey)
+	    throws DatabaseException;
+
+        /* Used for processing dirty (unlogged) deferred write LNs. [#15365] */
+	void processDirtyDeletedLN(long childLSN, LN ln, byte[] lnKey)
+	    throws DatabaseException;
+
+	/* Used when processing DW dbs where there are no LSNs. */
+	void processDupCount(int count);
+    }
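+
+    /*
+     * Usage sketch (illustrative only): supply a TreeNodeProcessor and walk
+     * a quiescent database.  Here dbImpl and rootLsn are assumed to be in
+     * hand, and setDbState is passed as false.
+     *
+     *   TreeNodeProcessor processor = new TreeNodeProcessor() {
+     *       public void processLSN(long childLSN, LogEntryType childType,
+     *                              Node theNode, byte[] lnKey) {
+     *           // count or collect each LSN here
+     *       }
+     *       public void processDirtyDeletedLN(long childLSN, LN ln,
+     *                                         byte[] lnKey) {
+     *       }
+     *       public void processDupCount(int count) {
+     *       }
+     *   };
+     *   new SortedLSNTreeWalker(dbImpl, false, rootLsn,
+     *                           processor, null, null).walk();
+     */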
+
+    /*
+     * Optionally passed to the SortedLSNTreeWalker to be called when an
+     * exception occurs.
+     */
+    public interface ExceptionPredicate {
+	/* Return true if the exception can be ignored. */
+	boolean ignoreException(Exception e);
+    }
+
+    protected DatabaseImpl dbImpl;
+    private EnvironmentImpl envImpl;
+
+    /*
+     * Save the root LSN at construction time, because the root may be
+     * nulled out before walk() executes.
+     */
+    private long rootLsn;
+
+    /* Indicates whether db has allowDuplicates set. */
+    private boolean dups;
+
+    /*
+     * Whether to call DatabaseImpl.finishedINListHarvest().
+     */
+    private boolean setDbState;
+
+    /*
+     * An array (and index) of LSNs that were accumulated in a previous pass
+     * over the tree.
+     */
+    private long[] currentLSNs;
+    private int currentLSNIdx = 0;
+
+    /*
+     * A list of LSNs being accumulated.  Once they have been accumulated, they
+     * will be moved to currentLSNs, fetched, and returned to the user.
+     *
+     * Store this in two OffsetLists, one for the file number portion of the
+     * LSN and the other for the file offset portion since OffsetLists can only
+     * store ints, not longs.
+     */
+    private OffsetList accumulatedLSNFileNumbers;
+    private OffsetList accumulatedLSNFileOffsets;
+
+    private TreeNodeProcessor callback;
+
+    /*
+     * If true, then walker should also accumulate LNs and pass them in sorted
+     * order to the TreeNodeProcessor callback method.
+     */
+    protected boolean accumulateLNs = false;
+
+    /*
+     * If true, then walker should process Dup Trees all the way to the bottom.
+     * If false, then walker only processes the root DIN and DupCountLN.
+     */
+    private boolean processDupTree = true;
+
+    /*
+     * If true, then we still pass nodes that have null LSNs (i.e. during
+     * DeferredWrite DB processing in Database.count()).
+     */
+    private boolean passNullLSNNodes = false;
+
+    /*
+     * If non-null, save any exceptions encountered while traversing nodes into
+     * this savedException list, in order to walk as much of the tree as
+     * possible. The caller of the tree walker will handle the exceptions.
+     */
+    private List<DatabaseException> savedExceptions;
+
+    private ExceptionPredicate excPredicate;
+
+    /* Holder for returning LN key from fetchLSN. */
+    private DatabaseEntry lnKeyEntry = new DatabaseEntry();
+
+    /*
+     * @param rootLsn is passed in addition to the dbImpl, because the
+     * root may be nulled out on the dbImpl before walk() is called.
+     */
+    public SortedLSNTreeWalker(DatabaseImpl dbImpl,
+			       boolean setDbState,
+                               long rootLsn,
+			       TreeNodeProcessor callback,
+                               List<DatabaseException> savedExceptions,
+			       ExceptionPredicate excPredicate)
+	throws DatabaseException {
+
+	/* This iterator is used on both deleted and undeleted databases. */
+	this.dbImpl = dbImpl;
+	this.envImpl = dbImpl.getDbEnvironment();
+	if (envImpl == null) {
+	    throw new DatabaseException
+		("environmentImpl is null for target db " +
+                 dbImpl.getDebugName());
+	}
+	this.dups = dbImpl.getSortedDuplicates();
+
+	this.setDbState = setDbState;
+        this.rootLsn = rootLsn;
+	this.callback = callback;
+        this.savedExceptions = savedExceptions;
+	this.excPredicate = excPredicate;
+	currentLSNs = new long[0];
+	currentLSNIdx = 0;
+    }
+
+    void setProcessDupTree(boolean processDupTree) {
+	this.processDupTree = processDupTree;
+    }
+
+    void setPassNullLSNNodes(boolean passNullLSNNodes) {
+	this.passNullLSNNodes = passNullLSNNodes;
+    }
+
+    void setAccumulateLNs(boolean accumulateLNs) {
+	this.accumulateLNs = accumulateLNs;
+    }
+
+    /**
+     * Find all non-resident nodes, and execute the callback.  The root IN's
+     * LSN is not returned to the callback.
+     */
+    public void walk()
+	throws DatabaseException {
+
+	walkInternal();
+    }
+
+    protected void walkInternal()
+	throws DatabaseException {
+
+	IN root = null;
+	if (rootLsn == DbLsn.NULL_LSN && !passNullLSNNodes) {
+	    return;
+	}
+
+	root = getResidentRootIN();
+	if (root == null && rootLsn != DbLsn.NULL_LSN) {
+	    root = getRootIN(rootLsn);
+	}
+	if (root != null) {
+            try {
+                accumulateLSNs(root);
+            } finally {
+                releaseRootIN(root);
+            }
+	}
+
+        if (setDbState) {
+            dbImpl.finishedINListHarvest();
+        }
+
+	while (true) {
+	    maybeGetMoreINs();
+	    if (currentLSNs != null &&
+		currentLSNIdx < currentLSNs.length) {
+                fetchAndProcessLSN(currentLSNs[currentLSNIdx++]);
+	    } else {
+		break;
+	    }
+	}
+    }
+
+    private void maybeGetMoreINs() {
+
+	if ((currentLSNs != null &&
+	     currentLSNIdx >= currentLSNs.length)) {
+
+	    if (accumulatedLSNFileNumbers == null ||
+		accumulatedLSNFileNumbers.size() == 0) {
+
+		/* Nothing left to process. Mark completion of second phase. */
+		currentLSNs = null;
+		currentLSNIdx = Integer.MAX_VALUE;
+		return;
+	    }
+
+	    long[] tempFileNumbers = accumulatedLSNFileNumbers.toArray();
+	    long[] tempFileOffsets = accumulatedLSNFileOffsets.toArray();
+	    int nLSNs = tempFileNumbers.length;
+	    currentLSNIdx = 0;
+	    currentLSNs = new long[nLSNs];
+	    for (int i = 0; i < nLSNs; i++) {
+		currentLSNs[i] =
+		    DbLsn.makeLsn(tempFileNumbers[i], tempFileOffsets[i]);
+	    }
+
+	    Arrays.sort(currentLSNs);
+	    accumulatedLSNFileNumbers = null;
+	    accumulatedLSNFileOffsets = null;
+	}
+    }
+
+    private void accumulateLSNs(IN in)
+	throws DatabaseException {
+
+	boolean accumulate = true;
+
+        /*
+         * If this is the bottom of the tree and we're not accumulating LNs,
+         * then there's no need to accumulate any more LSNs, but we still need
+         * to callback with each of them.
+         */
+	boolean childIsLN = (!dups && (in instanceof BIN)) ||
+	    (in instanceof DBIN);
+	if (childIsLN) {
+	    if (!accumulateLNs) {
+
+		/*
+		 * No need to accumulate the LSNs of a non-dup BIN or a DBIN.
+		 */
+		accumulate = false;
+	    }
+	}
+
+	boolean isDINRoot = (in instanceof DIN) && in.isRoot();
+
+	/*
+	 * Process all children, but only accumulate LSNs for children that are
+	 * not in memory.
+	 */
+	if (in != null &&
+	    (processDupTree || !in.containsDuplicates())) {
+	    for (int i = 0; i < in.getNEntries(); i++) {
+
+		long lsn = in.getLsn(i);
+		Node node = in.getTarget(i);
+
+		if (in.isEntryPendingDeleted(i) ||
+		    in.isEntryKnownDeleted(i)) {
+
+                    /* Dirty LNs (deferred write) get special treatment. */
+                    if (node instanceof LN) {
+                        LN ln = (LN) node;
+                        if (ln.isDirty()) {
+                            callback.processDirtyDeletedLN
+                                (lsn, ln, in.getKey(i));
+                        }
+                    }
+		    continue;
+		}
+
+		if (accumulate &&
+                    (node == null) &&
+                    lsn != DbLsn.NULL_LSN) {
+		    if (accumulatedLSNFileNumbers == null) {
+			accumulatedLSNFileNumbers = new OffsetList();
+			accumulatedLSNFileOffsets = new OffsetList();
+		    }
+
+		    accumulatedLSNFileNumbers.add(DbLsn.getFileNumber(lsn),
+						  false);
+		    accumulatedLSNFileOffsets.add(DbLsn.getFileOffset(lsn),
+						  false);
+
+		    /*
+		     * If we're maintaining a map from LSN to owning IN/index,
+		     * then update the map here.
+		     */
+		    addToLsnINMap(Long.valueOf(lsn), in, i);
+		    /* callback.processLSN is called when we fetch this LSN. */
+		} else if (lsn != DbLsn.NULL_LSN ||
+			   passNullLSNNodes) {
+
+		    /*
+		     * If the child is resident, use that log type, else we can
+		     * assume it's an LN.
+		     */
+                    byte[] lnKey = (node == null || node instanceof LN) ?
+                        in.getKey(i) : null;
+		    callback.processLSN(lsn,
+					(node == null) ? LogEntryType.LOG_LN :
+					node.getLogType(),
+					node,
+                                        lnKey);
+		    if (node instanceof IN) {
+                        IN nodeAsIN = (IN) node;
+                        try {
+                            nodeAsIN.latch(CacheMode.UNCHANGED);
+                            accumulateLSNs(nodeAsIN);
+                        } finally {
+                            nodeAsIN.releaseLatch();
+                        }
+		    }
+		}
+	    }
+	}
+
+        /* Handle the DupCountLN for a DIN root. */
+        if (isDINRoot) {
+	    DIN din = (DIN) in;
+	    ChildReference dupCountLNRef = din.getDupCountLNRef();
+	    long lsn = dupCountLNRef.getLsn();
+	    if (lsn == DbLsn.NULL_LSN) {
+		DupCountLN dcl = din.getDupCountLN();
+		callback.processDupCount(dcl.getDupCount());
+	    } else {
+                /* Negative index signifies a DupCountLN. */
+                addToLsnINMap(Long.valueOf(lsn), in, -1);
+		Node node = fetchLSN(lsn, lnKeyEntry);
+		callback.processLSN
+                    (lsn, LogEntryType.LOG_DUPCOUNTLN, node,
+                     dupCountLNRef.getKey());
+	    }
+        }
+    }
+
+    /*
+     * Fetch the node at 'lsn' and callback to let the invoker process it.  If
+     * it is an IN, accumulate LSNs for it.
+     */
+    private void fetchAndProcessLSN(long lsn)
+	throws DatabaseException {
+
+        try {
+            lnKeyEntry.setData(null);
+            Node node = fetchLSN(lsn, lnKeyEntry);
+            boolean isIN = (node instanceof IN);
+            IN in = null;
+            try {
+                if (isIN) {
+                    in = (IN) node;
+                    in.latch(CacheMode.UNCHANGED);
+                }
+                if (node != null) {
+                    callback.processLSN
+                        (lsn, node.getLogType(), node, lnKeyEntry.getData());
+
+                    if (isIN) {
+                        accumulateLSNs(in);
+                    }
+                }
+            } finally {
+                if (isIN) {
+                    in.releaseLatch();
+                }
+            }
+        } catch (DatabaseException e) {
+	    if (excPredicate == null ||
+		!excPredicate.ignoreException(e)) {
+		if (savedExceptions != null) {
+
+		    /*
+		     * This LSN fetch hit a failure. Do as much of the rest of
+		     * the tree as possible.
+		     */
+		    savedExceptions.add(e);
+		} else {
+		    throw e;
+		}
+	    }
+	}
+    }
+
+    /**
+     * The default behavior fetches the rootIN from the log, but classes
+     * extending this may fetch the root from the tree.
+     */
+    protected IN getRootIN(long rootLsn)
+	throws DatabaseException {
+
+	return (IN) envImpl.getLogManager().get(rootLsn);
+    }
+
+    protected IN getResidentRootIN()
+	throws DatabaseException {
+
+	return dbImpl.getTree().getResidentRootIN(false);
+    }
+
+    protected void releaseRootIN(IN ignore)
+	throws DatabaseException {
+
+	/*
+	 * There's no root IN latch in a vanilla Sorted LSN Tree Walk because
+	 * we just fetched the root from the log.
+	 */
+    }
+
+    /**
+     * @param index a negative index signifies a DupCountLN.
+     */
+    protected void addToLsnINMap(Long lsn, IN in, int index) {
+    }
+
+    protected Node fetchLSN(long lsn, DatabaseEntry lnKeyEntry)
+	throws DatabaseException {
+
+        LogEntry entry = envImpl.getLogManager().getLogEntry(lsn);
+        if (entry instanceof LNLogEntry) {
+            lnKeyEntry.setData(((LNLogEntry) entry).getKey());
+        }
+        return (Node) entry.getMainItem();
+    }
+
+    public List<DatabaseException> getSavedExceptions() {
+        return savedExceptions;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/TruncateResult.java b/src/com/sleepycat/je/dbi/TruncateResult.java
new file mode 100644
index 0000000000000000000000000000000000000000..189aa618e5a3d72ff994edd5c2216dde89f76a76
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/TruncateResult.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TruncateResult.java,v 1.7.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+/**
+ * Holds the result of a database truncate operation.
+ */
+public class TruncateResult {
+
+    private DatabaseImpl db;
+    private int count;
+
+    TruncateResult(DatabaseImpl db, int count) {
+        this.db = db;
+        this.count = count;
+    }
+
+    public DatabaseImpl getDatabase() {
+        return db;
+    }
+
+    public int getRecordCount() {
+        return count;
+    }
+}
diff --git a/src/com/sleepycat/je/evictor/Evictor.java b/src/com/sleepycat/je/evictor/Evictor.java
new file mode 100644
index 0000000000000000000000000000000000000000..5678e678a3d0b1e2e22d1a7afca1171b187883e0
--- /dev/null
+++ b/src/com/sleepycat/je/evictor/Evictor.java
@@ -0,0 +1,1040 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Evictor.java,v 1.115.2.6 2010/01/30 01:10:55 mark Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.recovery.Checkpointer;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.utilint.DaemonThread;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.Tracer;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * The Evictor looks through the INList for INs and BINs that are worthy of
+ * eviction.  Once the nodes are selected, it removes all references to them
+ * so that they can be GC'd by the JVM.
+ */
+public abstract class Evictor extends DaemonThread {
+    public static final String SOURCE_DAEMON = "daemon";
+    public static final String SOURCE_MANUAL = "manual";
+    public static final String SOURCE_CRITICAL = "critical";
+    private static final boolean DEBUG = false;
+
+    private MemoryBudget.Totals memBudgetTotals;
+
+    /* Prevent endless eviction loops under extreme resource constraints. */
+    private static final int MAX_BATCHES_PER_RUN = 100;
+
+    private Level detailedTraceLevel;  // level value for detailed trace msgs
+    private volatile boolean active;   // true if eviction is happening.
+
+    /* The number of bytes we need to evict in order to get under budget. */
+    private long currentRequiredEvictBytes;
+
+    /* 1 node out of <nodesPerScan> is chosen for eviction. */
+    private int nodesPerScan;
+
+    /* je.evictor.evictBytes */
+    private long evictBytesSetting;
+
+    /* je.evictor.lruOnly */
+    private boolean evictByLruOnly;
+
+    /* je.evictor.forceYield */
+    private boolean forcedYield;
+
+    /* je.evictor.deadlockRetry */
+    private int deadlockRetries;
+
+    /* for trace messages. */
+    private NumberFormat formatter;
+
+    /*
+     * Stats
+     */
+
+    /* Number of passes made to the evictor. */
+    private long nEvictPasses = 0;
+
+    /* Number of nodes selected to evict. */
+    private long nNodesSelected = 0;
+    private long nNodesSelectedThisRun;
+
+    /* Number of nodes scanned in order to select the eviction set */
+    private long nNodesScanned = 0;
+    private long nNodesScannedThisRun;
+
+    /*
+     * Number of nodes evicted on this run. This could be understated, as a
+     * whole subtree may have gone out with a single node.
+     */
+    private long nNodesEvicted = 0;
+    private long nNodesEvictedThisRun;
+
+    /*
+     * Number of closed database root nodes evicted on this run.
+     */
+    private long nRootNodesEvicted = 0;
+    private long nRootNodesEvictedThisRun;
+
+    /* Number of BINs stripped. */
+    private long nBINsStripped = 0;
+    private long nBINsStrippedThisRun;
+
+    /* Debugging and unit test support. */
+    EvictProfile evictProfile;
+    private TestHook runnableHook;
+    private TestHook preEvictINHook;
+
+    Evictor(EnvironmentImpl envImpl, String name)
+        throws DatabaseException {
+
+        super(0, name, envImpl);
+
+        memBudgetTotals = envImpl.getMemoryBudget().getTotals();
+
+        DbConfigManager configManager = envImpl.getConfigManager();
+        nodesPerScan = configManager.getInt
+            (EnvironmentParams.EVICTOR_NODES_PER_SCAN);
+        evictBytesSetting = configManager.getLong
+            (EnvironmentParams.EVICTOR_EVICT_BYTES);
+        evictByLruOnly = configManager.getBoolean
+            (EnvironmentParams.EVICTOR_LRU_ONLY);
+        forcedYield = configManager.getBoolean
+            (EnvironmentParams.EVICTOR_FORCED_YIELD);
+        deadlockRetries = configManager.getInt
+            (EnvironmentParams.EVICTOR_RETRY);
+        detailedTraceLevel = Tracer.parseLevel
+            (envImpl, EnvironmentParams.JE_LOGGING_LEVEL_EVICTOR);
+
+        evictProfile = new EvictProfile();
+        formatter = NumberFormat.getNumberInstance();
+
+        active = false;
+    }
+
+    /**
+     * Load stats.
+     */
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        stat.setNEvictPasses(nEvictPasses);
+        stat.setNNodesSelected(nNodesSelected);
+        stat.setNNodesScanned(nNodesScanned);
+        stat.setNNodesExplicitlyEvicted(nNodesEvicted);
+        stat.setNRootNodesEvicted(nRootNodesEvicted);
+        stat.setNBINsStripped(nBINsStripped);
+        stat.setRequiredEvictBytes(currentRequiredEvictBytes);
+
+        if (config.getClear()) {
+            nEvictPasses = 0;
+            nNodesSelected = 0;
+            nNodesScanned = 0;
+            nNodesEvicted = 0;
+            nRootNodesEvicted = 0;
+            nBINsStripped = 0;
+        }
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries()
+        throws DatabaseException {
+
+        return deadlockRetries;
+    }
+
+    /**
+     * Wakeup the evictor only if it's not already active.
+     */
+    public void alert() {
+        if (!active) {
+            wakeup();
+        }
+    }
+
+    /**
+     * Called whenever the daemon thread wakes up from a sleep.
+     */
+    public void onWakeup()
+        throws DatabaseException {
+
+        doEvict(SOURCE_DAEMON,
+                false, // criticalEviction
+                true); // backgroundIO
+    }
+
+    /**
+     * May be called by the evictor thread on wakeup or programmatically.
+     */
+    public void doEvict(String source)
+        throws DatabaseException {
+
+        doEvict(source,
+                false, // criticalEviction
+                true); // backgroundIO
+    }
+
+    /**
+     * Allows performing eviction during shutdown, which is needed during
+     * checkpointing and cleaner log file deletion.
+     */
+    private synchronized void doEvict(String source,
+                                      boolean criticalEviction,
+                                      boolean backgroundIO)
+        throws DatabaseException {
+
+        /*
+         * We use an active flag to prevent reentrant calls.  This is simpler
+         * than ensuring that no reentrant eviction can occur in any caller.
+         * We also use the active flag to determine when it is unnecessary to
+         * wake up the evictor thread.
+         */
+        if (active) {
+            return;
+        }
+        active = true;
+        try {
+
+            /*
+             * Repeat as necessary to keep up with allocations.  Stop if no
+             * progress is made, to prevent an infinite loop.
+             */
+            boolean progress = true;
+            int nBatches = 0;
+            while (progress &&
+                   (nBatches < MAX_BATCHES_PER_RUN) &&
+                   (criticalEviction || !isShutdownRequested()) &&
+                   isRunnable(source)) {
+                if (evictBatch
+                    (source, backgroundIO, currentRequiredEvictBytes) == 0) {
+                    progress = false;
+                }
+                nBatches += 1;
+            }
+        } finally {
+            active = false;
+        }
+    }
+
+    /**
+     * Do a check on whether synchronous eviction is needed.
+     */
+    public void doCriticalEviction(boolean backgroundIO)
+        throws DatabaseException {
+
+        long currentUsage  = memBudgetTotals.getCacheUsage();
+        long maxMem = memBudgetTotals.getMaxMemory();
+        long over = currentUsage - maxMem;
+
+        if (over > memBudgetTotals.getCriticalThreshold()) {
+            if (DEBUG) {
+                System.out.println("***critical detected:" + over);
+            }
+            doEvict(SOURCE_CRITICAL,
+                    true, // criticalEviction
+                    backgroundIO);
+        }
+
+        if (forcedYield) {
+            Thread.yield();
+        }
+    }
+
+    /**
+     * Each iteration will attempt to evict requiredEvictBytes, but will give
+     * up after a complete pass over the INList.
+     *
+     * @return the number of bytes evicted, or zero if no progress was made.
+     */
+    long evictBatch(String source,
+                    boolean backgroundIO,
+                    long requiredEvictBytes)
+        throws DatabaseException {
+
+        nNodesSelectedThisRun = 0;
+        nNodesEvictedThisRun = 0;
+        nRootNodesEvictedThisRun = 0;
+        nNodesScannedThisRun = 0;
+        nBINsStrippedThisRun = 0;
+        nEvictPasses++;
+
+        assert evictProfile.clear(); // intentional side effect
+        int nBatchSets = 0;
+        boolean finished = false;
+
+        /* Perform class-specific per-batch processing. */
+        long evictBytes = startBatch();
+
+        /* Must call getMaxINsPerBatch after startBatch. */
+        int maxINsPerBatch = getMaxINsPerBatch();
+        if (maxINsPerBatch == 0) {
+            return evictBytes; // The INList(s) are empty.
+        }
+
+        try {
+
+            /*
+             * Keep evicting until we've freed enough memory or we've visited
+             * the maximum number of nodes allowed. Each iteration of the while
+             * loop is called an eviction batch.
+             *
+             * In order to prevent endless evicting, limit this run to one pass
+             * over the IN list(s).
+             */
+            while ((evictBytes < requiredEvictBytes) &&
+                   (nNodesScannedThisRun <= maxINsPerBatch)) {
+
+                IN target = selectIN(maxINsPerBatch);
+
+                if (target == null) {
+                    break;
+                } else {
+                    assert evictProfile.count(target);//intentional side effect
+
+                    /*
+                     * Check to make sure the DB was not deleted after
+                     * selecting it, and prevent the DB from being deleted
+                     * while we're working with it.
+                     */
+                    DatabaseImpl targetDb = target.getDatabase();
+                    DbTree dbTree = targetDb.getDbEnvironment().getDbTree();
+                    DatabaseImpl refreshedDb = null;
+                    try {
+                        refreshedDb = dbTree.getDb(targetDb.getId());
+                        if (refreshedDb != null && !refreshedDb.isDeleted()) {
+                            if (target.isDbRoot()) {
+                                evictBytes += evictRoot(target, backgroundIO);
+                            } else {
+                                evictBytes += evictIN(target, backgroundIO);
+                            }
+                        } else {
+
+                            /*
+                             * We don't expect to see an IN that is resident on
+                             * the INList with a database that has finished
+                             * delete processing, because it should have been
+                             * removed from the INList during post-delete
+                             * cleanup.  It may have been returned by the
+                             * INList iterator after being removed from the
+                             * INList (because we're using ConcurrentHashMap),
+                             * but then IN.getInListResident should return
+                             * false.
+                             */
+                            if (targetDb.isDeleteFinished() &&
+                                target.getInListResident()) {
+                                String inInfo =
+                                    " IN type=" + target.getLogType() +
+                                    " id=" + target.getNodeId() +
+                                    " not expected on INList";
+                                String errMsg = (refreshedDb == null) ?
+                                    inInfo :
+                                    ("Database " + refreshedDb.getDebugName() +
+                                     " id=" + refreshedDb.getId() +
+                                     " rootLsn=" +
+                                     DbLsn.getNoFormatString
+                                         (refreshedDb.getTree().getRootLsn()) +
+                                     ' ' + inInfo);
+                                throw new DatabaseException(errMsg);
+                            }
+                        }
+                    } finally {
+                        dbTree.releaseDb(refreshedDb);
+                    }
+                }
+                nBatchSets++;
+            }
+
+            finished = true;
+
+        } finally {
+            nNodesScanned += nNodesScannedThisRun;
+
+            Logger logger = getLogger();
+            if (logger != null && logger.isLoggable(detailedTraceLevel)) {
+                /* Ugh, only create trace message when logging. */
+                logger.log(detailedTraceLevel,
+                             "Evictor: pass=" + nEvictPasses +
+                             " finished=" + finished +
+                             " source=" + source +
+                             " requiredEvictBytes=" +
+                             formatter.format(requiredEvictBytes) +
+                             " evictBytes=" +
+                             formatter.format(evictBytes) +
+                             " inListSize=" + maxINsPerBatch +
+                             " nNodesScanned=" + nNodesScannedThisRun +
+                             " nNodesSelected=" + nNodesSelectedThisRun +
+                             " nNodesEvicted=" + nNodesEvictedThisRun +
+                             " nRootNodesEvicted=" + nRootNodesEvictedThisRun +
+                             " nBINsStripped=" + nBINsStrippedThisRun +
+                             " nBatchSets=" + nBatchSets);
+            }
+        }
+
+        assert LatchSupport.countLatchesHeld() == 0: "latches held = " +
+            LatchSupport.countLatchesHeld();
+
+        return evictBytes;
+    }
+
+    /**
+     * Return true if eviction should happen.  As a side effect, if true is
+     * returned the currentRequiredEvictBytes is set.
+     */
+    private boolean isRunnable(String source)
+        throws DatabaseException {
+
+        long currentUsage  = memBudgetTotals.getCacheUsage();
+        long maxMem = memBudgetTotals.getMaxMemory();
+        long overBudget = currentUsage - maxMem;
+        boolean doRun = (overBudget > 0);
+
+        /* If running, figure out how much to evict. */
+        if (doRun) {
+            currentRequiredEvictBytes = overBudget + evictBytesSetting;
+            /* Don't evict the cache below 50% of its max size. */
+            if (currentUsage - currentRequiredEvictBytes < maxMem / 2) {
+                currentRequiredEvictBytes = overBudget + (maxMem / 2);
+            }
+            if (DEBUG) {
+                if (SOURCE_CRITICAL.equals(source)) {
+                    System.out.println("executed: critical runnable");
+                }
+            }
+        }
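+
+        /*
+         * Worked example (illustrative only): with maxMem = 100MB, cache
+         * usage = 110MB and evictBytesSetting = 5MB, overBudget is 10MB, so
+         * 15MB of eviction is requested, leaving usage at 95MB.  If the
+         * request would drop usage below maxMem / 2, it is clamped so that
+         * eviction stops at exactly 50MB of usage.
+         */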
+
+        /* unit testing, force eviction */
+        if (runnableHook != null) {
+            doRun = ((Boolean) runnableHook.getHookValue()).booleanValue();
+            currentRequiredEvictBytes = maxMem;
+        }
+
+        /*
+         * This trace message is expensive, only generate if tracing at this
+         * level is enabled.
+         */
+        Logger logger = getLogger();
+        if (logger != null && logger.isLoggable(detailedTraceLevel)) {
+
+            /*
+             * Generate debugging output. Note that Runtime.freeMemory
+             * fluctuates over time as the JVM grabs more memory, so you really
+             * have to do totalMemory - freeMemory to get heap usage.  (You
+             * can't get the concept of memory available from free memory.)
+             */
+            Runtime r = Runtime.getRuntime();
+            long totalBytes = r.totalMemory();
+            long freeBytes = r.freeMemory();
+            long usedBytes = totalBytes - freeBytes;
+            StringBuffer sb = new StringBuffer();
+            sb.append(" source=").append(source);
+            sb.append(" doRun=").append(doRun);
+            sb.append(" JEusedBytes=").append(formatter.format(currentUsage));
+            sb.append(" requiredEvict=").
+                append(formatter.format(currentRequiredEvictBytes));
+            sb.append(" JVMtotalBytes= ").append(formatter.format(totalBytes));
+            sb.append(" JVMfreeBytes= ").append(formatter.format(freeBytes));
+            sb.append(" JVMusedBytes= ").append(formatter.format(usedBytes));
+            logger.log(detailedTraceLevel, sb.toString());
+        }
+
+        return doRun;
+    }
+
+    /**
+     * Select a single node to evict.
+     */
+    private IN selectIN(int maxNodesToIterate)
+        throws DatabaseException {
+
+        /* Find the best target in the next <nodesPerScan> nodes. */
+        IN target = null;
+        long targetGeneration = Long.MAX_VALUE;
+        int targetLevel = Integer.MAX_VALUE;
+        boolean targetDirty = true;
+
+        /* The nodesPerScan limit is on nodes that qualify for eviction. */
+        int nCandidates = 0;
+
+        /* The limit on iterated nodes is to prevent an infinite loop. */
+        int nIterated = 0;
+
+        while (nIterated <  maxNodesToIterate && nCandidates < nodesPerScan) {
+            IN in = getNextIN();
+            if (in == null) {
+                break; // INList is empty
+            }
+            nIterated++;
+            nNodesScannedThisRun++;
+
+            DatabaseImpl db = in.getDatabase();
+
+            /*
+             * Ignore the IN if its database is deleted.  We have not called
+             * getDb, so we can't guarantee that the DB is valid; get Db is
+             * called and this is checked again after an IN is selected for
+             * eviction.
+             */
+            if (db == null || db.isDeleted()) {
+                continue;
+            }
+
+            /*
+             * If this is a read-only environment, skip any dirty INs (recovery
+             * dirties INs even in a read-only environment).
+             */
+            if (db.getDbEnvironment().isReadOnly() &&
+                in.getDirty()) {
+                continue;
+            }
+
+            /*
+             * Only scan evictable or strippable INs.  This prevents higher
+             * level INs from being selected for eviction, unless they are
+             * part of an unused tree.
+             */
+            int evictType = in.getEvictionType();
+            if (evictType == IN.MAY_NOT_EVICT) {
+                continue;
+            }
+
+            /*
+             * This node is in the scanned node set.  Select according to
+             * the configured eviction policy.
+             */
+            if (evictByLruOnly) {
+
+                /*
+                 * Select the node with the lowest generation number,
+                 * irrespective of tree level or dirtiness.
+                 */
+                if (targetGeneration > in.getGeneration()) {
+                    targetGeneration = in.getGeneration();
+                    target = in;
+                }
+            } else {
+
+                /*
+                 * Select first by tree level, then by dirtiness, then by
+                 * generation/LRU.
+                 */
+                int level = normalizeLevel(in, evictType);
+                if (targetLevel != level) {
+                    if (targetLevel > level) {
+                        targetLevel = level;
+                        targetDirty = in.getDirty();
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                } else if (targetDirty != in.getDirty()) {
+                    if (targetDirty) {
+                        targetDirty = false;
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                } else {
+                    if (targetGeneration > in.getGeneration()) {
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                }
+            }
+            nCandidates++;
+        }
+
+        if (target != null) {
+            nNodesSelectedThisRun++;
+            nNodesSelected++;
+        }
+        return target;
+    }
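+
+    /*
+     * Worked example (illustrative only): candidates described as (level,
+     * dirty, generation) of (1, clean, 50), (0, dirty, 90) and
+     * (0, clean, 70) are compared by level first, then cleanliness, then
+     * generation, so (0, clean, 70) is selected.
+     */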
+
+    /**
+     * Normalize the tree level of the given IN.
+     *
+     * Is public for unit testing.
+     *
+     * A BIN containing evictable LNs is given level 0, so it will be stripped
+     * first.  For non-duplicate and DBMAP trees, the high order bits are
+     * cleared to make their levels correspond; that way, all bottom level
+     * nodes (BINs and DBINs) are given the same eviction priority.
+     *
+     * Note that BINs in a duplicate tree are assigned the same level as BINs
+     * in a non-duplicate tree.  This isn't always optimal, but is the best
+     * we can do considering that BINs in duplicate trees may contain a mix of
+     * LNs and DINs.
+     *
+     * BINs in the mapping tree are also assigned the same level as user DB
+     * BINs.  When doing by-level eviction (lruOnly=false), this seems
+     * counter-intuitive since we should evict user DB nodes before mapping DB
+     * nodes.  But that does occur because mapping DB INs referencing an open
+     * DB are unevictable.  The level is only used for selecting among
+     * evictable nodes.
+     *
+     * If we did NOT normalize the level for the mapping DB, then INs for
+     * closed evictable DBs would not be evicted until after all nodes in all
+     * user DBs were evicted.  If there were large numbers of closed DBs, this
+     * would have a negative performance impact.
+     */
+    public int normalizeLevel(IN in, int evictType) {
+
+        int level = in.getLevel() & IN.LEVEL_MASK;
+
+        if (level == 1 && evictType == IN.MAY_EVICT_LNS) {
+            level = 0;
+        }
+
+        return level;
+    }
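+
+    /*
+     * Worked example (illustrative only): a user-DB BIN has raw level 1 and,
+     * when its LNs may be evicted (MAY_EVICT_LNS), normalizes to 0 so it is
+     * stripped first.  DBINs and mapping-tree BINs mask down to the same
+     * bottom level, so all bottom-level nodes compete equally.
+     */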
+
+    /**
+     * Evict this DB root node.  [#13415]
+     * @return number of bytes evicted.
+     */
+    private long evictRoot(final IN target,
+                           final boolean backgroundIO)
+        throws DatabaseException {
+
+        final DatabaseImpl db = target.getDatabase();
+        final EnvironmentImpl envImpl = db.getDbEnvironment();
+        final INList inList = envImpl.getInMemoryINs();
+
+        class RootEvictor implements WithRootLatched {
+
+            boolean flushed = false;
+            long evictBytes = 0;
+
+            public IN doWork(ChildReference root)
+                throws DatabaseException {
+
+                IN rootIN = (IN) root.fetchTarget(db, null);
+                rootIN.latch(CacheMode.UNCHANGED);
+                try {
+                    /* Re-check that all conditions still hold. */
+                    boolean isDirty = rootIN.getDirty();
+                    if (rootIN == target &&
+                        rootIN.isDbRoot() &&
+                        rootIN.isEvictable() &&
+                        !(envImpl.isReadOnly() && isDirty)) {
+
+                        /* Flush if dirty. */
+                        if (isDirty) {
+                            long newLsn = rootIN.log
+                                (envImpl.getLogManager(),
+                                 false, // allowDeltas
+                                 isProvisionalRequired(rootIN),
+                                 true,  // proactiveMigration
+                                 backgroundIO,
+                                 null); // parent
+                            root.setLsn(newLsn);
+                            flushed = true;
+                        }
+
+                        /* Take off the INList and adjust memory budget. */
+                        inList.remove(rootIN);
+                        evictBytes = rootIN.getBudgetedMemorySize();
+
+                        /* Evict IN. */
+                        root.clearTarget();
+
+                        /* Stats */
+                        nRootNodesEvictedThisRun++;
+                        nRootNodesEvicted++;
+                    }
+                } finally {
+                    rootIN.releaseLatch();
+                }
+                return null;
+            }
+        }
+
+        /* Attempt to evict the DB root IN. */
+        RootEvictor evictor = new RootEvictor();
+        db.getTree().withRootLatchedExclusive(evictor);
+
+        /* If the root IN was flushed, write the dirtied MapLN. */
+        if (evictor.flushed) {
+            envImpl.getDbTree().modifyDbRoot(db);
+        }
+
+        return evictor.evictBytes;
+    }
+
+    /**
+     * Strip or evict this node.
+     * @return number of bytes evicted.
+     */
+    private long evictIN(IN target, boolean backgroundIO)
+        throws DatabaseException {
+
+        DatabaseImpl db = target.getDatabase();
+        EnvironmentImpl envImpl = db.getDbEnvironment();
+        long evictedBytes = 0;
+
+        /*
+         * Use a tracker to count lazily compressed, deferred write, LNs as
+         * obsolete.  A local tracker is used to accumulate tracked obsolete
+         * info so it can be added in a single call under the log write latch.
+         * [#15365]
+         */
+        LocalUtilizationTracker localTracker = null;
+
+        /*
+         * Non-BIN INs are evicted by detaching them from their parent.  For
+         * BINS, the first step is to remove deleted entries by compressing
+         * the BIN. The evictor indicates that we shouldn't fault in
+         * non-resident children during compression. After compression,
+         * LN logging and LN stripping may be performed.
+         *
+         * If LN stripping is used, first we strip the BIN by logging any dirty
+         * LN children and detaching all its resident LN targets.  If we make
+         * progress doing that, we stop and will not evict the BIN itself until
+         * possibly later.  If it has no resident LNs then we evict the BIN
+         * itself using the "regular" detach-from-parent routine.
+         *
+         * If the cleaner is doing clustering, we don't do BIN stripping if we
+         * can write out the BIN.  Specifically LN stripping is not performed
+         * if the BIN is dirty AND the BIN is evictable AND cleaner
+         * clustering is enabled.  In this case the BIN is going to be written
+         * out soon, and with clustering we want to be sure to write out the
+         * LNs with the BIN; therefore we don't do stripping.
+         */
+
+        /*
+         * Use latchNoWait because if it's latched we don't want the cleaner
+         * to hold up eviction while it migrates an entire BIN.  Latched INs
+         * have a high generation value, so not evicting makes sense.  Pass
+         * CacheMode.UNCHANGED because we don't want to change the generation
+         * during the eviction process.
+         */
+        if (target.latchNoWait(CacheMode.UNCHANGED)) {
+            boolean targetIsLatched = true;
+            try {
+                if (target instanceof BIN) {
+                    /* First attempt to compress deleted, resident children. */
+                    localTracker = new LocalUtilizationTracker(envImpl);
+                    envImpl.lazyCompress(target, localTracker);
+
+                    /*
+                     * Strip any resident LN targets right now. This may dirty
+                     * the BIN if dirty LNs were written out. Note that
+                     * migrated BIN entries cannot be stripped.
+                     */
+                    evictedBytes = ((BIN) target).evictLNs();
+                    if (evictedBytes > 0) {
+                        nBINsStrippedThisRun++;
+                        nBINsStripped++;
+                    }
+                }
+
+                /*
+                 * If we were able to free any memory by LN stripping above,
+                 * then we postpone eviction of the BIN until a later pass.
+                 * The presence of migrated entries would have inhibited LN
+                 * stripping. In that case, the BIN can still be evicted,
+                 * but the marked entries will have to be migrated. That would
+                 * happen when the target is logged in evictIN.
+                 */
+                if (evictedBytes == 0 && target.isEvictable()) {
+                    /* Regular eviction. */
+                    Tree tree = db.getTree();
+
+                    /*
+                     * Unit testing.  The target is latched and we are about to
+                     * release that latch and search for the parent.  Make sure
+                     * that other operations, such as dirtying an LN in the
+                     * target BIN, can occur safely in this window.  [#18227]
+                     */
+                    assert TestHookExecute.doHookIfSet(preEvictINHook);
+
+                    /* getParentINForChildIN unlatches target. */
+                    targetIsLatched = false;
+                    SearchResult result =
+                        tree.getParentINForChildIN
+                        (target,
+                         true,   // requireExactMatch
+                         CacheMode.UNCHANGED);
+
+                    if (result.exactParentFound) {
+                        evictedBytes = evictIN(target, result.parent,
+                                               result.index, backgroundIO);
+                    }
+                }
+            } finally {
+                if (targetIsLatched) {
+                    target.releaseLatch();
+                }
+            }
+        }
+
+        /*
+         * Count obsolete nodes and write out modified file summaries for
+         * recovery.  All latches must have been released. [#15365]
+         */
+        if (localTracker != null) {
+            envImpl.getUtilizationProfile().flushLocalTracker(localTracker);
+        }
+
+        return evictedBytes;
+    }
+
+    /**
+     * Evict an IN. Dirty nodes are logged before they're evicted.
+     */
+    private long evictIN(IN child, IN parent, int index, boolean backgroundIO)
+        throws DatabaseException {
+
+        long evictBytes = 0;
+        try {
+            assert parent.isLatchOwnerForWrite();
+
+            long oldGenerationCount = child.getGeneration();
+
+            /*
+             * Get a new reference to the child, in case the reference
+             * saved in the selection list became out of date because of
+             * changes to that parent.
+             */
+            IN renewedChild = (IN) parent.getTarget(index);
+
+            /*
+             * See the evictIN() method in this class for an explanation of
+             * the latchNoWait() call.
+             */
+            if ((renewedChild != null) &&
+                (renewedChild.getGeneration() <= oldGenerationCount) &&
+                renewedChild.latchNoWait(CacheMode.UNCHANGED)) {
+
+                try {
+                    if (renewedChild.isEvictable()) {
+
+                        DatabaseImpl db = renewedChild.getDatabase();
+                        EnvironmentImpl envImpl = db.getDbEnvironment();
+
+                        /*
+                         * Log the child if dirty and env is not r/o. Remove
+                         * from IN list.
+                         */
+                        long renewedChildLsn = DbLsn.NULL_LSN;
+                        boolean newChildLsn = false;
+                        if (renewedChild.getDirty()) {
+                            if (!envImpl.isReadOnly()) {
+                                boolean logProvisional =
+                                    isProvisionalRequired(renewedChild);
+
+                                /*
+                                 * Log a full version (no deltas) and with
+                                 * cleaner migration allowed.
+                                 */
+                                renewedChildLsn = renewedChild.log
+                                    (envImpl.getLogManager(),
+                                     false, // allowDeltas
+                                     logProvisional,
+                                     true,  // proactiveMigration
+                                     backgroundIO,
+                                     parent);
+                                newChildLsn = true;
+                            }
+                        } else {
+                            renewedChildLsn = parent.getLsn(index);
+                        }
+
+                        if (renewedChildLsn != DbLsn.NULL_LSN) {
+                            /* Take this off the INList. */
+                            envImpl.getInMemoryINs().remove(renewedChild);
+
+                            evictBytes = renewedChild.getBudgetedMemorySize();
+                            if (newChildLsn) {
+
+                                /*
+                                 * Update the parent so its reference is
+                                 * null and it has the proper LSN.
+                                 */
+                                parent.updateNode
+                                    (index, null /*node*/, renewedChildLsn,
+                                     null /*lnSlotKey*/);
+                            } else {
+
+                                /*
+                                 * Null out the reference, but don't dirty
+                                 * the node since only the reference
+                                 * changed.
+                                 */
+                                parent.updateNode
+                                    (index, (Node) null /*node*/,
+                                     null /*lnSlotKey*/);
+                            }
+
+                            /* Stats */
+                            nNodesEvictedThisRun++;
+                            nNodesEvicted++;
+                        }
+                    }
+                } finally {
+                    renewedChild.releaseLatch();
+                }
+            }
+        } finally {
+            parent.releaseLatch();
+        }
+
+        return evictBytes;
+    }
+
+    /**
+     * @return true if the node must be logged provisionally.
+     */
+    private boolean isProvisionalRequired(IN target) {
+
+        DatabaseImpl db = target.getDatabase();
+        EnvironmentImpl envImpl = db.getDbEnvironment();
+
+        /*
+         * The evictor has to log provisionally in two cases:
+         * a - the checkpointer is in progress, and is at a level above the
+         * target eviction victim. We don't want the evictor's actions to
+         * introduce an IN that has not cascaded up properly.
+         * b - the eviction target is part of a deferred write database.
+         */
+        if (db.isDeferredWriteMode()) {
+            return true;
+        }
+
+        /*
+         * The checkpointer could be null if it was shut down or never
+         * started.
+         */
+        Checkpointer ckpter = envImpl.getCheckpointer();
+        if ((ckpter != null) &&
+            (target.getLevel() < ckpter.getHighestFlushLevel(db))) {
+            return true;
+        }
+
+        return false;
+    }
+
+    /* For unit testing only. */
+    public void setRunnableHook(TestHook hook) {
+        runnableHook = hook;
+    }
+
+    /* For unit testing only. */
+    public void setPreEvictINHook(TestHook hook) {
+        preEvictINHook = hook;
+    }
+
+    /**
+     * Standard daemon method to set envImpl to null.
+     */
+    public abstract void clearEnv();
+
+    /**
+     * Called whenever INs are added to, or removed from, the INList.
+     */
+    public abstract void noteINListChange(int nINs);
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public abstract void addEnvironment(EnvironmentImpl envImpl);
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public abstract void removeEnvironment(EnvironmentImpl envImpl);
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public abstract boolean checkEnvs(Set<EnvironmentImpl> envs);
+
+    abstract Logger getLogger();
+
+    /**
+     * Perform class-specific batch processing: Initialize iterator, perform
+     * UtilizationTracker eviction, etc.  No latches may be held when this
+     * method is called.
+     *
+     * startBatch must be called before getMaxINsPerBatch.
+     */
+    abstract long startBatch()
+        throws DatabaseException;
+
+    /**
+     * Returns the approximate number of total INs in the INList(s).  One
+     * eviction batch will scan at most this number of INs.  If zero is
+     * returned, selectIN will not be called.
+     *
+     * startBatch must be called before getMaxINsPerBatch.
+     */
+    abstract int getMaxINsPerBatch();
+
+    /**
+     * Returns the next IN in the INList(s), wrapping if necessary.
+     */
+    abstract IN getNextIN();
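+
+    /*
+     * An illustrative sketch of how the batch contract above fits together
+     * (a sketch only; the eviction batch logic in this class is
+     * authoritative):
+     *
+     *   startBatch();
+     *   int max = getMaxINsPerBatch();
+     *   for (int i = 0; i < max; i += 1) {
+     *       IN in = getNextIN();
+     *       if (in == null) {
+     *           break;
+     *       }
+     *       // consider "in" as an eviction candidate
+     *   }
+     */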
+
+    /* For unit testing only.  Supported only by PrivateEvictor. */
+    abstract Iterator<IN> getScanIterator();
+
+    /* For unit testing only.  Supported only by PrivateEvictor. */
+    abstract void setScanIterator(Iterator<IN> iter);
+
+    /* For debugging and unit tests. */
+    static class EvictProfile {
+        /* Keep a list of candidate nodes. */
+        private List<Long> candidates = new ArrayList<Long>();
+
+        /* Remember that this node was targeted. */
+        public boolean count(IN target) {
+            candidates.add(Long.valueOf(target.getNodeId()));
+            return true;
+        }
+
+        public List<Long> getCandidates() {
+            return candidates;
+        }
+
+        public boolean clear() {
+            candidates.clear();
+            return true;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/evictor/PrivateEvictor.java b/src/com/sleepycat/je/evictor/PrivateEvictor.java
new file mode 100644
index 0000000000000000000000000000000000000000..8be8b87a882510521fdb7c27907b011390883760
--- /dev/null
+++ b/src/com/sleepycat/je/evictor/PrivateEvictor.java
@@ -0,0 +1,142 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrivateEvictor.java,v 1.4.2.2 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.util.Iterator;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * The standard Evictor that operates on the INList for a single environment.
+ * A single iterator over the INList is used to implement getNextIN.
+ */
+public class PrivateEvictor extends Evictor {
+
+    private EnvironmentImpl envImpl;
+
+    private Iterator<IN> scanIter;
+
+    public PrivateEvictor(EnvironmentImpl envImpl, String name)
+        throws DatabaseException {
+
+        super(envImpl, name);
+        this.envImpl = envImpl;
+        scanIter = null;
+    }
+
+    @Override
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        stat.setNSharedCacheEnvironments(0);
+        super.loadStats(config, stat);
+    }
+
+    @Override
+    public void onWakeup()
+        throws DatabaseException {
+
+        if (!envImpl.isClosed()) {
+            super.onWakeup();
+        }
+    }
+
+    /**
+     * Standard daemon method to set envImpl to null.
+     */
+    public void clearEnv() {
+        envImpl = null;
+    }
+
+    /**
+     * Do nothing.
+     */
+    public void noteINListChange(int nINs) {
+    }
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public void addEnvironment(EnvironmentImpl envImpl) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public void removeEnvironment(EnvironmentImpl envImpl) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Only supported by SharedEvictor.
+     */
+    public boolean checkEnvs(Set<EnvironmentImpl> envs) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Standard logging is supported by PrivateEvictor.
+     */
+    Logger getLogger() {
+        return envImpl.getLogger();
+    }
+
+    /**
+     * Initializes the iterator, and performs UtilizationTracker eviction once
+     * per batch.
+     */
+    long startBatch()
+        throws DatabaseException {
+
+        if (scanIter == null) {
+            scanIter = envImpl.getInMemoryINs().iterator();
+        }
+
+        /* Evict utilization tracking info without holding any latches. */
+        return envImpl.getUtilizationTracker().evictMemory();
+    }
+
+    /**
+     * Returns the simple INList size.
+     */
+    int getMaxINsPerBatch() {
+        return envImpl.getInMemoryINs().getSize();
+    }
+
+    /**
+     * Returns the next IN, wrapping if necessary.
+     */
+    IN getNextIN() {
+        if (envImpl.getMemoryBudget().isTreeUsageAboveMinimum()) {
+            if (!scanIter.hasNext()) {
+                scanIter = envImpl.getInMemoryINs().iterator();
+            }
+            return scanIter.hasNext() ? scanIter.next() : null;
+        } else {
+            return null;
+        }
+    }
+
+    /* For unit testing only. */
+    Iterator<IN> getScanIterator() {
+        return scanIter;
+    }
+
+    /* For unit testing only. */
+    void setScanIterator(Iterator<IN> iter) {
+        scanIter = iter;
+    }
+}
diff --git a/src/com/sleepycat/je/evictor/SharedEvictor.java b/src/com/sleepycat/je/evictor/SharedEvictor.java
new file mode 100644
index 0000000000000000000000000000000000000000..d717b4b35ba4c271c551cf6672959692d1add3ca
--- /dev/null
+++ b/src/com/sleepycat/je/evictor/SharedEvictor.java
@@ -0,0 +1,306 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SharedEvictor.java,v 1.4.2.3 2010/01/04 15:30:28 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * The Evictor that operates on the INLists for multiple environments that
+ * share a single cache.  Multiple iterators, one for each INList, are used to
+ * implement getNextIN.  INs are returned from each iterator in a
+ * round-robin rotation, giving larger INLists proportionally more rotations.
+ * This "mixes up" the INs from all INLists so that the LRU algorithm is
+ * applied across all lists.
+ */
+public class SharedEvictor extends Evictor {
+
+    /**
+     * MIN_ROTATIONS is the number of rotations given to the smallest INList in
+     * a single "round".  In each round we return a number of INs from each
+     * INList that is proportional to the relative size of the INList.
+     *
+     * Imagine 2 INLists, one with 70 INs and the other with 100 INs.  If we
+     * use simple integer division to create the ratio of their sizes, the
+     * result is either 1-to-1 or 1-to-2.  To create a more accurate ratio we
+     * multiply the size of the INLists by MIN_ROTATIONS to get the initial
+     * Subject.remaining value.  But for each rotation we subtract from
+     * remaining the size of the smallest INList, without multiplying.  When
+     * the remaining value is less than zero, that INList is taken out of the
+     * rotation.  In this example the ratio would be:
+     *
+     *   (70*10)/70 to (100*10)/70, or 10-to-14
+     *
+     * So in one complete round we would return 10 INs from the INList with 70
+     * INs and 14 INs from the INList with 100 INs.
+     */
+    private static final int MIN_ROTATIONS = 10;
+
+    /**
+     * We re-initialize sizes after 1/INIT_SIZE_THRESHOLD of all INs have
+     * changed, to account for changes to the relative sizes of the INLists.
+     * We don't re-initialize sizes every time the INLists are changed because
+     * it is relatively expensive to call ConcurrentHashMap.size.  Sizes are
+     * re-initialized:
+     * - before the first eviction
+     * - after an environment is added or removed
+     * - after 1/INIT_SIZE_THRESHOLD of all INs have been added or removed
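+     *
+     * For example, with INIT_SIZE_THRESHOLD at 10 and a total of 1,000 INs
+     * across all INLists, sizes are re-initialized after roughly 100 INs
+     * have been added or removed.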
+     */
+    private static final int INIT_SIZE_THRESHOLD = 10;
+
+    /**
+     * A Subject is an environment that is sharing the global cache, along
+     * with its related iteration state.
+     */
+    private static class Subject {
+        EnvironmentImpl env;
+        INList ins;
+        Iterator<IN> iter;
+        int size;
+        int remaining;
+    }
+
+    private List<Subject> subjects;
+    private int rotationIndex;
+    private int trackerEvictionIndex;
+    private boolean needInitSizes;
+    private int smallestSize;
+    private int totalSize;
+    private AtomicInteger changedINs;
+
+    public SharedEvictor(EnvironmentImpl env, String name)
+        throws DatabaseException {
+
+        super(env, name);
+        subjects = new ArrayList<Subject>();
+        changedINs = new AtomicInteger();
+        needInitSizes = true;
+    }
+
+    @Override
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        /* The lack of synchronization on subjects is intentional here. */
+        stat.setNSharedCacheEnvironments(subjects.size());
+        super.loadStats(config, stat);
+    }
+
+    /**
+     * Only supported by PrivateEvictor.
+     */
+    public void clearEnv() {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * After 1/INIT_SIZE_THRESHOLD of all INs have been changed, reinitialize
+     * the sizes.
+     */
+    public void noteINListChange(int nINs) {
+        if (changedINs.addAndGet(nINs) > totalSize / INIT_SIZE_THRESHOLD) {
+            needInitSizes = true;
+        }
+    }
+
+    /**
+     * Synchronized so that the set of environments cannot be changed in the
+     * middle of an eviction (which is also synchronized).
+     */
+    public synchronized void addEnvironment(EnvironmentImpl env) {
+        int nSubjects = subjects.size();
+        for (int i = 0; i < nSubjects; i += 1) {
+            Subject subject = subjects.get(i);
+            if (subject.env == env) {
+                return;
+            }
+        }
+        Subject subject = new Subject();
+        subject.env = env;
+        subject.ins = env.getInMemoryINs();
+        subjects.add(subject);
+        needInitSizes = true;
+    }
+
+    /**
+     * Synchronized so that the set of environments cannot be changed in the
+     * middle of an eviction (which is also synchronized).
+     */
+    public synchronized void removeEnvironment(EnvironmentImpl env) {
+        int nSubjects = subjects.size();
+        for (int i = 0; i < nSubjects; i += 1) {
+            Subject subject = subjects.get(i);
+            if (subject.env == env) {
+                subjects.remove(i);
+                needInitSizes = true;
+                return;
+            }
+        }
+    }
+
+    /**
+     * Returns true if the given set of environments matches the set of subject
+     * environments.  Used in assertions.
+     */
+    public boolean checkEnvs(Set<EnvironmentImpl> envs) {
+        int nSubjects = subjects.size();
+        if (nSubjects != envs.size()) {
+            return false;
+        }
+        for (int i = 0; i < nSubjects; i += 1) {
+            Subject subject = subjects.get(i);
+            if (!envs.contains(subject.env)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Currently returns null to disable logging in the SharedEvictor.
+     */
+    Logger getLogger() {
+        return null;
+    }
+
+    /**
+     * Initializes the sizes if needed, and performs UtilizationTracker
+     * eviction for one environment in rotation.
+     */
+    long startBatch()
+        throws DatabaseException {
+
+        /* Init sizes for the first eviction or after env add/remove. */
+        if (needInitSizes) {
+            initSizes();
+        }
+        /* Evict utilization tracking info without holding any latches. */
+        int nSubjects = subjects.size();
+        if (nSubjects > 0) {
+            if (trackerEvictionIndex >= nSubjects) {
+                trackerEvictionIndex = 0;
+            }
+            Subject subject = subjects.get(trackerEvictionIndex);
+            trackerEvictionIndex += 1;
+            return subject.env.getUtilizationTracker().evictMemory();
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Returns the total of all INList sizes, as of the last time sizes were
+     * initialized.
+     */
+    int getMaxINsPerBatch() {
+        return totalSize;
+    }
+
+    /**
+     * Returns the next IN, wrapping if necessary.  Returns a number of INs
+     * from each INList that is proportional to the sizes of the lists.  When a
+     * round is complete (we have returned the correct ratio from all INLists
+     * and all Subject.remaining fields are zero or less), start a new round
+     * by reinitializing the Subject.remaining fields.
+     */
+    IN getNextIN() {
+        int nSubjects = subjects.size();
+        if (nSubjects == 0) {
+            /* No environments are sharing the cache. */
+            return null;
+        }
+        int nSubjectsExamined = 0;
+        while (true) {
+            if (rotationIndex >= nSubjects) {
+                rotationIndex = 0;
+            }
+            Subject subject = subjects.get(rotationIndex);
+            rotationIndex += 1;
+            if (subject.remaining > 0 && isEvictionAllowed(subject)) {
+                subject.remaining -= smallestSize;
+                if (subject.iter == null || !subject.iter.hasNext()) {
+                    subject.iter = subject.ins.iterator();
+                }
+                if (subject.iter.hasNext()) {
+                    /* Found an IN to return. */
+                    return subject.iter.next();
+                } else {
+                    /* This INList is empty. */
+                    subject.remaining = -1;
+                }
+            }
+            nSubjectsExamined += 1;
+            if (nSubjectsExamined >= nSubjects) {
+                /* All Subject.remaining fields are <= 0. */
+                boolean foundAny = false;
+                for (int i = 0; i < nSubjects; i += 1) {
+                    Subject sub = subjects.get(i);
+                    if (sub.size > 0) {
+                        sub.remaining = sub.size * MIN_ROTATIONS;
+                        if (isEvictionAllowed(sub)) {
+                            foundAny = true;
+                        }
+                    }
+                }
+                if (!foundAny) {
+                    /* All INLists are empty or not evictable. */
+                    return null;
+                }
+                /* Start a new round. */
+                nSubjectsExamined = 0;
+            }
+        }
+    }
+
+    private boolean isEvictionAllowed(Subject subject) {
+        return subject.env.getMemoryBudget().isTreeUsageAboveMinimum();
+    }
+
+    /**
+     * Sets up the Subject size and remaining fields, and resets the rotation
+     * to the beginning.
+     */
+    private void initSizes() {
+        totalSize = 0;
+        smallestSize = Integer.MAX_VALUE;
+        int nSubjects = subjects.size();
+        for (int i = 0; i < nSubjects; i += 1) {
+            Subject subject = subjects.get(i);
+            int size = subject.ins.getSize();
+            if (smallestSize > size) {
+                smallestSize = size;
+            }
+            totalSize += size;
+            subject.size = size;
+            subject.remaining = size * MIN_ROTATIONS;
+        }
+        needInitSizes = false;
+    }
+
+    /* For unit testing only.  Supported only by PrivateEvictor. */
+    Iterator<IN> getScanIterator() {
+        throw new UnsupportedOperationException();
+    }
+
+    /* For unit testing only.  Supported only by PrivateEvictor. */
+    void setScanIterator(Iterator<IN> iter) {
+        throw new UnsupportedOperationException();
+    }
+}
diff --git a/src/com/sleepycat/je/evictor/package.html b/src/com/sleepycat/je/evictor/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..d69d850f962cf9015c585b301f3d07f1262733a6
--- /dev/null
+++ b/src/com/sleepycat/je/evictor/package.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+
+ $Id: package.html,v 1.9.2.2 2010/01/04 15:30:28 cwl Exp $
+-->
+</head>
+<body bgcolor="white">
+
+Provides classes and interfaces for memory reclamation in JE.
+
+
+<h2>Package Specification</h2>
+
+(None)
+
+<!-- Put @see and @since tags down here. -->
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/incomp/INCompressor.java b/src/com/sleepycat/je/incomp/INCompressor.java
new file mode 100644
index 0000000000000000000000000000000000000000..ef76806a5ee5644955999d1dca7cc527715031c5
--- /dev/null
+++ b/src/com/sleepycat/je/incomp/INCompressor.java
@@ -0,0 +1,838 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INCompressor.java,v 1.148.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.incomp;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.BINReference;
+import com.sleepycat.je.tree.CursorsExistException;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.NodeNotEmptyException;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.Tree.SearchType;
+import com.sleepycat.je.utilint.DaemonThread;
+import com.sleepycat.je.utilint.PropUtil;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * The IN Compressor.  JE compression consists of removing deleted entries
+ * from BINs, and pruning empty INs/BINs from the tree. Compression is carried
+ * out by either a daemon thread or lazily by operations (namely checkpointing
+ * and eviction) that are writing INs.
+ */
+public class INCompressor extends DaemonThread {
+    private static final boolean DEBUG = false;
+
+    private EnvironmentImpl env;
+    private long lockTimeout;
+
+    /* stats */
+    private long splitBins = 0;
+    private long dbClosedBins = 0;
+    private long cursorsBins = 0;
+    private long nonEmptyBins = 0;
+    private long processedBins = 0;
+
+    /* per-run stats */
+    private int splitBinsThisRun = 0;
+    private int dbClosedBinsThisRun = 0;
+    private int cursorsBinsThisRun = 0;
+    private int nonEmptyBinsThisRun = 0;
+    private int processedBinsThisRun = 0;
+
+    /*
+     * The following stats are not kept per run, because they're set by
+     * multiple threads doing lazy compression. They are debugging aids; it
+     * didn't seem like a good idea to add synchronization to the general path.
+     */
+    private int lazyProcessed = 0;
+    private int lazyEmpty = 0;
+    private int lazySplit = 0;
+    private int wokenUp = 0;
+
+    /*
+     * Store logical references to BINs that have deleted entries and are
+     * candidates for compaction.
+     */
+    private Map<Long, BINReference> binRefQueue;
+    private Object binRefQueueSync;
+
+    /* For unit tests */
+    private TestHook beforeFlushTrackerHook; // [#15528]
+
+    public INCompressor(EnvironmentImpl env, long waitTime, String name)
+        throws DatabaseException {
+
+        super(waitTime, name, env);
+        this.env = env;
+        lockTimeout = PropUtil.microsToMillis(
+            env.getConfigManager().getLong
+                (EnvironmentParams.COMPRESSOR_LOCK_TIMEOUT));
+        binRefQueue = new HashMap<Long, BINReference>();
+        binRefQueueSync = new Object();
+    }
+
+    synchronized public void clearEnv() {
+        env = null;
+    }
+
+    /* For unit testing only. */
+    public void setBeforeFlushTrackerHook(TestHook hook) {
+        beforeFlushTrackerHook = hook;
+    }
+
+    public synchronized void verifyCursors()
+        throws DatabaseException {
+
+        /*
+         * Environment may have been closed.  If so, then our job here is done.
+         */
+        if (env.isClosed()) {
+            return;
+        }
+
+        /*
+         * Use a snapshot to verify the cursors.  This way we don't have to
+         * hold a latch while verify takes locks.
+         */
+        List<BINReference> queueSnapshot = null;
+        synchronized (binRefQueueSync) {
+            queueSnapshot = new ArrayList<BINReference>(binRefQueue.values());
+        }
+
+        /*
+         * Use local caching to reduce DbTree.getDb overhead.  Do not call
+         * releaseDb after each getDb, since the entire dbCache will be
+         * released at the end.
+         */
+        DbTree dbTree = env.getDbTree();
+        Map<DatabaseId, DatabaseImpl> dbCache = 
+            new HashMap<DatabaseId, DatabaseImpl>();
+        try {
+            Iterator<BINReference> it = queueSnapshot.iterator();
+            while (it.hasNext()) {
+                BINReference binRef = it.next();
+                DatabaseImpl db = dbTree.getDb
+                    (binRef.getDatabaseId(), lockTimeout, dbCache);
+                BIN bin = searchForBIN(db, binRef);
+                if (bin != null) {
+                    bin.verifyCursors();
+                    bin.releaseLatch();
+                }
+            }
+        } finally {
+            dbTree.releaseDbs(dbCache);
+        }
+    }
+
+    public int getBinRefQueueSize()
+        throws DatabaseException {
+
+        int size = 0;
+        synchronized (binRefQueueSync) {
+            size = binRefQueue.size();
+        }
+
+        return size;
+    }
+
+    /*
+     * There are multiple flavors of the addBin*ToQueue methods. All allow
+     * the caller to specify whether the daemon should be notified. Currently
+     * no callers proactively notify, and we rely on lazy compression and
+     * the daemon's time-based wakeup to process the queue.
+     */
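+
+    /*
+     * For example (illustrative only), a caller that has just deleted a key
+     * might queue the containing BIN without waking the daemon:
+     *
+     *   compressor.addBinKeyToQueue(bin, deletedKey, false); // doWakeup
+     */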
+
+    /**
+     * Adds the BIN and deleted Key to the queue if the BIN is not already in
+     * the queue, or adds the deleted key to an existing entry if one exists.
+     */
+    public void addBinKeyToQueue(BIN bin, Key deletedKey, boolean doWakeup)
+        throws DatabaseException {
+
+        synchronized (binRefQueueSync) {
+            addBinKeyToQueueAlreadyLatched(bin, deletedKey);
+        }
+        if (doWakeup) {
+            wakeup();
+        }
+    }
+
+    /**
+     * Adds the BINReference to the queue if the BIN is not already in the
+     * queue, or adds the deleted keys to an existing entry if one exists.
+     */
+    public void addBinRefToQueue(BINReference binRef, boolean doWakeup)
+        throws DatabaseException {
+
+        synchronized (binRefQueueSync) {
+            addBinRefToQueueAlreadyLatched(binRef);
+        }
+
+        if (doWakeup) {
+            wakeup();
+        }
+    }
+
+    /**
+     * Adds an entire collection of BINReferences to the queue at once.  Use
+     * this to avoid latching for each add.
+     */
+    public void addMultipleBinRefsToQueue(Collection<BINReference> binRefs,
+                                          boolean doWakeup)
+        throws DatabaseException {
+
+        synchronized (binRefQueueSync) {
+            Iterator<BINReference> it = binRefs.iterator();
+            while (it.hasNext()) {
+                BINReference binRef = it.next();
+                addBinRefToQueueAlreadyLatched(binRef);
+            }
+        }
+
+        if (doWakeup) {
+            wakeup();
+        }
+    }
+
+    /**
+     * Adds the BINReference with the latch held.
+     */
+    private void addBinRefToQueueAlreadyLatched(BINReference binRef) {
+
+        Long node = Long.valueOf(binRef.getNodeId());
+        BINReference existingRef = binRefQueue.get(node);
+        if (existingRef != null) {
+            existingRef.addDeletedKeys(binRef);
+        } else {
+            binRefQueue.put(node, binRef);
+        }
+    }
+
+    /**
+     * Adds the BIN and deleted Key with the latch held.
+     */
+    private void addBinKeyToQueueAlreadyLatched(BIN bin, Key deletedKey) {
+
+        Long node = Long.valueOf(bin.getNodeId());
+        BINReference existingRef = binRefQueue.get(node);
+        if (existingRef != null) {
+            if (deletedKey != null) {
+                existingRef.addDeletedKey(deletedKey);
+            }
+        } else {
+            BINReference binRef = bin.createReference();
+            if (deletedKey != null) {
+                binRef.addDeletedKey(deletedKey);
+            }
+            binRefQueue.put(node, binRef);
+        }
+    }
+
+    public boolean exists(long nodeId) {
+        Long node = Long.valueOf(nodeId);
+        synchronized (binRefQueueSync) {
+            return (binRefQueue.get(node) != null);
+        }
+    }
+
+    /*
+     * Return a BINReference for this node if it exists and has a set of
+     * deletable keys.
+     */
+    private BINReference removeCompressibleBinReference(long nodeId) {
+        Long node = Long.valueOf(nodeId);
+        BINReference foundRef = null;
+        synchronized (binRefQueueSync) {
+            BINReference target = binRefQueue.remove(node);
+            if (target != null) {
+                if (target.deletedKeysExist()) {
+                    foundRef = target;
+                } else {
+
+                    /*
+                     * This is an entry that needs to be pruned. Put it back
+                     * to be dealt with by the daemon.
+                     */
+                    binRefQueue.put(node, target);
+                }
+            }
+        }
+        return foundRef;
+    }
+
+    /**
+     * Return stats
+     */
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        stat.setSplitBins(splitBins);
+        stat.setDbClosedBins(dbClosedBins);
+        stat.setCursorsBins(cursorsBins);
+        stat.setNonEmptyBins(nonEmptyBins);
+        stat.setProcessedBins(processedBins);
+        stat.setInCompQueueSize(getBinRefQueueSize());
+
+        if (DEBUG) {
+            System.out.println("lazyProcessed = " + lazyProcessed);
+            System.out.println("lazyEmpty = " + lazyEmpty);
+            System.out.println("lazySplit = " + lazySplit);
+            System.out.println("wokenUp=" + wokenUp);
+        }
+
+        if (config.getClear()) {
+            splitBins = 0;
+            dbClosedBins = 0;
+            cursorsBins = 0;
+            nonEmptyBins = 0;
+            processedBins = 0;
+            lazyProcessed = 0;
+            lazyEmpty = 0;
+            lazySplit = 0;
+            wokenUp = 0;
+        }
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries()
+        throws DatabaseException {
+
+        return env.getConfigManager().getInt
+            (EnvironmentParams.COMPRESSOR_RETRY);
+    }
+
+    public synchronized void onWakeup()
+        throws DatabaseException {
+
+        if (env.isClosed()) {
+            return;
+        }
+        wokenUp++;
+        doCompress();
+    }
+
+    /**
+     * The real work of doing a compress. This may be called by the compressor
+     * thread or programmatically.
+     */
+    public synchronized void doCompress()
+        throws DatabaseException {
+
+        /*
+         * Make a snapshot of the current work queue so the compressor thread
+         * can safely iterate over the queue. Note that this impacts lazy
+         * compression, because lazy compressors will not see BINReferences
+         * that have been moved to the snapshot.
+         */
+        Map<Long, BINReference> queueSnapshot = null;
+        int binQueueSize = 0;
+        synchronized (binRefQueueSync) {
+            binQueueSize = binRefQueue.size();
+            if (binQueueSize > 0) {
+                queueSnapshot = binRefQueue;
+                binRefQueue = new HashMap<Long, BINReference>();
+            }
+        }
+
+        /* There is work to be done. */
+        if (binQueueSize > 0) {
+            resetPerRunCounters();
+            Tracer.trace(Level.FINE, env,
+                         "InCompress.doCompress called, queue size: " +
+                         binQueueSize);
+            assert LatchSupport.countLatchesHeld() == 0;
+
+            /*
+             * Compressed entries must be counted as obsoleted.  A separate
+             * tracker is used to accumulate tracked obsolete info so it can be
+             * added in a single call under the log write latch.  We log the
+             * info for deleted subtrees immediately because we don't process
+             * deleted IN entries during recovery; this reduces the chance of
+             * lost info.
+             */
+            LocalUtilizationTracker localTracker =
+                new LocalUtilizationTracker(env);
+
+            /* Use local caching to reduce DbTree.getDb overhead. */
+            Map<DatabaseId, DatabaseImpl> dbCache = 
+                new HashMap<DatabaseId, DatabaseImpl>();
+
+            DbTree dbTree = env.getDbTree();
+            BINSearch binSearch = new BINSearch();
+            try {
+                Iterator<BINReference> it = queueSnapshot.values().iterator();
+                while (it.hasNext()) {
+                    if (env.isClosed()) {
+                        return;
+                    }
+
+                    BINReference binRef = it.next();
+                    if (!findDBAndBIN(binSearch, binRef, dbTree, dbCache)) {
+
+                        /*
+                         * Either the db is closed, or the BIN doesn't
+                         * exist. Don't process this BINReference.
+                         */
+                        continue;
+                    }
+
+                    if (binRef.deletedKeysExist()) {
+                        /* Compress deleted slots. */
+                        boolean requeued = compressBin
+                            (binSearch.db, binSearch.bin, binRef,
+                             localTracker);
+
+                        if (!requeued) {
+
+                            /*
+                             * This BINReference was fully processed, but there
+                             * may still be deleted slots. If there are still
+                             * deleted keys in the binref, they were relocated
+                             * by a split.
+                             */
+                            checkForRelocatedSlots
+                                (binSearch.db, binRef, localTracker);
+                        }
+                    } else {
+
+                        /*
+                         * An empty BINReference on the queue was put there by
+                         * a lazy compressor to indicate that we should try to
+                         * prune an empty BIN.
+                         */
+                        BIN foundBin = binSearch.bin;
+
+                        byte[] idKey = foundBin.getIdentifierKey();
+                        boolean isDBIN = foundBin.containsDuplicates();
+                        byte[] dupKey = null;
+                        if (isDBIN) {
+                            dupKey = ((DBIN) foundBin).getDupKey();
+                        }
+
+                        /*
+                         * Release the BIN latch taken by the initial
+                         * search. Pruning starts from the top of the tree and
+                         * requires that no latches are held.
+                         */
+                        foundBin.releaseLatch();
+
+                        pruneBIN(binSearch.db,  binRef, idKey, isDBIN,
+                                 dupKey, localTracker);
+                    }
+                }
+
+                /* SR [#11144] */
+                assert TestHookExecute.doHookIfSet(beforeFlushTrackerHook);
+
+                /*
+                 * Count obsolete nodes and write out modified file summaries
+                 * for recovery.  All latches must have been released.
+                 */
+                env.getUtilizationProfile().flushLocalTracker(localTracker);
+
+            } finally {
+                dbTree.releaseDbs(dbCache);
+                assert LatchSupport.countLatchesHeld() == 0;
+                accumulatePerRunCounters();
+            }
+        }
+    }
+
+    /**
+     * Compresses a single BIN and then deletes the BIN if it is empty.
+     * @param bin is latched when this method is called, and unlatched when it
+     * returns.
+     * @return true if the BINReference was requeued by this method.
+     */
+    private boolean compressBin(DatabaseImpl db,
+                                BIN bin,
+                                BINReference binRef,
+                                LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        /* Safe to get identifier keys; bin is latched. */
+        boolean empty = false;
+        boolean requeued = false;
+        byte[] idKey = bin.getIdentifierKey();
+        byte[] dupKey = null;
+        boolean isDBIN = bin.containsDuplicates();
+
+        try {
+            int nCursors = bin.nCursors();
+            if (nCursors > 0) {
+
+                /*
+                 * There are cursors pointing to the BIN, so try again later.
+                 */
+                addBinRefToQueue(binRef, false);
+                requeued = true;
+                cursorsBinsThisRun++;
+            } else {
+                requeued = bin.compress
+                    (binRef, true /* canFetch */, localTracker);
+                if (!requeued) {
+
+                    /*
+                     * Only check for emptiness if this BINRef is in play and
+                     * not on the queue.
+                     */
+                    empty = (bin.getNEntries() == 0);
+
+                    if (empty) {
+
+                        /*
+                         * While we have the BIN latched, prepare a dup key if
+                         * needed for navigating the tree while pruning.
+                         */
+                        if (isDBIN) {
+                            dupKey = ((DBIN) bin).getDupKey();
+                        }
+                    }
+                }
+            }
+        } finally {
+            bin.releaseLatch();
+        }
+
+        /* Prune if the bin is empty and there has been no requeuing. */
+        if (empty) {
+            requeued = pruneBIN
+                (db, binRef, idKey, isDBIN, dupKey, localTracker);
+        }
+
+        return requeued;
+    }
+
+    /**
+     * If the target BIN is empty, attempt to remove the empty branch of the
+     * tree.
+     * @return true if the pruning was unable to proceed and the BINReference
+     * was requeued.
+     */
+    private boolean pruneBIN(DatabaseImpl dbImpl,
+                             BINReference binRef,
+                             byte[] idKey,
+                             boolean containsDups,
+                             byte[] dupKey,
+                             LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        boolean requeued = false;
+        try {
+            Tree tree = dbImpl.getTree();
+
+            if (containsDups) {
+                tree.deleteDup(idKey, dupKey, localTracker);
+            } else {
+                tree.delete(idKey, localTracker);
+            }
+            processedBinsThisRun++;
+        } catch (NodeNotEmptyException NNEE) {
+
+            /*
+             * Something was added to the node since the point when the
+             * deletion occurred; we can't prune, and we can throw away this
+             * BINReference.
+             */
+            nonEmptyBinsThisRun++;
+        } catch (CursorsExistException e) {
+
+            /*
+             * If there are cursors in the way of the delete, retry later.
+             * For example, when we delete a BIN or DBIN, we're guaranteed that
+             * there are no cursors at that node (otherwise, we wouldn't be
+             * able to remove all the entries). However, there's the possibility
+             * that the BIN that is the parent of the duplicate tree has
+             * resident cursors, and in that case, we would not be able to
+             * remove the whole duplicate tree and DIN root. In that case, we'd
+             * requeue.
+             */
+            addBinRefToQueue(binRef, false);
+            cursorsBinsThisRun++;
+            requeued = true;
+        }
+        return requeued;
+    }
+
+    /*
+     * When we do not requeue the BINRef but there are deleted keys remaining,
+     * those keys were not found in the BIN and therefore must have been moved
+     * to another BIN during a split.
+     */
+    private void checkForRelocatedSlots(DatabaseImpl db,
+                                        BINReference binRef,
+                                        LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        Iterator<Key> iter = binRef.getDeletedKeyIterator();
+        if (iter != null) {
+
+            /* mainKey is only used for dups. */
+            byte[] mainKey = binRef.getKey();
+            boolean isDup = (binRef.getData() != null);
+
+            while (iter.hasNext()) {
+                Key key = iter.next();
+
+                /*
+                 * Look up the BIN for each deleted key, and compress that BIN
+                 * separately.
+                 */
+                BIN splitBin = isDup ?
+                    searchForBIN(db, mainKey, key.getKey()) :
+                    searchForBIN(db, key.getKey(), null);
+                if (splitBin != null) {
+                    BINReference splitBinRef = splitBin.createReference();
+                    splitBinRef.addDeletedKey(key);
+                    compressBin(db, splitBin, splitBinRef, localTracker);
+                }
+            }
+        }
+    }
+
+    /**
+     * Search the tree for the BIN or DBIN that corresponds to this
+     * BINReference.
+     *
+     * @param binRef the BINReference that indicates the bin we want.
+     * @return the BIN or DBIN that corresponds to this BINReference. The
+     * node is latched upon return. Returns null if the BIN can't be found.
+     */
+    public BIN searchForBIN(DatabaseImpl db, BINReference binRef)
+        throws DatabaseException {
+
+        return searchForBIN(db, binRef.getKey(), binRef.getData());
+    }
+
+    private BIN searchForBIN(DatabaseImpl db, byte[] mainKey, byte[] dupKey)
+        throws DatabaseException {
+
+        /* Search for this IN */
+        Tree tree = db.getTree();
+        IN in = tree.search
+            (mainKey, SearchType.NORMAL, -1, null, CacheMode.UNCHANGED);
+
+        /* Couldn't find a BIN, return null */
+        if (in == null) {
+            return null;
+        }
+
+        /* This is not a duplicate, we're done. */
+        if (dupKey == null) {
+            return (BIN) in;
+        }
+
+        /* We need to descend down into a duplicate tree. */
+        DIN duplicateRoot = null;
+        boolean duplicateRootIsLatched = false;
+        DBIN duplicateBin = null;
+        BIN bin = (BIN) in;
+        boolean binIsLatched = true;
+        try {
+            int index = bin.findEntry(mainKey, false, true);
+            if (index >= 0) {
+                Node node = null;
+                if (!bin.isEntryKnownDeleted(index)) {
+
+                    /*
+                     * If fetchTarget returns null, a deleted LN was cleaned.
+                     */
+                    node = bin.fetchTarget(index);
+                }
+                if (node == null) {
+                    bin.releaseLatch();
+                    binIsLatched = false;
+                    return null;
+                }
+                if (node.containsDuplicates()) {
+                    /* It's a duplicate tree. */
+                    duplicateRoot = (DIN) node;
+                    duplicateRoot.latch(CacheMode.UNCHANGED);
+                    duplicateRootIsLatched = true;
+                    bin.releaseLatch();
+                    binIsLatched = false;
+                    duplicateBin = (DBIN) tree.searchSubTree
+                        (duplicateRoot, dupKey, SearchType.NORMAL, -1, null,
+                         CacheMode.UNCHANGED);
+                    duplicateRootIsLatched = false;
+
+                    return duplicateBin;
+                } else {
+                    /* We haven't migrated to a duplicate tree yet. */
+                    return bin;
+                }
+            } else {
+                bin.releaseLatch();
+                binIsLatched = false;
+                return null;
+            }
+        } catch (DatabaseException DBE) {
+            if (bin != null &&
+                binIsLatched) {
+                bin.releaseLatch();
+            }
+
+            if (duplicateRoot != null &&
+                duplicateRootIsLatched) {
+                duplicateRoot.releaseLatch();
+            }
+
+            /*
+             * FindBugs whines about Redundant comparison to null below, but
+             * for stylistic purposes we'll leave it in.
+             */
+            if (duplicateBin != null) {
+                duplicateBin.releaseLatch();
+            }
+            throw DBE;
+        }
+    }
+
+    /**
+     * Reset per-run counters.
+     */
+    private void resetPerRunCounters() {
+        splitBinsThisRun = 0;
+        dbClosedBinsThisRun = 0;
+        cursorsBinsThisRun = 0;
+        nonEmptyBinsThisRun = 0;
+        processedBinsThisRun = 0;
+    }
+
+    private void accumulatePerRunCounters() {
+        splitBins += splitBinsThisRun;
+        dbClosedBins += dbClosedBinsThisRun;
+        cursorsBins += cursorsBinsThisRun;
+        nonEmptyBins += nonEmptyBinsThisRun;
+        processedBins += processedBinsThisRun;
+    }
+
+    /**
+     * Lazily compress a single BIN. Do not do any pruning. The target IN
+     * should be latched when we enter, and it will remain latched.
+     */
+    public void lazyCompress(IN in, LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        if (!in.isCompressible()) {
+            return;
+        }
+
+        assert in.isLatchOwnerForWrite();
+
+        /* BIN is latched. */
+        BIN bin = (BIN) in;
+        int nCursors = bin.nCursors();
+        if (nCursors > 0) {
+            /* Cursors prohibit compression. */
+            return;
+        } else {
+            BINReference binRef =
+                removeCompressibleBinReference(bin.getNodeId());
+            if ((binRef == null) || (!binRef.deletedKeysExist())) {
+                return;
+            } else {
+
+                boolean requeued =
+                    bin.compress(binRef, false /* canFetch */, localTracker);
+                lazyProcessed++;
+
+                /*
+                 * If this wasn't requeued, but there were deleted keys
+                 * remaining, requeue, so the daemon can handle this.  Either
+                 * we must have shuffled some items because of a split, or a
+                 * child was not resident and we couldn't process that entry.
+                 */
+                if (!requeued && binRef.deletedKeysExist()) {
+                    addBinRefToQueue(binRef, false);
+                    lazySplit++;
+                } else {
+                    if (bin.getNEntries() == 0) {
+                        addBinRefToQueue(binRef, false);
+                        lazyEmpty++;
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Find the db and bin for a BINReference.
+     * @return true if the db is open and the target bin is found.
+     */
+    private boolean findDBAndBIN(BINSearch binSearch,
+                                 BINReference binRef,
+                                 DbTree dbTree,
+                                 Map<DatabaseId, DatabaseImpl> dbCache)
+        throws DatabaseException {
+
+        /*
+         * Find the database.  Do not call releaseDb after this getDb, since
+         * the entire dbCache will be released later.
+         */
+        binSearch.db = dbTree.getDb
+            (binRef.getDatabaseId(), lockTimeout, dbCache);
+        if ((binSearch.db == null) || (binSearch.db.isDeleted())) {
+            /* The db was deleted. Ignore this BINReference. */
+            dbClosedBinsThisRun++;
+            return false;
+        }
+
+        /* Perform eviction before each operation. */
+        env.getEvictor().doCriticalEviction(true); // backgroundIO
+
+        /* Find the BIN. */
+        binSearch.bin = searchForBIN(binSearch.db, binRef);
+        if ((binSearch.bin == null) ||
+            binSearch.bin.getNodeId() != binRef.getNodeId()) {
+            /* The BIN may have been split. */
+            if (binSearch.bin != null) {
+                binSearch.bin.releaseLatch();
+            }
+            splitBinsThisRun++;
+            return false;
+        }
+
+        return true;
+    }
+
+    /* Struct to return multiple values from findDBAndBIN. */
+    private static class BINSearch {
+        public DatabaseImpl db;
+        public BIN bin;
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEConnection.java b/src/com/sleepycat/je/jca/ra/JEConnection.java
new file mode 100644
index 0000000000000000000000000000000000000000..27f0eac644a3f5d3a1628bf2bb3aeb36a4d8f29f
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEConnection.java
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEConnection.java,v 1.16.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import javax.resource.ResourceException;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Transaction;
+
+/**
+ * A JEConnection provides access to JE services. See
+ * &lt;JEHOME&gt;/examples/jca/HOWTO-**.txt and
+ * &lt;JEHOME&gt;/examples/jca/simple/SimpleBean.java for more information on
+ * how to build the resource adapter and use a JEConnection.
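+ *
+ * <p>
+ * A minimal, hypothetical usage sketch, assuming dcf is a JEConnectionFactory
+ * obtained from JNDI (the database name and configuration objects are
+ * illustrative only; exception handling is elided):
+ * <pre>
+ *    JEConnection dc = dcf.getConnection(envDir, envConfig);
+ *    Database db = dc.openDatabase("myDb", dbConfig);
+ *    // read and write through db ...
+ *    dc.close();
+ * </pre>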
+ */
+public class JEConnection {
+
+    private JEManagedConnection mc;
+    private JELocalTransaction txn;
+
+    public JEConnection(JEManagedConnection mc) {
+        this.mc = mc;
+    }
+
+    protected void setManagedConnection(JEManagedConnection mc,
+					JELocalTransaction lt) {
+	this.mc = mc;
+	if (txn == null) {
+	    txn = lt;
+	}
+    }
+
+    JELocalTransaction getLocalTransaction() {
+	return txn;
+    }
+
+    void setLocalTransaction(JELocalTransaction txn) {
+	this.txn = txn;
+    }
+
+    public Environment getEnvironment()
+	throws ResourceException {
+
+	return mc.getEnvironment();
+    }
+
+    public Database openDatabase(String name, DatabaseConfig config)
+	throws DatabaseException {
+
+	return mc.openDatabase(name, config);
+    }
+
+    public SecondaryDatabase openSecondaryDatabase(String name,
+						   Database primaryDatabase,
+						   SecondaryConfig config)
+	throws DatabaseException {
+
+	return mc.openSecondaryDatabase(name, primaryDatabase, config);
+    }
+
+    public void removeDatabase(String databaseName)
+	throws DatabaseException {
+
+	mc.removeDatabase(databaseName);
+    }
+
+    public long truncateDatabase(String databaseName, boolean returnCount)
+	throws DatabaseException {
+
+	return mc.truncateDatabase(databaseName, returnCount);
+    }
+
+    public Transaction getTransaction()
+	throws ResourceException {
+
+	if (txn == null) {
+	    return null;
+	}
+
+	try {
+	    return txn.getTransaction();
+	} catch (DatabaseException DE) {
+	    ResourceException ret = new ResourceException(DE.toString());
+	    ret.initCause(DE);
+	    throw ret;
+	}
+    }
+
+    public void close()
+	throws JEException {
+
+	mc.close();
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java b/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..26749c2c880a7301d9d756420bd1d8b426ef45ff
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEConnectionFactory.java,v 1.12.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import java.io.Serializable;
+
+import javax.resource.Referenceable;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+
+/**
+ * An application may obtain a {@link JEConnection} in this manner:
+ * <pre>
+ *    InitialContext iniCtx = new InitialContext();
+ *    Context enc = (Context) iniCtx.lookup("java:comp/env");
+ *    Object ref = enc.lookup("ra/JEConnectionFactory");
+ *    JEConnectionFactory dcf = (JEConnectionFactory) ref;
+ *    JEConnection dc = dcf.getConnection(envDir, envConfig);
+ * </pre>
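+ *
+ * <p>
+ * A transactional connection may be requested by also supplying a
+ * TransactionConfig; a hypothetical sketch (the transConfig object shown is
+ * illustrative):
+ * <pre>
+ *    TransactionConfig transConfig = new TransactionConfig();
+ *    JEConnection dc = dcf.getConnection(envDir, envConfig, transConfig);
+ * </pre>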
+ *
+ * See &lt;JEHOME&gt;/examples/jca/HOWTO-**.txt and
+ * &lt;JEHOME&gt;/examples/jca/simple/SimpleBean.java for more information
+ * on how to build the resource adapter and use a JEConnection.
+ */
+public interface JEConnectionFactory
+    extends Referenceable, Serializable {
+
+    public JEConnection getConnection(String jeRootDir,
+				      EnvironmentConfig envConfig)
+	throws JEException;
+
+    public JEConnection getConnection(String jeRootDir,
+				      EnvironmentConfig envConfig,
+				      TransactionConfig transConfig)
+	throws JEException;
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java b/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..a616a9a495faa585573e9105de5ed2e7a12c3b43
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEConnectionFactoryImpl.java,v 1.11.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import java.io.File;
+
+import javax.naming.NamingException;
+import javax.naming.Reference;
+import javax.resource.ResourceException;
+import javax.resource.spi.ConnectionManager;
+import javax.resource.spi.ManagedConnectionFactory;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+
+public class JEConnectionFactoryImpl implements JEConnectionFactory {
+
+    /*
+     * These are not transient because SJSAS seems to need to serialize
+     * them when leaving them in JNDI.
+     */
+    private /* transient */ ConnectionManager manager;
+    private /* transient */ ManagedConnectionFactory factory;
+    private Reference reference;
+
+    JEConnectionFactoryImpl(ConnectionManager manager,
+			    ManagedConnectionFactory factory) {
+	this.manager = manager;
+	this.factory = factory;
+    }
+
+    public JEConnection getConnection(String jeRootDir,
+				      EnvironmentConfig envConfig)
+	throws JEException {
+
+	return getConnection(jeRootDir, envConfig, null);
+    }
+
+
+    public JEConnection getConnection(String jeRootDir,
+				      EnvironmentConfig envConfig,
+				      TransactionConfig transConfig)
+	throws JEException {
+
+	JEConnection dc = null;
+ 	JERequestInfo jeInfo =
+ 	    new JERequestInfo(new File(jeRootDir), envConfig, transConfig);
+	try {
+	    dc = (JEConnection) manager.allocateConnection(factory, jeInfo);
+	} catch (ResourceException e) {
+	    /* Preserve the underlying cause for callers. */
+	    JEException je = new JEException("Unable to get Connection: " + e);
+	    je.initCause(e);
+	    throw je;
+	}
+
+	return dc;
+    }
+
+    public void setReference(Reference reference) {
+	this.reference = reference;
+    }
+
+    public Reference getReference()
+	throws NamingException {
+
+	return reference;
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java b/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java
new file mode 100644
index 0000000000000000000000000000000000000000..54b89bb583b9e472b070790a37e79737cd91f2f0
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEConnectionMetaData.java,v 1.7.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import javax.resource.ResourceException;
+import javax.resource.spi.ManagedConnectionMetaData;
+
+public class JEConnectionMetaData
+    implements ManagedConnectionMetaData {
+
+    public JEConnectionMetaData() {
+    }
+
+    public String getEISProductName()
+        throws ResourceException {
+
+        return "Berkeley DB Java Edition JCA";
+    }
+
+    public String getEISProductVersion()
+        throws ResourceException {
+
+        return "2.0";
+    }
+
+    public int getMaxConnections()
+        throws ResourceException {
+
+	/* Make a je.* parameter? */
+	return 100;
+    }
+
+    public String getUserName()
+        throws ResourceException {
+
+    	return null;
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEException.java b/src/com/sleepycat/je/jca/ra/JEException.java
new file mode 100644
index 0000000000000000000000000000000000000000..e31cacec20592ac1af33dd11914a88ba877d306d
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEException.java
@@ -0,0 +1,16 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEException.java,v 1.7.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+public class JEException extends Exception {
+
+    public JEException(String message) {
+	super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JELocalTransaction.java b/src/com/sleepycat/je/jca/ra/JELocalTransaction.java
new file mode 100644
index 0000000000000000000000000000000000000000..cb94e0dc38799528e3d8b3cb5ee08e7c569ce523
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JELocalTransaction.java
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JELocalTransaction.java,v 1.17.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import javax.resource.ResourceException;
+import javax.resource.spi.ConnectionEvent;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.XAEnvironment;
+
+public class JELocalTransaction
+    implements javax.resource.cci.LocalTransaction,
+	       javax.resource.spi.LocalTransaction {
+
+    private static boolean DEBUG = false;
+
+    private transient XAEnvironment env;
+    private transient TransactionConfig transConfig;
+    private transient JEManagedConnection mgdConn;
+
+    JELocalTransaction(XAEnvironment env,
+		       TransactionConfig transConfig,
+		       JEManagedConnection mgdConn) {
+	this.env = env;
+	this.transConfig = transConfig;
+	this.mgdConn = mgdConn;
+    }
+
+    public Transaction getTransaction()
+	throws DatabaseException {
+
+	return env.getThreadTransaction();
+    }
+
+    protected XAEnvironment getEnv() {
+	return env;
+    }
+
+    private void checkEnv(String methodName)
+	throws ResourceException {
+
+	if (env == null) {
+	    throw new ResourceException("env is null in " + methodName);
+	}
+    }
+
+    /*
+     * Methods for LocalTransaction.
+     */
+
+    public void begin()
+	throws ResourceException {
+
+	checkEnv("begin");
+	long id = -1;
+	try {
+	    Transaction txn = env.beginTransaction(null, transConfig);
+	    env.setThreadTransaction(txn);
+	    id = txn.getId();
+	} catch (DatabaseException DE) {
+	    ResourceException ret =
+		new ResourceException("During begin: " + DE.toString());
+	    ret.initCause(DE);
+	    throw ret;
+	}
+
+	ConnectionEvent connEvent = new ConnectionEvent
+	    (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_STARTED);
+	connEvent.setConnectionHandle(mgdConn);
+	mgdConn.sendConnectionEvent(connEvent);
+
+	if (DEBUG) {
+	    System.out.println("JELocalTransaction.begin " + id);
+	}
+    }
+
+    public void commit()
+	throws ResourceException {
+
+	checkEnv("commit");
+	try {
+	    env.getThreadTransaction().commit();
+	} catch (DatabaseException DE) {
+	    ResourceException ret = new ResourceException(DE.toString());
+	    ret.initCause(DE);
+	    throw ret;
+	} finally {
+	    env.setThreadTransaction(null);
+	}
+
+	ConnectionEvent connEvent = new ConnectionEvent
+	    (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_COMMITTED);
+	connEvent.setConnectionHandle(mgdConn);
+	mgdConn.sendConnectionEvent(connEvent);
+
+	if (DEBUG) {
+	    System.out.println("JELocalTransaction.commit");
+	}
+    }
+
+    public void rollback()
+	throws ResourceException {
+
+	checkEnv("rollback");
+	try {
+	    env.getThreadTransaction().abort();
+	} catch (DatabaseException DE) {
+	    ResourceException ret = new ResourceException(DE.toString());
+	    ret.initCause(DE);
+	    throw ret;
+	} finally {
+	    env.setThreadTransaction(null);
+	}
+
+	ConnectionEvent connEvent = new ConnectionEvent
+	    (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_ROLLEDBACK);
+	connEvent.setConnectionHandle(mgdConn);
+	mgdConn.sendConnectionEvent(connEvent);
+
+	if (DEBUG) {
+	    System.out.println("JELocalTransaction.rollback");
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEManagedConnection.java b/src/com/sleepycat/je/jca/ra/JEManagedConnection.java
new file mode 100644
index 0000000000000000000000000000000000000000..0a1a33692c1f80628bfec6e690dc52abc2d04e1c
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEManagedConnection.java
@@ -0,0 +1,325 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEManagedConnection.java,v 1.17.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import javax.resource.ResourceException;
+import javax.resource.spi.ConnectionEvent;
+import javax.resource.spi.ConnectionEventListener;
+import javax.resource.spi.ConnectionRequestInfo;
+import javax.resource.spi.LocalTransaction;
+import javax.resource.spi.ManagedConnection;
+import javax.resource.spi.ManagedConnectionMetaData;
+import javax.security.auth.Subject;
+import javax.transaction.xa.XAResource;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.XAEnvironment;
+
+public class JEManagedConnection implements ManagedConnection {
+    private ArrayList<ConnectionEventListener> listeners;
+    private JEConnection conn;
+    private XAEnvironment env;
+    private JELocalTransaction savedLT;
+    private TransactionConfig savedTransConfig;
+    private Map<String,Database> rwDatabaseHandleCache;
+    private Map<String,Database> roDatabaseHandleCache;
+    private Map<String,Database> rwSecondaryDatabaseHandleCache;
+    private Map<String,Database> roSecondaryDatabaseHandleCache;
+
+    JEManagedConnection(Subject subject, JERequestInfo jeInfo)
+	throws ResourceException {
+
+	try {
+	    savedTransConfig = jeInfo.getTransactionConfig();
+	    this.env = new XAEnvironment(jeInfo.getJERootDir(),
+					 jeInfo.getEnvConfig());
+	} catch (DatabaseException DE) {
+	    throw new ResourceException(DE.toString());
+	}
+  	listeners = new ArrayList<ConnectionEventListener>();
+	savedLT = null;
+	rwDatabaseHandleCache = new HashMap<String,Database>();
+	roDatabaseHandleCache = new HashMap<String,Database>();
+	rwSecondaryDatabaseHandleCache = new HashMap<String,Database>();
+	roSecondaryDatabaseHandleCache = new HashMap<String,Database>();
+    }
+
+    public Object getConnection(Subject subject,
+				ConnectionRequestInfo connectionRequestInfo)
+        throws ResourceException {
+
+	if (conn == null) {
+	    conn = new JEConnection(this);
+	}
+	return conn;
+    }
+
+    protected XAEnvironment getEnvironment()
+	throws ResourceException {
+
+	return env;
+    }
+
+    public LocalTransaction getLocalTransaction()
+	throws ResourceException {
+
+	/*
+	 * If there is no JEConnection associated with this ManagedConnection
+	 * yet, then the ManagedConnection holds on to the JELocalTransaction.
+	 * Once a JEConnection is associated (it may not ever happen), we hand
+	 * off the JELocalTransaction to the JEConnection and forget about it
+	 * in the ManagedConnection.
+	 */
+	if (conn == null) {
+	    savedLT = new JELocalTransaction(env, savedTransConfig, this);
+	    return savedLT;
+	}
+
+	JELocalTransaction lt = conn.getLocalTransaction();
+	if (lt == null) {
+	    if (savedLT == null) {
+		lt = new JELocalTransaction(env, savedTransConfig, this);
+	    } else {
+		lt = savedLT;
+	    }
+	    conn.setLocalTransaction(lt);
+	    savedLT = null;
+	}
+	return lt;
+    }
+
+    public XAResource getXAResource()
+        throws ResourceException {
+
+	return (XAResource) env;
+    }
+
+    public void associateConnection(Object connection)
+	throws ResourceException {
+
+	conn = (JEConnection) connection;
+	conn.setManagedConnection(this, savedLT);
+	savedLT = null;
+    }
+
+    public void addConnectionEventListener(ConnectionEventListener listener) {
+	listeners.add(listener);
+    }
+
+    public void
+	removeConnectionEventListener(ConnectionEventListener listener) {
+
+	listeners.remove(listener);
+    }
+
+    public ManagedConnectionMetaData getMetaData()
+        throws ResourceException {
+
+        return new JEConnectionMetaData();
+    }
+
+    public void setLogWriter(PrintWriter out)
+        throws ResourceException {
+
+    }
+
+    public PrintWriter getLogWriter()
+        throws ResourceException {
+
+	return null;
+    }
+
+    protected void close() {
+	ConnectionEvent connEvent =
+	    new ConnectionEvent(this, ConnectionEvent.CONNECTION_CLOSED);
+	connEvent.setConnectionHandle(conn);
+	sendConnectionEvent(connEvent);
+    }
+
+    protected void sendConnectionEvent(ConnectionEvent connEvent) {
+	for (int i = listeners.size() - 1; i >= 0; i--) {
+	    ConnectionEventListener listener = listeners.get(i);
+	    if (connEvent.getId() == ConnectionEvent.CONNECTION_CLOSED) {
+		listener.connectionClosed(connEvent);
+	    } else if (connEvent.getId() ==
+		       ConnectionEvent.CONNECTION_ERROR_OCCURRED) {
+		listener.connectionErrorOccurred(connEvent);
+	    } else if (connEvent.getId() ==
+		       ConnectionEvent.LOCAL_TRANSACTION_STARTED) {
+		listener.localTransactionStarted(connEvent);
+	    } else if (connEvent.getId() ==
+		       ConnectionEvent.LOCAL_TRANSACTION_COMMITTED) {
+		listener.localTransactionCommitted(connEvent);
+	    } else if (connEvent.getId() ==
+		       ConnectionEvent.LOCAL_TRANSACTION_ROLLEDBACK) {
+		listener.localTransactionRolledback(connEvent);
+	    }
+	}
+    }
+
+    public void destroy()
+	throws ResourceException {
+
+	try {
+	    cleanupDatabaseHandleCache(roDatabaseHandleCache);
+	    cleanupDatabaseHandleCache(rwDatabaseHandleCache);
+	    cleanupDatabaseHandleCache(roSecondaryDatabaseHandleCache);
+	    cleanupDatabaseHandleCache(rwSecondaryDatabaseHandleCache);
+	    env.close();
+	} catch (DatabaseException DE) {
+	    throw new ResourceException(DE.toString());
+	}
+    }
+
+    public void cleanup() {
+    }
+
+    void removeDatabase(String dbName)
+	throws DatabaseException {
+
+	removeDatabaseFromCache(roDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(rwDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(roSecondaryDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(rwSecondaryDatabaseHandleCache, dbName);
+	env.removeDatabase(null, dbName);
+    }
+
+    long truncateDatabase(String dbName, boolean returnCount)
+	throws DatabaseException {
+
+	removeDatabaseFromCache(roDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(rwDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(roSecondaryDatabaseHandleCache, dbName);
+	removeDatabaseFromCache(rwSecondaryDatabaseHandleCache, dbName);
+	return env.truncateDatabase(null, dbName, returnCount);
+    }
+
+    Database openDatabase(String dbName, DatabaseConfig config)
+	throws DatabaseException {
+
+	if (config.getReadOnly()) {
+	    synchronized (roDatabaseHandleCache) {
+		return openDatabaseInternal
+		    (roDatabaseHandleCache, dbName, config);
+	    }
+	} else {
+	    synchronized (rwDatabaseHandleCache) {
+		return openDatabaseInternal
+		    (rwDatabaseHandleCache, dbName, config);
+	    }
+	}
+    }
+
+    SecondaryDatabase openSecondaryDatabase(String dbName,
+					    Database primaryDatabase,
+					    SecondaryConfig config)
+	throws DatabaseException {
+
+	if (config.getReadOnly()) {
+	    synchronized (roSecondaryDatabaseHandleCache) {
+		return openSecondaryDatabaseInternal
+		    (roSecondaryDatabaseHandleCache, dbName,
+		     primaryDatabase, config);
+	    }
+	} else {
+	    synchronized (rwSecondaryDatabaseHandleCache) {
+		return openSecondaryDatabaseInternal
+		    (rwSecondaryDatabaseHandleCache, dbName,
+		     primaryDatabase, config);
+	    }
+	}
+    }
+
+    private Database 
+        openDatabaseInternal(Map<String,Database> databaseHandleCache,
+                             String dbName,
+                             DatabaseConfig config)
+	throws DatabaseException {
+
+	Database db;
+	if (config.getExclusiveCreate()) {
+	    db = env.openDatabase(null, dbName, config);
+	    databaseHandleCache.put(dbName, db);
+	} else {
+	    db = databaseHandleCache.get(dbName);
+	    if (db == null) {
+		db = env.openDatabase(null, dbName, config);
+		databaseHandleCache.put(dbName, db);
+	    } else {
+		DbInternal.databaseConfigValidate(config, db.getConfig());
+	    }
+	}
+	return db;
+    }
+
+    private SecondaryDatabase
+	openSecondaryDatabaseInternal(Map<String,Database> databaseHandleCache,
+				      String dbName,
+				      Database primaryDatabase,
+				      SecondaryConfig config)
+	throws DatabaseException {
+
+	SecondaryDatabase db;
+	if (config.getExclusiveCreate()) {
+	    db = env.openSecondaryDatabase(null, dbName,
+					   primaryDatabase, config);
+	    databaseHandleCache.put(dbName, db);
+	} else {
+	    db = (SecondaryDatabase) databaseHandleCache.get(dbName);
+	    if (db == null) {
+		db = env.openSecondaryDatabase(null, dbName,
+					       primaryDatabase, config);
+		databaseHandleCache.put(dbName, db);
+	    } else {
+		DbInternal.databaseConfigValidate(config, db.getConfig());
+	    }
+	}
+	return db;
+    }
+
+    private void removeDatabaseFromCache(Map<String,Database> cache, 
+                                         String dbName)
+	throws DatabaseException {
+
+	synchronized (cache) {
+	    Database db = cache.get(dbName);
+	    if (db == null) {
+		return;
+	    }
+	    db.close();
+	    cache.remove(dbName);
+	}
+    }
+
+    private void cleanupDatabaseHandleCache(Map<String,Database> cache)
+	throws DatabaseException {
+
+	synchronized (cache) {
+	    Iterator<Database> iter = cache.values().iterator();
+
+	    while (iter.hasNext()) {
+		Database db = iter.next();
+		db.close();
+	    }
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java b/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0edd67bbdf63527ae21fdf255de4b33f23c3553
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEManagedConnectionFactory.java,v 1.11.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import java.io.PrintWriter;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.Set;
+
+import javax.resource.ResourceException;
+import javax.resource.spi.ConnectionManager;
+import javax.resource.spi.ConnectionRequestInfo;
+import javax.resource.spi.ManagedConnection;
+import javax.resource.spi.ManagedConnectionFactory;
+import javax.security.auth.Subject;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+public class JEManagedConnectionFactory
+    implements ManagedConnectionFactory, Serializable {
+
+    public JEManagedConnectionFactory() {
+    }
+
+    public Object createConnectionFactory(ConnectionManager cxManager)
+	throws ResourceException {
+
+        return new JEConnectionFactoryImpl(cxManager, this);
+    }
+
+    public Object createConnectionFactory()
+	throws ResourceException {
+
+	throw new UnsupportedOperationException("must supply a connMgr");
+    }
+
+    public ManagedConnection
+	createManagedConnection(Subject subject,
+				ConnectionRequestInfo info)
+	throws ResourceException {
+
+	JERequestInfo jeInfo = (JERequestInfo) info;
+	return new JEManagedConnection(subject, jeInfo);
+    }
+
+    public ManagedConnection
+	matchManagedConnections(Set connectionSet,
+				Subject subject,
+				ConnectionRequestInfo info)
+        throws ResourceException {
+
+	JERequestInfo jeInfo = (JERequestInfo) info;
+	Iterator iter = connectionSet.iterator();
+	while (iter.hasNext()) {
+	    Object next = iter.next();
+	    if (next instanceof JEManagedConnection) {
+		JEManagedConnection mc = (JEManagedConnection) next;
+		EnvironmentImpl nextEnvImpl =
+		    DbInternal.envGetEnvironmentImpl(mc.getEnvironment());
+		/* Do we need to match on more than root dir and r/o? */
+		if (nextEnvImpl.getEnvironmentHome().
+		    equals(jeInfo.getJERootDir()) &&
+		    nextEnvImpl.isReadOnly() ==
+		    jeInfo.getEnvConfig().getReadOnly()) {
+		    return mc;
+		}
+	    }
+	}
+        return null;
+    }
+
+    public void setLogWriter(PrintWriter out)
+	throws ResourceException {
+
+    }
+
+    public PrintWriter getLogWriter()
+	throws ResourceException {
+
+        return null;
+    }
+
+    public boolean equals(Object obj) {
+	/* All JEManagedConnectionFactory instances are interchangeable. */
+	return (obj instanceof JEManagedConnectionFactory);
+    }
+
+    public int hashCode() {
+	return 0;
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/JERequestInfo.java b/src/com/sleepycat/je/jca/ra/JERequestInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..594d81b3e04865e592246131c992081fdf907ffe
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/JERequestInfo.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JERequestInfo.java,v 1.10.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jca.ra;
+
+import java.io.File;
+
+import javax.resource.spi.ConnectionRequestInfo;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+
+public class JERequestInfo implements ConnectionRequestInfo {
+    private File rootDir;
+    private EnvironmentConfig envConfig;
+    private TransactionConfig transConfig;
+
+    public JERequestInfo(File rootDir,
+			 EnvironmentConfig envConfig,
+			 TransactionConfig transConfig) {
+	this.rootDir = rootDir;
+	this.envConfig = envConfig;
+	this.transConfig = transConfig;
+    }
+
+    File getJERootDir() {
+	return rootDir;
+    }
+
+    EnvironmentConfig getEnvConfig() {
+	return envConfig;
+    }
+
+    TransactionConfig getTransactionConfig() {
+	return transConfig;
+    }
+
+    public boolean equals(Object obj) {
+	/* Guard against null and foreign types before casting. */
+	if (!(obj instanceof JERequestInfo)) {
+	    return false;
+	}
+	JERequestInfo info = (JERequestInfo) obj;
+	return rootDir.equals(info.rootDir);
+    }
+
+    public int hashCode() {
+	return rootDir.hashCode();
+    }
+
+    public String toString() {
+	return "<JERequestInfo rootDir=" + rootDir.getAbsolutePath() + "/>";
+    }
+}
diff --git a/src/com/sleepycat/je/jca/ra/package.html b/src/com/sleepycat/je/jca/ra/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..0c702c5e51e5637fc6f1dc38871903fa848a82b2
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/package.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.5.2.2 2010/01/04 15:30:29 cwl Exp $
+
+-->
+</head>
+<body>
+Support for the Java Connector Architecture, which provides a standard
+for connecting the J2EE platform to legacy enterprise information
+systems (EIS), such as ERP systems, database systems, and legacy
+applications not written in Java.
+
+<h2>Package Specification</h2>
+
+<p>
+Users who want to run JE within a J2EE Application Server can use the
+JCA Resource Adapter to connect to JE through a standard API. The JE
+Resource Adapter supports all three J2EE application server
+transaction types:
+</p>
+
+<ul>
+<li>No transaction.</li>
+<li>Local transactions.</li>
+<li>XA transactions.</li>
+</ul>
+
+<p>
+JCA also includes the Java Transaction API (JTA), which means that JE
+supports two-phase commit (XA).  Therefore, JE can participate
+in distributed transactions managed by either a J2EE server or
+the application's direct use of the JTA API.
+</p>
+</body>
+</html>
diff --git a/src/com/sleepycat/je/jca/ra/ra.xml b/src/com/sleepycat/je/jca/ra/ra.xml
new file mode 100644
index 0000000000000000000000000000000000000000..136a326e163082de7ec2c30653e033a72d354e5b
--- /dev/null
+++ b/src/com/sleepycat/je/jca/ra/ra.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE connector PUBLIC 
+          "-//Sun Microsystems, Inc.//DTD Connector 1.0//EN" 
+          "http://java.sun.com/dtd/connector_1_0.dtd">
+
+<connector>
+   <display-name>Berkeley DB Java Edition JCA Adapter</display-name>
+   <vendor-name>Oracle</vendor-name>
+   <spec-version>1.0</spec-version>
+   <eis-type>Database</eis-type>
+   <version>2.0</version>
+   <license>
+      <description>
+      Berkeley DB Java Edition; license may be required for redistribution.
+      </description>
+      <license-required>true</license-required>
+   </license>
+   <resourceadapter>
+      <managedconnectionfactory-class>com.sleepycat.je.jca.ra.JEManagedConnectionFactory
+      </managedconnectionfactory-class>
+      <connectionfactory-interface>com.sleepycat.je.jca.ra.JEConnectionFactory
+      </connectionfactory-interface>
+      <connectionfactory-impl-class>com.sleepycat.je.jca.ra.JEConnectionFactoryImpl
+      </connectionfactory-impl-class>
+      <connection-interface>com.sleepycat.je.jca.ra.JEConnection
+      </connection-interface>
+      <connection-impl-class>com.sleepycat.je.jca.ra.JEConnectionImpl
+      </connection-impl-class>
+      <transaction-support>LocalTransaction</transaction-support>
+      <!--
+      <transaction-support>NoTransaction</transaction-support>
+      <transaction-support>XATransaction</transaction-support>
+      -->
+      <config-property>
+	 <config-property-name>UserName</config-property-name>
+	 <config-property-type>java.lang.String</config-property-type>
+	 <config-property-value></config-property-value>
+      </config-property>
+      <config-property>
+	 <config-property-name>Password</config-property-name>
+	 <config-property-type>java.lang.String</config-property-type>
+	 <config-property-value></config-property-value>
+      </config-property>
+      <authentication-mechanism>
+	 <authentication-mechanism-type>BasicPassword</authentication-mechanism-type>
+	 <credential-interface>javax.resource.security.PasswordCredential</credential-interface>
+      </authentication-mechanism>
+      <reauthentication-support>true</reauthentication-support>
+      <security-permission>
+         <description>Read/Write access is required to the contents of
+         the JERootDir</description>
+         <security-permission-spec>permission java.io.FilePermission
+         "/tmp/je_store/*", "read,write";</security-permission-spec>
+      </security-permission>
+   </resourceadapter>
+</connector>
diff --git a/src/com/sleepycat/je/jmx/JEMBeanHelper.java b/src/com/sleepycat/je/jmx/JEMBeanHelper.java
new file mode 100644
index 0000000000000000000000000000000000000000..bc6fbef69759371d38c5c66f863d73bd6c24519c
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/JEMBeanHelper.java
@@ -0,0 +1,776 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEMBeanHelper.java,v 1.16.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jmx;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.management.Attribute;
+import javax.management.AttributeNotFoundException;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanNotificationInfo;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanParameterInfo;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.StatsConfig;
+
+/**
+ * JEMBeanHelper is a utility class for MBean implementations that want to
+ * add management of a JE environment to their capabilities. An MBean
+ * implementation can contain a JEMBeanHelper instance to obtain MBean
+ * metadata for JE and to set attributes, get attributes, and invoke
+ * operations.
+ * <p>
+ * com.sleepycat.je.jmx.JEMonitor and the example program
+ * jmx.JEApplicationMBean are two MBean implementations which support
+ * different application use cases. See those classes for examples of how to
+ * use JEMBeanHelper.
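+ * <p>
+ * A minimal, hypothetical sketch of delegating to the helper from an MBean
+ * (the envHome variable and the surrounding MBean class are illustrative,
+ * and checked exceptions are elided):
+ * <pre>
+ *    JEMBeanHelper helper = new JEMBeanHelper(envHome, true);
+ *    Environment env = helper.getEnvironmentIfOpen();
+ *    List&lt;MBeanAttributeInfo&gt; attrs = helper.getAttributeList(env);
+ *    Object isOpen = helper.getAttribute(env, JEMBeanHelper.ATT_OPEN);
+ *    if (helper.getNeedReset()) {
+ *        // regenerate the MBean metadata from the helper's lists
+ *    }
+ * </pre>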
+ */
+
+public class JEMBeanHelper {
+
+    /*
+     * A note to JE developers: all available JE attributes and operations are
+     * described in the following static info arrays. New management
+     * functionality can be added to the helper by adding to the appropriate
+     * set of static definitions. For example, if we want to add a new JE
+     * attribute called "foo", which is available for open environments, we
+     * need to define a new MBeanAttributeInfo in the OPEN_ATTR array. The
+     * helper then needs to provide an implementation in set/getAttribute.
+     */
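+
+    /*
+     * A hypothetical sketch of such an addition (the "foo" attribute is
+     * illustrative only and is not defined by this class):
+     *
+     *     new MBeanAttributeInfo("foo",
+     *                            "java.lang.Long",
+     *                            "Hypothetical new attribute.",
+     *                            true,   // readable
+     *                            false,  // writable
+     *                            false)  // isIs
+     *
+     * would be appended to the OPEN_ATTR array, with a matching case added
+     * to getAttribute (and to setAttribute if it were writable).
+     */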
+
+    /* --------------------- Attributes -------------------------- */
+
+    /* Attribute names. */
+    public static final String ATT_ENV_HOME = "environmentHome";
+    public static final String ATT_OPEN = "isOpen";
+    public static final String ATT_IS_READ_ONLY = "isReadOnly";
+    public static final String ATT_IS_TRANSACTIONAL = "isTransactional";
+    public static final String ATT_CACHE_SIZE = "cacheSize";
+    public static final String ATT_CACHE_PERCENT = "cachePercent";
+    public static final String ATT_LOCK_TIMEOUT = "lockTimeout";
+    public static final String ATT_IS_SERIALIZABLE = "isSerializableIsolation";
+    public static final String ATT_TXN_TIMEOUT = "transactionTimeout";
+    public static final String ATT_SET_READ_ONLY = "openReadOnly";
+    public static final String ATT_SET_TRANSACTIONAL = "openTransactional";
+    public static final String ATT_SET_SERIALIZABLE =
+        "openSerializableIsolation";
+
+    /* COMMON_ATTR attributes are available for any environment. */
+    private static final MBeanAttributeInfo[] COMMON_ATTR = {
+
+        new MBeanAttributeInfo(ATT_ENV_HOME,
+                               "java.lang.String",
+                               "Environment home directory.",
+                               true,   // readable
+                               false,  // writable
+                               false), // isIs
+        new MBeanAttributeInfo(ATT_OPEN,
+                               "java.lang.Boolean",
+                               "True if this environment is open.",
+                               true,   // readable
+                               false,  // writable
+                               true)   // isIs
+    };
+
+    /* OPEN_ATTR attributes are available for all open environments. */
+    private static final MBeanAttributeInfo[] OPEN_ATTR = {
+
+        new MBeanAttributeInfo(ATT_IS_READ_ONLY,
+                               "java.lang.Boolean",
+                               "True if this environment is read only.",
+                               true,   // readable
+                               false,  // writable
+                               true),  // isIs
+        new MBeanAttributeInfo(ATT_IS_TRANSACTIONAL,
+                               "java.lang.Boolean",
+                             "True if this environment supports transactions.",
+                               true,   // readable
+                               false,  // writable
+                               true),  // isIs
+        new MBeanAttributeInfo(ATT_CACHE_SIZE,
+                               "java.lang.Long",
+                               "Cache size, in bytes.",
+                               true,   // readable
+                               true,   // writable
+                               false), // isIs
+        new MBeanAttributeInfo(ATT_CACHE_PERCENT,
+                               "java.lang.Integer",
+                               "By default, cache size is (cachePercent * " +
+                               "JVM maximum memory). To change the cache " +
+                               "size using a percentage of the heap size, " +
+                               "set the cache size to 0 and cachePercent " +
+                               "to the desired percentage value.",
+                               true,   // readable
+                               true,   // writable
+                               false), // isIs
+        new MBeanAttributeInfo(ATT_LOCK_TIMEOUT,
+                               "java.lang.Long",
+                               "Lock timeout, in microseconds.",
+                               true,   // readable
+                               false,  // writable
+                               false), // isIs
+    };
+
+    /*
+     * TRANSACTIONAL_ATTR attributes are available only for open, transactional
+     * environments.
+     */
+    private static final MBeanAttributeInfo[] TRANSACTIONAL_ATTR = {
+
+        new MBeanAttributeInfo(ATT_IS_SERIALIZABLE,
+                               "java.lang.Boolean",
+                               "True if this environment provides " +
+                               "Serializable (degree 3) isolation. The " +
+                               "default is RepeatableRead isolation.",
+                               true,   // readable
+                               false,  // writable
+                               true),  // isIs
+        new MBeanAttributeInfo(ATT_TXN_TIMEOUT,
+                               "java.lang.Long",
+                               "Transaction timeout, in seconds. A value " +
+                               "of 0 means there is no timeout.",
+                               true,   // readable
+                               false,  // writable
+                               false)  // isIs
+    };
+
+    /*
+     * CREATE_ATTR attributes are available when the mbean is configured to
+     * support configuration and opening by the mbean. They express the
+     * configuration settings.
+     */
+    private static final MBeanAttributeInfo[] CREATE_ATTR = {
+
+        new MBeanAttributeInfo(ATT_SET_READ_ONLY,
+                               "java.lang.Boolean",
+                               "True if this environment should be opened " +
+                               "in readonly mode.",
+                               true,   // readable
+                               true,   // writable
+                               false), // isIs
+        new MBeanAttributeInfo(ATT_SET_TRANSACTIONAL,
+                               "java.lang.Boolean",
+                               "True if this environment should be opened " +
+                               "in transactional mode.",
+                               true,   // readable
+                               true,   // writable
+                               false), // isIs
+        new MBeanAttributeInfo(ATT_SET_SERIALIZABLE,
+                               "java.lang.Boolean",
+                               "True if this environment should be opened " +
+                               "with serializableIsolation. The default is "+
+                               "false.",
+                               true,   // readable
+                               true,   // writable
+                               false), // isIs
+    };
+
+    /* --------------------- Operations  -------------------------- */
+
+    /* Operation names */
+    static final String OP_CLEAN = "cleanLog";
+    static final String OP_EVICT = "evictMemory";
+    static final String OP_CHECKPOINT = "checkpoint";
+    static final String OP_SYNC = "sync";
+    static final String OP_ENV_STAT = "getEnvironmentStats";
+    static final String OP_LOCK_STAT = "getLockStats";
+    static final String OP_TXN_STAT = "getTxnStats";
+    static final String OP_DB_NAMES = "getDatabaseNames";
+    static final String OP_DB_STAT = "getDatabaseStats";
+
+    private static final MBeanOperationInfo OP_CLEAN_INFO =
+        new MBeanOperationInfo(OP_CLEAN,
+                               "Remove obsolete environment log files. " +
+                               "Zero or more log files will be cleaned as " +
+                               "necessary to bring the disk space " +
+                               "utilization of the environment above the " +
+                               "configured minimum utilization threshold " +
+                               "as determined by the setting " +
+                               "je.cleaner.minUtilization. Returns the " +
+                               "number of files cleaned, which will be " +
+                               "deleted at the next qualifying checkpoint.",
+                               new MBeanParameterInfo[0], // no params
+                               "java.lang.Integer",
+                               MBeanOperationInfo.UNKNOWN);
+
+    private static final MBeanOperationInfo OP_EVICT_INFO =
+        new MBeanOperationInfo(OP_EVICT,
+                               "Reduce cache usage to the threshold " +
+                               "determined by the setting " +
+                               "je.evictor.useMemoryFloor. ",
+                               new MBeanParameterInfo[0], // no params
+                               "void",
+                               MBeanOperationInfo.UNKNOWN);
+
+    /* parameter for checkpoint operation. */
+    private static final MBeanParameterInfo[] checkpointParams = {
+        new MBeanParameterInfo ("force", "java.lang.Boolean",
+                                "If true, force a checkpoint even if " +
+                                "there has been no activity since the last " +
+                                "checkpoint.")
+    };
+
+    private static final MBeanOperationInfo OP_CHECKPOINT_INFO =
+        new MBeanOperationInfo(OP_CHECKPOINT,
+                               "Checkpoint the environment.",
+                               checkpointParams,
+                               "void",
+                               MBeanOperationInfo.UNKNOWN);
+
+    private static final MBeanOperationInfo OP_SYNC_INFO =
+        new MBeanOperationInfo(OP_SYNC,
+                               "Flush the environment to stable storage.",
+                               new MBeanParameterInfo[0], // no params
+                               "void",
+                               MBeanOperationInfo.UNKNOWN);
+
+    private static final MBeanParameterInfo[] statParams = {
+        new MBeanParameterInfo ("clear", "java.lang.Boolean",
+                                "If true, reset statistics after reading."),
+        new MBeanParameterInfo ("fast", "java.lang.Boolean",
+                                "If true, only return statistics which do " +
+                                "not require expensive computation.")
+
+    };
+
+    private static final MBeanOperationInfo OP_ENV_STAT_INFO =
+        new MBeanOperationInfo(OP_ENV_STAT,
+                               "Get environment statistics.",
+                               statParams,
+                               "com.sleepycat.je.EnvironmentStats",
+                               MBeanOperationInfo.INFO);
+
+    private static final MBeanOperationInfo OP_LOCK_STAT_INFO =
+        new MBeanOperationInfo(OP_LOCK_STAT,
+                               "Get locking statistics.",
+                               statParams,
+                               "com.sleepycat.je.LockStats",
+                               MBeanOperationInfo.INFO);
+
+    private static final MBeanOperationInfo OP_TXN_STAT_INFO =
+        new MBeanOperationInfo(OP_TXN_STAT,
+                               "Get transactional statistics.",
+                               statParams,
+                               "com.sleepycat.je.TransactionStats",
+                               MBeanOperationInfo.INFO);
+
+    private static final MBeanOperationInfo OP_DB_NAMES_INFO =
+        new MBeanOperationInfo(OP_DB_NAMES,
+                              "Get the names of databases in the environment.",
+                               new MBeanParameterInfo[0], // no params
+                               "java.util.ArrayList",
+                               MBeanOperationInfo.INFO);
+
+    private static final MBeanParameterInfo[] dbStatParams = {
+        new MBeanParameterInfo ("clear", "java.lang.Boolean",
+                                "If true, reset statistics after reading."),
+        new MBeanParameterInfo ("fast", "java.lang.Boolean",
+                                "If true, only return statistics which do " +
+                                "not require expensive computation. " +
+                                "Currently, no database stats are fast."),
+        new MBeanParameterInfo ("databaseName", "java.lang.String",
+                                "database name")
+
+    };
+
+    private static final MBeanOperationInfo OP_DB_STAT_INFO =
+        new MBeanOperationInfo(OP_DB_STAT,
+                               "Get database statistics.",
+                               dbStatParams,
+                               "com.sleepycat.je.DatabaseStats",
+                               MBeanOperationInfo.INFO);
+
+
+    /* target JE environment home directory. */
+    private File environmentHome;
+
+    /*
+     * If canConfigure is true, this helper will make environment configuration
+     * attributes available in the mbean metadata. Configuration attributes
+     * will be saved in the openConfig instance.
+     */
+    private boolean canConfigure;
+    private EnvironmentConfig openConfig;
+
+    /* true if the mbean metadata needs to be refreshed. */
+    private boolean needReset;
+
+    /*
+     * Save whether the environment was open the last time we fetched
+     * mbean attributes. Use to detect a change in environment status.
+     */
+    private boolean envWasOpen;
+
+    /**
+     * Instantiate a helper, specifying environment home and open capabilities.
+     *
+     * @param environmentHome home directory of the target JE environment.
+     * @param canConfigure If true, the helper will show environment
+     * configuration attributes.
+     */
+    public JEMBeanHelper(File environmentHome, boolean canConfigure) {
+
+        if (environmentHome == null) {
+            throw new IllegalArgumentException(
+                                        "Environment home cannot be null");
+        }
+        this.environmentHome = environmentHome;
+        this.canConfigure = canConfigure;
+        if (canConfigure) {
+            openConfig = new EnvironmentConfig();
+        }
+    }
+
+    /**
+     * Return the target environment directory.
+     * @return the environment directory.
+     */
+    public File getEnvironmentHome() {
+        return environmentHome;
+    }
+
+    /**
+     * If the helper was instantiated with canConfigure==true, it shows
+     * environment configuration attributes. Those attributes are returned
+     * within this EnvironmentConfig object for use in opening environments.
+     *
+     * @return EnvironmentConfig object which saves configuration attributes
+     * recorded through MBean attributes.
+     */
+    public EnvironmentConfig getEnvironmentOpenConfig() {
+        return openConfig;
+    }
+
+    /**
+     * Return an Environment only if the environment has already been opened
+     * in this process. A helper method for MBeans which want to only access
+     * open environments.
+     * @return Environment if already open, null if not open.
+     */
+    public Environment getEnvironmentIfOpen() {
+        if (environmentHome == null) {
+            return null;
+        }
+
+        return DbInternal.getEnvironmentShell(environmentHome);
+    }
+
+    /**
+     * Tell the MBean if the available set of functionality has changed.
+     *
+     * @return true if the MBean should regenerate its JE metadata.
+     */
+    public synchronized boolean getNeedReset() {
+        return needReset;
+    }
+
+    /********************************************************************/
+    /* MBean Attributes                                                 */
+    /********************************************************************/
+
+    /**
+     * Get MBean attribute metadata for this environment.
+     * @param targetEnv The target JE environment. May be null if the
+     * environment is not open.
+     * @return list of MBeanAttributeInfo objects describing the available
+     * attributes.
+     */
+    public List<MBeanAttributeInfo> getAttributeList(Environment targetEnv) {
+
+        /* Turn off reset because the mbean metadata is being refreshed. */
+        setNeedReset(false);
+
+        ArrayList<MBeanAttributeInfo> attrList = 
+            new ArrayList<MBeanAttributeInfo>();
+
+        /* Add attributes for all JE environments. */
+        for (int i = 0; i < COMMON_ATTR.length; i++) {
+            attrList.add(COMMON_ATTR[i]);
+        }
+
+        if (targetEnv == null) {
+            if (canConfigure) {
+                /* Add attributes for configuring an environment. */
+                for (int i = 0; i < CREATE_ATTR.length; i++) {
+                    attrList.add(CREATE_ATTR[i]);
+                }
+            }
+        } else {
+            /* Add attributes for an open environment. */
+            for (int i = 0; i < OPEN_ATTR.length; i++) {
+                attrList.add(OPEN_ATTR[i]);
+            }
+
+            /* Add attributes for an open, transactional environment. */
+            try {
+                EnvironmentConfig config = targetEnv.getConfig();
+                if (config.getTransactional()) {
+                    for (int i = 0; i < TRANSACTIONAL_ATTR.length; i++) {
+                        attrList.add(TRANSACTIONAL_ATTR[i]);
+                    }
+                }
+            } catch (DatabaseException ignore) {
+            	/* ignore */
+            }
+        }
+
+        return attrList;
+    }
+
+    /**
+     * Get an attribute value for the given environment. Check
+     * JEMBeanHelper.getNeedReset() after this call because the helper may
+     * detect that the environment has changed and that the MBean metadata
+     * should be reset.
+     *
+     * @param targetEnv The target JE environment. May be null if the
+     * environment is not open.
+     * @param attributeName attribute name.
+     * @return attribute value.
+     */
+    public Object getAttribute(Environment targetEnv,
+                               String attributeName)
+        throws AttributeNotFoundException,
+               MBeanException {
+
+        /* Sanity check. */
+        if (attributeName == null) {
+            throw new AttributeNotFoundException(
+                                            "Attribute name cannot be null");
+        }
+
+        /* These attributes are available regardless of environment state. */
+        try {
+            if (attributeName.equals(ATT_ENV_HOME)) {
+                return environmentHome.getCanonicalPath();
+            } else if (attributeName.equals(ATT_OPEN)) {
+                boolean envIsOpen = (targetEnv != null);
+                resetIfOpenStateChanged(envIsOpen);
+                return new Boolean(envIsOpen);
+            } else if (attributeName.equals(ATT_SET_READ_ONLY)) {
+                return new Boolean(openConfig.getReadOnly());
+            } else if (attributeName.equals(ATT_SET_TRANSACTIONAL)) {
+                return new Boolean(openConfig.getTransactional());
+            } else if (attributeName.equals(ATT_SET_SERIALIZABLE)) {
+                return new Boolean(openConfig.getTxnSerializableIsolation());
+            } else {
+                /* The rest are JE environment attributes. */
+                if (targetEnv != null) {
+
+                    EnvironmentConfig config = targetEnv.getConfig();
+
+                    if (attributeName.equals(ATT_IS_READ_ONLY)) {
+                        return new Boolean(config.getReadOnly());
+                    } else if (attributeName.equals(ATT_IS_TRANSACTIONAL)) {
+                        return new Boolean(config.getTransactional());
+                    } else if (attributeName.equals(ATT_CACHE_SIZE)) {
+                        return new Long(config.getCacheSize());
+                    } else if (attributeName.equals(ATT_CACHE_PERCENT)) {
+                        return new Integer(config.getCachePercent());
+                    } else if (attributeName.equals(ATT_LOCK_TIMEOUT)) {
+                        return new Long(config.getLockTimeout());
+                    } else if (attributeName.equals(ATT_IS_SERIALIZABLE)) {
+                        return new
+                            Boolean(config.getTxnSerializableIsolation());
+                    } else if (attributeName.equals(ATT_TXN_TIMEOUT)) {
+                        return new Long(config.getTxnTimeout());
+                    } else {
+                        throw new AttributeNotFoundException("attribute " +
+                                                             attributeName +
+                                                             " is not valid.");
+                    }
+                }
+                return null;
+            }
+        } catch (Exception e) {
+            /*
+             * Add both the message and the exception for easiest deciphering
+             * of the problem. Sometimes the original exception stacktrace gets
+             * hidden in server logs.
+             */
+            throw new MBeanException(e, e.getMessage());
+        }
+    }
+
+    /**
+     * Set an attribute value for the given environment.
+     *
+     * @param targetEnv The target JE environment. May be null if the
+     * environment is not open.
+     * @param attribute name/value pair
+     */
+    public void setAttribute(Environment targetEnv,
+                             Attribute attribute)
+        throws AttributeNotFoundException,
+               InvalidAttributeValueException {
+
+        if (attribute == null) {
+            throw new AttributeNotFoundException("Attribute cannot be null");
+        }
+
+        /* Sanity check parameters. */
+        String name = attribute.getName();
+        Object value = attribute.getValue();
+
+	if (name == null) {
+	    throw new AttributeNotFoundException(
+                                     "Attribute name cannot be null");
+	}
+
+	if (value == null) {
+	    throw new InvalidAttributeValueException(
+                                      "Attribute value for attribute " +
+                                      name + " cannot be null");
+	}
+
+        try {
+            if (name.equals(ATT_SET_READ_ONLY)) {
+                openConfig.setReadOnly(((Boolean) value).booleanValue());
+            } else if (name.equals(ATT_SET_TRANSACTIONAL)) {
+                openConfig.setTransactional(((Boolean) value).booleanValue());
+            } else if (name.equals(ATT_SET_SERIALIZABLE)) {
+                openConfig.setTxnSerializableIsolation(
+                                             ((Boolean) value).booleanValue());
+            } else {
+                /* Set the specified attribute if the environment is open. */
+                if (targetEnv != null) {
+
+                    EnvironmentMutableConfig config =
+                        targetEnv.getMutableConfig();
+
+                    if (name.equals(ATT_CACHE_SIZE)) {
+                        config.setCacheSize(((Long) value).longValue());
+                        targetEnv.setMutableConfig(config);
+                    } else if (name.equals(ATT_CACHE_PERCENT)) {
+                        config.setCachePercent(((Integer) value).intValue());
+                        targetEnv.setMutableConfig(config);
+                    } else {
+                        throw new AttributeNotFoundException("attribute " +
+                                                             name +
+                                                             " is not valid.");
+                    }
+                } else {
+                    throw new AttributeNotFoundException("attribute " +
+                                                         name +
+                                                         " is not valid.");
+                }
+            }
+        } catch (NumberFormatException e) {
+            throw new InvalidAttributeValueException("attribute name=" + name);
+        } catch (DatabaseException e) {
+            throw new InvalidAttributeValueException("attribute name=" + name +
+                                                     ": " + e.getMessage());
+        }
+    }
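
A minimal sketch of driving setAttribute directly (not part of the patch): it assumes ATT_CACHE_SIZE is a public String constant on JEMBeanHelper, that the helper's constructor and getEnvironmentIfOpen behave as JEMonitor (later in this patch) uses them, and that another component has already opened the environment. The home directory path is hypothetical.

```java
import java.io.File;
import javax.management.Attribute;
import com.sleepycat.je.Environment;
import com.sleepycat.je.jmx.JEMBeanHelper;

public class SetCacheSizeSketch {
    public static void main(String[] args) throws Exception {
        /* Hypothetical home directory; must match an already-open env. */
        JEMBeanHelper helper =
            new JEMBeanHelper(new File("/path/to/env"), false);
        Environment env = helper.getEnvironmentIfOpen();
        try {
            if (env != null) {
                /* The cache-size attribute expects a Long; here, 64 MB. */
                helper.setAttribute(env,
                    new Attribute(JEMBeanHelper.ATT_CACHE_SIZE,
                                  Long.valueOf(64L * 1024 * 1024)));
            }
        } finally {
            if (env != null) {
                env.close(); /* temporary handle; release it promptly */
            }
        }
    }
}
```
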
+
+    /********************************************************************/
+    /* JE Operations                                                    */
+    /********************************************************************/
+
+    /**
+     * Get mbean operation metadata for this environment.
+     *
+     * @param targetEnv The target JE environment. May be null if the
+     * environment is not open.
+     * @return List of MBeanOperationInfo describing available operations.
+     */
+    public List<MBeanOperationInfo> getOperationList(Environment targetEnv) {
+        setNeedReset(false);
+
+        List<MBeanOperationInfo> operationList = 
+            new ArrayList<MBeanOperationInfo>();
+
+        if (targetEnv != null) {
+            /*
+             * These operations are only available if the environment is
+             * open.
+             */
+            operationList.add(OP_CLEAN_INFO);
+            operationList.add(OP_EVICT_INFO);
+            operationList.add(OP_ENV_STAT_INFO);
+            operationList.add(OP_LOCK_STAT_INFO);
+            operationList.add(OP_DB_NAMES_INFO);
+            operationList.add(OP_DB_STAT_INFO);
+
+            /* Add checkpoint only for transactional environments. */
+            boolean isTransactional = false;
+            try {
+                EnvironmentConfig config = targetEnv.getConfig();
+                isTransactional = config.getTransactional();
+            } catch (DatabaseException e) {
+                /* Don't make any operations available. */
+                return new ArrayList<MBeanOperationInfo>();
+            }
+
+            if (isTransactional) {
+                operationList.add(OP_CHECKPOINT_INFO);
+                operationList.add(OP_TXN_STAT_INFO);
+            } else {
+                operationList.add(OP_SYNC_INFO);
+            }
+        }
+
+        return operationList;
+    }
+
+    /**
+     * Invoke an operation for the given environment.
+     *
+     * @param targetEnv The target JE environment. May be null if the
+     * environment is not open.
+     * @param actionName operation name.
+     * @param params operation parameters. May be null.
+     * @param signature operation signature. May be null.
+     * @return the operation result
+     */
+    public Object invoke(Environment targetEnv,
+                         String actionName,
+                         Object[] params,
+                         String[] signature)
+        throws MBeanException {
+
+        /* Sanity checking. */
+        if (actionName == null) {
+            throw new IllegalArgumentException("actionName cannot be null");
+        }
+
+        try {
+            if (targetEnv != null) {
+                if (actionName.equals(OP_CLEAN)) {
+                    int numFiles = targetEnv.cleanLog();
+                    return new Integer(numFiles);
+                } else if (actionName.equals(OP_EVICT)) {
+                    targetEnv.evictMemory();
+                    return null;
+                } else if (actionName.equals(OP_CHECKPOINT)) {
+                    CheckpointConfig config = new CheckpointConfig();
+                    if ((params != null) && (params.length > 0)) {
+                        Boolean force = (Boolean) params[0];
+                        config.setForce(force.booleanValue());
+                    }
+                    targetEnv.checkpoint(config);
+                    return null;
+                } else if (actionName.equals(OP_SYNC)) {
+                    targetEnv.sync();
+                    return null;
+                } else if (actionName.equals(OP_ENV_STAT)) {
+                    return targetEnv.getStats(getStatsConfig(params));
+                } else if (actionName.equals(OP_LOCK_STAT)) {
+                    return targetEnv.getLockStats(getStatsConfig(params));
+                } else if (actionName.equals(OP_TXN_STAT)) {
+                    return targetEnv.getTransactionStats(
+                                                       getStatsConfig(params));
+                } else if (actionName.equals(OP_DB_NAMES)) {
+                    return targetEnv.getDatabaseNames();
+                } else if (actionName.equals(OP_DB_STAT)) {
+                    return getDatabaseStats(targetEnv, params);
+                }
+            }
+
+            throw new IllegalArgumentException("actionName: " +
+                                               actionName +
+                                               " is not valid");
+        } catch (DatabaseException e) {
+            /*
+             * Add both the message and the exception for easiest
+             * deciphering of the problem. Sometimes the original exception
+             * stacktrace gets hidden in server logs.
+             */
+            throw new MBeanException(e, e.getMessage());
+        }
+    }
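
Continuing the sketch above, an operation can be invoked through the same helper. This hedged fragment assumes OP_CLEAN is a public String constant on JEMBeanHelper and that `helper` and `env` are in scope as before; per the code above, the result is the Integer count of files cleaned.

```java
Object cleaned = helper.invoke(env, JEMBeanHelper.OP_CLEAN,
                               null,   /* no parameters */
                               null);  /* signature unused here */
System.out.println("log files cleaned: " + cleaned);
```
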
+
+    /**
+     * Helper for creating a StatsConfig object to use as an operation
+     * parameter.
+     */
+    private StatsConfig getStatsConfig(Object[] params) {
+        StatsConfig statsConfig = new StatsConfig();
+        if ((params != null) && (params.length > 0) && (params[0] != null)) {
+            Boolean clear = (Boolean) params[0];
+            statsConfig.setClear(clear.booleanValue());
+        }
+        if ((params != null) && (params.length > 1) && (params[1] != null)) {
+            Boolean fast = (Boolean) params[1];
+            statsConfig.setFast(fast.booleanValue());
+        }
+        return statsConfig;
+    }
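
The positional convention implied by getStatsConfig and getDatabaseStats below is: params[0] selects clearing stats after reading, params[1] selects fast stats, and params[2] carries the database name for the database-stat operation. A sketch, with a hypothetical database name:

```java
Object[] statParams = { Boolean.TRUE,    /* params[0]: clear after reading */
                        Boolean.FALSE,   /* params[1]: full, not fast */
                        "myDatabase" };  /* params[2]: hypothetical DB name */
```
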
+
+    /**
+     * Helper to get statistics for a given database.
+     * @param params operation parameters
+     * @return DatabaseStats object
+     */
+    private DatabaseStats getDatabaseStats(Environment targetEnv,
+                                           Object[] params)
+        throws IllegalArgumentException,
+	       DatabaseException {
+
+        if ((params == null) || (params.length < 3)) {
+            return null;
+        }
+        String dbName = (String)params[2];
+
+        Database db = null;
+        try {
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            DbInternal.setUseExistingConfig(dbConfig, true);
+            db = targetEnv.openDatabase(null, dbName, dbConfig);
+            return db.getStats(getStatsConfig(params));
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+        }
+    }
+
+    /********************************************************************/
+    /* JE Notifications.                                                */
+    /********************************************************************/
+
+    /**
+     * No notifications are supported.
+     * @return null, since no notifications are supported.
+     */
+    public MBeanNotificationInfo[]
+        getNotificationInfo(Environment targetEnv) {
+        return null;
+    }
+
+    /********************************************************************/
+    /* Private helpers.                                                 */
+    /********************************************************************/
+
+    private synchronized void setNeedReset(boolean reset) {
+        needReset = reset;
+    }
+
+    private synchronized void resetIfOpenStateChanged(boolean isOpen) {
+        if (isOpen != envWasOpen) {
+            setNeedReset(true);
+            envWasOpen = isOpen;
+        }
+    }
+}
+
diff --git a/src/com/sleepycat/je/jmx/JEMonitor.java b/src/com/sleepycat/je/jmx/JEMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..0df9f3504f43c9ca9de68aef182340d6386e59bf
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/JEMonitor.java
@@ -0,0 +1,362 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEMonitor.java,v 1.10.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.jmx;
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.util.List;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.AttributeNotFoundException;
+import javax.management.DynamicMBean;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanConstructorInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanNotificationInfo;
+import javax.management.MBeanOperationInfo;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+
+/**
+ * JEMonitor is a JMX MBean which manages a JE environment.
+ * The MBean may be installed as is, or used as a starting point for building
+ * an MBean which includes JE support. JEMonitor expects another component in
+ * the JVM to configure and open the JE environment; it will only access a JE
+ * environment that is already active. It is intended for these use cases:
+ * <ul>
+ * <li>
+ * The application wants to add database monitoring with minimal effort and
+ * little knowledge of JMX. Configuring JEMonitor within the JMX container
+ * provides monitoring without requiring application code changes. </li>
+ *
+ * <li>
+ * An application already supports JMX and wants to add database monitoring
+ * without modifying its existing MBean.  The user can configure JEMonitor in
+ * the JMX container in conjunction with other application MBeans that are
+ * non-overlapping with JE monitoring.  No application code changes are
+ * required. </li>
+ * </ul>
+ * <p>
+ * In this MBean, JE management is divided between the JEMonitor class and
+ * JEMBeanHelper class. JEMonitor contains an instance of JEMBeanHelper, which
+ * knows about JE attributes, operations and notifications. JEMonitor itself
+ * has the responsibility of obtaining a temporary handle for the JE
+ * environment.
+ * <p>
+ * The key implementation choice for a JE MBean is the approach taken for
+ * accessing the JE environment. Some of the salient considerations are:
+ * <ul>
+ * <li>Applications may open one or many Environment objects per process
+ * against a given environment.</li>
+ *
+ * <li>All Environment handles reference the same underlying JE environment
+ * implementation object.</li>
+ *
+ * <li> The first Environment object instantiated in the process does the real
+ * work of configuring and opening the environment. Follow-on instantiations of
+ * Environment merely increment a reference count. Likewise,
+ * Environment.close() only does real work when it's called by the last
+ * Environment object in the process. </li>
+ * </ul>
+ * <p>
+ * Because of these considerations, JEMonitor avoids holding a JE environment
+ * handle in order to not impact the environment lifetime. Any environment
+ * handles used are held temporarily.
+ */
+public class JEMonitor implements DynamicMBean {
+
+    private static final String DESCRIPTION =
+        "Monitor an open Berkeley DB, Java Edition environment.";
+
+    private MBeanInfo mbeanInfo;    // this MBean's visible interface.
+    private JEMBeanHelper jeHelper; // gets JE management interface.
+
+    /**
+     * Instantiate a JEMonitor.
+     *
+     * @param environmentHome home directory of the target JE environment.
+     */
+    public JEMonitor(String environmentHome)
+        throws MBeanException {
+
+        File environmentDirectory = new File(environmentHome);
+        jeHelper = new JEMBeanHelper(environmentDirectory, false);
+
+        Environment targetEnv = getEnvironmentIfOpen();
+        try {
+            resetMBeanInfo(targetEnv);
+        } finally {
+            closeEnvironment(targetEnv);
+        }
+    }
+
+    /**
+     * @see DynamicMBean#getAttribute
+     */
+    public Object getAttribute(String attributeName)
+        throws AttributeNotFoundException,
+               MBeanException {
+
+        Object result = null;
+        Environment targetEnv = getEnvironmentIfOpen();
+        try {
+            result =  jeHelper.getAttribute(targetEnv, attributeName);
+            targetEnv = checkForMBeanReset(targetEnv);
+        } finally {
+            /* release resource. */
+            closeEnvironment(targetEnv);
+        }
+
+        return result;
+    }
+
+    /**
+     * @see DynamicMBean#setAttribute
+     */
+    public void setAttribute(Attribute attribute)
+        throws AttributeNotFoundException,
+               InvalidAttributeValueException,
+               MBeanException {
+
+        Environment targetEnv = getEnvironmentIfOpen();
+        try {
+            jeHelper.setAttribute(targetEnv, attribute);
+        } finally {
+            /* release resources. */
+            closeEnvironment(targetEnv);
+        }
+    }
+
+    /**
+     * @see DynamicMBean#getAttributes
+     */
+    public AttributeList getAttributes(String[] attributes) {
+
+        /* Sanity checking. */
+        if (attributes == null) {
+            throw new IllegalArgumentException("Attributes cannot be null");
+        }
+
+        /* Get each requested attribute. */
+        AttributeList results = new AttributeList();
+        Environment targetEnv = getEnvironmentIfOpen();
+
+        try {
+            for (int i = 0; i < attributes.length; i++) {
+                try {
+                    String name = attributes[i];
+                    Object value = jeHelper.getAttribute(targetEnv, name);
+
+                    /*
+                     * jeHelper may notice that the environment state has
+                     * changed. If so, this mbean must update its interface.
+                     */
+                    targetEnv = checkForMBeanReset(targetEnv);
+
+                    results.add(new Attribute(name, value));
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+            }
+            return results;
+        } finally {
+            try {
+                /* release resources. */
+                closeEnvironment(targetEnv);
+            } catch (MBeanException ignore) {
+                /* ignore */
+            }
+        }
+    }
+
+    /**
+     * @see DynamicMBean#setAttributes
+     */
+    public AttributeList setAttributes(AttributeList attributes) {
+
+        /* Sanity checking. */
+        if (attributes == null) {
+            throw new IllegalArgumentException("attribute list can't be null");
+        }
+
+        /* Set each attribute specified. */
+        AttributeList results = new AttributeList();
+        Environment targetEnv = getEnvironmentIfOpen();
+
+        try {
+            for (int i = 0; i < attributes.size(); i++) {
+                Attribute attr = (Attribute) attributes.get(i);
+                try {
+                    /* Set new value. */
+                    jeHelper.setAttribute(targetEnv, attr);
+
+                    /*
+                     * Add the name and new value to the result list. Be sure
+                     * to ask the MBean for the new value, rather than simply
+                     * using attr.getValue(), because the value actually set
+                     * may have been adjusted by the JE implementation.
+                     */
+                    String name = attr.getName();
+                    Object newValue = jeHelper.getAttribute(targetEnv, name);
+                    results.add(new Attribute(name, newValue));
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+            }
+            return results;
+        } finally {
+            try {
+                /* release resources. */
+                closeEnvironment(targetEnv);
+            } catch (MBeanException ignore) {
+                /* ignore */
+            }
+        }
+    }
+
+    /**
+     * @see DynamicMBean#invoke
+     */
+    public Object invoke(String actionName,
+                         Object[] params,
+                         String[] signature)
+        throws MBeanException {
+
+        Object result = null;
+        Environment targetEnv = getEnvironmentIfOpen();
+        try {
+            result = jeHelper.invoke(targetEnv, actionName,
+                                     params, signature);
+        } finally {
+            /* release resources. */
+            closeEnvironment(targetEnv);
+        }
+
+        return result;
+    }
+
+    /**
+     * @see DynamicMBean#getMBeanInfo
+     */
+    public MBeanInfo getMBeanInfo() {
+
+        return mbeanInfo;
+    }
+
+    /**
+     * The JEHelper may detect a change in environment attributes that
+     * results in a change in management functionality.  Reset the
+     * MBeanInfo if needed and refresh the temporary environment handle.
+     *
+     * @param targetEnv the temporary JE environment handle
+     * @return new environment handle to replace targetEnv. Must be released
+     * by the caller.
+     */
+    private Environment checkForMBeanReset(Environment targetEnv)
+        throws MBeanException {
+
+        Environment env = targetEnv;
+        if (jeHelper.getNeedReset()) {
+
+            /* Refresh the environment handle. */
+            closeEnvironment(env);
+            env = getEnvironmentIfOpen();
+            resetMBeanInfo(env);
+        }
+        return env;
+    }
+
+    /**
+     * Create the available management interface for this environment.
+     * The attributes and operations available vary according to
+     * environment configuration.
+     *
+     * @param targetEnv an environment handle for the targeted
+     * application. May be null if the environment is not open.
+     */
+    private void resetMBeanInfo(Environment targetEnv) {
+
+        /*
+         * Get JE attributes, operation and notification information
+         * from JEMBeanHelper. An application may choose to add functionality
+         * of its own when constructing the MBeanInfo.
+         */
+
+        /* Attributes. */
+        List<MBeanAttributeInfo> attributeList =  
+            jeHelper.getAttributeList(targetEnv);
+        MBeanAttributeInfo[] attributeInfo =
+            new MBeanAttributeInfo[attributeList.size()];
+        attributeList.toArray(attributeInfo);
+
+        /* Constructors. */
+        Constructor[] constructors = this.getClass().getConstructors();
+        MBeanConstructorInfo[] constructorInfo =
+            new MBeanConstructorInfo[constructors.length];
+        for (int i = 0; i < constructors.length; i++) {
+            constructorInfo[i] =
+                new MBeanConstructorInfo(this.getClass().getName(),
+                                         constructors[i]);
+        }
+
+        /* Operations. */
+        List<MBeanOperationInfo> operationList = 
+            jeHelper.getOperationList(targetEnv);
+        MBeanOperationInfo[] operationInfo =
+            new MBeanOperationInfo[operationList.size()];
+        operationList.toArray(operationInfo);
+
+        /* Notifications. */
+        MBeanNotificationInfo[] notificationInfo =
+            jeHelper.getNotificationInfo(targetEnv);
+
+        /* Generate the MBean description. */
+        mbeanInfo = new MBeanInfo(this.getClass().getName(),
+                                  DESCRIPTION,
+                                  attributeInfo,
+                                  constructorInfo,
+                                  operationInfo,
+                                  notificationInfo);
+    }
+
+    /**
+     * This MBean has the policy of only accessing an environment when
+     * it has already been configured and opened by other
+     * application threads.
+     *
+     * @return a valid Environment or null if the environment is not open
+     */
+    protected Environment getEnvironmentIfOpen() {
+
+        return jeHelper.getEnvironmentIfOpen();
+    }
+
+    /**
+     * Be sure to close Environments when they are no longer used, because
+     * they pin down resources.
+     *
+     * @param targetEnv the open environment. May be null.
+     */
+    protected void closeEnvironment(Environment targetEnv)
+        throws MBeanException {
+
+        try {
+            if (targetEnv != null) {
+                targetEnv.close();
+            }
+        } catch (DatabaseException e) {
+            throw new MBeanException(e);
+        }
+    }
+}
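
The class comment above describes deployment in a JMX container; as a hedged sketch, JEMonitor can also be registered programmatically against the JDK platform MBean server. The environment home and ObjectName below are hypothetical, and the home must name an environment some other component in the JVM has already opened.

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import com.sleepycat.je.jmx.JEMonitor;

public class RegisterJEMonitor {
    public static void main(String[] args) throws Exception {
        /* Hypothetical home directory of an already-open environment. */
        JEMonitor monitor = new JEMonitor("/path/to/env");
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        /* Hypothetical ObjectName; any valid domain/key pair works. */
        ObjectName name = new ObjectName("com.sleepycat.je:type=JEMonitor");
        server.registerMBean(monitor, name);
        /* The attributes and operations are now reachable from any JMX
         * client, e.g. jconsole. */
    }
}
```
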
diff --git a/src/com/sleepycat/je/jmx/package.html b/src/com/sleepycat/je/jmx/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..e8ff881b8a4e1610ccfd9380c5d892d8a2df96a5
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/package.html
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2005,2008 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.8 2008/06/04 15:33:16 mark Exp $
+
+-->
+</head>
+<body>
+JE support for JMX.
+
+<h2>Package Specification</h2>
+This package provides support for creating JMX MBeans for JE. It
+contains a deployable MBean and a helper class for creating
+application specific MBeans.
+
+
+@see "&lt;jeHome&gt;/examples/jmx contains a README for
+additional information on how to incorporate JE instrumentation into
+your existing application MBean."    
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/latch/Latch.java b/src/com/sleepycat/je/latch/Latch.java
new file mode 100644
index 0000000000000000000000000000000000000000..28d4f4efc601d651057241e0eba6ef036c7709c4
--- /dev/null
+++ b/src/com/sleepycat/je/latch/Latch.java
@@ -0,0 +1,233 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Latch.java,v 1.92.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import java.util.concurrent.locks.ReentrantLock;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+public class Latch {
+
+    /*
+     * Required because getOwner() is protected (for unknown reasons) and can't
+     * be accessed except by a subclass of ReentrantLock.
+     */
+    @SuppressWarnings("serial")
+    private static class JEReentrantLock extends ReentrantLock {
+	JEReentrantLock(boolean fair) {
+	    super(fair);
+	}
+
+        @Override
+	protected Thread getOwner() {
+	    return super.getOwner();
+	}
+    }
+
+    private JEReentrantLock lock;
+    private String name;
+    private LatchStats stats = new LatchStats();
+
+    public Latch(String name) {
+	lock = new JEReentrantLock(EnvironmentImpl.getFairLatches());
+	this.name = name;
+    }
+
+    /**
+     * Set the latch name, used for latches in objects instantiated from
+     * the log.
+     */
+    public void setName(String name) {
+	this.name = name;
+    }
+
+    /**
+     * Acquire a latch for exclusive/write access.
+     *
+     * <p>Wait for the latch if some other thread is holding it.  If there are
+     * threads waiting for access, they will be granted the latch on a FIFO
+     * basis.  When the method returns, the latch is held for exclusive
+     * access.</p>
+     *
+     * @throws LatchException if the latch is already held by the calling
+     * thread.
+     *
+     * @throws RunRecoveryException if an InterruptedException exception
+     * occurs.
+     */
+    public void acquire()
+	throws DatabaseException {
+
+        try {
+	    if (lock.isHeldByCurrentThread()) {
+		stats.nAcquiresSelfOwned++;
+		throw new LatchException(name + " already held");
+	    }
+
+	    if (lock.isLocked()) {
+		stats.nAcquiresWithContention++;
+	    } else {
+		stats.nAcquiresNoWaiters++;
+	    }
+
+	    lock.lock();
+
+            assert noteLatch(); // intentional side effect;
+	} finally {
+	    assert EnvironmentImpl.maybeForceYield();
+	}
+    }
+
+    /**
+     * Acquire a latch for exclusive/write access, but do not block if it's not
+     * available.
+     *
+     * @return true if the latch was acquired, false if it is not available.
+     *
+     * @throws LatchException if the latch is already held by the calling
+     * thread.
+     */
+    public boolean acquireNoWait()
+	throws LatchException {
+
+        try {
+	    if (lock.isHeldByCurrentThread()) {
+		stats.nAcquiresSelfOwned++;
+		throw new LatchException(name + " already held");
+	    }
+
+	    boolean ret = lock.tryLock();
+	    if (ret) {
+		assert noteLatch();
+		stats.nAcquireNoWaitSuccessful++;
+	    } else {
+		stats.nAcquireNoWaitUnsuccessful++;
+	    }
+	    return ret;
+	} finally {
+	    assert EnvironmentImpl.maybeForceYield();
+	}
+    }
+
+    /**
+     * Release the latch.  If there are other thread(s) waiting for the latch,
+     * one is woken up and granted the latch. If the latch was not owned by
+     * the caller, just return.
+     */
+    public void releaseIfOwner() {
+	doRelease(false);
+    }
+
+    /**
+     * Release the latch.  If there are other thread(s) waiting for the latch,
+     * they are woken up and granted the latch.
+     *
+     * @throws LatchNotHeldException if the latch is not currently held.
+     */
+    public void release()
+	throws LatchNotHeldException {
+
+	if (doRelease(true)) {
+            throw new LatchNotHeldException(name + " not held");
+        }
+    }
+
+    /**
+     * Do the work of releasing the latch. Wake up any waiters.
+     *
+     * @return true if this latch was not owned by the caller.
+     */
+    private boolean doRelease(boolean checkHeld) {
+
+	try {
+	    if (!lock.isHeldByCurrentThread()) {
+		return true;
+	    }
+	    lock.unlock();
+	    stats.nReleases++;
+	    assert unNoteLatch(checkHeld); // intentional side effect.
+	} catch (IllegalMonitorStateException IMSE) {
+	    return true;
+	}
+	return false;
+    }
+
+    /**
+     * Return true if the current thread holds this latch.
+     *
+     * @return true if we hold this latch.  False otherwise.
+     */
+    public boolean isOwner() {
+	return lock.isHeldByCurrentThread();
+    }
+
+    /**
+     * Used only for unit tests.
+     *
+     * @return the thread that currently holds the latch for exclusive access.
+     */
+    public Thread owner() {
+	return lock.getOwner();
+    }
+
+    /**
+     * Return the number of threads waiting.
+     *
+     * @return the number of threads waiting for the latch.
+     */
+    public int nWaiters() {
+	return lock.getQueueLength();
+    }
+
+    /**
+     * @return a LatchStats object with information about this latch.
+     */
+    public LatchStats getLatchStats() {
+	LatchStats s = null;
+	try {
+	    s = (LatchStats) stats.clone();
+	} catch (CloneNotSupportedException e) {
+	    /* Klocwork - ok */
+	}
+	return s;
+    }
+
+    /**
+     * Formats a latch owner and waiters.
+     */
+    @Override
+    public String toString() {
+	return lock.toString();
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     */
+    private boolean noteLatch()
+	throws LatchException {
+
+        return LatchSupport.latchTable.noteLatch(this);
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     */
+    private boolean unNoteLatch(boolean checkHeld) {
+
+        /* Only return a false status if we are checking for latch ownership.*/
+        if (checkHeld) {
+            return LatchSupport.latchTable.unNoteLatch(this, name);
+        } else {
+            LatchSupport.latchTable.unNoteLatch(this, name);
+            return true;
+        }
+    }
+}
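
A minimal usage sketch of the exclusive latch API above; the latch name is arbitrary and the critical section is a placeholder. Latches are non-reentrant, so acquire() throws if the calling thread already holds the latch, and release() must run on the acquiring thread.

```java
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.latch.Latch;

class LatchUsageSketch {
    private final Latch latch = new Latch("exampleLatch");

    void doCriticalWork() throws DatabaseException {
        latch.acquire();       /* throws LatchException if already held */
        try {
            /* ... short, non-blocking critical section ... */
        } finally {
            latch.release();   /* the acquiring thread must release */
        }
    }
}
```
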
diff --git a/src/com/sleepycat/je/latch/LatchException.java b/src/com/sleepycat/je/latch/LatchException.java
new file mode 100644
index 0000000000000000000000000000000000000000..9d9160e0f5821f52d130ee148b1f56907faa23a9
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchException.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchException.java,v 1.20.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * The root of latch-related exceptions.
+ */
+public class LatchException extends DatabaseException {
+
+    public LatchException(String message) {
+	super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/latch/LatchNotHeldException.java b/src/com/sleepycat/je/latch/LatchNotHeldException.java
new file mode 100644
index 0000000000000000000000000000000000000000..c083c1786bd24607c1ab928de638313eb1e764cd
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchNotHeldException.java
@@ -0,0 +1,20 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchNotHeldException.java,v 1.18.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+/**
+ * An exception that is thrown when a latch is not held but a method is invoked
+ * on it that assumes it is held.
+ */
+public class LatchNotHeldException extends LatchException {
+
+    public LatchNotHeldException(String message) {
+	super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/latch/LatchStats.java b/src/com/sleepycat/je/latch/LatchStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..389e4e4158ab8bb9b0bfa9e4c71e16ebd5a2b448
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchStats.java
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchStats.java,v 1.26.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import java.io.Serializable;
+
+/**
+ * A class that provides interesting stats about a particular latch.
+ */
+public class LatchStats implements Cloneable, Serializable {
+
+    public int nAcquiresNoWaiters = 0;
+
+    /**
+     * Number of times acquire() was called when the latch was already owned by
+     * the caller.
+     */
+    public int nAcquiresSelfOwned = 0;
+
+    /**
+     * Number of times acquire() was called with allowNesting=true when the
+     * latch was already owned by the caller for shared access.
+     */
+    public int nAcquiresUpgrade = 0;
+
+    /**
+     * Number of times acquire() was called when the latch was already owned by
+     * some other thread.
+     */
+    public int nAcquiresWithContention = 0;
+
+    /**
+     * Number of times acquireNoWait() was called when the latch was
+     * successfully acquired.
+     */
+    public int nAcquireNoWaitSuccessful = 0;
+
+    /**
+     * Number of unsuccessful acquireNoWait() calls.
+     */
+    public int nAcquireNoWaitUnsuccessful = 0;
+
+    /**
+     * Number of times acquireShared() was called when the latch was
+     * successfully acquired.
+     */
+    public int nAcquireSharedSuccessful = 0;
+
+    /**
+     * Number of calls to release().
+     */
+    public int nReleases = 0;
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("nAcquiresNoWaiters=").
+	    append(nAcquiresNoWaiters).append('\n');
+        sb.append("nAcquiresSelfOwned=").
+	    append(nAcquiresSelfOwned).append('\n');
+        sb.append("nAcquiresUpgrade=").
+	    append(nAcquiresUpgrade).append('\n');
+        sb.append("nAcquiresWithContention=").
+	    append(nAcquiresWithContention).append('\n');
+        sb.append("nAcquiresNoWaitSuccessful=").
+	    append(nAcquireNoWaitSuccessful).append('\n');
+        sb.append("nAcquiresNoWaitUnSuccessful=").
+	    append(nAcquireNoWaitUnsuccessful).append('\n');
+        sb.append("nAcquiresSharedSuccessful=").
+	    append(nAcquireSharedSuccessful).append('\n');
+        return sb.toString();
+    }
+
+    @Override
+    public Object clone()
+        throws CloneNotSupportedException {
+
+        return super.clone();
+    }
+}
diff --git a/src/com/sleepycat/je/latch/LatchSupport.java b/src/com/sleepycat/je/latch/LatchSupport.java
new file mode 100644
index 0000000000000000000000000000000000000000..ade824480d657fe474fabc6a55816860727b3c5a
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchSupport.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchSupport.java,v 1.14.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+
+/**
+ * Class used to hold the debugging latchTable.
+ */
+public class LatchSupport {
+
+    /* Used for debugging */
+    static LatchTable latchTable = new LatchTable();
+
+    /**
+     * Only call under the assert system. This records and counts held latches.
+     */
+    public static int countLatchesHeld() {
+
+        return latchTable.countLatchesHeld();
+    }
+
+    public static void dumpLatchesHeld() {
+
+        System.out.println(latchesHeldToString());
+    }
+
+    public static String latchesHeldToString() {
+
+        return latchTable.latchesHeldToString();
+    }
+
+    public static void clearNotes() {
+
+        latchTable.clearNotes();
+    }
+}
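
Because the table is populated only by noteLatch() calls made under assert, consistency checks against it should themselves run under assert. A sketch of a leak check at an operation boundary; the whole statement disappears when assertions are disabled:

```java
assert LatchSupport.countLatchesHeld() == 0
    : "latches still held:\n" + LatchSupport.latchesHeldToString();
```
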
diff --git a/src/com/sleepycat/je/latch/LatchTable.java b/src/com/sleepycat/je/latch/LatchTable.java
new file mode 100644
index 0000000000000000000000000000000000000000..8b37bcd4bfec0168d76c2d732f401937550651e6
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchTable.java
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchTable.java,v 1.17.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
+
+/**
+ * Table of latches by thread for debugging.
+ */
+class LatchTable {
+
+    private Map<Thread,Set<Object>> latchesByThread;
+
+    LatchTable() {
+
+        latchesByThread = Collections.synchronizedMap
+            (new WeakHashMap<Thread, Set<Object>>());
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     */
+    boolean noteLatch(Object latch)
+        throws LatchException {
+
+        Thread cur = Thread.currentThread();
+
+        Set<Object> threadLatches = latchesByThread.get(cur);
+        if (threadLatches == null) {
+            threadLatches = new HashSet<Object>();
+            latchesByThread.put(cur, threadLatches);
+        }
+        threadLatches.add(latch);
+        return true;
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     * @return true if unnoted successfully.
+     */
+    boolean unNoteLatch(Object latch, String name) {
+
+        Thread cur = Thread.currentThread();
+
+        Set<Object> threadLatches = latchesByThread.get(cur);
+
+        if (threadLatches == null) {
+            return false;
+        } else {
+            return threadLatches.remove(latch);
+        }
+    }
+
+    /**
+     * Only call under the assert system. This counts held latches.
+     */
+    int countLatchesHeld() {
+
+        Thread cur = Thread.currentThread();
+        Set<Object> threadLatches = latchesByThread.get(cur);
+        if (threadLatches != null) {
+            return threadLatches.size();
+        } else {
+            return 0;
+        }
+    }
+
+    String latchesHeldToString() {
+
+        Thread cur = Thread.currentThread();
+        Set<Object> threadLatches = latchesByThread.get(cur);
+        StringBuilder sb = new StringBuilder();
+        if (threadLatches != null) {
+            Iterator<Object> i = threadLatches.iterator();
+            while (i.hasNext()) {
+                sb.append(i.next()).append('\n');
+            }
+        }
+        return sb.toString();
+    }
+
+    void clearNotes() {
+        latchesByThread.clear();
+    }
+}
diff --git a/src/com/sleepycat/je/latch/SharedLatch.java b/src/com/sleepycat/je/latch/SharedLatch.java
new file mode 100644
index 0000000000000000000000000000000000000000..41c484308767c0023b1b5ab70549e43032286c59
--- /dev/null
+++ b/src/com/sleepycat/je/latch/SharedLatch.java
@@ -0,0 +1,277 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SharedLatch.java,v 1.24.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Simple thread-based non-transactional reader-writer/shared-exclusive latch.
+ *
+ * Latches provide simple exclusive or shared transient locks on objects.
+ * Latches are expected to be held for short, defined periods of time.  No
+ * deadlock detection is provided so it is the caller's responsibility to
+ * sequence latch acquisition in an ordered fashion to avoid deadlocks.
+ */
+@SuppressWarnings("serial")
+public class SharedLatch
+    extends ReentrantReadWriteLock {
+
+    private String name;
+    private boolean noteLatch;
+    private List<Thread> readers;
+
+    /**
+     * If true, this shared latch is only ever latched exclusively.  Used for
+     * BINs.
+     */
+    private boolean exclusiveOnly;
+
+    public SharedLatch(String name) {
+	super(EnvironmentImpl.getFairLatches());
+	assert (readers = Collections.synchronizedList
+                (new ArrayList<Thread>())) != null;
+	exclusiveOnly = false;
+	this.name = name;
+    }
+
+    /**
+     * Set the latch name, used for latches in objects instantiated from the
+     * log.
+     */
+    public void setName(String name) {
+	this.name = name;
+    }
+
+    /**
+     * Indicate whether this latch should be tracked in the debugging
+     * LatchSupport.latchTable.
+     * Always return true so this can be called under an assert.
+     */
+    public boolean setNoteLatch(boolean noteLatch) {
+	this.noteLatch = noteLatch;
+	return true;
+    }
+
+    /**
+     * Indicate whether this latch can only be set exclusively (not shared).
+     * Used for BIN latches that are Shared, but should only be latched
+     * exclusively.
+     */
+    public void setExclusiveOnly(boolean exclusiveOnly) {
+	this.exclusiveOnly = exclusiveOnly;
+    }
+
+    /**
+     * Acquire a latch for exclusive/write access.  If the thread already holds
+     * the latch for shared access, it cannot be upgraded and LatchException
+     * will be thrown.
+     *
+     * Wait for the latch if some other thread is holding it.  If there are
+     * threads waiting for access, they will be granted the latch on a FIFO
+     * basis if fair latches are enabled.  When the method returns, the latch
+     * is held for exclusive access.
+     *
+     * @throws LatchException if the latch is already held by the current
+     * thread for shared access.
+     */
+    public void acquireExclusive()
+	throws DatabaseException {
+
+        try {
+	    if (isWriteLockedByCurrentThread()) {
+		throw new LatchException(name + " already held");
+	    }
+
+	    writeLock().lock();
+
+            assert (noteLatch ? noteLatch() : true);// intentional side effect;
+	} finally {
+	    assert EnvironmentImpl.maybeForceYield();
+	}
+    }
+
+    /**
+     * Probe a latch for exclusive access, but don't block if it's not
+     * available.
+     *
+     * @return true if the latch was acquired, false if it is not available.
+     *
+     * @throws LatchException if the latch is already held by the calling
+     * thread.
+     */
+    public boolean acquireExclusiveNoWait()
+	throws DatabaseException {
+
+        try {
+	    if (isWriteLockedByCurrentThread()) {
+		throw new LatchException(name + " already held");
+	    }
+
+	    boolean ret = writeLock().tryLock();
+
+	    /* Intentional side effect. */
+            assert ((noteLatch && ret) ? noteLatch() : true);
+	    return ret;
+	} finally {
+	    assert EnvironmentImpl.maybeForceYield();
+	}
+    }
+
+    /**
+     * Acquire a latch for shared/read access.  Nesting is allowed, that is,
+     * the latch may be acquired more than once by the same thread.
+     *
+     * @throws RunRecoveryException if an InterruptedException exception
+     * occurs.
+     */
+    public void acquireShared()
+        throws DatabaseException {
+
+	if (exclusiveOnly) {
+	    acquireExclusive();
+	    return;
+	}
+
+        try {
+	    boolean assertionsEnabled = false;
+	    assert assertionsEnabled = true;
+	    if (assertionsEnabled) {
+		if (readers.add(Thread.currentThread())) {
+		    readLock().lock();
+		} else {
+		    /* Already latched, do nothing. */
+		}
+	    } else {
+		readLock().lock();
+	    }
+
+            assert (noteLatch ?  noteLatch() : true);// intentional side effect
+	} finally {
+	    assert EnvironmentImpl.maybeForceYield();
+	}
+    }
+
+    /**
+     * Release an exclusive or shared latch.  If there are other thread(s)
+     * waiting for the latch, they are woken up and granted the latch.
+     */
+    public void release()
+	throws LatchNotHeldException {
+
+	try {
+	    if (isWriteLockedByCurrentThread()) {
+		writeLock().unlock();
+                /* Intentional side effect. */
+                assert (noteLatch ? unNoteLatch() : true);
+		return;
+	    }
+
+	    if (exclusiveOnly) {
+		return;
+	    }
+
+	    boolean assertionsEnabled = false;
+	    assert assertionsEnabled = true;
+	    if (assertionsEnabled) {
+		if (readers.remove(Thread.currentThread())) {
+		    readLock().unlock();
+		} else {
+		    throw new LatchNotHeldException(name + " not held");
+		}		
+	    } else {
+
+		/*
+		 * There's no way to tell if a readlock is held by the current
+		 * thread so just try unlocking it.
+		 */
+		readLock().unlock();
+	    }
+	    /* Intentional side effect. */
+	    assert (noteLatch ? unNoteLatch() : true);
+	} catch (IllegalMonitorStateException IMSE) {
+	    IMSE.printStackTrace();
+	    return;
+	}
+    }
+
+    /**
+     * Release the latch. If there are other thread(s) waiting for the latch,
+     * one is woken up and granted the latch.  If the latch was not owned by
+     * the caller, just return.
+     */
+    public void releaseIfOwner()
+	throws LatchNotHeldException {
+
+	if (isWriteLockedByCurrentThread()) {
+	    writeLock().unlock();
+	    assert (noteLatch ? unNoteLatch() : true);
+	    return;
+	}
+
+	if (exclusiveOnly) {
+	    return;
+	}
+
+	assert (getReadLockCount() > 0);
+	boolean assertionsEnabled = false;
+	assert assertionsEnabled = true;
+	if (assertionsEnabled) {
+	    if (readers.contains(Thread.currentThread())) {
+		readLock().unlock();
+		readers.remove(Thread.currentThread());
+		assert (noteLatch ? unNoteLatch() : true);
+	    }
+	} else {
+
+	    /*
+	     * There's no way to tell if a readlock is held by the current
+	     * thread so just try unlocking it.
+	     */
+	    readLock().unlock();
+	}
+    }
+
+    /**
+     * Return true if this thread is an owner, reader, or writer.
+     */
+    public boolean isOwner() {
+	boolean assertionsEnabled = false;
+	assert assertionsEnabled = true;
+	if (assertionsEnabled && !exclusiveOnly) {
+	    return readers.contains(Thread.currentThread()) ||
+		isWriteLockedByCurrentThread();
+	} else {
+	    return isWriteLockedByCurrentThread();
+	}
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     */
+    private boolean noteLatch()
+	throws LatchException {
+
+        return LatchSupport.latchTable.noteLatch(this);
+    }
+
+    /**
+     * Only call under the assert system. This records latching by thread.
+     */
+    private boolean unNoteLatch() {
+
+	return LatchSupport.latchTable.unNoteLatch(this, name);
+    }
+}
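
A usage sketch of the shared/exclusive API above; the latch name is a placeholder. Shared acquisition may nest in one thread, while exclusive acquisition throws if the thread already holds the write lock.

```java
import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.latch.SharedLatch;

class SharedLatchUsageSketch {
    private final SharedLatch latch = new SharedLatch("exampleSharedLatch");

    void read() throws DatabaseException {
        latch.acquireShared();      /* may nest in the same thread */
        try {
            /* ... read-only work ... */
        } finally {
            latch.release();
        }
    }

    void write() throws DatabaseException {
        latch.acquireExclusive();   /* throws if already write-locked */
        try {
            /* ... mutating work ... */
        } finally {
            latch.release();
        }
    }
}
```
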
diff --git a/src/com/sleepycat/je/latch/package.html b/src/com/sleepycat/je/latch/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..87a6b46f196ac041a777e0a81c03bef72885171b
--- /dev/null
+++ b/src/com/sleepycat/je/latch/package.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+
+ $Id: package.html,v 1.10.2.2 2010/01/04 15:30:29 cwl Exp $
+-->
+</head>
+<body bgcolor="white">
+
+Provides classes and interfaces for latches in JE.
+
+
+<h2>Package Specification</h2>
+
+(None)
+
+<!-- Put @see and @since tags down here. -->
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/log/CheckpointFileReader.java b/src/com/sleepycat/je/log/CheckpointFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..dc0f8b5e4e999b457a4ca0dee10c0cfef519a509
--- /dev/null
+++ b/src/com/sleepycat/je/log/CheckpointFileReader.java
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CheckpointFileReader.java,v 1.32.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * CheckpointFileReader searches for root and checkpoint entries.
+ */
+public class CheckpointFileReader extends FileReader {
+    /* Status about the last entry. */
+    private boolean isRoot;
+    private boolean isCheckpointEnd;
+    private boolean isCheckpointStart;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public CheckpointFileReader(EnvironmentImpl env,
+                                int readBufferSize,
+                                boolean forward,
+                                long startLsn,
+                                long finishLsn,
+                                long endOfFileLsn)
+        throws IOException, DatabaseException {
+
+        super(env, readBufferSize, forward, startLsn,
+	      null, endOfFileLsn, finishLsn);
+    }
+
+    /**
+     * @return true if this is a targeted entry.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+        byte logEntryTypeNumber = currentEntryHeader.getType();
+        boolean isTarget = false;
+        isRoot = false;
+        isCheckpointEnd = false;
+        isCheckpointStart = false;
+        if (LogEntryType.LOG_CKPT_END.equalsType(logEntryTypeNumber)) {
+            isTarget = true;
+            isCheckpointEnd = true;
+        } else if (LogEntryType.LOG_CKPT_START.equalsType
+            (logEntryTypeNumber)) {
+            isTarget = true;
+            isCheckpointStart = true;
+        } else if (LogEntryType.LOG_ROOT.equalsType(logEntryTypeNumber)) {
+            isTarget = true;
+            isRoot = true;
+        }
+        return isTarget;
+    }
+
+    /**
+     * This reader only needs the LSN of each target entry, so the
+     * entry itself is not instantiated.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        /* Don't need to read the entry, since we just use the LSN. */
+        return true;
+    }
+
+    /**
+     * @return true if last entry was a root entry.
+     */
+    public boolean isRoot() {
+        return isRoot;
+    }
+
+    /**
+     * @return true if last entry was a checkpoint end entry.
+     */
+    public boolean isCheckpointEnd() {
+        return isCheckpointEnd;
+    }
+
+    /**
+     * @return true if last entry was a checkpoint start entry.
+     */
+    public boolean isCheckpointStart() {
+        return isCheckpointStart;
+    }
+}
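
A hedged sketch of how recovery might use this reader to locate the last checkpoint end. It assumes readNextEntry() and getLastLsn() are inherited from FileReader (not shown in this patch), and that envImpl and the LSN bounds are supplied by the caller.

```java
/* Scan backwards from the end of the log for a checkpoint end. */
CheckpointFileReader reader = new CheckpointFileReader(
    envImpl,               /* assumed in scope */
    8192,                  /* read buffer size */
    false,                 /* backwards */
    endOfFileLsn,          /* startLsn: begin at the end of the log */
    DbLsn.NULL_LSN,        /* finishLsn: no lower bound */
    endOfFileLsn);
while (reader.readNextEntry()) {        /* assumed FileReader method */
    if (reader.isCheckpointEnd()) {
        long ckptEndLsn = reader.getLastLsn(); /* recovery resume point */
        break;
    }
}
```
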
diff --git a/src/com/sleepycat/je/log/ChecksumValidator.java b/src/com/sleepycat/je/log/ChecksumValidator.java
new file mode 100644
index 0000000000000000000000000000000000000000..29e164cecbdf7893847bddedcb47d0173e20ad50
--- /dev/null
+++ b/src/com/sleepycat/je/log/ChecksumValidator.java
@@ -0,0 +1,103 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ChecksumValidator.java,v 1.36.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.Adler32;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Checksum validator is used to check checksums on log entries.
+ */
+class ChecksumValidator {
+    private static final boolean DEBUG = false;
+
+    private Checksum cksum;
+
+    ChecksumValidator() {
+        cksum = Adler32.makeChecksum();
+    }
+
+    void reset() {
+        cksum.reset();
+    }
+
+    /**
+     * Add this byte buffer to the checksum. Assume the byte buffer is already
+     * positioned at the data.
+     * @param buf target buffer
+     * @param length of data
+     */
+    void update(EnvironmentImpl env,
+		ByteBuffer buf,
+		int length,
+		boolean anticipateChecksumErrors)
+        throws DbChecksumException {
+
+        if (buf == null) {
+            throw new DbChecksumException
+		((anticipateChecksumErrors ? null : env),
+		 "null buffer given to checksum validation, probably the " +
+		 "result of 0's in the log file. " + anticipateChecksumErrors);
+        }
+
+        int bufStart = buf.position();
+
+        if (DEBUG) {
+	    System.out.println("bufStart = " + bufStart +
+			       " length = " + length);
+        }
+
+        if (buf.hasArray()) {
+            cksum.update(buf.array(), bufStart + buf.arrayOffset(), length);
+        } else {
+            for (int i = bufStart; i < (length + bufStart); i++) {
+                cksum.update(buf.get(i));
+            }
+        }
+    }
+
+    void validate(EnvironmentImpl env,
+                  long expectedChecksum,
+                  long lsn)
+        throws DbChecksumException {
+
+        if (expectedChecksum != cksum.getValue()) {
+            throw new DbChecksumException
+		(env,
+		 "Location " + DbLsn.getNoFormatString(lsn) +
+		 " expected " + expectedChecksum + " got " + cksum.getValue());
+        }
+    }
+
+    void validate(EnvironmentImpl env,
+                  long expectedChecksum,
+                  long fileNum,
+                  long fileOffset,
+		  boolean anticipateChecksumErrors)
+        throws DbChecksumException {
+
+        if (expectedChecksum != cksum.getValue()) {
+            long problemLsn = DbLsn.makeLsn(fileNum, fileOffset);
+
+	    /*
+	     * Pass null for env so that RunRecoveryException() does not
+	     * invalidate the environment.
+	     */
+            throw new DbChecksumException
+		((anticipateChecksumErrors ? null : env),
+		 "Location " + DbLsn.getNoFormatString(problemLsn) +
+		 " expected " + expectedChecksum + " got " +
+		 cksum.getValue());
+        }
+    }
+}
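
A within-package sketch of the intended call sequence; envImpl, entryBuffer, entryLength, expectedChecksum, and lsn are assumed to be supplied by the caller (normally the expected value comes from the entry header):

```java
ChecksumValidator validator = new ChecksumValidator();
validator.reset();                          /* start a fresh checksum */
validator.update(envImpl, entryBuffer, entryLength,
                 false /* anticipateChecksumErrors */);
validator.validate(envImpl, expectedChecksum, lsn); /* throws on mismatch */
```
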
diff --git a/src/com/sleepycat/je/log/CleanerFileReader.java b/src/com/sleepycat/je/log/CleanerFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..e5a629b9bc82b56580bbef8809e55aa6909f109e
--- /dev/null
+++ b/src/com/sleepycat/je/log/CleanerFileReader.java
@@ -0,0 +1,229 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CleanerFileReader.java,v 1.43.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * CleanerFileReader scans log files for INs and LNs.
+ */
+public class CleanerFileReader extends FileReader {
+    private static final byte IS_IN = 0;
+    private static final byte IS_LN = 1;
+    private static final byte IS_ROOT = 2;
+    private static final byte IS_FILEHEADER = 3;
+
+    private Map<LogEntryType, EntryInfo> targetEntryMap;
+    private LogEntry targetLogEntry;
+    private byte targetCategory;
+
+    /**
+     * Create this reader to start at a given LSN.
+     * @param env The relevant EnvironmentImpl.
+     * @param readBufferSize buffer size in bytes for reading in log.
+     * @param startLsn where to start in the log, or null for the beginning.
+     * @param fileNum single file number.
+     */
+    public CleanerFileReader(EnvironmentImpl env,
+                             int readBufferSize,
+                             long startLsn,
+                             Long fileNum)
+        throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              true,                     // forward
+              startLsn,
+              fileNum,                  // single file number
+              DbLsn.NULL_LSN,           // endOfFileLsn
+              DbLsn.NULL_LSN);          // finishLsn
+
+        targetEntryMap = new HashMap<LogEntryType, EntryInfo>();
+
+        addTargetType(IS_LN, LogEntryType.LOG_LN_TRANSACTIONAL);
+        addTargetType(IS_LN, LogEntryType.LOG_LN);
+        addTargetType(IS_LN, LogEntryType.LOG_NAMELN_TRANSACTIONAL);
+        addTargetType(IS_LN, LogEntryType.LOG_NAMELN);
+        addTargetType(IS_LN, LogEntryType.LOG_MAPLN_TRANSACTIONAL);
+        addTargetType(IS_LN, LogEntryType.LOG_MAPLN);
+        addTargetType(IS_LN, LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        addTargetType(IS_LN, LogEntryType.LOG_DEL_DUPLN);
+        addTargetType(IS_LN, LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL);
+        addTargetType(IS_LN, LogEntryType.LOG_DUPCOUNTLN);
+        addTargetType(IS_LN, LogEntryType.LOG_FILESUMMARYLN);
+        addTargetType(IS_IN, LogEntryType.LOG_IN);
+        addTargetType(IS_IN, LogEntryType.LOG_BIN);
+        addTargetType(IS_IN, LogEntryType.LOG_DIN);
+        addTargetType(IS_IN, LogEntryType.LOG_DBIN);
+        addTargetType(IS_ROOT, LogEntryType.LOG_ROOT);
+        addTargetType(IS_FILEHEADER, LogEntryType.LOG_FILE_HEADER);
+    }
+
+    private void addTargetType(byte category, LogEntryType entryType)
+        throws DatabaseException {
+
+        targetEntryMap.put(entryType,
+                           new EntryInfo(entryType.getNewLogEntry(),
+                                         category));
+    }
+
+    /**
+     * Helper for determining the starting position and opening
+     * up a file at the desired location.
+     */
+    @Override
+    protected void initStartingPosition(long endOfFileLsn,
+                                        Long fileNum)
+        throws IOException, DatabaseException {
+
+        eof = false;
+
+        /*
+         * This single-file reader ignores the starting LSN and always
+         * begins at the start of the given file.
+         */
+        readBufferFileNum = fileNum.longValue();
+        readBufferFileEnd = 0;
+
+        /*
+         * After we read the first entry, the currentEntry will
+         * point here.
+         */
+        nextEntryOffset = readBufferFileEnd;
+    }
+
+    /**
+     * @return true if this is a type we're interested in.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+
+        LogEntryType fromLogType =
+            new LogEntryType(currentEntryHeader.getType());
+
+        /* Is it a target entry? */
+        EntryInfo info = targetEntryMap.get(fromLogType);
+        if (info == null) {
+            return false;
+        } else {
+            targetCategory = info.targetCategory;
+            targetLogEntry = info.targetLogEntry;
+            return true;
+        }
+    }
+
+    /**
+     * This reader instantiates an LN and key for every LN entry.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        targetLogEntry.readEntry
+            (currentEntryHeader, entryBuffer, true); // readFullItem
+        return true;
+    }
+
+    /**
+     * @return true if the last entry was an IN.
+     */
+    public boolean isIN() {
+        return (targetCategory == IS_IN);
+    }
+
+    /**
+     * @return true if the last entry was an LN.
+     */
+    public boolean isLN() {
+        return (targetCategory == IS_LN);
+    }
+
+    /**
+     * @return true if the last entry was a root
+     */
+    public boolean isRoot() {
+        return (targetCategory == IS_ROOT);
+    }
+
+    public boolean isFileHeader() {
+        return (targetCategory == IS_FILEHEADER);
+    }
+
+    /**
+     * Get the last LN seen by the reader.
+     */
+    public LN getLN() {
+        return ((LNLogEntry) targetLogEntry).getLN();
+    }
+
+    /**
+     * Get the last entry seen by the reader as an IN.
+     */
+    public IN getIN()
+        throws DatabaseException {
+
+        return ((INLogEntry) targetLogEntry).getIN(envImpl);
+    }
+
+    public FileHeader getFileHeader()
+        throws DatabaseException {
+
+        return (FileHeader) (targetLogEntry.getMainItem());
+    }
+
+    /**
+     * Get the last databaseId seen by the reader.
+     */
+    public DatabaseId getDatabaseId() {
+        if (targetCategory == IS_LN) {
+            return ((LNLogEntry) targetLogEntry).getDbId();
+        } else if (targetCategory == IS_IN) {
+            return ((INLogEntry) targetLogEntry).getDbId();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Get the last key seen by the reader.
+     */
+    public byte[] getKey() {
+        return ((LNLogEntry) targetLogEntry).getKey();
+    }
+
+    /**
+     * Get the last duplicate tree key seen by the reader.
+     */
+    public byte[] getDupTreeKey() {
+        return ((LNLogEntry) targetLogEntry).getDupKey();
+    }
+
+    private static class EntryInfo {
+        public LogEntry targetLogEntry;
+        public byte     targetCategory;
+
+        EntryInfo(LogEntry targetLogEntry, byte targetCategory) {
+            this.targetLogEntry = targetLogEntry;
+            this.targetCategory = targetCategory;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/DbChecksumException.java b/src/com/sleepycat/je/log/DbChecksumException.java
new file mode 100644
index 0000000000000000000000000000000000000000..b34c678a3f7ac65b5682d38cddba1a35e53935f3
--- /dev/null
+++ b/src/com/sleepycat/je/log/DbChecksumException.java
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbChecksumException.java,v 1.24.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Thrown when an invalid serialized item is seen, for example on a
+ * checksum mismatch.
+ */
+public class DbChecksumException extends RunRecoveryException {
+
+    private String extraInfo;
+
+    public DbChecksumException(EnvironmentImpl env, String message) {
+        super(env, message);
+    }
+
+    public DbChecksumException(EnvironmentImpl env,
+                               String message,
+                               Throwable t) {
+        super(env, message, t);
+    }
+
+    /**
+     * Support the addition of extra error information. Use this approach
+     * rather than wrapping exceptions because RunRecoveryException hierarchy
+     * does some intricate things with setting the environment as invalid.
+     */
+    public void addErrorMessage(String newExtraInfo) {
+
+        if (extraInfo == null) {
+            extraInfo = newExtraInfo;
+        } else {
+            extraInfo = extraInfo + newExtraInfo;
+        }
+    }
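+
+    /*
+     * Illustrative sketch only, not part of the original class: how a
+     * caller might use addErrorMessage to annotate a checksum failure
+     * with positional context before rethrowing, rather than wrapping it.
+     * The file number and offset parameters are hypothetical.
+     */
+    private static void rethrowWithContext(DbChecksumException e,
+                                           long fileNum,
+                                           long offset)
+        throws DbChecksumException {
+
+        e.addErrorMessage(" fileNum=0x" + Long.toHexString(fileNum) +
+                          " offset=0x" + Long.toHexString(offset));
+        throw e;
+    }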
+
+    @Override
+    public String toString() {
+        if (extraInfo == null) {
+            return super.toString();
+        } else {
+            return super.toString() + extraInfo;
+        }
+    }
+}
+
diff --git a/src/com/sleepycat/je/log/DbOpReplicationContext.java b/src/com/sleepycat/je/log/DbOpReplicationContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..36ef9be2aef4fe6564e785eb591f21d69f34e970
--- /dev/null
+++ b/src/com/sleepycat/je/log/DbOpReplicationContext.java
@@ -0,0 +1,94 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbOpReplicationContext.java,v 1.5.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.ReplicatedDatabaseConfig;
+import com.sleepycat.je.log.entry.DbOperationType;
+import com.sleepycat.je.log.entry.NameLNLogEntry;
+
+
+/**
+ * This subclass of ReplicationContext adds information specific to database
+ * operations to the replication context passed from operation-aware code down
+ * to the logging layer. It's a way to transport enough information through the
+ * NameLNLogEntry to logically replicate database operations.
+ */
+public class DbOpReplicationContext extends ReplicationContext {
+
+    /*
+     * Convenience static instance used when you know this database operation
+     * will not be replicated, either because it's executing on a
+     * non-replicated node or it's a local operation for a local database.
+     */
+    public static DbOpReplicationContext NO_REPLICATE =
+        new DbOpReplicationContext(false, // inReplicationStream
+                                   DbOperationType.NONE);
+
+    private DbOperationType opType;
+    private ReplicatedDatabaseConfig createConfig;
+    private DatabaseId truncateOldDbId;
+
+    /**
+     * Create a replication context for logging a database operation NameLN on
+     * the master.
+     */
+    public DbOpReplicationContext(boolean inReplicationStream,
+                                  DbOperationType opType) {
+        super(inReplicationStream);
+        this.opType = opType;
+    }
+
+    /**
+     * Create a replication context for executing a database operation on
+     * the client.
+     */
+    public DbOpReplicationContext(LogEntryHeader header,
+                                  NameLNLogEntry nameLNEntry) {
+
+        /*
+         * Initialize the context with the VLSN that was shipped with the
+         * replicated log entry.
+         */
+        super(header.getVLSN());
+        this.opType = nameLNEntry.getOperationType();
+        if (opType == DbOperationType.CREATE) {
+            createConfig = nameLNEntry.getReplicatedCreateConfig();
+        }
+    }
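+
+    /*
+     * Hypothetical sketch, not part of the original class: how a master
+     * might assemble a context for logging a replicated database create.
+     * The config argument stands in for whatever ReplicatedDatabaseConfig
+     * the operation carries.
+     */
+    static DbOpReplicationContext sketchCreateContext
+        (ReplicatedDatabaseConfig config) {
+
+        DbOpReplicationContext context =
+            new DbOpReplicationContext(true, // inReplicationStream
+                                       DbOperationType.CREATE);
+        context.setCreateConfig(config);
+        return context;
+    }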
+
+    @Override
+    public DbOperationType getDbOperationType() {
+        return opType;
+    }
+
+    public void setCreateConfig(ReplicatedDatabaseConfig createConfig) {
+        this.createConfig = createConfig;
+    }
+
+    public ReplicatedDatabaseConfig getCreateConfig() {
+        return createConfig;
+    }
+
+    public void setTruncateOldDbId(DatabaseId truncateOldDbId) {
+        this.truncateOldDbId = truncateOldDbId;
+    }
+
+    public DatabaseId getTruncateOldDbId() {
+        return truncateOldDbId;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(super.toString());
+        sb.append("opType=").append(opType);
+        sb.append("truncDbId=").append(truncateOldDbId);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/log/DumpFileReader.java b/src/com/sleepycat/je/log/DumpFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..0494630e24b707ca5181a004532cf74fb398b14b
--- /dev/null
+++ b/src/com/sleepycat/je/log/DumpFileReader.java
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DumpFileReader.java,v 1.50.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * The DumpFileReader prints log entries to stdout, optionally filtered by
+ * entry type and transaction id.
+ */
+public abstract class DumpFileReader extends FileReader {
+
+    /* A set of the entry type numbers that this DumpFileReader should dump. */
+    private Set<Byte> targetEntryTypes;
+
+    /* A set of the txn ids that this DumpFileReader should dump. */
+    protected Set<Long> targetTxnIds;
+
+    /* If true, dump the long version of the entry. */
+    protected boolean verbose;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public DumpFileReader(EnvironmentImpl env,
+                          int readBufferSize,
+                          long startLsn,
+                          long finishLsn,
+                          String entryTypes,
+                          String txnIds,
+                          boolean verbose)
+        throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              true, // read forward
+              startLsn,
+              null, // single file number
+              DbLsn.NULL_LSN, // end of file lsn
+              finishLsn); // finish lsn
+
+        /* If entry types is not null, record the set of target entry types. */
+        targetEntryTypes = new HashSet<Byte>();
+        if (entryTypes != null) {
+            StringTokenizer tokenizer = new StringTokenizer(entryTypes, ",");
+            while (tokenizer.hasMoreTokens()) {
+                String typeString = tokenizer.nextToken();
+                targetEntryTypes.add(Byte.valueOf(typeString.trim()));
+            }
+        }
+        /* If txn ids is not null, record the set of target txn ids. */
+        targetTxnIds = new HashSet<Long>();
+        if (txnIds != null) {
+            StringTokenizer tokenizer = new StringTokenizer(txnIds, ",");
+            while (tokenizer.hasMoreTokens()) {
+                String txnIdString = tokenizer.nextToken();
+                targetTxnIds.add(Long.valueOf(txnIdString.trim()));
+            }
+        }
+        this.verbose = verbose;
+    }
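+
+    /*
+     * Illustrative helper, not in the original class: the comma-separated
+     * parsing that the constructor applies to the entryTypes argument,
+     * shown in isolation. For example, "1,5,9" selects entry type numbers
+     * 1, 5 and 9.
+     */
+    static Set<Byte> sketchParseEntryTypes(String entryTypes) {
+        Set<Byte> result = new HashSet<Byte>();
+        if (entryTypes != null) {
+            StringTokenizer tokenizer = new StringTokenizer(entryTypes, ",");
+            while (tokenizer.hasMoreTokens()) {
+                result.add(Byte.valueOf(tokenizer.nextToken().trim()));
+            }
+        }
+        return result;
+    }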
+
+    /**
+     * @return true if this reader should process this entry, or just
+     * skip over it.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+        if (targetEntryTypes.isEmpty()) {
+            /* We want to dump all entry types. */
+            return true;
+        } else {
+            return targetEntryTypes.contains
+                (Byte.valueOf(currentEntryHeader.getType()));
+        }
+    }
+
+    public void summarize() {
+    }
+}
diff --git a/src/com/sleepycat/je/log/FSyncManager.java b/src/com/sleepycat/je/log/FSyncManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..5ab228d0d6e0e5d8fe45966b904226860998c4a4
--- /dev/null
+++ b/src/com/sleepycat/je/log/FSyncManager.java
@@ -0,0 +1,386 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FSyncManager.java,v 1.22.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.utilint.PropUtil;
+
+/*
+ * The FsyncManager ensures that only one file fsync is issued at a time, for
+ * performance optimization. The goal is to reduce the number of fsyncs issued
+ * by the system by issuing 1 fsync on behalf of a number of threads.
+ *
+ * For example, suppose these writes happen which all need to be fsynced to
+ * disk:
+ *
+ *  thread 1 writes a commit record
+ *  thread 2 writes a checkpoint
+ *  thread 3 writes a commit record
+ *  thread 4 writes a commit record
+ *  thread 5 writes a checkpoint
+ *
+ * Rather than executing 5 fsyncs, which all must happen synchronously, we hope
+ * to issue fewer. How many fewer depends on timing. Note that the writes
+ * themselves are serialized and are guaranteed to run in order.
+ *
+ * For example:
+ *    thread 1 wants to fsync first, no other fsync going on, will issue fsync
+ *    thread 2 waits
+ *    thread 3 waits
+ *    thread 4 waits
+ *     - before thread 5 comes, thread 1 finishes fsyncing and returns to
+ *     the caller. Now another fsync can be issued that will cover threads
+ *     2,3,4. One of those threads (2, 3, 4) issues the fsync, the others
+ *     block.
+ *    thread 5 wants to fsync, but sees one going on, so will wait.
+ *     - the fsync issued for 2,3,4 can't cover thread 5 because we're not sure
+ *      if thread 5's write finished before that fsync call. Thread 5 will have
+ *      to issue its own fsync.
+ *
+ * Target file
+ * -----------
+ * Note that when the buffer pool starts a new file, we fsync the previous file
+ * under the log write latch. Therefore, at any time we only have one target
+ * file to fsync, which is the current write buffer. We do this so that we
+ * don't have to coordinate between files.  For example, suppose log files have
+ * 1000 bytes and a commit record is 10 bytes.  An LSN of value 6/990 is in
+ * file 6 at offset 990.
+ *
+ * thread 1: logWriteLatch.acquire()
+ *         write commit record to LSN 6/980
+ *         logWriteLatch.release()
+ * thread 2: logWriteLatch.acquire()
+ *         write commit record to LSN 6/990
+ *         logWriteLatch.release()
+ * thread 3: logWriteLatch.acquire()
+ *         gets 7/000 as the next LSN to use
+ *         sees that we flipped to a new file, so calls fsync on file 6
+ *         write commit record to LSN 7/000
+ *         logWriteLatch.release()
+ *
+ * Thread 3 will fsync file 6 within the log write latch. That way, at any
+ * time, any non-latched fsyncs should only fsync the latest file.  If we
+ * didn't, there's a chance that thread 3 would fsync file 7 and return to
+ * its caller before threads 1 and 2 got an fsync for file 6. That wouldn't
+ * be correct, because thread 3's commit might depend on file 6.
+ *
+ * Note that the FileManager keeps a file descriptor that corresponds to the
+ * current end of file, and that is what we fsync.
+ */
+class FSyncManager {
+    private EnvironmentImpl envImpl;
+    private long timeout;
+
+    /* Use as the target for a synchronization block. */
+    private Latch fsyncLatch;
+
+    private volatile boolean fsyncInProgress;
+    private FSyncGroup nextFSyncWaiters;
+
+    /* stats */
+    private long nFSyncRequests = 0;
+    private long nFSyncs = 0;
+    private long nTimeouts = 0;
+
+    FSyncManager(EnvironmentImpl envImpl)
+        throws DatabaseException {
+        timeout = PropUtil.microsToMillis(envImpl.getConfigManager().getLong(
+            EnvironmentParams.LOG_FSYNC_TIMEOUT));
+        this.envImpl = envImpl;
+
+        fsyncLatch = new Latch("fsyncLatch");
+        fsyncInProgress = false;
+        nextFSyncWaiters = new FSyncGroup(timeout, envImpl);
+    }
+
+    /**
+     * Request that this file be fsynced to disk. This thread may or may not
+     * actually execute the fsync, but will not return until a fsync has been
+     * issued and executed on behalf of its write. There is a timeout period
+     * specified by EnvironmentParams.LOG_FSYNC_TIMEOUT that ensures that no
+     * thread gets stuck here indefinitely.
+     *
+     * When a thread comes in, it will find one of two things.
+     * 1. There is no fsync going on right now. This thread should go
+     *    ahead and fsync.
+     * 2. There is an active fsync, wait until it's over before
+     *    starting a new fsync.
+     *
+     * When a fsync is going on, all those threads that come along are grouped
+     * together as the nextFsyncWaiters. When the current fsync is finished,
+     * one of those nextFsyncWaiters will be selected as a leader to issue the
+     * next fsync. The other members of the group will merely wait until the
+     * fsync done on their behalf is finished.
+     *
+     * When a thread finishes a fsync, it has to:
+     * 1. wake up all the threads that were waiting for its fsync call.
+     * 2. wake up one member of the next group of waiting threads (the
+     *    nextFsyncWaiters) so that thread can become the new leader
+     *    and issue the next fsync call.
+     *
+     * If a non-leader member of the nextFsyncWaiters times out, it will issue
+     * its own fsync anyway, in case something happened to the leader.
+     */
+    void fsync()
+        throws DatabaseException {
+
+        boolean doFsync = false;
+        boolean isLeader = false;
+        boolean needToWait = false;
+        FSyncGroup inProgressGroup = null;
+        FSyncGroup myGroup = null;
+
+        synchronized (fsyncLatch) {
+            nFSyncRequests++;
+
+            /* Figure out if we're calling fsync or waiting. */
+            if (fsyncInProgress) {
+                needToWait = true;
+                myGroup = nextFSyncWaiters;
+            } else {
+                isLeader = true;
+                doFsync = true;
+                fsyncInProgress = true;
+                inProgressGroup = nextFSyncWaiters;
+                nextFSyncWaiters = new FSyncGroup(timeout, envImpl);
+            }
+        }
+
+        if (needToWait) {
+
+            /*
+             * Note that there's no problem if we miss the notify on this set
+             * of waiters. We can check state in the FSyncGroup before we begin
+             * to wait.
+             *
+             * All members of the group may return from their waitForFsync()
+             * call with the need to do a fsync, because of timeout. Only one
+             * will return as the leader.
+             */
+            int waitStatus = myGroup.waitForFsync();
+
+            if (waitStatus == FSyncGroup.DO_LEADER_FSYNC) {
+                synchronized (fsyncLatch) {
+
+                    /*
+                     * Check if there's a fsync in progress; this might happen
+                     * even if you were designated the leader if a new thread
+                     * came in between the point when the old leader woke you
+                     * up and now. This new thread may have found that there
+                     * was no fsync in progress, and may have started a fsync.
+                     */
+                    if (!fsyncInProgress) {
+                        isLeader = true;
+                        doFsync = true;
+                        fsyncInProgress = true;
+                        inProgressGroup = myGroup;
+                        nextFSyncWaiters = new FSyncGroup(timeout, envImpl);
+                    }
+                }
+            } else if (waitStatus == FSyncGroup.DO_TIMEOUT_FSYNC) {
+                doFsync = true;
+                synchronized (fsyncLatch) {
+                    nTimeouts++;
+                }
+            }
+        }
+
+        if (doFsync) {
+
+            /*
+             * There are 3 ways that this fsync gets called:
+             *
+             * 1. A thread calls sync and there is not a sync call already in
+             * progress.  That thread executes fsync for itself only.  Other
+             * threads requesting sync form a group of waiters.
+             *
+             * 2. A sync finishes and wakes up a group of waiters.  The first
+             * waiter in the group to wake up becomes the leader.  It executes
+             * sync for its group of waiters.  As above, other threads
+             * requesting sync form a new group of waiters.
+             *
+             * 3. If members of a group of waiters have timed out, they'll all
+             * just go and do their own sync for themselves.
+             */
+            executeFSync();
+
+            synchronized (fsyncLatch) {
+                nFSyncs++;
+                if (isLeader) {
+
+                    /*
+                     * Wake up the group that requested the fsync before you
+                     * started. They've piggybacked off your fsync.
+                     */
+                    inProgressGroup.wakeupAll();
+
+                    /*
+                     * Wake up a single waiter, who will become the next
+                     * leader.
+                     */
+                    nextFSyncWaiters.wakeupOne();
+                    fsyncInProgress = false;
+                }
+            }
+        }
+    }
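+
+    /*
+     * Caller's-eye sketch, an illustration rather than original code: a
+     * commit path writes its record under the log write latch elsewhere,
+     * then calls fsync(), which returns once some fsync covering that
+     * write has completed -- possibly one issued by another thread.
+     */
+    void sketchDurableCommit()
+        throws DatabaseException {
+
+        /* The log write itself has already happened, under the latch. */
+        fsync();
+    }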
+
+    /*
+     * Stats.
+     */
+    long getNFSyncRequests() {
+        return nFSyncRequests;
+    }
+
+    long getNFSyncs() {
+        return nFSyncs;
+    }
+
+    long getNTimeouts() {
+        return nTimeouts;
+    }
+
+    void loadStats(StatsConfig config, EnvironmentStats stats)
+        throws DatabaseException {
+
+        stats.setNFSyncs(nFSyncs);
+        stats.setNFSyncRequests(nFSyncRequests);
+        stats.setNFSyncTimeouts(nTimeouts);
+
+        if (config.getClear()) {
+            nFSyncs = 0;
+            nFSyncRequests = 0;
+            nTimeouts = 0;
+        }
+    }
+
+    /**
+     * Put the fsync execution into this method so it can be overridden for
+     * testing purposes.
+     */
+    protected void executeFSync()
+        throws DatabaseException {
+
+        envImpl.getFileManager().syncLogEnd();
+    }
+
+    /*
+     * Embodies a group of threads waiting for a common fsync. Note that
+     * there's no collection here; group membership is merely that the threads
+     * are all waiting on the same monitor.
+     */
+    static class FSyncGroup {
+        static final int DO_TIMEOUT_FSYNC = 0;
+        static final int DO_LEADER_FSYNC = 1;
+        static final int NO_FSYNC_NEEDED = 2;
+
+        private volatile boolean fsyncDone;
+        private long fsyncTimeout;
+        private boolean leaderExists;
+        private EnvironmentImpl envImpl;
+
+        FSyncGroup(long fsyncTimeout, EnvironmentImpl envImpl) {
+            this.fsyncTimeout = fsyncTimeout;
+            fsyncDone = false;
+            leaderExists = false;
+            this.envImpl = envImpl;
+        }
+
+        synchronized boolean getLeader() {
+            if (fsyncDone) {
+                return false;
+            } else {
+                if (leaderExists) {
+                    return false;
+                } else {
+                    leaderExists = true;
+                    return true;
+                }
+            }
+        }
+
+        /**
+         * Wait for either a turn to execute a fsync, or to find out that a
+         * fsync was done on your behalf.
+         *
+         * @return DO_LEADER_FSYNC if this thread was woken to become the
+         * leader and must execute a fsync; DO_TIMEOUT_FSYNC if the wait
+         * timed out and this thread should fsync on its own behalf;
+         * NO_FSYNC_NEEDED if a fsync was done on this thread's behalf.
+         */
+        synchronized int waitForFsync()
+            throws RunRecoveryException {
+
+            /*
+             * Defaults to DO_TIMEOUT_FSYNC: if the fsync was already done
+             * when we got here, the conservative answer is to fsync on our
+             * own behalf.
+             */
+            int status = 0;
+
+            if (!fsyncDone) {
+                long startTime = System.currentTimeMillis();
+                while (true) {
+
+                    try {
+                        wait(fsyncTimeout);
+                    } catch (InterruptedException e) {
+                        throw new RunRecoveryException(envImpl,
+                           "Unexpected interrupt while waiting for fsync", e);
+                    }
+
+                    /*
+                     * This thread was awoken either by a timeout, by a notify,
+                     * or by an interrupt. Is the fsync done?
+                     */
+                    if (fsyncDone) {
+                        /* The fsync we're waiting on is done, leave. */
+                        status = NO_FSYNC_NEEDED;
+                        break;
+                    } else {
+
+                        /*
+                         * The fsync is not done -- were we woken up to become
+                         * the leader?
+                         */
+                        if (!leaderExists) {
+                            leaderExists = true;
+                            status = DO_LEADER_FSYNC;
+                            break;
+                        } else {
+
+                            /*
+                             * We're just a waiter. See if we're timed out or
+                             * have more to wait.
+                             */
+                            long now = System.currentTimeMillis();
+                            if ((now - startTime) > fsyncTimeout) {
+                                /* we timed out. */
+                                status = DO_TIMEOUT_FSYNC;
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+
+            return status;
+        }
+
+        synchronized void wakeupAll() {
+            fsyncDone = true;
+            notifyAll();
+        }
+
+        synchronized void wakeupOne() {
+            /* FindBugs whines here. */
+            notify();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileHandle.java b/src/com/sleepycat/je/log/FileHandle.java
new file mode 100644
index 0000000000000000000000000000000000000000..f92a33abd2d9994ba719ceeba02e73ed7db53767
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileHandle.java
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileHandle.java,v 1.27.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.latch.Latch;
+
+/**
+ * A FileHandle embodies a File and its accompanying latch.
+ */
+class FileHandle {
+    private RandomAccessFile file;
+    private Latch fileLatch;
+    private int logVersion;
+    private long fileNum;
+
+    /**
+     * Creates a new handle but does not initialize it.  The init method must
+     * be called before using the handle to access the file.
+     */
+    FileHandle(long fileNum, String label) {
+        fileLatch = new Latch("file_" + label + "_fileHandle");
+        this.fileNum = fileNum;
+    }
+
+    /**
+     * Initializes the handle after opening the file and reading the header.
+     */
+    void init(RandomAccessFile file, int logVersion) {
+        this.file = file;
+        this.logVersion = logVersion;
+    }
+
+    RandomAccessFile getFile() {
+        return file;
+    }
+
+    long getFileNum() {
+        return fileNum;
+    }
+
+    int getLogVersion() {
+        return logVersion;
+    }
+
+    boolean isOldHeaderVersion() {
+        return logVersion < LogEntryType.LOG_VERSION;
+    }
+
+    void latch()
+        throws DatabaseException {
+
+        fileLatch.acquire();
+    }
+
+    boolean latchNoWait()
+        throws DatabaseException {
+
+        return fileLatch.acquireNoWait();
+    }
+
+    void release()
+        throws DatabaseException {
+
+        fileLatch.release();
+    }
+
+    void close()
+        throws IOException {
+
+        if (file != null) {
+            file.close();
+            file = null;
+        }
+    }
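+
+    /*
+     * Usage sketch, illustrative and not part of the original class: the
+     * intended latch discipline. A handle stays latched while its file
+     * descriptor is in use and is always released in a finally block.
+     * Assumes the handle has been opened via init.
+     */
+    long sketchReadFileLength()
+        throws DatabaseException, IOException {
+
+        latch();
+        try {
+            return getFile().length();
+        } finally {
+            release();
+        }
+    }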
+}
diff --git a/src/com/sleepycat/je/log/FileHandleSource.java b/src/com/sleepycat/je/log/FileHandleSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..756d9c07ab03a27e1027a49daec086117831d878
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileHandleSource.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileHandleSource.java,v 1.17.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * FileHandleSource is a file source built on top of a cached file handle.
+ */
+class FileHandleSource extends FileSource {
+
+    private FileHandle fileHandle;
+
+    FileHandleSource(FileHandle fileHandle,
+                     int readBufferSize,
+                     FileManager fileManager) {
+        super(fileHandle.getFile(), readBufferSize, fileManager,
+              fileHandle.getFileNum());
+        this.fileHandle = fileHandle;
+    }
+
+    /**
+     * @see LogSource#release
+     */
+    @Override
+    public void release()
+        throws DatabaseException {
+
+        fileHandle.release();
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileHeader.java b/src/com/sleepycat/je/log/FileHeader.java
new file mode 100644
index 0000000000000000000000000000000000000000..c7ca69234c8e5bc0ed6820bb588aebb22dc63890
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileHeader.java
@@ -0,0 +1,171 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileHeader.java,v 1.48.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.util.Calendar;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * A FileHeader embodies the header information at the beginning of each log
+ * file.
+ */
+public class FileHeader implements Loggable {
+
+    /*
+     * fileNum is the number of the file, starting at 0. An unsigned int, so
+     * stored in a long in memory, but in 4 bytes on disk.
+     */
+    private long fileNum;
+    private long lastEntryInPrevFileOffset;
+    private Timestamp time;
+    private int logVersion;
+
+    FileHeader(long fileNum, long lastEntryInPrevFileOffset) {
+        this.fileNum = fileNum;
+        this.lastEntryInPrevFileOffset = lastEntryInPrevFileOffset;
+        Calendar now = Calendar.getInstance();
+        time = new Timestamp(now.getTimeInMillis());
+        logVersion = LogEntryType.LOG_VERSION;
+    }
+
+    /**
+     * For logging only.
+     */
+    public FileHeader() {
+    }
+
+    public int getLogVersion() {
+        return logVersion;
+    }
+
+    /**
+     * @return file header log version.
+     *
+     * @throws DatabaseException if the header isn't valid.
+     */
+    int validate(String fileName, long expectedFileNum)
+        throws DatabaseException {
+
+        if (fileNum != expectedFileNum) {
+            throw new LogException
+                ("Wrong filenum in header for file " +
+                 fileName + " expected " +
+                 expectedFileNum + " got " + fileNum);
+        }
+
+        return logVersion;
+    }
+
+    /**
+     * @return the offset of the last entry in the previous file.
+     */
+    long getLastEntryInPrevFileOffset() {
+        return lastEntryInPrevFileOffset;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * A header is always a known size.  Is public for unit testing.
+     */
+    public static int entrySize() {
+        return
+            LogUtils.LONG_BYTES +                // time
+            LogUtils.UNSIGNED_INT_BYTES +        // file number
+            LogUtils.LONG_BYTES +                // lastEntryInPrevFileOffset
+            LogUtils.INT_BYTES;                  // logVersion
+    }
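+
+    /*
+     * For illustration, assuming the usual LogUtils sizes (LONG_BYTES = 8,
+     * UNSIGNED_INT_BYTES = 4, INT_BYTES = 4): the header is always
+     * 8 + 4 + 8 + 4 = 24 bytes.
+     */
+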
+    /**
+     * @see Loggable#getLogSize
+     * @return number of bytes used to store this object
+     */
+    public int getLogSize() {
+        return entrySize();
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     * Serialize this object into the buffer. Update cksum with all
+     * the bytes used by this object
+     * @param logBuffer is the destination buffer
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeLong(logBuffer, time.getTime());
+        LogUtils.writeUnsignedInt(logBuffer, fileNum);
+        LogUtils.writeLong(logBuffer, lastEntryInPrevFileOffset);
+        LogUtils.writeInt(logBuffer, logVersion);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     * Initialize this object from the data in logBuffer.
+     * @param logBuffer the source buffer
+     */
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion)
+        throws LogException {
+
+        /* Timestamp is always unpacked. */
+        time = LogUtils.readTimestamp(logBuffer, true/*unpacked*/);
+        fileNum = LogUtils.readUnsignedInt(logBuffer);
+        lastEntryInPrevFileOffset = LogUtils.readLong(logBuffer);
+        logVersion = LogUtils.readInt(logBuffer);
+        if (logVersion > LogEntryType.LOG_VERSION) {
+            throw new LogException("Expected log version " +
+                                   LogEntryType.LOG_VERSION +
+                                   " or earlier but found " + logVersion +
+                                   " -- this version is not supported.");
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<FileHeader num=\"0x");
+        sb.append(Long.toHexString(fileNum));
+        sb.append("\" lastEntryInPrevFileOffset=\"0x");
+        sb.append(Long.toHexString(lastEntryInPrevFileOffset));
+        sb.append("\" logVersion=\"0x");
+        sb.append(Integer.toHexString(logVersion));
+        sb.append("\" time=\"").append(time);
+        sb.append("\"/>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always return false, this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * Print in xml format
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        dumpLog(sb, true);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileManager.java b/src/com/sleepycat/je/log/FileManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..51aa08ecb698a3b1084eeaba1d0076792869d3f3
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileManager.java
@@ -0,0 +1,2172 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileManager.java,v 1.194.2.9 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.EnvironmentLockedException;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.HexFormatter;
+
+/**
+ * The FileManager presents the abstraction of one contiguous file.  It doles
+ * out LSNs.
+ */
+public class FileManager {
+
+    public enum FileMode {
+        READ_MODE("r", false),
+        READWRITE_MODE("rw", true),
+        READWRITE_ODSYNC_MODE("rwd", true),
+        READWRITE_OSYNC_MODE("rws", true);
+
+        private String fileModeValue;
+        private boolean isWritable;
+
+        private FileMode(String fileModeValue, boolean isWritable) {
+            this.fileModeValue = fileModeValue;
+            this.isWritable = isWritable;
+        }
+
+        public String getModeValue() {
+            return fileModeValue;
+        }
+
+        public boolean isWritable() {
+            return isWritable;
+        }
+    }
+
+    static boolean IO_EXCEPTION_TESTING_ON_WRITE = false;
+    static boolean IO_EXCEPTION_TESTING_ON_READ = false;
+    static boolean THROW_RRE_FOR_UNIT_TESTS = false;
+    private static final String DEBUG_NAME = FileManager.class.getName();
+    private static final boolean DEBUG = false;
+
+    /*
+     * The number of writes that have been performed.
+     *
+     * public so that unit tests can diddle them.
+     */
+    public static long WRITE_COUNT = 0;
+
+    /*
+     * The write count value where we should stop or throw.
+     */
+    public static long STOP_ON_WRITE_COUNT = Long.MAX_VALUE;
+
+    /*
+     * If we're throwing, then throw on write #'s WRITE_COUNT through
+     * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive).
+     */
+    public static long N_BAD_WRITES = Long.MAX_VALUE;
+
+    /*
+     * If true, then throw an IOException on write #'s WRITE_COUNT through
+     * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive).
+     */
+    public static boolean THROW_ON_WRITE = false;
+
+    public static final String JE_SUFFIX = ".jdb";  // regular log files
+    public static final String DEL_SUFFIX = ".del";  // cleaned files
+    public static final String BAD_SUFFIX = ".bad";  // corrupt files
+    private static final String LOCK_FILE = "je.lck";// lock file
+    static final String[] DEL_SUFFIXES = { DEL_SUFFIX };
+    static final String[] JE_SUFFIXES = { JE_SUFFIX };
+    private static final String[] JE_AND_DEL_SUFFIXES =
+    { JE_SUFFIX, DEL_SUFFIX };
+
+    /* May be set to false to speed unit tests. */
+    private boolean syncAtFileEnd = true;
+
+    private EnvironmentImpl envImpl;
+    private long maxFileSize;
+    private File dbEnvHome;
+
+    /* True if .del files should be included in the list of log files. */
+    private boolean includeDeletedFiles = false;
+
+    /* File cache */
+    private FileCache fileCache;
+    private Latch fileCacheLatch;
+
+    /* The channel and lock for the je.lck file. */
+    private RandomAccessFile lockFile;
+    private FileChannel channel;
+    private FileLock envLock;
+    private FileLock exclLock;
+
+    /* True if all files should be opened readonly. */
+    private boolean readOnly;
+
+    /* Handles onto log position */
+    private long currentFileNum;     // number of the current file
+    private long nextAvailableLsn;   // nextLSN is the next one available
+    private long lastUsedLsn;        // last LSN used in the current log file
+    private long prevOffset;         // Offset to use for the previous pointer
+    private boolean forceNewFile;    // Force new file on next write
+
+    /*
+     * Saved versions of above.  Save this in case a write causes an
+     * IOException, we can back the log up to the last known good LSN.
+     */
+    private long savedCurrentFileNum;
+    private long savedNextAvailableLsn; // nextLSN is the next one available
+    private long savedLastUsedLsn;   // last LSN used in the current log file
+    private long savedPrevOffset;    // Offset to use for the previous pointer
+    private boolean savedForceNewFile;
+
+    /* endOfLog is used for writes and fsyncs to the end of the log. */
+    private LogEndFileDescriptor endOfLog;
+
+    /* group commit sync */
+    private FSyncManager syncManager;
+
+    /*
+     * When we bump the LSNs over to a new file, we must remember the last LSN
+     * of the previous file so we can set the prevOffset field of the file
+     * header appropriately. We have to save it in a map because there's a time
+     * lag between when we know what the last LSN is and when we actually do
+     * the file write, because LSN bumping is done before we get a write
+     * buffer.  This map is keyed by file num->last LSN.
+     */
+    private Map<Long, Long> perFileLastUsedLsn;
+
+    /*
+     * Use O_DSYNC to open JE log files.
+     */
+    private final boolean useODSYNC;
+
+    /* public for unit tests. */
+    public boolean VERIFY_CHECKSUMS = false;
+
+    /*
+     * Last file to which any IO was done.
+     */
+    long lastFileNumberTouched = -1;
+
+    /*
+     * Current file offset of lastFile.
+     */
+    long lastFileTouchedOffset = 0;
+
+    /*
+     * For IO stats, this is a measure of what is "close enough" to constitute
+     * a sequential IO vs a random IO. 1MB for now.  Generally a seek within a
+     * few tracks of the current disk track is "fast" and only requires a
+     * single rotational latency.
+     */
+    private static final long ADJACENT_TRACK_SEEK_DELTA = 1 << 20;
+
+    /*
+     * Stats
+     */
+    long nRandomReads = 0;
+    long nRandomWrites = 0;
+    long nSequentialReads = 0;
+    long nSequentialWrites = 0;
+    long nRandomReadBytes = 0;
+    long nRandomWriteBytes = 0;
+    long nSequentialReadBytes = 0;
+    long nSequentialWriteBytes = 0;
+    int nFileOpens = 0;
+
+    /**
+     * Set up the file cache and initialize the file manager to point to the
+     * beginning of the log.
+     *
+     * @param configManager
+     * @param dbEnvHome environment home directory
+     */
+    public FileManager(EnvironmentImpl envImpl,
+                       File dbEnvHome,
+                       boolean readOnly)
+        throws DatabaseException {
+
+        this.envImpl = envImpl;
+        this.dbEnvHome = dbEnvHome;
+        this.readOnly = readOnly;
+
+        /* Read configurations. */
+        DbConfigManager configManager = envImpl.getConfigManager();
+        maxFileSize = configManager.getLong(EnvironmentParams.LOG_FILE_MAX);
+
+        useODSYNC =
+            configManager.getBoolean(EnvironmentParams.LOG_USE_ODSYNC);
+        VERIFY_CHECKSUMS =
+            configManager.getBoolean(EnvironmentParams.LOG_VERIFY_CHECKSUMS);
+
+        if (!envImpl.isMemOnly()) {
+            if (!dbEnvHome.exists()) {
+                throw new LogException("Environment home " + dbEnvHome +
+                                         " doesn't exist");
+            }
+            lockEnvironment(readOnly, false);
+        }
+
+        /* Cache of files. */
+        fileCache = new FileCache(configManager);
+        fileCacheLatch = new Latch(DEBUG_NAME + "_fileCache");
+
+        /* Start out as if no log existed. */
+        currentFileNum = 0L;
+        nextAvailableLsn =
+            DbLsn.makeLsn(currentFileNum, firstLogEntryOffset());
+        lastUsedLsn = DbLsn.NULL_LSN;
+        perFileLastUsedLsn = new HashMap<Long, Long>();
+        prevOffset = 0L;
+        endOfLog = new LogEndFileDescriptor();
+        forceNewFile = false;
+        saveLastPosition();
+
+        String stopOnWriteCountProp =
+            System.getProperty("je.debug.stopOnWriteCount");
+        if (stopOnWriteCountProp != null) {
+            STOP_ON_WRITE_COUNT = Long.parseLong(stopOnWriteCountProp);
+        }
+
+        String stopOnWriteActionProp =
+            System.getProperty("je.debug.stopOnWriteAction");
+        if (stopOnWriteActionProp != null) {
+            if (stopOnWriteActionProp.compareToIgnoreCase("throw") == 0) {
+                THROW_ON_WRITE = true;
+            } else if (stopOnWriteActionProp.
+                       compareToIgnoreCase("stop") == 0) {
+                THROW_ON_WRITE = false;
+            } else {
+                throw new DatabaseException
+                    ("unknown value for je.debugStopOnWriteAction: " +
+                     stopOnWriteActionProp);
+            }
+        }
+
+        syncManager = new FSyncManager(envImpl);
+    }
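+
+    /*
+     * Usage illustration for the debug hooks above; the values are
+     * examples only. The properties are set on the command line, e.g.
+     *
+     *   java -Dje.debug.stopOnWriteCount=1000 \
+     *        -Dje.debug.stopOnWriteAction=throw ...
+     *
+     * which, per the STOP_ON_WRITE_COUNT comments above, arranges for
+     * writes at and after the configured count to throw an IOException.
+     */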
+
+    /**
+     * Set the file manager's "end of log".
+     *
+     * @param nextAvailableLsn LSN to be used for the next log entry
+     * @param lastUsedLsn last LSN to have a valid entry, may be null
+     * @param prevOffset value to use for the prevOffset of the next entry.
+     *  If the beginning of the file, this is 0.
+     */
+    public void setLastPosition(long nextAvailableLsn,
+                                long lastUsedLsn,
+                                long prevOffset) {
+        this.lastUsedLsn = lastUsedLsn;
+        perFileLastUsedLsn.put(Long.valueOf(DbLsn.getFileNumber(lastUsedLsn)),
+                               Long.valueOf(lastUsedLsn));
+        this.nextAvailableLsn = nextAvailableLsn;
+        currentFileNum = DbLsn.getFileNumber(this.nextAvailableLsn);
+        this.prevOffset = prevOffset;
+        saveLastPosition();
+    }
+
+    /*
+     * Cause the current LSN state to be saved in case we fail after we have
+     * bumped the LSN pointer but before we've successfully marshalled into the
+     * log buffer.
+     */
+    void saveLastPosition() {
+        savedNextAvailableLsn = nextAvailableLsn;
+        savedLastUsedLsn = lastUsedLsn;
+        savedPrevOffset = prevOffset;
+        savedForceNewFile = forceNewFile;
+        savedCurrentFileNum = currentFileNum;
+    }
+
+    void restoreLastPosition() {
+        nextAvailableLsn = savedNextAvailableLsn;
+        lastUsedLsn = savedLastUsedLsn;
+        prevOffset = savedPrevOffset;
+        forceNewFile = savedForceNewFile;
+        currentFileNum = savedCurrentFileNum;
+    }
+
+    /**
+     * May be used to disable sync at file end to speed unit tests.
+     * Must only be used for unit testing, since log corruption may result.
+     */
+    public void setSyncAtFileEnd(boolean sync) {
+        syncAtFileEnd = sync;
+    }
+
+    /*
+     * File management
+     */
+
+    /**
+     * public for cleaner.
+     *
+     * @return the number of the first file in this environment.
+     */
+    public Long getFirstFileNum() {
+        return getFileNum(true);
+    }
+
+    public boolean getReadOnly() {
+        return readOnly;
+    }
+
+    /**
+     * @return the number of the last file in this environment.
+     */
+    public Long getLastFileNum() {
+        return getFileNum(false);
+    }
+
+    /**
+     * Returns the highest (current) file number.  Because a long value cannot
+     * be read atomically without synchronization, this method should be called
+     * while holding the log write latch.
+     */
+    public long getCurrentFileNum() {
+        return currentFileNum;
+    }
+
+    /**
+     * For assertions that check whether a file is valid or has been deleted
+     * via log cleaning.
+     */
+    public boolean isFileValid(long fileNum) {
+
+        /*
+         * If the file is the current file, it may be buffered and not yet
+         * created.  If the env is memory-only, we will never create or delete
+         * log files.
+         */
+        if (fileNum == currentFileNum || envImpl.isMemOnly()) {
+            return true;
+        }
+
+        /* Check for file existence. */
+        String fileName = getFullFileName(fileNum, FileManager.JE_SUFFIX);
+        File file = new File(fileName);
+        return file.exists();
+    }
+
+    public void setIncludeDeletedFiles(boolean includeDeletedFiles) {
+        this.includeDeletedFiles = includeDeletedFiles;
+    }
+
+    /**
+     * Get all JE file numbers.
+     * @return an array of all JE file numbers.
+     */
+    public Long[] getAllFileNumbers() {
+        /* Get all the names in sorted order. */
+        String[] names = listFiles(JE_SUFFIXES);
+        Long[] nums = new Long[names.length];
+        for (int i = 0; i < nums.length; i += 1) {
+            nums[i] = getNumFromName(names[i]);
+        }
+        return nums;
+    }
+
+    /**
+     * Get the next file number before/after currentFileNum.
+     * @param currentFileNum the file we're at right now. Note that
+     * it may not exist, if it's been cleaned and renamed.
+     * @param forward if true, we want the next larger file, if false
+     * we want the previous file
+     * @return the next or previous file number, or null if there is none
+     */
+    public Long getFollowingFileNum(long currentFileNum, boolean forward) {
+        /* Get all the names in sorted order. */
+        String[] names = listFiles(JE_SUFFIXES);
+
+        /* Search for the current file. */
+        String searchName = getFileName(currentFileNum, JE_SUFFIX);
+        int foundIdx = Arrays.binarySearch(names, searchName);
+
+        boolean foundTarget = false;
+        if (foundIdx >= 0) {
+            if (forward) {
+                foundIdx++;
+            } else {
+                foundIdx --;
+            }
+        } else {
+
+            /*
+             * currentFileNum not found (might have been cleaned). FoundIdx
+             * will be (-insertionPoint - 1).
+             */
+            foundIdx = Math.abs(foundIdx + 1);
+            if (!forward) {
+                foundIdx--;
+            }
+        }
+
+        /* Return the next or prev file if foundIdx is within bounds. */
+        if (forward && (foundIdx < names.length)) {
+            foundTarget = true;
+        } else if (!forward && (foundIdx > -1)) {
+            foundTarget = true;
+        }
+
+        if (foundTarget) {
+            return getNumFromName(names[foundIdx]);
+        } else {
+            return null;
+        }
+    }
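+
+    /*
+     * Worked example, illustrative: if the directory holds 00000002.jdb,
+     * 00000005.jdb and 00000009.jdb and we ask for the file following
+     * fileNum 3, binarySearch misses and returns -2 (insertion point 1),
+     * Math.abs(-2 + 1) = 1, and names[1] is 00000005.jdb -- the correct
+     * successor.
+     */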
+
+    /**
+     * @return true if there are any files at all.
+     */
+    public boolean filesExist() {
+        String[] names = listFiles(JE_SUFFIXES);
+        return (names.length != 0);
+    }
+
+    /**
+     * Get the first or last file number in the set of JE files.
+     *
+     * @param first if true, get the first file, else get the last file
+     * @return the file number or null if no files exist
+     */
+    private Long getFileNum(boolean first) {
+        String[] names = listFiles(JE_SUFFIXES);
+        if (names.length == 0) {
+            return null;
+        } else {
+            int index = 0;
+            if (!first) {
+                index = names.length - 1;
+            }
+            return getNumFromName(names[index]);
+        }
+    }
+
+    /**
+     * Get the file number from a file name.
+     *
+     * @param fileName the file name
+     * @return the file number
+     */
+    public Long getNumFromName(String fileName) {
+        String fileNumber = fileName.substring(0, fileName.indexOf("."));
+        return Long.valueOf(Long.parseLong(fileNumber, 16));
+    }
+
+    /**
+     * Find JE files. Return names sorted in ascending fashion.
+     * @param suffixes which types of files we're looking for
+     * @return array of file names
+     */
+    public String[] listFiles(String[] suffixes) {
+        String[] fileNames = dbEnvHome.list(new JEFileFilter(suffixes));
+        if (fileNames != null) {
+            Arrays.sort(fileNames);
+        } else {
+            fileNames = new String[0];
+        }
+        return fileNames;
+    }
+
+    /**
+     * Find .jdb files which are >= the minimimum file number and
+     * <= the maximum file number.
+     * Return names sorted in ascending fashion.
+     *
+     * @return array of file names
+     */
+    public String[] listFiles(long minFileNumber, long maxFileNumber) {
+
+        String[] fileNames = dbEnvHome.list(new JEFileFilter(JE_SUFFIXES,
+                                                             minFileNumber,
+                                                             maxFileNumber));
+        Arrays.sort(fileNames);
+        return fileNames;
+    }
+
+    /**
+     * Find JE files, flavor for unit test support.
+     *
+     * @param suffixes which types of files we're looking for
+     * @return array of file names
+     */
+    public static String[] listFiles(File envDirFile, String[] suffixes) {
+        String[] fileNames = envDirFile.list(new JEFileFilter(suffixes));
+        if (fileNames != null) {
+            Arrays.sort(fileNames);
+        } else {
+            fileNames = new String[0];
+        }
+        return fileNames;
+    }
+
+    /**
+     * @return the full file name(s) and path(s) for the given file number.
+     */
+    String[] getFullFileNames(long fileNum) {
+        if (includeDeletedFiles) {
+            int nSuffixes = JE_AND_DEL_SUFFIXES.length;
+            String[] ret = new String[nSuffixes];
+            for (int i = 0; i < nSuffixes; i++) {
+                ret[i] = getFullFileName(getFileName(fileNum,
+                                                     JE_AND_DEL_SUFFIXES[i]));
+            }
+            return ret;
+        } else {
+            return new String[]
+                { getFullFileName(getFileName(fileNum, JE_SUFFIX)) };
+        }
+    }
+
+    /**
+     * Remove files from the environment home directory.
+     * @param envFile environment directory
+     */
+    public static void removeFiles(File envFile)
+        throws IOException {
+
+        File[] targetFiles = envFile.listFiles();
+
+        /* Clean up any target files in this directory. */
+        for (int i = 0; i < targetFiles.length; i++) {
+            File f = targetFiles[i];
+            if (f.isDirectory() ||
+                f.getName().equals("je.properties")) {
+                continue;
+            }
+            boolean done = targetFiles[i].delete();
+            if (!done) {
+                System.out.println
+                    ("Warning, couldn't delete "
+                     + targetFiles[i]
+                     + " out of "
+                     + targetFiles[targetFiles.length - 1]);
+            }
+        }
+    }
+
+    /**
+     * @return the full file name and path for the given file number and
+     * suffix.
+     */
+    public String getFullFileName(long fileNum, String suffix) {
+        return getFullFileName(getFileName(fileNum, suffix));
+    }
+
+    /**
+     * @return the full file name and path for this file name.
+     */
+    private String getFullFileName(String fileName) {
+        return dbEnvHome + File.separator + fileName;
+    }
+
+    /**
+     * @return the file name for the nth file.
+     */
+    public static String getFileName(long fileNum, String suffix) {
+
+        return (getFileNumberString(fileNum) + suffix);
+    }
+
+    /**
+     * HexFormatter generates a 0 padded string starting with 0x.  We want
+     * the right most 8 digits, so start at 10.
+     */
+    private static String getFileNumberString(long fileNum) {
+        return HexFormatter.formatLong(fileNum).substring(10);
+    }
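+
+    /*
+     * For example: HexFormatter.formatLong(3) is "0x0000000000000003", and
+     * substring(10) keeps the rightmost 8 digits, "00000003", so file 3 is
+     * named 00000003.jdb.
+     */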
+
+    /**
+     * Rename this file to NNNNNNNN.suffix. If that file already exists, try
+     * NNNNNNNN.suffix.1, etc. Used for deleting files or moving corrupt files
+     * aside.
+     *
+     * @param fileNum the file we want to move
+     * @param newSuffix the new file suffix
+     */
+    public void renameFile(long fileNum, String newSuffix)
+        throws DatabaseException, IOException {
+
+        int repeatNum = 0;
+        boolean renamed = false;
+        while (!renamed) {
+            String generation = "";
+            if (repeatNum > 0) {
+                generation = "." + repeatNum;
+            }
+            String newName =
+                getFullFileName(getFileName(fileNum, newSuffix) + generation);
+            File targetFile = new File(newName);
+            if (targetFile.exists()) {
+                repeatNum++;
+            } else {
+                String oldFileName = getFullFileNames(fileNum)[0];
+                clearFileCache(fileNum);
+                File oldFile = new File(oldFileName);
+                if (oldFile.renameTo(targetFile)) {
+                    renamed = true;
+                } else {
+                    throw new LogException("Couldn't rename " + oldFileName +
+                                             " to " + newName);
+                }
+            }
+        }
+    }
+
+    /**
+     * Delete log file NNNNNNNN.
+     *
+     * @param fileNum the file we want to delete
+     */
+    public void deleteFile(long fileNum)
+        throws DatabaseException, IOException {
+
+        String fileName = getFullFileNames(fileNum)[0];
+        clearFileCache(fileNum);
+        File file = new File(fileName);
+        boolean done = file.delete();
+        if (!done) {
+            throw new LogException
+                ("Couldn't delete " + file);
+        }
+    }
+
+    /**
+     * Returns the log version for the given file.
+     */
+    public int getFileLogVersion(long fileNum)
+        throws LogException, DatabaseException  {
+
+        FileHandle handle = getFileHandle(fileNum);
+        int logVersion = handle.getLogVersion();
+        handle.release();
+        return logVersion;
+    }
+
+    /**
+     * Return a read only file handle that corresponds to this file number.
+     * Retrieve it from the cache or open it anew and validate the file header.
+     * This method takes a latch on this file, so that the file descriptor will
+     * be held in the cache as long as it's in use.  When the user is done with
+     * the file, the latch must be released.
+     *
+     * @param fileNum which file
+     * @return the file handle for the existing or newly created file
+     */
+    FileHandle getFileHandle(long fileNum)
+        throws LogException, DatabaseException  {
+
+        /* Check the file cache for this file. */
+        Long fileId = Long.valueOf(fileNum);
+        FileHandle fileHandle = null;
+
+        /**
+         * Loop until we get an open FileHandle.
+         */
+        while (true) {
+
+            /*
+             * The file cache is intentionally not latched here so that it's
+             * not a bottleneck in the fast path.  We check that the file
+             * handle that we get back is really still open after we latch it
+             * down below.
+             */
+            fileHandle = fileCache.get(fileId);
+
+            /*
+             * If the file isn't in the cache, latch the cache and check again.
+             * Under the latch, if the file is not in the cache we add it to
+             * the cache but do not open the file yet.  We latch the handle
+             * here, and open the file further below after releasing the cache
+             * latch.  This prevents blocking other threads that are opening
+             * other files while we open this file.  The latch on the handle
+             * blocks other threads waiting to open the same file, which is
+             * necessary.
+             */
+            boolean newHandle = false;
+            if (fileHandle == null) {
+                if (EnvironmentImpl.getFairLatches()) {
+                    fileCacheLatch.acquire();
+                    try {
+                        fileHandle = fileCache.get(fileId);
+                        if (fileHandle == null) {
+                            newHandle = true;
+                            fileHandle = addFileHandle(fileId);
+                        }
+                    } finally {
+                        fileCacheLatch.release();
+                    }
+                } else {
+                    synchronized (fileCacheLatch) {
+                        fileHandle = fileCache.get(fileId);
+                        if (fileHandle == null) {
+                            newHandle = true;
+                            fileHandle = addFileHandle(fileId);
+                        }
+                    }
+                }
+            }
+
+            if (newHandle) {
+
+                /*
+                 * Open the file with the fileHandle latched.  It was latched
+                 * by addFileHandle above.
+                 */
+                boolean success = false;
+                try {
+                    openFileHandle(fileHandle, FileMode.READ_MODE);
+                    success = true;
+                } finally {
+                    if (!success) {
+                        /* An exception is in flight -- clean up. */
+                        fileHandle.release();
+                        try {
+                            clearFileCache(fileNum);
+                        } catch (IOException e) {
+                            throw new DatabaseException(e);
+                        }
+                    }
+                }
+            } else {
+
+                /*
+                 * The handle was found in the cache.  Latch the fileHandle
+                 * before checking getFile below and returning.
+                 */
+                fileHandle.latch();
+            }
+
+            /*
+             * We may have obtained this file handle outside the file cache
+             * latch, so we have to test that the handle is still valid.  If
+             * it's not, then loop back and try again.
+             */
+            if (fileHandle.getFile() == null) {
+                fileHandle.release();
+            } else {
+                break;
+            }
+        }
+
+        return fileHandle;
+    }
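+
+    /*
+     * The caching pattern used above, reduced to a generic sketch (the names
+     * here are illustrative, not JE API): read the map without locking, lock
+     * only to insert a placeholder, do the expensive open outside the map
+     * lock, and re-check validity after taking the per-entry lock.
+     *
+     *     Handle h;
+     *     while (true) {
+     *         h = map.get(key);                  // unlocked fast path
+     *         boolean isNew = false;
+     *         if (h == null) {
+     *             synchronized (map) {           // short critical section
+     *                 h = map.get(key);
+     *                 if (h == null) {
+     *                     h = new Handle(key);   // created latched
+     *                     map.put(key, h);
+     *                     isNew = true;
+     *                 }
+     *             }
+     *         }
+     *         if (isNew) { open(h); } else { h.latch(); }
+     *         if (h.isOpen()) { break; }         // else it was closed/evicted
+     *         h.release();                       // retry from the top
+     *     }
+     */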
+
+    /**
+     * Creates a new FileHandle and adds it to the cache, but does not open
+     * the file.
+     * @return the latched FileHandle.
+     */
+    private FileHandle addFileHandle(Long fileNum)
+        throws DatabaseException {
+
+        FileHandle fileHandle =
+            new FileHandle(fileNum, getFileNumberString(fileNum));
+        fileCache.add(fileNum, fileHandle);
+        fileHandle.latch();
+        return fileHandle;
+    }
+
+    private FileMode getAppropriateReadWriteMode() {
+        if (useODSYNC) {
+            return FileMode.READWRITE_ODSYNC_MODE;
+        } else {
+            return FileMode.READWRITE_MODE;
+        }
+    }
+
+    /**
+     * Creates a new handle and opens it.  Does not add the handle to the
+     * cache.
+     */
+    private FileHandle makeFileHandle(long fileNum, FileMode mode)
+        throws DatabaseException {
+
+        FileHandle fileHandle =
+            new FileHandle(fileNum, getFileNumberString(fileNum));
+        openFileHandle(fileHandle, mode);
+        return fileHandle;
+    }
+
+    /**
+     * Opens the file for the given handle and initializes it.
+     */
+    private void openFileHandle(FileHandle fileHandle, FileMode mode)
+        throws DatabaseException {
+
+        nFileOpens += 1;
+        long fileNum = fileHandle.getFileNum();
+        String[] fileNames = getFullFileNames(fileNum);
+        RandomAccessFile newFile = null;
+        String fileName = null;
+        try {
+
+            /*
+             * Open the file. Note that we are going to try a few names to open
+             * this file -- we'll try for N.jdb, and if that doesn't exist and
+             * we're configured to look for all types, we'll look for N.del.
+             */
+            FileNotFoundException FNFE = null;
+            for (int i = 0; i < fileNames.length; i++) {
+                fileName = fileNames[i];
+                try {
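+                    /*
+                     * The synchronized length() override makes length() take
+                     * the same monitor as the synchronized (file) blocks used
+                     * in writeToFile and readFromFileInternal, so a length()
+                     * call cannot interleave with a seek/read/write on this
+                     * file object.
+                     */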
+                    newFile =
+                        new RandomAccessFile(fileName, mode.getModeValue()) {
+                            public synchronized long length()
+                                throws IOException {
+
+                                return super.length();
+                            }
+                        };
+                    break;
+                } catch (FileNotFoundException e) {
+                    /* Save the first exception thrown. */
+                    if (FNFE == null) {
+                        FNFE = e;
+                    }
+                }
+            }
+
+            /*
+             * If we didn't find the file or couldn't create it, rethrow the
+             * exception.
+             */
+            if (newFile == null) {
+                throw FNFE;
+            }
+
+            int logVersion = LogEntryType.LOG_VERSION;
+
+            if (newFile.length() == 0) {
+
+                /*
+                 * If the file is empty, reinitialize it if we can. If not,
+                 * send the file handle back up; the calling code will deal
+                 * with the fact that there's nothing there.
+                 */
+                if (mode.isWritable()) {
+                    /* An empty file, write a header. */
+                    long lastLsn = DbLsn.longToLsn(perFileLastUsedLsn.remove
+                       (Long.valueOf(fileNum - 1)));
+                    long headerPrevOffset = 0;
+                    if (lastLsn != DbLsn.NULL_LSN) {
+                        headerPrevOffset = DbLsn.getFileOffset(lastLsn);
+                    }
+                    FileHeader fileHeader =
+                        new FileHeader(fileNum, headerPrevOffset);
+                    writeFileHeader(newFile, fileName, fileHeader, fileNum);
+                }
+            } else {
+                /* A non-empty file, check the header */
+                logVersion =
+                    readAndValidateFileHeader(newFile, fileName, fileNum);
+            }
+            fileHandle.init(newFile, logVersion);
+        } catch (FileNotFoundException e) {
+            throw new LogFileNotFoundException
+                ("Couldn't open file " + fileName + ": " +
+                 e.getMessage());
+        } catch (DbChecksumException e) {
+
+            /*
+             * Let this exception go as a checksum exception, so it sets the
+             * run recovery state correctly.
+             */
+            closeFileInErrorCase(newFile);
+            throw new DbChecksumException
+                (envImpl, "Couldn't open file " + fileName, e);
+        } catch (Throwable t) {
+
+            /*
+             * Catch Throwable here (rather than exception) because in unit
+             * test mode, we run assertions and they throw errors. We want to
+             * clean up the file object in all cases.
+             */
+            closeFileInErrorCase(newFile);
+            throw new DatabaseException
+                ("Couldn't open file " + fileName + ": " + t, t);
+        }
+    }
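+
+    /*
+     * The multi-suffix open above is a "first success wins" loop.  A minimal
+     * sketch of the same pattern (candidateNames is hypothetical, not JE API):
+     *
+     *     RandomAccessFile raf = null;
+     *     FileNotFoundException firstFailure = null;
+     *     for (String name : candidateNames) {
+     *         try {
+     *             raf = new RandomAccessFile(name, "r");
+     *             break;                          // stop at the first hit
+     *         } catch (FileNotFoundException e) {
+     *             if (firstFailure == null) {
+     *                 firstFailure = e;           // remember first failure
+     *             }
+     *         }
+     *     }
+     *     if (raf == null) {
+     *         throw firstFailure;                 // none of the names existed
+     *     }
+     */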
+
+    /**
+     * Close this file and eat any exceptions. Used in catch clauses.
+     */
+    private void closeFileInErrorCase(RandomAccessFile file) {
+        try {
+            if (file != null) {
+                file.close();
+            }
+        } catch (IOException e) {
+
+            /*
+             * Klocwork - ok
+             * Couldn't close file, oh well.
+             */
+        }
+    }
+
+    /**
+     * Read the given JE log file and validate the header.
+     *
+     * @throws DatabaseException if the file header isn't valid
+     *
+     * @return file header log version.
+     */
+    private int readAndValidateFileHeader(RandomAccessFile file,
+                                          String fileName,
+                                          long fileNum)
+        throws DatabaseException, IOException {
+
+        /*
+         * Read the file header from this file. It's always the first log
+         * entry.
+         */
+        LogManager logManager = envImpl.getLogManager();
+        LogEntry headerEntry =
+            logManager.getLogEntry(DbLsn.makeLsn(fileNum, 0), file);
+        FileHeader header = (FileHeader) headerEntry.getMainItem();
+        return header.validate(fileName, fileNum);
+    }
+
+    /**
+     * Write a proper file header to the given file.
+     */
+    private void writeFileHeader(RandomAccessFile file,
+                                 String fileName,
+                                 FileHeader header,
+                                 long fileNum)
+        throws DatabaseException {
+
+        /*
+         * Fail loudly if the environment is invalid.  A RunRecoveryException
+         * must have occurred.
+         */
+        envImpl.checkIfInvalid();
+
+        /*
+         * Fail silently if the environment is not open.
+         */
+        if (envImpl.mayNotWrite()) {
+            return;
+        }
+
+        /* Write file header into this buffer in the usual log entry format. */
+        LogEntry headerLogEntry =
+            new SingleItemEntry(LogEntryType.LOG_FILE_HEADER, header);
+        ByteBuffer headerBuf = envImpl.getLogManager().
+            putIntoBuffer(headerLogEntry,
+                          0); // prevLogEntryOffset
+
+        /* Write the buffer into the channel. */
+        int bytesWritten;
+        try {
+            if (RUNRECOVERY_EXCEPTION_TESTING) {
+                generateRunRecoveryException(file, headerBuf, 0, fileNum);
+            }
+            bytesWritten = writeToFile(file, headerBuf, 0, fileNum);
+
+            if (fileNum > savedCurrentFileNum) {
+
+                /*
+                 * Writing the new file header succeeded without an IOE.  This
+                 * cannot be undone in the event of another IOE (out of disk
+                 * space) on the next write, so update the saved LSN state with
+                 * the new info. Do not update the nextAvailableLsn with a
+                 * smaller (earlier) LSN in case there's already something in a
+                 * buffer that is after the new header. [#15754]
+                 */
+                long lsnAfterHeader = DbLsn.makeLsn(fileNum, bytesWritten);
+                if (DbLsn.compareTo(nextAvailableLsn, lsnAfterHeader) < 0) {
+                    nextAvailableLsn = lsnAfterHeader;
+                }
+
+                lastUsedLsn = DbLsn.makeLsn(fileNum, bytesWritten);
+                prevOffset = bytesWritten;
+                forceNewFile = false;
+                currentFileNum = fileNum;
+                saveLastPosition();
+            }
+        } catch (ClosedChannelException e) {
+
+            /*
+             * The channel should never be closed. It may be closed because
+             * of an interrupt received by another thread. See SR [#10463]
+             */
+            throw new RunRecoveryException
+                (envImpl, "Channel closed, may be due to thread interrupt", e);
+        } catch (IOException e) {
+            /* Possibly an out of disk exception. */
+            throw new RunRecoveryException
+                (envImpl, "IOException during write: " + e);
+        }
+
+        if (bytesWritten != headerLogEntry.getSize() +
+            LogEntryHeader.MIN_HEADER_SIZE) {
+            throw new LogException
+                ("File " + fileName +
+                 " was created with an incomplete header. Only " +
+                 bytesWritten + " bytes were written.");
+        }
+    }
+
+    /**
+     * @return the prevOffset field stored in the file header.
+     */
+    long getFileHeaderPrevOffset(long fileNum)
+        throws IOException, DatabaseException {
+
+        LogEntry headerEntry =
+            envImpl.getLogManager().getLogEntry(DbLsn.makeLsn(fileNum, 0));
+        FileHeader header = (FileHeader) headerEntry.getMainItem();
+        return header.getLastEntryInPrevFileOffset();
+    }
+
+    /*
+     * Support for writing new log entries
+     */
+
+    /**
+     * @return the file offset of the last LSN that was used, for constructing
+     * the headers of log entries. If the last LSN that was used was in a
+     * previous file, or this is the very first LSN of the whole system, return
+     * 0.
+     */
+    long getPrevEntryOffset() {
+        return prevOffset;
+    }
+
+    /**
+     * Increase the current log position by "size" bytes. Move the prevOffset
+     * pointer along.
+     *
+     * @param size is an unsigned int
+     * @return true if we flipped to the next log file.
+     */
+    boolean bumpLsn(long size) {
+
+        /* Save copy of initial LSN state. */
+        saveLastPosition();
+
+        boolean flippedFiles = false;
+
+        if (forceNewFile ||
+            (DbLsn.getFileOffset(nextAvailableLsn) + size) > maxFileSize) {
+
+            forceNewFile = false;
+
+            /* Move to another file. */
+            currentFileNum++;
+
+            /* Remember the last used LSN of the previous file. */
+            if (lastUsedLsn != DbLsn.NULL_LSN) {
+                perFileLastUsedLsn.put
+                    (Long.valueOf(DbLsn.getFileNumber(lastUsedLsn)),
+                     Long.valueOf(lastUsedLsn));
+            }
+            prevOffset = 0;
+            lastUsedLsn =
+                DbLsn.makeLsn(currentFileNum, firstLogEntryOffset());
+            flippedFiles = true;
+        } else {
+            if (lastUsedLsn == DbLsn.NULL_LSN) {
+                prevOffset = 0;
+            } else {
+                prevOffset = DbLsn.getFileOffset(lastUsedLsn);
+            }
+            lastUsedLsn = nextAvailableLsn;
+        }
+        nextAvailableLsn =
+            DbLsn.makeLsn(DbLsn.getFileNumber(lastUsedLsn),
+                          (DbLsn.getFileOffset(lastUsedLsn) + size));
+
+        return flippedFiles;
+    }
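+
+    /*
+     * A worked example of the flip test above (the numbers are illustrative
+     * only): with maxFileSize = 10,000,000 and nextAvailableLsn at file 0x7,
+     * offset 9,999,900, a request of size 200 gives 9,999,900 + 200 =
+     * 10,000,100 > 10,000,000, so currentFileNum advances to 0x8, prevOffset
+     * is reset to 0, and lastUsedLsn points at firstLogEntryOffset() in the
+     * new file.  A request of size 50 stays in file 0x7, with prevOffset set
+     * to the offset of the previous lastUsedLsn.
+     */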
+
+    /**
+     * Write out a log buffer to the file.
+     * @param fullBuffer buffer to write
+     */
+    void writeLogBuffer(LogBuffer fullBuffer)
+        throws DatabaseException {
+
+        /*
+         * Fail loudly if the environment is invalid.  A RunRecoveryException
+         * must have occurred.
+         */
+        envImpl.checkIfInvalid();
+
+        /*
+         * Fail silently if the environment is not open.
+         */
+        if (envImpl.mayNotWrite()) {
+            return;
+        }
+
+        /* Use the LSN to figure out what file to write this buffer to. */
+        long firstLsn = fullBuffer.getFirstLsn();
+
+        /*
+         * Is there anything in this write buffer? We could have been called
+         * during environment shutdown, when nothing is actually in the buffer.
+         */
+        if (firstLsn != DbLsn.NULL_LSN) {
+
+            RandomAccessFile file =
+                endOfLog.getWritableFile(DbLsn.getFileNumber(firstLsn));
+            ByteBuffer data = fullBuffer.getDataBuffer();
+
+            if (VERIFY_CHECKSUMS) {
+                verifyChecksums(data, DbLsn.getFileOffset(firstLsn),
+                                "pre-write");
+            }
+
+            try {
+
+                /*
+                 * Check that we do not overwrite unless the file only contains
+                 * a header [#11915] [#12616].
+                 */
+                assert fullBuffer.getRewriteAllowed() ||
+                    (DbLsn.getFileOffset(firstLsn) >= file.length() ||
+                     file.length() == firstLogEntryOffset()) :
+                        "FileManager would overwrite non-empty file 0x" +
+                        Long.toHexString(DbLsn.getFileNumber(firstLsn)) +
+                        " lsnOffset=0x" +
+                        Long.toHexString(DbLsn.getFileOffset(firstLsn)) +
+                        " fileLength=0x" +
+                        Long.toHexString(file.length());
+
+                if (IO_EXCEPTION_TESTING_ON_WRITE) {
+                    throw new IOException("generated for testing (write)");
+                }
+                if (RUNRECOVERY_EXCEPTION_TESTING) {
+                    generateRunRecoveryException
+                        (file, data, DbLsn.getFileOffset(firstLsn),
+                         DbLsn.getFileNumber(firstLsn));
+                }
+                writeToFile(file, data, DbLsn.getFileOffset(firstLsn),
+                            DbLsn.getFileNumber(firstLsn));
+            } catch (ClosedChannelException e) {
+
+                /*
+                 * The file should never be closed. It may be closed because
+                 * of an interrupt received by another thread. See SR [#10463].
+                 */
+                throw new RunRecoveryException
+                    (envImpl, "File closed, may be due to thread interrupt",
+                     e);
+            } catch (IOException IOE) {
+
+                if (!IO_EXCEPTION_TESTING_ON_WRITE ||
+                    THROW_RRE_FOR_UNIT_TESTS) {
+                    throw new RunRecoveryException
+                        (envImpl, "IOE during write", IOE);
+                } else {
+
+                    /*
+                     * Possibly an out of disk exception, but java.io will only
+                     * tell us IOException with no indication of whether it's
+                     * out of disk or something else. Better support may exist
+                     * in Java6.
+                     *
+                     * Since we can't tell which sectors actually reached the
+                     * disk, we change any commit records that might have made
+                     * it out to abort records and rewrite the buffer.  If the
+                     * commit records did reach the disk on the first write,
+                     * the rewrite should overwrite them with the abort
+                     * versions.  See [11271].
+                     *
+                     * Rewriting committed transactions in replication is
+                     * highly problematic, and can lead to divergence between
+                     * the replica and master. If this path is re-enabled, we
+                     * must assess its impact in replication.
+                     */
+                    abortCommittedTxns(data);
+                    try {
+                        if (IO_EXCEPTION_TESTING_ON_WRITE) {
+                            throw new IOException
+                                ("generated for testing (write)");
+                        }
+                        writeToFile(file, data, DbLsn.getFileOffset(firstLsn),
+                                    DbLsn.getFileNumber(firstLsn));
+                    } catch (IOException IOE2) {
+                        fullBuffer.setRewriteAllowed();
+                        throw new DatabaseException(IOE2);
+                    }
+                }
+            }
+
+            assert EnvironmentImpl.maybeForceYield();
+        }
+    }
+
+    /**
+     * Write a buffer to a file at a given offset.
+     */
+    private int writeToFile(RandomAccessFile file,
+                            ByteBuffer data,
+                            long destOffset,
+                            long fileNum)
+        throws IOException, DatabaseException {
+
+        int totalBytesWritten = 0;
+
+        bumpWriteCount("write");
+
+        /*
+         * Perform a RandomAccessFile write and update the buffer position.
+         * ByteBuffer.array() is safe to use since all non-direct ByteBuffers
+         * have a backing array.  Synchronization on the file object is needed
+         * because two threads may call seek() on the same file object.
+         */
+        synchronized (file) {
+            assert data.hasArray();
+
+            int pos = data.position();
+            int size = data.limit() - pos;
+
+            if (lastFileNumberTouched == fileNum &&
+                (Math.abs(destOffset - lastFileTouchedOffset) <
+                 ADJACENT_TRACK_SEEK_DELTA)) {
+                nSequentialWrites++;
+                nSequentialWriteBytes += size;
+            } else {
+                nRandomWrites++;
+                nRandomWriteBytes += size;
+            }
+
+            if (VERIFY_CHECKSUMS) {
+                verifyChecksums(data, destOffset, "pre-write");
+            }
+            file.seek(destOffset);
+            file.write(data.array(), pos + data.arrayOffset(), size);
+            if (VERIFY_CHECKSUMS) {
+                file.seek(destOffset);
+                file.read(data.array(), pos + data.arrayOffset(), size);
+                verifyChecksums(data, destOffset, "post-write");
+            }
+            data.position(pos + size);
+            totalBytesWritten = size;
+
+            lastFileNumberTouched = fileNum;
+            lastFileTouchedOffset = destOffset + size;
+        }
+        return totalBytesWritten;
+    }
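+
+    /*
+     * The sequential/random accounting above classifies each write by how far
+     * the target offset is from the last position touched.  Schematically
+     * (field names as used above; the delta value is defined elsewhere in
+     * this class):
+     *
+     *     boolean sequential =
+     *         (lastFileNumberTouched == fileNum) &&
+     *         (Math.abs(destOffset - lastFileTouchedOffset) <
+     *          ADJACENT_TRACK_SEEK_DELTA);
+     *     // a write just past the previous one in the same file counts as
+     *     // sequential; a write to another file or far away counts as random
+     */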
+
+    private void bumpWriteCount(final String debugMsg)
+        throws IOException {
+
+        if (DEBUG) {
+            System.out.println("Write: " + WRITE_COUNT + " " + debugMsg);
+        }
+
+        if (++WRITE_COUNT >= STOP_ON_WRITE_COUNT &&
+            WRITE_COUNT < (STOP_ON_WRITE_COUNT + N_BAD_WRITES)) {
+            if (THROW_ON_WRITE) {
+                throw new IOException
+                    ("IOException generated for testing: " + WRITE_COUNT +
+                     " " + debugMsg);
+            } else {
+                Runtime.getRuntime().halt(0xff);
+            }
+        }
+    }
+
+    /**
+     * Read a buffer from a file at a given offset.
+     */
+    void readFromFile(RandomAccessFile file,
+                      ByteBuffer readBuffer,
+                      long offset,
+                      long fileNo)
+        throws DatabaseException, IOException {
+
+        /*
+         * All IOExceptions on read turn into RunRecoveryExceptions [#15768].
+         */
+        try {
+            readFromFileInternal(file, readBuffer, offset, fileNo);
+        } catch (IOException IOE) {
+            throw new RunRecoveryException(envImpl, IOE);
+        }
+
+    }
+
+    private void readFromFileInternal(RandomAccessFile file,
+                                      ByteBuffer readBuffer,
+                                      long offset,
+                                      long fileNum)
+        throws DatabaseException, IOException {
+
+        /*
+         * Perform a RandomAccessFile read and update the buffer position.
+         * ByteBuffer.array() is safe to use since all non-direct ByteBuffers
+         * have a backing array.  Synchronization on the file object is needed
+         * because two threads may call seek() on the same file object.
+         */
+        synchronized (file) {
+            assert readBuffer.hasArray();
+
+            int pos = readBuffer.position();
+            int size = readBuffer.limit() - pos;
+
+            if (lastFileNumberTouched == fileNum &&
+                (Math.abs(offset - lastFileTouchedOffset) <
+                 ADJACENT_TRACK_SEEK_DELTA)) {
+                nSequentialReads++;
+                nSequentialReadBytes += size;
+            } else {
+                nRandomReads++;
+                nRandomReadBytes += size;
+            }
+
+            file.seek(offset);
+            if (IO_EXCEPTION_TESTING_ON_READ) {
+                throw new IOException("generated for testing (read)");
+            }
+            int bytesRead = file.read(readBuffer.array(),
+                                      pos + readBuffer.arrayOffset(),
+                                      size);
+            if (bytesRead > 0) {
+                readBuffer.position(pos + bytesRead);
+            }
+
+            lastFileNumberTouched = fileNum;
+            lastFileTouchedOffset = offset + bytesRead;
+        }
+    }
+
+    private void verifyChecksums(ByteBuffer entryBuffer,
+                                 long lsn,
+                                 String comment) {
+
+        int curPos = entryBuffer.position();
+        try {
+            while (entryBuffer.remaining() > 0) {
+                int recStartPos = entryBuffer.position();
+                LogEntryHeader header =
+                    new LogEntryHeader(envImpl,
+                                       entryBuffer,
+                                       false); // anticipateChecksumErrors
+
+                verifyChecksum(entryBuffer, header, lsn, comment);
+                entryBuffer.position(recStartPos + header.getSize() +
+                                     header.getItemSize());
+            }
+        } catch (DatabaseException DCE) {
+            System.err.println("ChecksumException: (" + comment + ") " + DCE);
+            System.err.println("start stack trace");
+            DCE.printStackTrace(System.err);
+            System.err.println("end stack trace");
+        }
+        entryBuffer.position(curPos);
+    }
+
+    private void verifyChecksum(ByteBuffer entryBuffer,
+                                LogEntryHeader header,
+                                long lsn,
+                                String comment)
+        throws DbChecksumException {
+
+        ChecksumValidator validator = null;
+        /* Add header to checksum bytes */
+        validator = new ChecksumValidator();
+        int headerSizeMinusChecksum = header.getSizeMinusChecksum();
+        int itemStart = entryBuffer.position();
+        entryBuffer.position(itemStart - headerSizeMinusChecksum);
+        validator.update(envImpl,
+                         entryBuffer,
+                         headerSizeMinusChecksum,
+                         false); // anticipateChecksumErrors
+        entryBuffer.position(itemStart);
+
+        /*
+         * Now that we know the item size, make sure the whole entry is in the
+         * buffer; if it isn't, we can't verify this entry's checksum.
+         */
+        int itemSize = header.getItemSize();
+        if (entryBuffer.remaining() < itemSize) {
+            System.err.println("Couldn't verify checksum (" + comment + ")");
+            return;
+        }
+
+        /*
+         * Do entry validation. Run checksum before checking the entry
+         * type, it will be the more encompassing error.
+         */
+        validator.update(envImpl, entryBuffer, itemSize, false);
+        validator.validate(envImpl, header.getChecksum(), lsn);
+    }
+
+    /*
+     * Iterate through a buffer looking for commit records.  Change all commit
+     * records to abort records.
+     */
+    private void abortCommittedTxns(ByteBuffer data)
+        throws DatabaseException {
+
+        final byte commitType = LogEntryType.LOG_TXN_COMMIT.getTypeNum();
+        data.position(0);
+
+        while (data.remaining() > 0) {
+            int recStartPos = data.position();
+            LogEntryHeader header =
+                new LogEntryHeader(envImpl,
+                                   data,
+                                   false); // anticipateChecksumErrors
+
+            if (header.getType() == commitType) {
+                /* Change the log entry type, and recalculate the checksum. */
+                header.convertCommitToAbort(data);
+            }
+
+            data.position(recStartPos + header.getSize() +
+                          header.getItemSize());
+        }
+        data.position(0);
+    }
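+
+    /*
+     * Both verifyChecksums and abortCommittedTxns walk a buffer of log
+     * entries the same way: read a header at the current position, then jump
+     * ahead by header size plus item size.  Schematically (readHeader stands
+     * in for the LogEntryHeader constructor used above):
+     *
+     *     buf.position(0);
+     *     while (buf.remaining() > 0) {
+     *         int start = buf.position();
+     *         LogEntryHeader h = readHeader(buf);   // advances past header
+     *         // ... inspect or rewrite this entry ...
+     *         buf.position(start + h.getSize() + h.getItemSize());
+     *     }
+     */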
+
+    /**
+     * FSync the end of the log.
+     */
+    void syncLogEnd()
+        throws DatabaseException {
+
+        try {
+            endOfLog.force();
+        } catch (IOException e) {
+            throw new RunRecoveryException
+                (envImpl, "IOException during fsync", e);
+        }
+    }
+
+    /**
+     * Sync the end of the log, close off this log file. Should only be called
+     * under the log write latch.
+     */
+    void syncLogEndAndFinishFile()
+        throws DatabaseException, IOException {
+
+        if (syncAtFileEnd) {
+            syncLogEnd();
+        }
+        endOfLog.close();
+    }
+
+    /**
+     * Flush a file using the group sync mechanism, trying to amortize the
+     * cost across other syncs.
+     */
+    void groupSync()
+        throws DatabaseException {
+
+        syncManager.fsync();
+    }
+
+    /**
+     * Close all file handles and empty the cache.
+     */
+    public void clear()
+        throws IOException, DatabaseException {
+
+        if (EnvironmentImpl.getFairLatches()) {
+            fileCacheLatch.acquire();
+            try {
+                fileCache.clear();
+            } finally {
+                fileCacheLatch.release();
+            }
+        } else {
+            synchronized (fileCacheLatch) {
+                fileCache.clear();
+            }
+        }
+
+        endOfLog.close();
+    }
+
+    /**
+     * Clear the file lock.
+     */
+    public void close()
+        throws IOException, DatabaseException {
+
+        if (envLock != null) {
+            envLock.release();
+        }
+
+        if (exclLock != null) {
+            exclLock.release();
+        }
+
+        if (channel != null) {
+            channel.close();
+        }
+
+        if (lockFile != null) {
+            lockFile.close();
+            lockFile = null;
+        }
+    }
+
+    /**
+     * Lock the environment.  Return true if the lock was acquired.  If
+     * exclusive is false, then this implements a single writer, multiple
+     * reader lock.  If exclusive is true, then this implements an exclusive
+     * lock.
+     *
+     * There is a lock file and there are two regions of the lock file: byte 0,
+     * and byte 1.  Byte 0 is the exclusive writer process area of the lock
+     * file.  If an environment is opened for write, then it attempts to take
+     * an exclusive write lock on byte 0.  Byte 1 is the shared reader process
+     * area of the lock file.  If an environment is opened for read-only, then
+     * it attempts to take a shared lock on byte 1.  This is how we implement
+     * single writer, multi-reader semantics.
+     *
+     * The cleaner, each time it is invoked, attempts to take an exclusive lock
+     * on byte 1.  The owning process already either has an exclusive lock on
+     * byte 0, or a shared lock on byte 1.  This will necessarily conflict with
+     * any shared locks on byte 1, even if it's in the same process and there
+     * are no other holders of that shared lock.  So if there is only one
+     * read-only process, it will have byte 1 for shared access, and the
+     * cleaner can not run in it because it will attempt to get an exclusive
+     * lock on byte 1 (which is already locked for shared access by itself).
+     * If a write process comes along and tries to run the cleaner, it will
+     * attempt to get an exclusive lock on byte 1.  If there are no other
+     * reader processes (with shared locks on byte 1), and no other writers
+     * (which are running cleaners with exclusive locks on byte 1), then the
+     * cleaner will run.
+     */
+    public boolean lockEnvironment(boolean readOnly, boolean exclusive)
+        throws DatabaseException {
+
+        try {
+            if (checkEnvHomePermissions(readOnly)) {
+                return true;
+            }
+
+            if (lockFile == null) {
+                lockFile =
+                    new RandomAccessFile
+                    (new File(dbEnvHome, LOCK_FILE),
+                     FileMode.READWRITE_MODE.getModeValue());
+
+            }
+
+            channel = lockFile.getChannel();
+
+            boolean throwIt = false;
+            try {
+                if (exclusive) {
+
+                    /*
+                     * To lock exclusive, must have exclusive on
+                     * shared reader area (byte 1).
+                     */
+                    exclLock = channel.tryLock(1, 1, false);
+                    if (exclLock == null) {
+                        return false;
+                    }
+                    return true;
+                } else {
+                    if (readOnly) {
+                        envLock = channel.tryLock(1, 1, true);
+                    } else {
+                        envLock = channel.tryLock(0, 1, false);
+                    }
+                    if (envLock == null) {
+                        throwIt = true;
+                    }
+                }
+            } catch (OverlappingFileLockException e) {
+                throwIt = true;
+            }
+            if (throwIt) {
+                close();
+                throw new EnvironmentLockedException
+                    ("A " + LOCK_FILE + " file exists in " +
+                     dbEnvHome.getAbsolutePath() +
+                     ". The environment cannot be locked for " +
+                     (readOnly ? "shared" : "single writer") + " access.");
+            }
+        } catch (IOException IOE) {
+            throw new LogException(IOE.toString());
+        }
+        return true;
+    }
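+
+    /*
+     * The byte-0 / byte-1 scheme above, reduced to the three tryLock calls a
+     * process may make.  This is a sketch only; the lock file name and the
+     * cleanerProbe/readOnly flags are stand-ins for the method parameters,
+     * not JE code.
+     *
+     *     FileChannel ch = new RandomAccessFile(
+     *         new File(envHome, "je.lck"), "rw").getChannel();
+     *     FileLock lock;
+     *     if (cleanerProbe) {
+     *         lock = ch.tryLock(1, 1, false);   // exclusive on byte 1
+     *     } else if (readOnly) {
+     *         lock = ch.tryLock(1, 1, true);    // shared on byte 1
+     *     } else {
+     *         lock = ch.tryLock(0, 1, false);   // exclusive on byte 0
+     *     }
+     *     if (lock == null) {
+     *         // another process holds a conflicting lock on that region
+     *     }
+     */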
+
+    public void releaseExclusiveLock()
+        throws DatabaseException {
+
+        try {
+            if (exclLock != null) {
+                exclLock.release();
+            }
+        } catch (IOException IOE) {
+            throw new DatabaseException(IOE);
+        }
+    }
+
+    /**
+     * Ensure that if the environment home dir is on readonly media or in a
+     * readonly directory that the environment has been opened for readonly
+     * access.
+     *
+     * @return true if the environment home dir is readonly.
+     */
+    public boolean checkEnvHomePermissions(boolean readOnly)
+        throws DatabaseException {
+
+        boolean envDirIsReadOnly = !dbEnvHome.canWrite();
+        if (envDirIsReadOnly && !readOnly) {
+
+            /*
+             * Use the absolute path in the exception message, to
+             * make a mis-specified relative path problem more obvious.
+             */
+            throw new DatabaseException
+                ("The Environment directory " +
+                 dbEnvHome.getAbsolutePath() +
+                 " is not writable, but the " +
+                 "Environment was opened for read-write access.");
+        }
+
+        return envDirIsReadOnly;
+    }
+
+    /**
+     * Truncate a log at this position. Used by recover-to-a-timestamp
+     * utilities and by recovery to set the end-of-log position.
+     *
+     * <p>This method forces a new log file to be written next, if the last
+     * file (the file truncated to) has an old version in its header.  This
+     * ensures that when the log is opened by an old version of JE, a version
+     * incompatibility will be detected.  [#11243]</p>
+     */
+    public void truncateLog(long fileNum, long offset)
+        throws IOException, DatabaseException  {
+
+        FileHandle handle =
+            makeFileHandle(fileNum, getAppropriateReadWriteMode());
+        RandomAccessFile file = handle.getFile();
+
+        try {
+            file.getChannel().truncate(offset);
+        } finally {
+            file.close();
+        }
+
+        if (handle.isOldHeaderVersion()) {
+            forceNewFile = true;
+        }
+    }
+
+    /**
+     * Set the flag that causes a new file to be written before the next write.
+     */
+    void forceNewLogFile() {
+        forceNewFile = true;
+    }
+
+    /**
+     * Return the offset of the first log entry after the file header.
+     *
+     * @return the size in bytes of the file header log entry.
+     */
+    public static int firstLogEntryOffset() {
+        return FileHeader.entrySize() + LogEntryHeader.MIN_HEADER_SIZE;
+    }
+
+    /**
+     * Return the next available LSN in the log. Note that this is
+     * unsynchronized, so is only valid as an approximation of log size.
+     */
+    public long getNextLsn() {
+        return nextAvailableLsn;
+    }
+
+    /**
+     * Return the last allocated LSN in the log. Note that this is
+     * unsynchronized, so if it is called outside the log write latch it is
+     * only valid as an approximation of log size.
+     */
+    public long getLastUsedLsn() {
+        return lastUsedLsn;
+    }
+
+    /*
+     * fsync stats.
+     */
+    public long getNFSyncs() {
+        return syncManager.getNFSyncs();
+    }
+
+    public long getNFSyncRequests() {
+        return syncManager.getNFSyncRequests();
+    }
+
+    public long getNFSyncTimeouts() {
+        return syncManager.getNTimeouts();
+    }
+
+    void loadStats(StatsConfig config, EnvironmentStats stats)
+        throws DatabaseException {
+
+        syncManager.loadStats(config, stats);
+        stats.setNRandomReads(nRandomReads);
+        stats.setNRandomWrites(nRandomWrites);
+        stats.setNSequentialReads(nSequentialReads);
+        stats.setNSequentialWrites(nSequentialWrites);
+        stats.setNRandomReadBytes(nRandomReadBytes);
+        stats.setNRandomWriteBytes(nRandomWriteBytes);
+        stats.setNSequentialReadBytes(nSequentialReadBytes);
+        stats.setNSequentialWriteBytes(nSequentialWriteBytes);
+        stats.setNFileOpens(nFileOpens);
+        stats.setNOpenFiles(fileCache.size());
+
+        if (config.getClear()) {
+            nRandomReads = 0;
+            nRandomWrites = 0;
+            nSequentialReads = 0;
+            nSequentialWrites = 0;
+            nRandomReadBytes = 0;
+            nRandomWriteBytes = 0;
+            nSequentialReadBytes = 0;
+            nSequentialWriteBytes = 0;
+            nFileOpens = 0;
+        }
+    }
+
+    /*
+     * Unit test support
+     */
+
+    /*
+     * @return ids of files in cache
+     */
+    Set<Long> getCacheKeys() {
+        return fileCache.getCacheKeys();
+    }
+
+    /**
+     * Clear a file out of the file cache regardless of mode type.
+     */
+    private void clearFileCache(long fileNum)
+        throws IOException, DatabaseException {
+
+        if (EnvironmentImpl.getFairLatches()) {
+            fileCacheLatch.acquire();
+            try {
+                fileCache.remove(fileNum);
+            } finally {
+                fileCacheLatch.release();
+            }
+        } else {
+            synchronized (fileCacheLatch) {
+                fileCache.remove(fileNum);
+            }
+        }
+    }
+
+    /*
+     * The file cache keeps N RandomAccessFile objects cached for file
+     * access. The cache consists of two parts: a Hashtable that doesn't
+     * require extra synchronization, for the most common access, and a linked
+     * list of files to support cache administration. Looking up a file from
+     * the hash table doesn't require extra latching, but adding or deleting a
+     * file does.
+     */
+    private static class FileCache {
+        private Map<Long, FileHandle> fileMap;            // Long->file
+        private LinkedList<Long> fileList;    // list of file numbers
+        private int fileCacheSize;
+
+        FileCache(DbConfigManager configManager)
+            throws DatabaseException {
+
+            /*
+             * A fileMap maps the file number to FileHandles (RandomAccessFile,
+             * latch). The fileList is a list of Longs to determine which files
+             * to eject out of the file cache if it's too small.
+             */
+            fileMap = new Hashtable<Long, FileHandle>();
+            fileList = new LinkedList<Long>();
+            fileCacheSize =
+                configManager.getInt(EnvironmentParams.LOG_FILE_CACHE_SIZE);
+        }
+
+        private FileHandle get(Long fileId) {
+            return fileMap.get(fileId);
+        }
+
+        private void add(Long fileId, FileHandle fileHandle)
+            throws DatabaseException {
+
+            /*
+             * Does the cache have any room or do we have to evict?  Walk the
+             * file list looking for an unused file. Note that the file cache
+             * might actually grow past the prescribed size if there is nothing
+             * evictable. Should we try to shrink the file cache? Presently if
+             * it grows, it doesn't shrink.
+             */
+            if (fileList.size() >= fileCacheSize) {
+                Iterator<Long> iter = fileList.iterator();
+                while (iter.hasNext()) {
+                    Long evictId = iter.next();
+                    FileHandle evictTarget = fileMap.get(evictId);
+
+                    /*
+                     * Try to latch. If latchNoWait returns false, then another
+                     * thread owns this latch. Note that a thread that's trying
+                     * to get a new file handle should never already own the
+                     * latch on another file handle, because these latches are
+                     * meant to be short lived and only held over the i/o out
+                     * of the file.
+                     */
+                    if (evictTarget.latchNoWait()) {
+                        try {
+                            fileMap.remove(evictId);
+                            iter.remove();
+                            evictTarget.close();
+                        } catch (IOException e) {
+                            throw new DatabaseException(e);
+                        } finally {
+                            evictTarget.release();
+                        }
+                        break;
+                    }
+                }
+            }
+
+            /*
+             * We've done our best to evict. Add the file to the cache now
+             * whether or not we did evict.
+             */
+            fileList.add(fileId);
+            fileMap.put(fileId, fileHandle);
+        }
+
+        /**
+         * Take any file handles corresponding to this file name out of the
+         * cache. A file handle could be there twice, in read-only and in
+         * read/write mode.
+         */
+        private void remove(long fileNum)
+            throws IOException, DatabaseException {
+
+            Iterator<Long> iter = fileList.iterator();
+            while (iter.hasNext()) {
+                Long evictId = iter.next();
+                if (evictId.longValue() == fileNum) {
+                    FileHandle evictTarget = fileMap.get(evictId);
+                    try {
+                        evictTarget.latch();
+                        fileMap.remove(evictId);
+                        iter.remove();
+                        evictTarget.close();
+                    } finally {
+                        evictTarget.release();
+                    }
+                }
+            }
+        }
+
+        private void clear()
+            throws IOException, DatabaseException {
+
+            Iterator<FileHandle> iter = fileMap.values().iterator();
+            while (iter.hasNext()) {
+                FileHandle fileHandle = iter.next();
+                try {
+                    fileHandle.latch();
+                    fileHandle.close();
+                    iter.remove();
+                } finally {
+                    fileHandle.release();
+                }
+            }
+            fileMap.clear();
+            fileList.clear();
+        }
+
+        private Set<Long> getCacheKeys() {
+            return fileMap.keySet();
+        }
+
+        private int size() {
+            return fileMap.size();
+        }
+    }
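+
+    /*
+     * The eviction policy in FileCache.add, as a generic sketch (names are
+     * illustrative, not JE API): scan the insertion-ordered list, skip any
+     * handle whose latch is busy (it is in use for I/O), and close the first
+     * idle one.
+     *
+     *     for (Iterator<Long> it = order.iterator(); it.hasNext();) {
+     *         Long victim = it.next();
+     *         Handle h = map.get(victim);
+     *         if (h.tryLock()) {                // skip handles in use
+     *             try {
+     *                 map.remove(victim);
+     *                 it.remove();
+     *                 h.close();
+     *             } finally {
+     *                 h.unlock();
+     *             }
+     *             break;                        // evict at most one
+     *         }
+     *     }
+     */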
+
+    /**
+     * The LogEndFileDescriptor is used to write and fsync the end of the log.
+     * Because the JE log is append only, there is only one logical R/W file
+     * descriptor for the whole environment. This class actually implements two
+     * RandomAccessFile instances, one for writing and one for fsyncing, so the
+     * two types of operations don't block each other.
+     *
+     * The write file descriptor is considered the master.  Manipulation of
+     * this class is done under the log write latch. Here's an explanation of
+     * why the log write latch is sufficient to safeguard all operations.
+     *
+     * There are two types of callers who may use this file descriptor: the
+     * thread that is currently writing to the end of the log and any threads
+     * that are fsyncing on behalf of the FSyncManager.
+     *
+     * The writing thread appends data to the file and fsyncs the file when we
+     * flip over to a new log file.  The file is only instantiated at the point
+     * that it must do so -- which is either when the first fsync is required
+     * by JE or when the log file is full and we flip files.  Therefore, the
+     * writing thread has two actions that change this descriptor -- we
+     * initialize the file descriptor for the given log file at the first write
+     * to the file, and we close the file descriptor when the log file is full.
+     * Therefore there is a period when there is no log descriptor -- when we
+     * have not yet written a log buffer into a given log file.
+     *
+     * The fsyncing threads ask for the log end file descriptor asynchronously,
+     * but will never modify it.  These threads may arrive at the point when
+     * the file descriptor is null, and therefore skip their fsync, but that is
+     * fine because it means a writing thread already flipped that target file
+     * and has moved on to the next file.
+     *
+     * Time     Activity
+     * 10       thread 1 writes log entry A into file 0x0, issues fsync
+     *          outside of log write latch, yields the processor
+     * 20       thread 2 writes log entry B, piggybacks off thread 1
+     * 30       thread 3 writes log entry C, but no room left in that file,
+     *          so it flips the log, and fsyncs file 0x0, all under the log
+     *          write latch. It nulls out endOfLogRWFile, moves onto file
+     *          0x1, but doesn't create the file yet.
+     * 40       thread 1 finally comes along, but endOfLogRWFile is null--
+     *          no need to fsync in that case, 0x0 got fsynced.
+     */
+    class LogEndFileDescriptor {
+        private RandomAccessFile endOfLogRWFile = null;
+        private RandomAccessFile endOfLogSyncFile = null;
+        private Object fsyncFileSynchronizer = new Object();
+
+        /**
+         * getWritableFile must be called under the log write latch.
+         */
+        RandomAccessFile getWritableFile(long fileNumber)
+            throws RunRecoveryException {
+
+            try {
+
+                if (endOfLogRWFile == null) {
+
+                    /*
+                     * We need to make a file descriptor for the end of the
+                     * log.  This is guaranteed to be called under the log
+                     * write latch.
+                     */
+                    endOfLogRWFile =
+                        makeFileHandle(fileNumber,
+                                       getAppropriateReadWriteMode()).
+                        getFile();
+                    synchronized (fsyncFileSynchronizer) {
+                        endOfLogSyncFile =
+                            makeFileHandle(fileNumber,
+                                           getAppropriateReadWriteMode()).
+                            getFile();
+                    }
+                }
+
+                return endOfLogRWFile;
+            } catch (Exception e) {
+
+                /*
+                 * If we can't get a write channel, we need to go into
+                 * RunRecovery state.
+                 */
+                throw new RunRecoveryException(envImpl, e);
+            }
+        }
+
+        /**
+         * FSync the log file that makes up the end of the log.
+         */
+        void force()
+            throws DatabaseException, IOException {
+
+            /*
+             * Get a local copy of the end-of-log file descriptor; it could
+             * change. No need to latch; no harm done if we get an old file
+             * descriptor, because we forcibly fsync under the log write latch
+             * when we switch files.
+             *
+             * If there is no current end file descriptor, we know that the log
+             * file has flipped to a new file since the fsync was issued.
+             */
+            synchronized (fsyncFileSynchronizer) {
+
+                /*
+                 * In [#17865] a user reported file corruption when using
+                 * Windows 7.  This SR includes a test program (JETester) which
+                 * readily demonstrates the problem.  We are reasonably sure
+                 * that the problem is caused by concurrent write() and fsync()
+                 * calls on the same file, and in particular, if two different
+                 * file descriptors are used concurrently.  The code below is a
+                 * workaround for this problem since it does not happen when we
+                 * use the same RAF for the write() and fsync().  In fact, the
+                 * test program has slightly better performance on the write
+                 * phase with the change below.  This problem is not present
+                 * in 4.0 because of the write queue code in that release.
+                 */
+                boolean isWin7 = EnvironmentImpl.IS_WINDOWS_7;
+                RandomAccessFile file =
+                    (isWin7 ? endOfLogRWFile : endOfLogSyncFile);
+
+                if (file != null) {
+                    bumpWriteCount("fsync");
+                    FileChannel channel = file.getChannel();
+                    try {
+                        channel.force(false);
+                    } catch (ClosedChannelException e) {
+
+                        /*
+                         * The channel should never be closed. It may be closed
+                         * because of an interrupt received by another
+                         * thread. See SR [#10463]
+                         */
+                        throw new RunRecoveryException
+                            (envImpl,
+                             "Channel closed, may be due to thread interrupt",
+                             e);
+                    }
+
+                    assert EnvironmentImpl.maybeForceYield();
+                }
+            }
+        }
+
+        /**
+         * Close the end of the log file descriptor. Use atomic assignment to
+         * ensure that we won't force and close on the same descriptor.
+         */
+        void close()
+            throws IOException {
+
+            IOException firstException = null;
+            if (endOfLogRWFile != null) {
+                RandomAccessFile file = endOfLogRWFile;
+
+                /*
+                 * Null out so that other threads know endOfLogRWFile is no
+                 * longer available.
+                 */
+                endOfLogRWFile = null;
+                try {
+                    file.close();
+                } catch (IOException e) {
+                    /* Save this exception, so we can try the second close. */
+                    firstException = e;
+                }
+            }
+            synchronized (fsyncFileSynchronizer) {
+                if (endOfLogSyncFile != null) {
+                    RandomAccessFile file = endOfLogSyncFile;
+
+                    /*
+                     * Null out so that other threads know endOfLogSyncFile is
+                     * no longer available.
+                     */
+                    endOfLogSyncFile = null;
+                    file.close();
+                }
+
+                if (firstException != null) {
+                    throw firstException;
+                }
+            }
+        }
+    }
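+
+    /*
+     * The LogEndFileDescriptor design, reduced to its essentials (a sketch
+     * under assumptions, not the JE implementation): one descriptor is used
+     * only by the single writer under the log write latch, a second is used
+     * only for fsync under its own monitor, so an fsync never blocks the
+     * writer's seek/write calls.
+     *
+     *     class EndOfLog {
+     *         private RandomAccessFile writeFd;  // writer thread only
+     *         private RandomAccessFile syncFd;   // guarded by syncMutex
+     *         private final Object syncMutex = new Object();
+     *
+     *         void force() throws IOException {
+     *             synchronized (syncMutex) {
+     *                 if (syncFd != null) {
+     *                     syncFd.getChannel().force(false);
+     *                 }
+     *                 // null means the writer already flipped and fsynced
+     *                 // the previous file, so there is nothing to do
+     *             }
+     *         }
+     *     }
+     */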
+
+    /*
+     * Generate IOExceptions for testing.
+     */
+
+    /* Testing switch. public so others can read the value. */
+    public static final boolean RUNRECOVERY_EXCEPTION_TESTING;
+    private static String RRET_PROPERTY_NAME = "je.run.recovery.testing";
+
+    static {
+        RUNRECOVERY_EXCEPTION_TESTING =
+            (System.getProperty(RRET_PROPERTY_NAME) != null);
+    }
+
+    /* Max write counter value. */
+    private static final int RUNRECOVERY_EXCEPTION_MAX = 100;
+    /* Current write counter value. */
+    private int runRecoveryExceptionCounter = 0;
+    /* Whether an exception has been thrown. */
+    private boolean runRecoveryExceptionThrown = false;
+    /* Random number generator. */
+    private Random runRecoveryExceptionRandom = null;
+
+    private void generateRunRecoveryException(RandomAccessFile file,
+                                              ByteBuffer data,
+                                              long destOffset,
+                                              long fileNum)
+        throws DatabaseException, IOException {
+
+        if (runRecoveryExceptionThrown) {
+            try {
+                throw new Exception("Write after RunRecoveryException");
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+        runRecoveryExceptionCounter += 1;
+        if (runRecoveryExceptionCounter >= RUNRECOVERY_EXCEPTION_MAX) {
+            runRecoveryExceptionCounter = 0;
+        }
+        if (runRecoveryExceptionRandom == null) {
+            runRecoveryExceptionRandom = new Random(System.currentTimeMillis());
+        }
+        if (runRecoveryExceptionCounter ==
+            runRecoveryExceptionRandom.nextInt(RUNRECOVERY_EXCEPTION_MAX)) {
+            int len = runRecoveryExceptionRandom.nextInt(data.remaining());
+            if (len > 0) {
+                byte[] a = new byte[len];
+                data.get(a, 0, len);
+                ByteBuffer buf = ByteBuffer.wrap(a);
+                writeToFile(file, buf, destOffset, fileNum);
+            }
+            runRecoveryExceptionThrown = true;
+            throw new RunRecoveryException
+                (envImpl, "Randomly generated for testing");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileReader.java b/src/com/sleepycat/je/log/FileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..aa916fa9d0084fdc626c32221023b4a329520af7
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileReader.java
@@ -0,0 +1,892 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileReader.java,v 1.119.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * A FileReader is an abstract class that traverses the log files, reading in
+ * chunks of the file at a time. Concrete subclasses perform a particular
+ * action to each entry.
+ */
+public abstract class FileReader {
+
+    protected EnvironmentImpl envImpl;
+    protected FileManager fileManager;
+
+    /* Buffering reads */
+    private ByteBuffer readBuffer;   // buffer for reading from the file
+    private ByteBuffer saveBuffer;   // for piecing together data
+    private int maxReadBufferSize;   // read buffer can't grow larger than this
+
+    /* Managing the buffer reads */
+    private boolean singleFile;      // if true, do not read across files
+    protected boolean eof;           // true if at end of the log.
+                                     // TODO, assess whether this is redundant 
+                                     // with the EOFException, and could be
+                                     // streamlined.
+    private boolean forward;         // if true, we're reading forward
+
+    /*
+     * ReadBufferFileNum, readBufferFileStart and readBufferFileEnd indicate
+     * how the read buffer maps to the file. For example, if the read buffer
+     * size is 100 and the read buffer was filled from file 9, starting at byte
+     * 100, then
+     *          readBufferFileNum = 9
+     *          readBufferFileStart = 100
+     *          readBufferFileEnd = 200
+     */
+    protected long readBufferFileNum;  // file number we're pointing to
+    protected long readBufferFileStart;// file position that maps to buf start
+    protected long readBufferFileEnd;  // file position that maps to buf end
+
+    /* stats */
+    private int nRead;           // num entries we've seen
+
+    /*
+     * The number of times we've tried to read in a log entry that was too
+     * large for the read buffer.
+     */
+    private long nRepeatIteratorReads;
+
+    /* Number of reads since the last time getAndResetNReads was called. */
+    private int nReadOperations;
+
+    /* The log entry header for the entry that was just read. */
+    protected LogEntryHeader currentEntryHeader;
+
+    /*
+     * In general, currentEntryPrevOffset is the same as
+     * currentEntryHeader.getPrevOffset(), but it's initialized and used before
+     * a header is read.
+     */
+    protected long currentEntryPrevOffset;
+
+    /*
+     * nextEntryOffset is used to set the currentEntryOffset after we've read
+     * an entry.
+     */
+    protected long currentEntryOffset;
+    protected long nextEntryOffset;
+    protected long startLsn;  // We start reading from this LSN.
+    private long finishLsn;   // Stop reading at this LSN: inclusive going
+                              // backward, exclusive going forward.
+
+    /* For checking checksum on the read. */
+    protected ChecksumValidator cksumValidator;
+    private boolean doValidateChecksum;     // Validate checksums
+    private boolean alwaysValidateChecksum; // Validate for all entry types
+
+    /* True if this is the scavenger and we are expecting checksum issues. */
+    protected boolean anticipateChecksumErrors;
+
+    /**
+     * A FileReader just needs to know what size chunks to read in.
+     * @param endOfFileLsn indicates the end of the log file
+     */
+    public FileReader(EnvironmentImpl envImpl,
+                      int readBufferSize,
+                      boolean forward,
+                      long startLsn,
+                      Long singleFileNumber,
+                      long endOfFileLsn,
+                      long finishLsn)
+        throws IOException, DatabaseException {
+
+        this.envImpl = envImpl;
+        this.fileManager = envImpl.getFileManager();
+        this.doValidateChecksum = envImpl.getLogManager().getChecksumOnRead();
+
+        /* Allocate a read buffer. */
+        this.singleFile = (singleFileNumber != null);
+        this.forward = forward;
+
+        readBuffer = ByteBuffer.allocate(readBufferSize);
+        threadSafeBufferFlip(readBuffer);
+        saveBuffer = ByteBuffer.allocate(readBufferSize);
+
+        DbConfigManager configManager = envImpl.getConfigManager();
+        maxReadBufferSize =
+	    configManager.getInt(EnvironmentParams.LOG_ITERATOR_MAX_SIZE);
+
+        /* Determine the starting position. */
+        this.startLsn = startLsn;
+        this.finishLsn = finishLsn;
+        initStartingPosition(endOfFileLsn, singleFileNumber);
+
+        /* stats */
+        nRead = 0;
+        if (doValidateChecksum) {
+            cksumValidator = new ChecksumValidator();
+        }
+	anticipateChecksumErrors = false;
+    }
+
+    /**
+     * Helper for determining the starting position and opening up a file at
+     * the desired location.
+     */
+    protected void initStartingPosition(long endOfFileLsn,
+					Long ignoreSingleFileNumber)
+        throws IOException, DatabaseException {
+
+        eof = false;
+        if (forward) {
+
+            /*
+             * Start off at the startLsn. If that's null, start at the
+             * beginning of the log. If there are no log files, set eof.
+             */
+            if (startLsn != DbLsn.NULL_LSN) {
+                readBufferFileNum = DbLsn.getFileNumber(startLsn);
+                readBufferFileEnd = DbLsn.getFileOffset(startLsn);
+            } else {
+                Long firstNum = fileManager.getFirstFileNum();
+                if (firstNum == null) {
+                    eof = true;
+                } else {
+                    readBufferFileNum = firstNum.longValue();
+                    readBufferFileEnd = 0;
+                }
+            }
+
+            /*
+             * After we read the first entry, the currentEntry will point here.
+             */
+            nextEntryOffset = readBufferFileEnd;
+        } else {
+
+            /*
+             * Make the read buffer look like it's positioned off the end of
+             * the file. Initialize the first LSN we want to read. When
+             * traversing the log backwards, we always start at the very end.
+             */
+            assert startLsn != DbLsn.NULL_LSN;
+            readBufferFileNum = DbLsn.getFileNumber(endOfFileLsn);
+            readBufferFileStart = DbLsn.getFileOffset(endOfFileLsn);
+            readBufferFileEnd = readBufferFileStart;
+
+            /*
+             * currentEntryPrevOffset points to the entry we want to start out
+             * reading when going backwards. If it's 0, the entry we want to
+             * read is in a different file.
+             */
+            if (DbLsn.getFileNumber(startLsn) ==
+		DbLsn.getFileNumber(endOfFileLsn)) {
+                currentEntryPrevOffset = DbLsn.getFileOffset(startLsn);
+            } else {
+                currentEntryPrevOffset = 0;
+            }
+            currentEntryOffset = DbLsn.getFileOffset(endOfFileLsn);
+        }
+    }
+
+    /**
+     * Whether to always validate the checksum, even for non-target entries.
+     */
+    public void setAlwaysValidateChecksum(boolean validate) {
+        alwaysValidateChecksum = validate;
+    }
+
+    /**
+     * @return the number of entries processed by this reader.
+     */
+    public int getNumRead() {
+        return nRead;
+    }
+
+    public long getNRepeatIteratorReads() {
+        return nRepeatIteratorReads;
+    }
+
+    /**
+     * Get LSN of the last entry read.
+     */
+    public long getLastLsn() {
+        return DbLsn.makeLsn(readBufferFileNum, currentEntryOffset);
+    }
+
+    /**
+     * Returns the total size (including header) of the last entry read.
+     */
+    public int getLastEntrySize() {
+        return currentEntryHeader.getSize() + currentEntryHeader.getItemSize();
+    }
+
+    /**
+     * readNextEntry scans the log files until it either finds the next
+     * entry or reaches the end of the log (or an invalid portion), in which
+     * case it returns false.
+     *
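+     * <p>Typical driving loop (sketch only; "reader" stands for any
+     * concrete FileReader subclass):
+     * <pre>
+     *    while (reader.readNextEntry()) {
+     *        // act on the entry that processEntry() accepted
+     *    }
+     * </pre>
+     *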
+     * @return true if an element has been read
+     */
+    public boolean readNextEntry()
+        throws DatabaseException, IOException {
+
+        boolean foundEntry = false;
+        try {
+            while ((!eof) && (!foundEntry)) {
+
+                /* Read the invariant portion of the next header. */
+                getLogEntryInReadBuffer();
+                ByteBuffer dataBuffer =
+                    readData(LogEntryHeader.MIN_HEADER_SIZE,
+                             true); // collectData
+
+                readBasicHeader(dataBuffer);
+
+                boolean isTarget = isTargetEntry();
+                boolean doValidate = doValidateChecksum &&
+                    (isTarget || alwaysValidateChecksum);
+                boolean collectData = doValidate || isTarget;
+
+                /*
+                 * Init the checksum with the invariant portion of the header.
+                 * This has to be done before we read the variable portion of
+                 * the header, because readData() only guarantees that it
+                 * returns a dataBuffer containing the next bytes that are
+                 * needed, with no guarantee that it holds any bytes that were
+                 * previously read. The act of calling readData() to obtain
+                 * the optional portion may reset the dataBuffer and nudge the
+                 * invariant part of the header out of the buffer returned by
+                 * readData().
+                 */
+                if (doValidate) {
+                    startChecksum(dataBuffer);
+                }
+
+                if (currentEntryHeader.getReplicated()) {
+                    int optionalPortionLen =
+                        currentEntryHeader.getVariablePortionSize();
+                    /* Load the optional part of the header into a buffer. */
+                    dataBuffer = readData(optionalPortionLen,
+                                          true);
+                    if (doValidate) {
+                        /*
+                         * Add to checksum while the buffer is positioned at
+                         * the start of the new bytes.
+                         */
+                        cksumValidator.update(envImpl,
+                                              dataBuffer,
+                                              optionalPortionLen,
+                                              anticipateChecksumErrors);
+                    }
+
+                    /* Now read the optional bytes. */
+                    currentEntryHeader.readVariablePortion(dataBuffer);
+                }
+
+                /*
+                 * Read in the body of the next entry. Note that even if this
+                 * isn't a targeted entry, we have to move the buffer position
+                 * along.
+                 */
+                dataBuffer = readData(currentEntryHeader.getItemSize(),
+                                      collectData);
+
+                /*
+                 * We've read an entry. Move up our offsets if we're moving
+                 * forward. If we're moving backwards, we set our offset before
+                 * we read the header, because we knew where the entry started.
+                 */
+                if (forward) {
+                    currentEntryOffset = nextEntryOffset;
+                    nextEntryOffset +=
+                        currentEntryHeader.getSize() +       // header size
+                        currentEntryHeader.getItemSize();    // item size
+                }
+
+                /* Validate the log entry checksum. */
+                if (doValidate) {
+                    validateChecksum(dataBuffer);
+                }
+
+                if (isTarget) {
+
+                    /*
+                     * For a target entry, call the subclass reader's
+                     * processEntry method to do whatever we need with the
+                     * entry.  It returns true if this entry is one that should
+                     * be returned.  Note that some entries, although targeted
+                     * and read, are not returned.
+                     */
+                    if (processEntry(dataBuffer)) {
+                        foundEntry = true;
+                        nRead++;
+                    }
+                } else if (collectData) {
+
+                    /*
+                     * For a non-target entry that was validated, the buffer is
+                     * positioned at the start of the entry; skip over it.
+                     */
+                    threadSafeBufferPosition
+                        (dataBuffer,
+                         threadSafeBufferPosition(dataBuffer) +
+                         currentEntryHeader.getItemSize());
+                }
+            }
+        } catch (EOFException e) {
+            eof = true;
+        } catch (DatabaseException e) {
+            eof = true;
+            /* Report on error. */
+            if (currentEntryHeader != null) {
+                LogEntryType problemType =
+                    LogEntryType.findType(currentEntryHeader.getType());
+                Tracer.trace(envImpl, "FileReader", "readNextEntry",
+                             "Halted log file reading at file 0x" +
+                             Long.toHexString(readBufferFileNum) +
+                             " offset 0x" +
+                             Long.toHexString(nextEntryOffset) +
+                             " offset(decimal)=" + nextEntryOffset +
+                             ":\nentry="+ problemType +
+                             "(typeNum=" + currentEntryHeader.getType() +
+                             ",version=" + currentEntryHeader.getVersion() +
+                             ")\nprev=0x" +
+                             Long.toHexString(currentEntryPrevOffset) +
+                             "\nsize=" + currentEntryHeader.getItemSize() +
+                             "\nNext entry should be at 0x" +
+                             Long.toHexString((nextEntryOffset +
+                                           currentEntryHeader.getSize() +
+                                           currentEntryHeader.getItemSize())) +
+                             "\n:", e);
+            } else {
+                Tracer.trace(envImpl, "FileReader", "readNextEntry",
+                             "Halted log file reading at file 0x" +
+                             Long.toHexString(readBufferFileNum) +
+                             " offset 0x" +
+                             Long.toHexString(nextEntryOffset) +
+                             " offset(decimal)=" + nextEntryOffset +
+                             " prev=0x" +
+                             Long.toHexString(currentEntryPrevOffset),
+                             e);
+            }
+            throw e;
+        }
+        return foundEntry;
+    }
+
+    protected boolean resyncReader(long nextGoodRecordPostCorruption,
+				   boolean dumpCorruptedBounds)
+	throws DatabaseException, IOException {
+
+	/* Resync not allowed for straight FileReader runs. */
+	return false;
+    }
+
+    /**
+     * Make sure that the start of the target log entry is in the read
+     * buffer. This is a no-op if we're reading forward.
+     */
+    private void getLogEntryInReadBuffer()
+        throws IOException, DatabaseException, EOFException {
+
+        /*
+         * If we're going forward, because we read every byte sequentially,
+         * we're always sure the read buffer is positioned at the right spot.
+         * If we go backwards, we need to jump the buffer position.
+         */
+        if (!forward) {
+
+            /*
+             * currentEntryPrevOffset is the entry before the current entry.
+             * currentEntryOffset is the entry we just read (or the end of the
+             * file if we're starting out).
+             */
+            if ((currentEntryPrevOffset != 0) &&
+                (currentEntryPrevOffset >= readBufferFileStart)) {
+
+                /* The next log entry has passed the start LSN. */
+                long nextLsn = DbLsn.makeLsn(readBufferFileNum,
+					     currentEntryPrevOffset);
+                if (finishLsn != DbLsn.NULL_LSN) {
+                    if (DbLsn.compareTo(nextLsn, finishLsn) == -1) {
+                        throw new EOFException();
+                    }
+                }
+
+                /* This log entry starts in this buffer, just reposition. */
+		threadSafeBufferPosition(readBuffer,
+					 (int) (currentEntryPrevOffset -
+						readBufferFileStart));
+            } else {
+
+		/*
+		 * If the start of the log entry is not in this read buffer,
+		 * fill the buffer again. If the target log entry is in a
+		 * different file from the current read buffer file, just start
+		 * the read from the target LSN. If the target log entry is the
+		 * same file but the log entry is larger than the read chunk
+		 * size, also start the next read buffer from the target
+		 * LSN. Otherwise, try to position the next buffer chunk so the
+		 * target entry is held within the buffer, all the way at the
+		 * end.
+		 */
+                if (currentEntryPrevOffset == 0) {
+                    /* Go to another file. */
+                    currentEntryPrevOffset =
+                        fileManager.getFileHeaderPrevOffset(readBufferFileNum);
+                    Long prevFileNum =
+                        fileManager.getFollowingFileNum(readBufferFileNum,
+                                                        false);
+                    if (prevFileNum == null) {
+                        throw new EOFException();
+                    }
+                    if (readBufferFileNum - prevFileNum.longValue() != 1) {
+                        if (!resyncReader(DbLsn.makeLsn
+                                          (prevFileNum.longValue(),
+                                           DbLsn.MAX_FILE_OFFSET),
+                                          false)) {
+                            throw new DatabaseException
+                                ("Cannot read backward over cleaned file" +
+                                 " from " + readBufferFileNum +
+                                 " to " + prevFileNum);
+                        }
+                    }
+                    readBufferFileNum = prevFileNum.longValue();
+                    readBufferFileStart = currentEntryPrevOffset;
+                } else if ((currentEntryOffset - currentEntryPrevOffset) >
+                           readBuffer.capacity()) {
+
+                    /*
+		     * The entry is in the same file, but is bigger than one
+		     * buffer.
+		     */
+                    readBufferFileStart = currentEntryPrevOffset;
+                } else {
+
+                    /* In same file, but not in this buffer. */
+                    long newPosition = currentEntryOffset -
+                        readBuffer.capacity();
+                    readBufferFileStart = (newPosition < 0) ? 0 : newPosition;
+                }
+
+                /* The next log entry has passed the start LSN. */
+                long nextLsn = DbLsn.makeLsn(readBufferFileNum,
+					     currentEntryPrevOffset);
+                if (finishLsn != DbLsn.NULL_LSN) {
+                    if (DbLsn.compareTo(nextLsn, finishLsn) == -1) {
+                        throw new EOFException();
+                    }
+                }
+
+                /*
+                 * Now that we've set readBufferFileNum and
+                 * readBufferFileStart, do the read.
+                 */
+                FileHandle fileHandle =
+                    fileManager.getFileHandle(readBufferFileNum);
+                try {
+                    readBuffer.clear();
+                    fileManager.readFromFile(fileHandle.getFile(), readBuffer,
+                                             readBufferFileStart,
+                                             fileHandle.getFileNum());
+                    nReadOperations += 1;
+
+		    assert EnvironmentImpl.maybeForceYield();
+                } finally {
+                    fileHandle.release();
+                }
+                readBufferFileEnd = readBufferFileStart +
+                    threadSafeBufferPosition(readBuffer);
+                threadSafeBufferFlip(readBuffer);
+		threadSafeBufferPosition(readBuffer,
+					 (int) (currentEntryPrevOffset -
+						readBufferFileStart));
+            }
+
+            /* The current entry will start at this offset. */
+            currentEntryOffset = currentEntryPrevOffset;
+        } else {
+
+            /*
+             * Going forward. If an end point has been specified, check
+             * whether we've gone past it.
+             */
+	    if (finishLsn != DbLsn.NULL_LSN) {
+		/* The next log entry has passed the end LSN. */
+		long nextLsn = DbLsn.makeLsn(readBufferFileNum,
+					     nextEntryOffset);
+		if (DbLsn.compareTo(nextLsn, finishLsn) >= 0) {
+		    throw new EOFException();
+		}
+	    }
+	}
+    }
+
+    /**
+     * Read the basic log entry header, leaving the buffer mark at the
+     * beginning of the checksummed header data.
+     */
+    private void readBasicHeader(ByteBuffer dataBuffer)
+        throws DatabaseException  {
+
+        /* Read the header for this entry. */
+        currentEntryHeader =
+            new LogEntryHeader(envImpl, dataBuffer, anticipateChecksumErrors);
+
+        /*
+         * currentEntryPrevOffset is a separate field, and is not obtained
+         * directly from the currentEntryHeader, because it is initialized and
+         * used before any log entry was read.
+         */
+        currentEntryPrevOffset = currentEntryHeader.getPrevOffset();
+    }
+
+    /**
+     * Reset the checksum validator and add the new header bytes. Assumes that
+     * the data buffer is positioned just past the end of the invariant
+     * portion of the log entry header.
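+     *
+     * <p>Schematically (field widths are illustrative; per the comment in
+     * the method body, the checksum covers everything after itself, and this
+     * method adds only the invariant remainder of the header):
+     * <pre>
+     *    [checksum][rest of invariant header][variable portion][item]
+     *              |------ added here ------|
+     *                                        ^ buffer position on entry/exit
+     * </pre>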
+     */
+    private void startChecksum(ByteBuffer dataBuffer)
+        throws DatabaseException {
+
+        /* Clear out any previous data. */
+        cksumValidator.reset();
+
+        /*
+         * Move back up to the beginning of the portion of the log entry header
+         * covered by the checksum. That's everything after the checksum
+         * itself.
+         */
+        int originalPosition = threadSafeBufferPosition(dataBuffer);
+        int headerSizeMinusChecksum =
+            currentEntryHeader.getInvariantSizeMinusChecksum();
+        threadSafeBufferPosition(dataBuffer,
+                                 originalPosition-headerSizeMinusChecksum);
+        cksumValidator.update(envImpl,
+                              dataBuffer,
+                              headerSizeMinusChecksum,
+                              anticipateChecksumErrors);
+
+        /* Move the data buffer back to the original position. */
+        threadSafeBufferPosition(dataBuffer, originalPosition);
+    }
+
+    /**
+     * Add the entry bytes to the checksum and check the value.  This method
+     * must be called with the buffer positioned at the start of the entry.
+     */
+    private void validateChecksum(ByteBuffer dataBuffer)
+        throws DatabaseException {
+
+        cksumValidator.update(envImpl,
+                              dataBuffer,
+                              currentEntryHeader.getItemSize(),
+			      anticipateChecksumErrors);
+        cksumValidator.validate(envImpl,
+                                currentEntryHeader.getChecksum(),
+				readBufferFileNum,
+                                currentEntryOffset,
+				anticipateChecksumErrors);
+    }
+
+    /**
+     * Try to read a specified number of bytes.
+     * @param amountToRead is the number of bytes we need
+     * @param collectData is true if we need to actually look at the data.
+     *  If false, we know we're skipping this entry, and all we need to
+     *  do is count until we get to the right spot.
+     * @return a byte buffer positioned at the head of the desired portion,
+     * or null if we reached eof.
+     */
+    private ByteBuffer readData(int amountToRead, boolean collectData)
+        throws IOException, DatabaseException, EOFException {
+
+        int alreadyRead = 0;
+        ByteBuffer completeBuffer = null;
+        saveBuffer.clear();
+
+        while ((alreadyRead < amountToRead) && !eof) {
+
+            int bytesNeeded = amountToRead - alreadyRead;
+            if (readBuffer.hasRemaining()) {
+
+                /* There's data in the read buffer, process it. */
+                if (collectData) {
+
+                    /*
+                     * Save data in a buffer for processing.
+                     */
+                    if ((alreadyRead > 0) ||
+                        (readBuffer.remaining() < bytesNeeded)) {
+
+                        /* We need to piece an entry together. */
+                        copyToSaveBuffer(bytesNeeded);
+                        alreadyRead = threadSafeBufferPosition(saveBuffer);
+                        completeBuffer = saveBuffer;
+                    } else {
+
+                        /* A complete entry is available in this buffer. */
+                        completeBuffer = readBuffer;
+                        alreadyRead = amountToRead;
+                    }
+                } else {
+
+                    /*
+                     * No need to save data, just move buffer positions.
+                     */
+                    int positionIncrement =
+                        (readBuffer.remaining() > bytesNeeded) ?
+                        bytesNeeded : readBuffer.remaining();
+
+                    alreadyRead += positionIncrement;
+		    threadSafeBufferPosition
+			(readBuffer,
+			 threadSafeBufferPosition(readBuffer) +
+			 positionIncrement);
+                    completeBuffer = readBuffer;
+                }
+            } else {
+
+                /*
+                 * Look for more data.
+                 */
+                fillReadBuffer(bytesNeeded);
+            }
+        }
+
+        /* Flip the save buffer just in case we've been accumulating in it. */
+        threadSafeBufferFlip(saveBuffer);
+
+        return completeBuffer;
+    }
+
+    /**
+     * Change the read buffer size if we start hitting large log entries so we
+     * don't get into an expensive cycle of multiple reads and piecing together
+     * of log entries.
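+     *
+     * <p>Worked example (numbers are illustrative): with a 1024-byte buffer
+     * and a 16K maximum, reading a 2500-byte entry grows the buffer to 3072
+     * bytes, i.e. 2500 rounded up to the next 1K multiple.  An amount that
+     * is already a 1K multiple (say 2048) still grows by a further 1K, to
+     * 3072, because the rounding below always adds 1024 - remainder, which
+     * is a full 1K when the amount is already aligned.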
+     */
+    private void adjustReadBufferSize(int amountToRead) {
+        int readBufferSize = readBuffer.capacity();
+        /* We need to read something larger than the current buffer size. */
+        if (amountToRead > readBufferSize) {
+            /* We're not at the max yet. */
+            if (readBufferSize < maxReadBufferSize) {
+
+                /*
+                 * Make the buffer the minimum of amountToRead (rounded up
+                 * to a 1K multiple) and maxReadBufferSize.
+                 */
+                if (amountToRead < maxReadBufferSize) {
+                    readBufferSize = amountToRead;
+                    /* Make it a multiple of 1K. */
+                    int remainder = readBufferSize % 1024;
+                    readBufferSize += 1024 - remainder;
+                    readBufferSize = Math.min(readBufferSize,
+                                              maxReadBufferSize);
+                } else {
+                    readBufferSize = maxReadBufferSize;
+                }
+                readBuffer = ByteBuffer.allocate(readBufferSize);
+            }
+
+            if (amountToRead > readBuffer.capacity()) {
+                nRepeatIteratorReads++;
+            }
+        }
+    }
+
+    /**
+     * Copy the required number of bytes into the save buffer.
+     */
+    private void copyToSaveBuffer(int bytesNeeded) {
+        /* How much can we get from this current read buffer? */
+        int bytesFromThisBuffer;
+
+        if (bytesNeeded <= readBuffer.remaining()) {
+            bytesFromThisBuffer = bytesNeeded;
+        } else {
+            bytesFromThisBuffer = readBuffer.remaining();
+        }
+
+        /* Gather it all into this save buffer. */
+        ByteBuffer temp;
+
+        /* Make sure the save buffer is big enough. */
+        if (saveBuffer.capacity() - threadSafeBufferPosition(saveBuffer) <
+            bytesFromThisBuffer) {
+            /* Grow the save buffer. */
+            temp = ByteBuffer.allocate(saveBuffer.capacity() +
+                                       bytesFromThisBuffer);
+            threadSafeBufferFlip(saveBuffer);
+            temp.put(saveBuffer);
+            saveBuffer = temp;
+        }
+
+        /*
+         * Bulk copy only the required section from the read buffer into the
+         * save buffer. We need from readBuffer.position() to
+         * readBuffer.position() + bytesFromThisBuffer
+         */
+        temp = readBuffer.slice();
+        temp.limit(bytesFromThisBuffer);
+        saveBuffer.put(temp);
+	threadSafeBufferPosition(readBuffer,
+				 threadSafeBufferPosition(readBuffer) +
+				 bytesFromThisBuffer);
+    }
+
+    /**
+     * Fill up the read buffer with more data.
+     */
+    private void fillReadBuffer(int bytesNeeded)
+	throws DatabaseException, EOFException {
+
+        FileHandle fileHandle = null;
+        try {
+            adjustReadBufferSize(bytesNeeded);
+
+            /* Get a file handle to read in more log. */
+            fileHandle = fileManager.getFileHandle(readBufferFileNum);
+            boolean fileOk = false;
+
+            /*
+             * Check to see if we've come to the end of the file.  If so, get
+             * the next file.
+             */
+            if (readBufferFileEnd < fileHandle.getFile().length()) {
+                fileOk = true;
+            } else {
+                /* This file is done -- can we read in the next file? */
+                if (!singleFile) {
+                    Long nextFile =
+                        fileManager.getFollowingFileNum(readBufferFileNum,
+                                                        forward);
+                    if (nextFile != null) {
+                        readBufferFileNum = nextFile.longValue();
+                        fileHandle.release();
+                        fileHandle =
+                            fileManager.getFileHandle(readBufferFileNum);
+                        fileOk = true;
+                        readBufferFileEnd = 0;
+                        nextEntryOffset = 0;
+                    }
+                }
+            }
+
+            if (fileOk) {
+                readBuffer.clear();
+		fileManager.readFromFile(fileHandle.getFile(), readBuffer,
+                                         readBufferFileEnd,
+                                         fileHandle.getFileNum());
+                nReadOperations += 1;
+
+		assert EnvironmentImpl.maybeForceYield();
+
+                readBufferFileStart = readBufferFileEnd;
+                readBufferFileEnd =
+		    readBufferFileStart + threadSafeBufferPosition(readBuffer);
+                threadSafeBufferFlip(readBuffer);
+            } else {
+                throw new EOFException();
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+            throw new DatabaseException
+		("Problem in fillReadBuffer, readBufferFileNum = " +
+		 readBufferFileNum + ": " + e.getMessage());
+
+        } finally {
+            if (fileHandle != null) {
+                fileHandle.release();
+            }
+        }
+    }
+
+    /**
+     * Returns the number of reads since the last time this method was called.
+     */
+    public int getAndResetNReads() {
+        int tmp = nReadOperations;
+        nReadOperations = 0;
+        return tmp;
+    }
+
+    /**
+     * @return true if this reader should process this entry, or just
+     * skip over it.
+     */
+    protected boolean isTargetEntry()
+        throws DatabaseException {
+
+        return true;
+    }
+
+    /**
+     * Each file reader implements this method to process the entry data.
+     * @param entryBuffer contains the entry data and is positioned at the
+     * data
+     * @return true if this entry should be returned
+     */
+    protected abstract boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException;
+
+    /**
+     * Never seen by user, used to indicate that the file reader should stop.
+     */
+    @SuppressWarnings("serial")
+    private static class EOFException extends Exception {
+    }
+
+    /**
+     * Note that we catch Exception here because it is possible that another
+     * thread is modifying the state of buffer simultaneously.  Specifically,
+     * this can happen if another thread is writing this log buffer out and it
+     * does (e.g.) a flip operation on it.  The actual mark/pos of the buffer
+     * may be caught in an unpredictable state.  We could add another latch to
+     * protect this buffer, but that's heavier weight than we need.  So the
+     * easiest thing to do is to just retry the operation.  See
+     * [#9822].
+     */
+    private Buffer threadSafeBufferFlip(ByteBuffer buffer) {
+	while (true) {
+	    try {
+		return buffer.flip();
+	    } catch (IllegalArgumentException IAE) {
+		continue;
+	    }
+	}
+    }
+
+    int threadSafeBufferPosition(ByteBuffer buffer) {
+	while (true) {
+	    try {
+		return buffer.position();
+	    } catch (IllegalArgumentException IAE) {
+		continue;
+	    }
+	}
+    }
+
+    Buffer threadSafeBufferPosition(ByteBuffer buffer,
+				    int newPosition) {
+        assert (newPosition >= 0) : "illegal new position=" + newPosition;
+	while (true) {
+	    try {
+		return buffer.position(newPosition);
+	    } catch (IllegalArgumentException IAE) {
+		if (newPosition > buffer.capacity()) {
+		    throw IAE;
+		}
+		continue;
+	    }
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileSource.java b/src/com/sleepycat/je/log/FileSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..95c7c66cf974de535dd05f03fc05aefec6dfa40f
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileSource.java
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileSource.java,v 1.37.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * FileSource is used as a channel to a log file when faulting in objects
+ * from the log.
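+ *
+ * <p>Sketch of typical use through the LogSource interface (illustrative
+ * only; fileOffset and numBytes are placeholder values):
+ * <pre>
+ *    LogSource source = ...; // e.g. handed out by the LogManager
+ *    try {
+ *        ByteBuffer entryBytes = source.getBytes(fileOffset, numBytes);
+ *        // unmarshal the log entry from entryBytes
+ *    } finally {
+ *        source.release();
+ *    }
+ * </pre>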
+ */
+class FileSource implements LogSource {
+
+    private RandomAccessFile file;
+    private int readBufferSize;
+    private FileManager fileManager;
+    private long fileNum;
+
+    FileSource(RandomAccessFile file,
+	       int readBufferSize,
+	       FileManager fileManager,
+               long fileNum) {
+        this.file = file;
+        this.readBufferSize = readBufferSize;
+	this.fileManager = fileManager;
+        this.fileNum = fileNum;
+    }
+
+    /**
+     * @see LogSource#release
+     */
+    public void release()
+        throws DatabaseException {
+    }
+
+    /**
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset)
+        throws DatabaseException, IOException {
+
+        /* Fill up buffer from file. */
+        ByteBuffer destBuf = ByteBuffer.allocate(readBufferSize);
+        fileManager.readFromFile(file, destBuf, fileOffset, fileNum);
+
+	assert EnvironmentImpl.maybeForceYield();
+
+        destBuf.flip();
+        return destBuf;
+    }
+
+    /**
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset, int numBytes)
+        throws DatabaseException, IOException {
+
+        /* Fill up buffer from file. */
+        ByteBuffer destBuf = ByteBuffer.allocate(numBytes);
+        fileManager.readFromFile(file, destBuf, fileOffset, fileNum);
+
+	assert EnvironmentImpl.maybeForceYield();
+
+        destBuf.flip();
+
+        assert destBuf.remaining() >= numBytes:
+            "remaining=" + destBuf.remaining() +
+            " numBytes=" + numBytes;
+        return destBuf;
+    }
+}
diff --git a/src/com/sleepycat/je/log/INFileReader.java b/src/com/sleepycat/je/log/INFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..326d175ced51208cb139db7e9335c4e63dde3ee8
--- /dev/null
+++ b/src/com/sleepycat/je/log/INFileReader.java
@@ -0,0 +1,715 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INFileReader.java,v 1.72.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.RecoveryUtilizationTracker;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.INContainingEntry;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.NodeLogEntry;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.INDeleteInfo;
+import com.sleepycat.je.tree.INDupDeleteInfo;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.FileMapper;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * INFileReader supports recovery by scanning log files during the IN rebuild
+ * pass. It looks for internal nodes (all types), segregated by whether they
+ * belong to the main tree or the duplicate trees.
+ *
+ * <p>This file reader can also be run in tracking mode to keep track of the
+ * maximum node id, database id and txn id seen so those sequences can be
+ * updated properly at recovery.  In this mode it also performs utilization
+ * counting.  It is only run once in tracking mode per recovery, in the
+ * first phase of recovery.</p>
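+ *
+ * <p>A sketch of how the recovery tracking pass might construct and drive
+ * this reader (argument values and variable names are illustrative only):
+ * <pre>
+ *    INFileReader reader = new INFileReader
+ *        (envImpl, readBufferSize, firstLsn, DbLsn.NULL_LSN,
+ *         true,   // trackIds
+ *         false,  // mapDbOnly
+ *         partialCkptStartLsn, ckptEndLsn, tracker);
+ *    reader.addTargetType(LogEntryType.LOG_IN);
+ *    while (reader.readNextEntry()) {
+ *        // use the IN just read to rebuild the in-memory tree
+ *    }
+ * </pre>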
+ */
+public class INFileReader extends FileReader {
+
+    /* Information about the last entry seen. */
+    private boolean lastEntryWasDelete;
+    private boolean lastEntryWasDupDelete;
+    private LogEntryType fromLogType;
+    private boolean isProvisional;
+
+    /*
+     * targetEntryMap maps LogEntryTypes to log entries. We use this
+     * collection to find the right LogEntry instance to read in the
+     * current entry.
+     */
+    private Map<LogEntryType, LogEntry> targetEntryMap;
+    private LogEntry targetLogEntry;
+
+    /*
+     * For tracking non-target log entries.
+     * Note that dbIdTrackingEntry and txnIdTrackingEntry do not overlap with
+     * targetLogEntry, since the former are LNs and the latter are INs.
+     * But nodeTrackingEntry and inTrackingEntry can overlap with the others,
+     * and we only load one of them when they do overlap.
+     */
+    private Map<LogEntryType, LogEntry> dbIdTrackingMap;
+    private LNLogEntry dbIdTrackingEntry;
+    private Map<LogEntryType, LogEntry> txnIdTrackingMap;
+    private LNLogEntry txnIdTrackingEntry;
+    private Map<LogEntryType, NodeLogEntry> otherNodeTrackingMap;
+    private NodeLogEntry nodeTrackingEntry;
+    private INLogEntry inTrackingEntry;
+    private LNLogEntry fsTrackingEntry;
+
+    /*
+     * If trackIds is true, peruse all node entries for the maximum
+     * node id, check all MapLNs for the maximum db id, and check all
+     * LNs for the maximum txn id.
+     */
+    private boolean trackIds;
+    private long minReplicatedNodeId;
+    private long maxNodeId;
+    private int minReplicatedDbId;
+    private int maxDbId;
+    private long minReplicatedTxnId;
+    private long maxTxnId;
+    private boolean mapDbOnly;
+    private long ckptEnd;
+
+    /* Used for utilization tracking. */
+    private long partialCkptStart;
+    private RecoveryUtilizationTracker tracker;
+
+    /* Used for replication. */
+    private Map<Long,FileMapper> fileMappers;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public INFileReader(EnvironmentImpl env,
+                        int readBufferSize,
+                        long startLsn,
+                        long finishLsn,
+                        boolean trackIds,
+                        boolean mapDbOnly,
+                        long partialCkptStart,
+                        long ckptEnd,
+                        RecoveryUtilizationTracker tracker)
+        throws IOException, DatabaseException {
+
+        super(env, readBufferSize, true, startLsn, null,
+              DbLsn.NULL_LSN, finishLsn);
+
+        this.trackIds = trackIds;
+        this.mapDbOnly = mapDbOnly;
+        this.ckptEnd = ckptEnd;
+        targetEntryMap = new HashMap<LogEntryType, LogEntry>();
+
+        if (trackIds) {
+            maxNodeId = 0;
+            maxDbId = 0;
+            maxTxnId = 0;
+            minReplicatedNodeId = 0;
+            minReplicatedDbId = DbTree.NEG_DB_ID_START;
+            minReplicatedTxnId = 0;
+            this.tracker = tracker;
+            this.partialCkptStart = partialCkptStart;
+
+            dbIdTrackingMap = new HashMap<LogEntryType, LogEntry>();
+            txnIdTrackingMap = new HashMap<LogEntryType, LogEntry>();
+            otherNodeTrackingMap = new HashMap<LogEntryType, NodeLogEntry>();
+
+            dbIdTrackingMap.put(LogEntryType.LOG_MAPLN_TRANSACTIONAL,
+                                LogEntryType.LOG_MAPLN_TRANSACTIONAL.
+                                getNewLogEntry());
+            dbIdTrackingMap.put(LogEntryType.LOG_MAPLN,
+                                LogEntryType.LOG_MAPLN.getNewLogEntry());
+            txnIdTrackingMap.put(LogEntryType.LOG_LN_TRANSACTIONAL,
+                                 LogEntryType.LOG_LN_TRANSACTIONAL.
+                                 getNewLogEntry());
+            txnIdTrackingMap.put(LogEntryType.LOG_MAPLN_TRANSACTIONAL,
+                                 LogEntryType.LOG_MAPLN_TRANSACTIONAL.
+                                 getNewLogEntry());
+            txnIdTrackingMap.put(LogEntryType.LOG_NAMELN_TRANSACTIONAL,
+                                 LogEntryType.LOG_NAMELN_TRANSACTIONAL.
+                                 getNewLogEntry());
+            txnIdTrackingMap.put(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL,
+                                 LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL.
+                                 getNewLogEntry());
+            txnIdTrackingMap.put(LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL,
+                                 LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL.
+                                 getNewLogEntry());
+
+            fileMappers = new HashMap<Long,FileMapper>();
+        }
+    }
+
+    /**
+     * Configure this reader to target this kind of entry.
+     */
+    public void addTargetType(LogEntryType entryType)
+        throws DatabaseException {
+
+        targetEntryMap.put(entryType, entryType.getNewLogEntry());
+    }
+
+    /*
+     * Utilization Tracking
+     * --------------------
+     * This class counts all new log entries and obsolete INs.  Obsolete LNs,
+     * on the other hand, are counted by RecoveryManager undo/redo.
+     *
+     * Utilization counting is done in the first recovery pass where IDs are
+     * tracked (trackIds=true).  Processing is split between isTargetEntry
+     * and processEntry as follows.
+     *
+     * isTargetEntry counts only new non-node entries; this can be done very
+     * efficiently using only the LSN and entry type, without reading and
+     * unmarshalling the entry.  isTargetEntry also sets up several
+     * xxxTrackingEntry fields for utilization counting in processEntry.
+     *
+     * processEntry counts new node entries and obsolete INs.  processEntry is
+     * optimized to do a partial load (readEntry with readFullItem=false) of
+     * entries that are not the target entry and only need to be scanned for a
+     * transaction id, node id, or owning database id.  In these cases it
+     * returns false, so that readNextEntry will not return a partially loaded
+     * entry to the RecoveryManager.  For example, a provisional IN will be
+     * partially loaded since only the node ID, database ID and obsolete LSN
+     * properties are needed for tracking.
+     *
+     * processEntry also resets (sets all counters to zero and clears obsolete
+     * offsets) the tracked summary for a file or database when a FileSummaryLN
+     * or MapLN is encountered.  This clears the totals that have accumulated
+     * during this recovery pass for entries prior to that point.  We only want
+     * to count utilization for entries after that point.
+     *
+     * In addition, when processEntry encounters a FileSummaryLN or MapLN, its
+     * LSN is recorded in the tracker.  This information is used during IN and
+     * LN utilization counting.  For each file, knowing the LSN of the last
+     * logged FileSummaryLN for that file allows the undo/redo code to know
+     * whether to perform obsolete counting.  If the LSN of the FileSummaryLN is
+     * less than (to the left of) the LN's LSN, obsolete counting should be
+     * performed.  If it is greater, obsolete counting is already included in
+     * the logged FileSummaryLN and should not be repeated to prevent double
+     * counting.  The same thing is true of counting per-database utilization
+     * relative to the LSN of the last logged MapLN.
+     */
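+
+    /*
+     * Sketch of the last-logged-LSN rule described above (illustrative
+     * pseudocode only; "fileSummaryLsn" and "lnLsn" are hypothetical names
+     * for the two LSNs being compared):
+     *
+     *    if (DbLsn.compareTo(fileSummaryLsn, lnLsn) < 0) {
+     *        // FileSummaryLN was logged before this LN: count the LN's
+     *        // previous version obsolete here.
+     *    } else {
+     *        // Already reflected in the logged FileSummaryLN: skip, to
+     *        // avoid double counting.
+     *    }
+     */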
+
+    /**
+     * If we're tracking node, database and txn ids, we want to see all node
+     * log entries. If not, we only want to see IN entries.
+     * @return true if this is an IN entry.
+     */
+    @Override
+    protected boolean isTargetEntry()
+        throws DatabaseException {
+
+        lastEntryWasDelete = false;
+        lastEntryWasDupDelete = false;
+        targetLogEntry = null;
+        dbIdTrackingEntry = null;
+        txnIdTrackingEntry = null;
+        nodeTrackingEntry = null;
+        inTrackingEntry = null;
+        fsTrackingEntry = null;
+        isProvisional = currentEntryHeader.getProvisional().isProvisional
+            (getLastLsn(), ckptEnd);
+
+        /* Get the log entry type instance we need to read the entry. */
+        fromLogType = LogEntryType.findType(currentEntryHeader.getType());
+        LogEntry possibleTarget = targetEntryMap.get(fromLogType);
+
+        /*
+         * If the entry is provisional, we won't be reading it in its entirety;
+         * otherwise, we try to establish targetLogEntry.
+         */
+        if (!isProvisional) {
+            targetLogEntry = possibleTarget;
+        }
+
+        /* Was the log entry an IN deletion? */
+        if (LogEntryType.LOG_IN_DELETE_INFO.equals(fromLogType)) {
+            lastEntryWasDelete = true;
+        }
+
+        if (LogEntryType.LOG_IN_DUPDELETE_INFO.equals(fromLogType)) {
+            lastEntryWasDupDelete = true;
+        }
+
+        if (trackIds) {
+
+            /*
+             * Check if it's a db or txn id tracking entry.  Note that these
+             * entries do not overlap with targetLogEntry.
+             */
+            if (!isProvisional) {
+                dbIdTrackingEntry = (LNLogEntry)
+                    dbIdTrackingMap.get(fromLogType);
+                txnIdTrackingEntry = (LNLogEntry)
+                    txnIdTrackingMap.get(fromLogType);
+            }
+
+            /*
+             * Determine nodeTrackingEntry, inTrackingEntry, fsTrackingEntry.
+             * Note that these entries do overlap with targetLogEntry.
+             */
+            if (fromLogType.isNodeType()) {
+                if (possibleTarget != null) {
+                    nodeTrackingEntry = (NodeLogEntry) possibleTarget;
+                } else if (dbIdTrackingEntry != null) {
+                    nodeTrackingEntry = dbIdTrackingEntry;
+                } else if (txnIdTrackingEntry != null) {
+                    nodeTrackingEntry = txnIdTrackingEntry;
+                } else {
+                    nodeTrackingEntry = otherNodeTrackingMap.get(fromLogType);
+                    if (nodeTrackingEntry == null) {
+                        nodeTrackingEntry = (NodeLogEntry)
+                            fromLogType.getNewLogEntry();
+                        otherNodeTrackingMap.put(fromLogType,
+                                                 nodeTrackingEntry);
+                    }
+                }
+                if (nodeTrackingEntry instanceof INLogEntry) {
+                    inTrackingEntry = (INLogEntry) nodeTrackingEntry;
+                }
+                if (LogEntryType.LOG_FILESUMMARYLN.equals(fromLogType)) {
+                    fsTrackingEntry = (LNLogEntry) nodeTrackingEntry;
+                }
+            } else {
+
+                /*
+                 * Count all non-node entries except for the file header as
+                 * new.  UtilizationTracker does not count the file header.
+                 * Node entries will be counted in processEntry.  Null is
+                 * passed for the database ID; it is only needed for node
+                 * entries.
+                 */
+                if (!LogEntryType.LOG_FILE_HEADER.equals(fromLogType)) {
+                    tracker.countNewLogEntry(getLastLsn(), fromLogType,
+                                             currentEntryHeader.getSize() +
+                                             currentEntryHeader.getItemSize(),
+                                             null); // DatabaseId
+                }
+
+                /*
+                 * When the Root is encountered, reset the tracked summary for
+                 * the ID and Name mapping DBs.  This clears what we
+                 * accumulated previously for these databases during this
+                 * recovery pass.  Save the LSN for these databases for use by
+                 * undo/redo.
+                 */
+                if (LogEntryType.LOG_ROOT.equals(fromLogType)) {
+                    tracker.saveLastLoggedMapLN(DbTree.ID_DB_ID,
+                                                getLastLsn());
+                    tracker.saveLastLoggedMapLN(DbTree.NAME_DB_ID,
+                                                getLastLsn());
+                    tracker.resetDbInfo(DbTree.ID_DB_ID);
+                    tracker.resetDbInfo(DbTree.NAME_DB_ID);
+                }
+            }
+
+            /*
+             * Return true if this entry should be passed on to processEntry.
+             * If we're tracking ids, return true if this is a targeted entry
+             * or if it's any kind of tracked entry or node. If it's a
+             * replicated log entry, we'll want to track the VLSN in
+             * the optional portion of the header. We don't need a
+             * tracking log entry to do that, but we can only do it
+             * when the log entry header has been fully read, which is
+             * not true yet.
+             */
+            return (targetLogEntry != null) ||
+                (dbIdTrackingEntry != null) ||
+                (txnIdTrackingEntry != null) ||
+                (nodeTrackingEntry != null) ||
+                currentEntryHeader.getReplicated();
+        } else {
+
+            /*
+             * Return true if this entry should be passed on to processEntry.
+             * If we're not tracking ids, only return true if it's a targeted
+             * entry.
+             */
+            return (targetLogEntry != null);
+        }
+    }
+
+    /**
+     * Keep track of any VLSN mappings seen. We need to do this without
+     * checking if the environment is replicated, because this is done before
+     * the environment is set as replicated or not.  If this is expensive, we
+     * can instead indicate whether the environment is opening for replication
+     * before the ReplicatorImpl is created.
+     */
+    private void trackVLSNMappings() {
+
+        if (currentEntryHeader.getReplicated()) {
+
+            /*
+             * The VLSN is stored in the entry header, and we know the LSN.
+             * Store this mapping.
+             */
+            VLSN vlsn = currentEntryHeader.getVLSN();
+            long lsn = getLastLsn();
+            long fileNumber = DbLsn.getFileNumber(lsn);
+            FileMapper mapper = fileMappers.get(fileNumber);
+            if (mapper == null) {
+                mapper = new FileMapper(fileNumber);
+                fileMappers.put(fileNumber, mapper);
+            }
+            mapper.putLSN(vlsn.getSequence(), lsn, 
+                          LogEntryType.findType(currentEntryHeader.getType()));
+        }
+    }
+
+    /**
+     * This reader returns non-provisional INs and IN delete entries.
+     * In tracking mode, it may also scan log entries that aren't returned:
+     *  -to set the sequences for txn, node, and database id.
+     *  -to update utilization and obsolete offset information.
+     *  -for VLSN mappings for recovery
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        boolean useEntry = false;
+        boolean entryLoaded = false;
+
+        /* If this is a targeted entry, read the entire log entry. */
+        if (targetLogEntry != null) {
+            targetLogEntry.readEntry(currentEntryHeader,
+                                     entryBuffer,
+                                     true); // readFullItem
+            entryLoaded = true;
+            DatabaseId dbId = getDatabaseId();
+            boolean isMapDb = dbId.equals(DbTree.ID_DB_ID);
+            useEntry = (!mapDbOnly || isMapDb);
+        }
+
+        /* Do a partial load during tracking if necessary. */
+        if (trackIds) {
+
+            DatabaseId dbIdToReset = null;
+            long fileNumToReset = -1;
+
+            /*
+             * Process db and txn id tracking entries.  Note that these entries
+             * do not overlap with targetLogEntry.
+             */
+            LNLogEntry lnEntry = null;
+            if (dbIdTrackingEntry != null) {
+                /* This entry has a db id */
+                lnEntry = dbIdTrackingEntry;
+
+                /* 
+                 * Do a full load to get DB ID from DatabaseImpl. Note that
+                 * while a partial read gets the database id for the database
+                 * that owns this LN, it doesn't get the database id for the
+                 * database contained by a MapLN. That's what we're trying to
+                 * track. 
+                 */
+                lnEntry.readEntry(currentEntryHeader,
+                                  entryBuffer,
+                                  true); // readFullItem
+                entryLoaded = true;
+                MapLN mapLN = (MapLN) lnEntry.getMainItem();
+                DatabaseId dbId = mapLN.getDatabase().getId();
+                int dbIdVal = dbId.getId();
+                maxDbId = (dbIdVal > maxDbId) ? dbIdVal : maxDbId;
+                minReplicatedDbId = (dbIdVal < minReplicatedDbId) ?
+                    dbIdVal : minReplicatedDbId;
+
+                /*
+                 * When a MapLN is encountered, reset the tracked information
+                 * for that database.  This clears what we accumulated
+                 * previously for the database during this recovery pass.
+                 */
+                dbIdToReset = dbId;
+
+                /* Save the LSN of the MapLN for use by undo/redo. */
+                tracker.saveLastLoggedMapLN(dbId, getLastLsn());
+            }
+
+            if (txnIdTrackingEntry != null) {
+                /* This entry has a txn id */
+                if (lnEntry == null) {
+                    /* Do a partial load since we only need the txn ID. */
+                    lnEntry = txnIdTrackingEntry;
+                    lnEntry.readEntry(currentEntryHeader,
+                                      entryBuffer,
+                                      false); // readFullItem
+                    entryLoaded = true;
+                }
+                long txnId = lnEntry.getTxnId().longValue();
+                maxTxnId = (txnId > maxTxnId) ? txnId : maxTxnId;
+                minReplicatedTxnId = (txnId < minReplicatedTxnId) ?
+                    txnId : minReplicatedTxnId;
+            }
+
+            /*
+             * Perform utilization counting under trackIds to prevent
+             * double-counting.
+             */
+            if (fsTrackingEntry != null) {
+
+                if (!entryLoaded) {
+                    /* Do full load to get file number from FileSummaryLN. */
+                    nodeTrackingEntry.readEntry(currentEntryHeader,
+                                                entryBuffer,
+                                                true); // readFullItem
+                    entryLoaded = true;
+                }
+
+                /*
+                 * When a FileSummaryLN is encountered, reset the tracked
+                 * summary for that file.  This clears what we accumulated
+                 * previously for the file during this recovery pass.
+                 */
+                byte[] keyBytes = fsTrackingEntry.getKey();
+                FileSummaryLN fsln =
+                    (FileSummaryLN) fsTrackingEntry.getMainItem();
+                long fileNum = fsln.getFileNumber(keyBytes);
+                fileNumToReset = fileNum;
+
+                /* Save the LSN of the FileSummaryLN for use by undo/redo. */
+                tracker.saveLastLoggedFileSummaryLN(fileNum, getLastLsn());
+
+                /*
+                 * Do not cache the file summary in the UtilizationProfile
+                 * here, since it may be for a deleted log file. [#10395]
+                 */
+            }
+
+            /* Process the nodeTrackingEntry (and inTrackingEntry). */
+            if (nodeTrackingEntry != null) {
+                if (!entryLoaded) {
+                    /* Do a partial load; we only need the node and DB IDs. */
+                    nodeTrackingEntry.readEntry(currentEntryHeader,
+                                                entryBuffer,
+                                                false); // readFullItem
+                    entryLoaded = true;
+                }
+                /* Keep track of the largest node id seen. */
+                long nodeId = nodeTrackingEntry.getNodeId();
+                maxNodeId = (nodeId > maxNodeId) ? nodeId : maxNodeId;
+                minReplicatedNodeId = (nodeId < minReplicatedNodeId) ?
+                    nodeId : minReplicatedNodeId;
+
+                /*
+                 * Count node entries as new.  Non-node entries are counted in
+                 * isTargetEntry.
+                 */
+                tracker.countNewLogEntry(getLastLsn(), fromLogType,
+                                         currentEntryHeader.getSize() +
+                                         currentEntryHeader.getItemSize(),
+                                         nodeTrackingEntry.getDbId());
+            }
+
+            if (inTrackingEntry != null) {
+                assert entryLoaded : "All nodes should have been loaded";
+
+                /*
+                 * Count the obsolete LSN of the previous version, if available
+                 * and if not already counted.  Use inexact counting for two
+                 * reasons: 1) we don't always have the full LSN because
+                 * earlier log versions only had the file number, and 2) we
+                 * can't guarantee obsoleteness for provisional INs.
+                 */
+                long oldLsn = inTrackingEntry.getObsoleteLsn();
+                if (oldLsn != DbLsn.NULL_LSN) {
+                    long newLsn = getLastLsn();
+                    tracker.countObsoleteIfUncounted
+                        (oldLsn, newLsn, fromLogType, 0,
+                         inTrackingEntry.getDbId(),
+                         false); // countExact
+                }
+
+                /*
+                 * Count a provisional IN as obsolete if it follows
+                 * partialCkptStart.  It cannot have been already counted,
+                 * because provisional INs are not normally counted as
+                 * obsolete; they are only considered obsolete when they are
+                 * part of a partial checkpoint.
+                 *
+                 * Depending on the exact point at which the checkpoint was
+                 * aborted, this technique is not always accurate; therefore
+                 * inexact counting must be used.
+                 */
+                if (isProvisional && partialCkptStart != DbLsn.NULL_LSN) {
+                    oldLsn = getLastLsn();
+                    if (DbLsn.compareTo(partialCkptStart, oldLsn) < 0) {
+                        tracker.countObsoleteUnconditional
+                            (oldLsn, fromLogType, 0,
+                             inTrackingEntry.getDbId(),
+                             false); // countExact
+                    }
+                }
+            }
+
+            /*
+             * Reset file and database utilization info only after counting a
+             * new or obsolete node.  The MapLN itself is a node and will be
+             * counted as new above, and we must reset that count as well.
+             */
+            if (fileNumToReset != -1) {
+                tracker.resetFileInfo(fileNumToReset);
+            }
+            if (dbIdToReset != null) {
+                tracker.resetDbInfo(dbIdToReset);
+            }
+
+            /*
+             * Look for VLSNs in the log entry header. If this log entry
+             * was processed only to find its vlsn, entryBuffer was not
+             * advanced yet because we didn't need to use the rest of the
+             * entry. Position it to the end of the entry.
+             */
+            trackVLSNMappings();
+            if (!entryLoaded) {
+                int endPosition = threadSafeBufferPosition(entryBuffer) +
+                    currentEntryHeader.getItemSize();
+                threadSafeBufferPosition(entryBuffer, endPosition);
+            }
+        }
+
+        /* Return true if this entry should be processed */
+        return useEntry;
+    }
+
+    /**
+     * Get the last IN seen by the reader.
+     */
+    public IN getIN()
+        throws DatabaseException {
+
+        return ((INContainingEntry) targetLogEntry).getIN(envImpl);
+    }
+
+    /**
+     * Get the last databaseId seen by the reader.
+     */
+    public DatabaseId getDatabaseId() {
+        if (lastEntryWasDelete) {
+            return ((INDeleteInfo) targetLogEntry.getMainItem()).
+                getDatabaseId();
+        } else if (lastEntryWasDupDelete) {
+            return ((INDupDeleteInfo) targetLogEntry.getMainItem()).
+                getDatabaseId();
+        } else {
+            return ((INContainingEntry) targetLogEntry).getDbId();
+        }
+    }
+
+    /**
+     * Get the maximum node id seen by the reader.
+     */
+    public long getMaxNodeId() {
+        return maxNodeId;
+    }
+    public long getMinReplicatedNodeId() {
+        return minReplicatedNodeId;
+    }
+
+    /**
+     * Get the maximum db id seen by the reader.
+     */
+    public int getMaxDbId() {
+        return maxDbId;
+    }
+    public int getMinReplicatedDbId() {
+        return minReplicatedDbId;
+    }
+
+    /**
+     * Get the maximum txn id seen by the reader.
+     */
+    public long getMaxTxnId() {
+        return maxTxnId;
+    }
+    public long getMinReplicatedTxnId() {
+        return minReplicatedTxnId;
+    }
+
+    /**
+     * @return true if the last entry was a delete info entry.
+     */
+    public boolean isDeleteInfo() {
+        return lastEntryWasDelete;
+    }
+
+    /**
+     * @return true if the last entry was a dup delete info entry.
+     */
+    public boolean isDupDeleteInfo() {
+        return lastEntryWasDupDelete;
+    }
+
+    /**
+     * Get the deleted node id stored in the last delete info log entry.
+     */
+    public long getDeletedNodeId() {
+        return ((INDeleteInfo)
+                targetLogEntry.getMainItem()).getDeletedNodeId();
+    }
+
+    /**
+     * Get the deleted id key stored in the last delete info log entry.
+     */
+    public byte[] getDeletedIdKey() {
+        return ((INDeleteInfo)
+                targetLogEntry.getMainItem()).getDeletedIdKey();
+    }
+
+    /**
+     * Get the deleted node id stored in the last dup delete info log entry.
+     */
+    public long getDupDeletedNodeId() {
+        return ((INDupDeleteInfo)
+                targetLogEntry.getMainItem()).getDeletedNodeId();
+    }
+
+    /**
+     * Get the deleted main key stored in the last dup delete info log entry.
+     */
+    public byte[] getDupDeletedMainKey() {
+        return ((INDupDeleteInfo)
+                targetLogEntry.getMainItem()).getDeletedMainKey();
+    }
+
+    /**
+     * Get the deleted dup key stored in the last dup delete info log entry.
+     */
+    public byte[] getDupDeletedDupKey() {
+        return ((INDupDeleteInfo)
+                targetLogEntry.getMainItem()).getDeletedDupKey();
+    }
+
+    /**
+     * Get the LSN that should represent this IN. For most INs, it's the LSN
+     * that was just read. For BINDelta entries, it's the LSN of the last
+     * full version.
+     */
+    public long getLsnOfIN() {
+        return ((INContainingEntry) targetLogEntry).getLsnOfIN(getLastLsn());
+    }
+
+    public Collection<FileMapper> getFileMappers() {
+        return fileMappers.values();
+    }
+}
diff --git a/src/com/sleepycat/je/log/JEFileFilter.java b/src/com/sleepycat/je/log/JEFileFilter.java
new file mode 100644
index 0000000000000000000000000000000000000000..3339c31af8bba7066f650442deef6981d0e0301a
--- /dev/null
+++ b/src/com/sleepycat/je/log/JEFileFilter.java
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JEFileFilter.java,v 1.21.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.StringTokenizer;
+
+/**
+ * JEFileFilters are used for listing JE files.
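+ *
+ * For example, to list the log files in an environment directory (an
+ * illustrative sketch; envHome is assumed to be a java.io.File for the
+ * directory, and ".jdb" is assumed to be the standard JE log file suffix):
+ * <pre>
+ *   String[] logFiles =
+ *       envHome.list(new JEFileFilter(new String[] { ".jdb" }));
+ * </pre>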
+ */
+class JEFileFilter implements FilenameFilter {
+    String[] suffix;
+    long minFileNumber = 0;
+    long maxFileNumber = -1;
+
+    JEFileFilter(String[] suffix) {
+        this.suffix = suffix;
+    }
+
+    /**
+     * @param maxFileNumber this filter will only return files whose numbers
+     * are <= maxFileNumber.
+     */
+    JEFileFilter(String[] suffix, long maxFileNumber) {
+        this.suffix = suffix;
+        this.maxFileNumber = maxFileNumber;
+    }
+
+    /**
+     * @param minFileNumber this filter will only return files whose numbers
+     * are >= minFileNumber.
+     * @param maxFileNumber this filter will only return files whose numbers
+     * are <= maxFileNumber.
+     */
+    JEFileFilter(String[] suffix, long minFileNumber, long maxFileNumber) {
+        this.suffix = suffix;
+        this.minFileNumber = minFileNumber;
+        this.maxFileNumber = maxFileNumber;
+    }
+
+    private boolean matches(String fileSuffix) {
+        for (int i = 0; i < suffix.length; i++) {
+            if (fileSuffix.equalsIgnoreCase(suffix[i])) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * A JE file has to be of the format nnnnnnnn.suffix, where nnnnnnnn is a
+     * hex file number, optionally followed by a numeric version token.
+     */
+    public boolean accept(File dir, String name) {
+        boolean ok = false;
+        StringTokenizer tokenizer = new StringTokenizer(name, ".");
+        /* There should be two or three parts. */
+        int nTokens = tokenizer.countTokens();
+        if (nTokens == 2 || nTokens == 3) {
+            boolean hasVersion = (nTokens == 3);
+            String fileNumber = tokenizer.nextToken();
+            String fileSuffix = "." + tokenizer.nextToken();
+            String fileVersion = (hasVersion ? tokenizer.nextToken() : null);
+
+            /* Check the length and the suffix. */
+            if ((fileNumber.length() == 8) && matches(fileSuffix)) {
+
+                /* The first part should be a hex number. */
+                try {
+                    long fileNum = Long.parseLong(fileNumber, 16);
+                    ok = (fileNum >= minFileNumber) &&
+                         ((fileNum <= maxFileNumber) ||
+                          (maxFileNumber == -1));
+                } catch (NumberFormatException e) {
+                    ok = false;
+                }
+
+                /*
+                 * The version, if present, must also be a number.  Note that
+                 * a parsable version must not override a failed file number
+                 * range check above.
+                 */
+                if (ok && hasVersion) {
+                    try {
+                        Integer.parseInt(fileVersion);
+                    } catch (NumberFormatException e) {
+                        ok = false;
+                    }
+                }
+            }
+        }
+
+        return ok;
+    }
+}
+
diff --git a/src/com/sleepycat/je/log/LNFileReader.java b/src/com/sleepycat/je/log/LNFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..8f246b0e8d4f691b92f27b3a0b91b09a70acefe0
--- /dev/null
+++ b/src/com/sleepycat/je/log/LNFileReader.java
@@ -0,0 +1,231 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LNFileReader.java,v 1.71.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.txn.TxnAbort;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.txn.TxnPrepare;
+
+/**
+ * LNFileReader scans log files for LNs. Also, if it's going backwards for the
+ * undo phase in recovery, it reads transaction commit entries.
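+ *
+ * An illustrative forward scan (parameter values here are examples only):
+ * <pre>
+ *   LNFileReader reader =
+ *       new LNFileReader(envImpl, readBufferSize, startLsn,
+ *                        true,            // redo: read forward
+ *                        DbLsn.NULL_LSN,  // endOfFileLsn: unused going forward
+ *                        DbLsn.NULL_LSN,  // finishLsn: read to end of log
+ *                        null,            // singleFileNum: no restriction
+ *                        ckptEnd);
+ *   reader.addTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+ *   while (reader.readNextEntry()) {
+ *       if (reader.isLN()) {
+ *           LN ln = reader.getLN();
+ *           // process ln here
+ *       }
+ *   }
+ * </pre>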
+ */
+public class LNFileReader extends FileReader {
+
+    /*
+     * targetEntryMap maps LogEntryTypes to log entries. We use this
+     * collection to find the right LogEntry instance to read in the current
+     * entry.
+     */
+    protected Map<LogEntryType, LogEntry> targetEntryMap;
+    protected LogEntry targetLogEntry;
+
+    private long ckptEnd;
+
+    /**
+     * Create this reader to start at a given LSN.
+     * @param env The relevant EnvironmentImpl
+     * @param readBufferSize buffer size in bytes for reading in log
+     * @param startLsn where to start in the log
+     * @param redo If true, we're going to go forward from
+     *             the start LSN to the end of the log. If false, we're going
+     *             backwards from the end of the log to the start LSN.
+     * @param finishLsn the last LSN to read in the log. May be NULL_LSN if we
+     *  want to read to the end of the log.
+     * @param endOfFileLsn the virtual LSN that marks the end of the log (the
+     *  one just off the end of the log). Only used if we're reading
+     *  backwards. Different from the startLsn because the startLsn tells us
+     *  where the beginning of the start entry is, but not the length/end of
+     *  the start entry. May be NULL_LSN if we're going forward.
+     */
+    public LNFileReader(EnvironmentImpl env,
+                        int readBufferSize,
+                        long startLsn,
+                        boolean redo,
+                        long endOfFileLsn,
+                        long finishLsn,
+			Long singleFileNum,
+                        long ckptEnd)
+        throws IOException, DatabaseException {
+
+        super(env, readBufferSize, redo, startLsn,
+              singleFileNum, endOfFileLsn, finishLsn);
+
+        this.ckptEnd = ckptEnd;
+        targetEntryMap = new HashMap<LogEntryType, LogEntry>();
+    }
+
+    public void addTargetType(LogEntryType entryType)
+        throws DatabaseException {
+
+        targetEntryMap.put(entryType, entryType.getNewLogEntry());
+    }
+
+    /**
+     * @return true if the current entry is non-provisional and is one of the
+     * registered target types.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+
+        if (currentEntryHeader.getProvisional().isProvisional
+            (getLastLsn(), ckptEnd)) {
+            /* Skip provisional entries. */
+            targetLogEntry = null;
+        } else {
+            LogEntryType fromLogType =
+                new LogEntryType(currentEntryHeader.getType());
+
+            /* Is it a target entry? */
+            targetLogEntry = targetEntryMap.get(fromLogType);
+        }
+        return (targetLogEntry != null);
+    }
+
+    /**
+     * This reader instantiates an LN and key for every LN entry.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        targetLogEntry.readEntry
+            (currentEntryHeader, entryBuffer, true); // readFullItem
+        return true;
+    }
+
+    /**
+     * @return true if the last entry was an LN.
+     */
+    public boolean isLN() {
+        return (targetLogEntry instanceof LNLogEntry);
+    }
+
+    /**
+     * Get the last LN seen by the reader.
+     */
+    public LN getLN() {
+        return ((LNLogEntry) targetLogEntry).getLN();
+    }
+
+    /**
+     * Returns a MapLN if the LN is a MapLN, or null otherwise.
+     */
+    public MapLN getMapLN() {
+        LN ln = getLN();
+        if (ln instanceof MapLN) {
+            return (MapLN) ln;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Get the last databaseId seen by the reader.
+     */
+    public DatabaseId getDatabaseId() {
+        return ((LNLogEntry) targetLogEntry).getDbId();
+    }
+
+    /**
+     * Get the last key seen by the reader.
+     */
+    public byte[] getKey() {
+        return ((LNLogEntry) targetLogEntry).getKey();
+    }
+
+    /**
+     * Get the last dup tree key seen by the reader.
+     */
+    public byte[] getDupTreeKey() {
+        return ((LNLogEntry) targetLogEntry).getDupKey();
+    }
+
+    /**
+     * @return the transaction id of the current entry.
+     */
+    public Long getTxnId() {
+        return ((LNLogEntry) targetLogEntry).getTxnId();
+    }
+
+    /*
+     * @return true if the last entry was a TxnPrepare record.
+     */
+    public boolean isPrepare() {
+	return (targetLogEntry.getMainItem() instanceof TxnPrepare);
+    }
+
+    /**
+     * Get the last txn prepare id seen by the reader.
+     */
+    public long getTxnPrepareId() {
+        return ((TxnPrepare) targetLogEntry.getMainItem()).getId();
+    }
+
+    /**
+     * Get the last txn prepare Xid seen by the reader.
+     */
+    public Xid getTxnPrepareXid() {
+        return ((TxnPrepare) targetLogEntry.getMainItem()).getXid();
+    }
+
+    /*
+     * @return true if the last entry was a TxnAbort record.
+     */
+    public boolean isAbort() {
+	return (targetLogEntry.getMainItem() instanceof TxnAbort);
+    }
+
+    /**
+     * Get the last txn abort id seen by the reader.
+     */
+    public long getTxnAbortId() {
+        return ((TxnAbort) targetLogEntry.getMainItem()).getId();
+    }
+
+    /**
+     * Get the last txn commit id seen by the reader.
+     */
+    public long getTxnCommitId() {
+        return ((TxnCommit) targetLogEntry.getMainItem()).getId();
+    }
+
+    /**
+     * Get the node id of the current LN.
+     */
+    public long getNodeId() {
+        return ((LNLogEntry) targetLogEntry).getLN().getNodeId();
+    }
+
+    /**
+     * Get the last abort LSN seen by the reader (may be NULL_LSN).
+     */
+    public long getAbortLsn() {
+        return ((LNLogEntry) targetLogEntry).getAbortLsn();
+    }
+
+    /**
+     * Get the last abort-known-deleted flag seen by the reader.
+     */
+    public boolean getAbortKnownDeleted() {
+        return ((LNLogEntry) targetLogEntry).getAbortKnownDeleted();
+    }
+}
diff --git a/src/com/sleepycat/je/log/LastFileReader.java b/src/com/sleepycat/je/log/LastFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..84038385191f004ab553c87d5c29554e2c0324b0
--- /dev/null
+++ b/src/com/sleepycat/je/log/LastFileReader.java
@@ -0,0 +1,278 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LastFileReader.java,v 1.55.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * LastFileReader traverses the last log file, doing checksums and looking for
+ * the end of the log. Different log types can be registered with it and it
+ * will remember the last occurrence of targeted entry types.
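+ *
+ * An illustrative search for the log end (a sketch, not the full recovery
+ * sequence):
+ * <pre>
+ *   LastFileReader reader = new LastFileReader(envImpl, readBufferSize);
+ *   reader.setTargetType(LogEntryType.LOG_CKPT_END);
+ *   while (reader.readNextEntry()) {
+ *       // readNextEntry() stops at the first bad entry
+ *   }
+ *   long endOfLog = reader.getEndOfLog();
+ *   long lastCkptEnd = reader.getLastSeen(LogEntryType.LOG_CKPT_END);
+ * </pre>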
+ */
+public class LastFileReader extends FileReader {
+
+    /* Log entry types to track. */
+    private Set<LogEntryType> trackableEntries;
+
+    private long nextUnprovenOffset;
+    private long lastValidOffset;
+    private LogEntryType entryType;
+
+    /*
+     * Last lsn seen for tracked types. Key = LogEntryType, data is the offset
+     * (Long).
+     */
+    private Map<LogEntryType, Long> lastOffsetSeen;
+
+    /**
+     * This file reader is always positioned at the last file.
+     */
+    public LastFileReader(EnvironmentImpl env,
+                          int readBufferSize)
+        throws IOException, DatabaseException {
+
+        super(env, readBufferSize, true,  DbLsn.NULL_LSN, Long.valueOf(-1),
+	      DbLsn.NULL_LSN, DbLsn.NULL_LSN);
+
+        trackableEntries = new HashSet<LogEntryType>();
+        lastOffsetSeen = new HashMap<LogEntryType, Long>();
+
+        lastValidOffset = 0;
+	anticipateChecksumErrors = true;
+        nextUnprovenOffset = nextEntryOffset;
+    }
+
+    /**
+     * Constructor that lets the caller specify the file number to read to
+     * the end of.  This is used by the ScavengerFileReader when it
+     * encounters a bad log record in the middle of a file.
+     */
+    public LastFileReader(EnvironmentImpl env,
+                          int readBufferSize,
+			  Long specificFileNumber)
+        throws IOException, DatabaseException {
+
+        super(env, readBufferSize, true,  DbLsn.NULL_LSN,
+              specificFileNumber, DbLsn.NULL_LSN, DbLsn.NULL_LSN);
+
+        trackableEntries = new HashSet<LogEntryType>();
+        lastOffsetSeen = new HashMap<LogEntryType, Long>();
+
+        lastValidOffset = 0;
+	anticipateChecksumErrors = true;
+        nextUnprovenOffset = nextEntryOffset;
+    }
+
+    /**
+     * Override so that we always start at the last file.
+     */
+    @Override
+    protected void initStartingPosition(long endOfFileLsn,
+					Long singleFileNum)
+        throws IOException, DatabaseException {
+
+        eof = false;
+
+        /*
+         * Start at what seems like the last file. If it doesn't exist, we're
+         * done.
+         */
+        Long lastNum = ((singleFileNum != null) &&
+			(singleFileNum.longValue() >= 0)) ?
+	    singleFileNum :
+	    fileManager.getLastFileNum();
+        FileHandle fileHandle = null;
+        readBufferFileEnd = 0;
+
+        long fileLen = 0;
+        while ((fileHandle == null) && !eof) {
+            if (lastNum == null) {
+                eof = true;
+            } else {
+                try {
+                    readBufferFileNum = lastNum.longValue();
+                    fileHandle = fileManager.getFileHandle(readBufferFileNum);
+
+                    /*
+                     * Check the size of this file. If it opened successfully
+                     * but only held a header or is 0 length, back up to the
+                     * next "last" file unless this is the only file in the
+                     * log. Note that an incomplete header will end up throwing
+                     * a checksum exception, but a 0 length file will open
+                     * successfully in read only mode.
+                     */
+                    fileLen = fileHandle.getFile().length();
+                    if (fileLen <= FileManager.firstLogEntryOffset()) {
+                        lastNum = fileManager.getFollowingFileNum
+			    (lastNum.longValue(), false);
+                        if (lastNum != null) {
+                            fileHandle.release();
+                            fileHandle = null;
+                        }
+                    }
+                } catch (DatabaseException e) {
+                    lastNum = attemptToMoveBadFile(e);
+                    fileHandle = null;
+                } finally {
+                    if (fileHandle != null) {
+                        fileHandle.release();
+                    }
+                }
+            }
+        }
+
+        nextEntryOffset = 0;
+    }
+
+    /**
+     * Something is wrong with this file. If there is no data in this file
+     * (the file length is <= the file header size), move this last file
+     * aside and
+     * search the next "last" file. If the last file does have data in it,
+     * throw an exception back to the application, since we're not sure what to
+     * do now.
+     */
+    private Long attemptToMoveBadFile(DatabaseException origException)
+        throws DatabaseException, IOException {
+
+        String fileName = fileManager.getFullFileNames(readBufferFileNum)[0];
+        File problemFile = new File(fileName);
+        Long lastNum = null;
+
+        if (problemFile.length() <= FileManager.firstLogEntryOffset()) {
+            fileManager.clear(); // close all existing files
+            /* Move this file aside. */
+            lastNum = fileManager.getFollowingFileNum(readBufferFileNum,
+                                                      false);
+            fileManager.renameFile(readBufferFileNum,
+                                   FileManager.BAD_SUFFIX);
+
+        } else {
+            /* There's data in this file, throw up to the app. */
+            throw origException;
+        }
+        return lastNum;
+    }
+
+    public void setEndOfFile()
+        throws IOException, DatabaseException  {
+
+        fileManager.truncateLog(readBufferFileNum, nextUnprovenOffset);
+    }
+
+    /**
+     * @return The LSN to be used for the next log entry.
+     */
+    public long getEndOfLog() {
+        return DbLsn.makeLsn(readBufferFileNum, nextUnprovenOffset);
+    }
+
+    public long getLastValidLsn() {
+        return DbLsn.makeLsn(readBufferFileNum, lastValidOffset);
+    }
+
+    public long getPrevOffset() {
+        return lastValidOffset;
+    }
+
+    public LogEntryType getEntryType() {
+        return entryType;
+    }
+
+    /**
+     * Tell the reader that we are interested in this kind of entry.
+     */
+    public void setTargetType(LogEntryType type) {
+        trackableEntries.add(type);
+    }
+
+    /**
+     * @return The last LSN seen in the log for this kind of entry, or
+     * DbLsn.NULL_LSN if none was seen.
+     */
+    public long getLastSeen(LogEntryType type) {
+        Long typeNumber = lastOffsetSeen.get(type);
+        if (typeNumber != null) {
+            return DbLsn.makeLsn(readBufferFileNum, typeNumber.longValue());
+        } else {
+            return DbLsn.NULL_LSN;
+        }
+    }
+
+    /**
+     * Validate the checksum on each entry, see if we should remember the LSN
+     * of this entry.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer) {
+
+        /* Skip over the data; we're not doing anything with it. */
+        entryBuffer.position(entryBuffer.position() +
+                             currentEntryHeader.getItemSize());
+
+        /* If we're supposed to remember this lsn, record it. */
+        entryType = new LogEntryType(currentEntryHeader.getType());
+        if (trackableEntries.contains(entryType)) {
+            lastOffsetSeen.put(entryType, Long.valueOf(currentEntryOffset));
+        }
+
+        return true;
+    }
+
+    /**
+     * readNextEntry will stop at a bad entry.
+     * @return true if an element has been read.
+     */
+    @Override
+    public boolean readNextEntry()
+        throws DatabaseException, IOException {
+
+        boolean foundEntry = false;
+
+        try {
+
+            /*
+             * At this point,
+             *  currentEntryOffset is the entry we just read.
+             *  nextEntryOffset is the entry we're about to read.
+             *  currentEntryPrevOffset is 2 entries ago.
+             * Note that readNextEntry() moves all the offset pointers up.
+             */
+
+            foundEntry = super.readNextEntry();
+
+
+            /*
+             * Note that initStartingPosition() makes sure that the file header
+             * entry is valid.  So by the time we get to this method, we know
+             * we're at a file with a valid file header entry.
+             */
+            lastValidOffset = currentEntryOffset;
+            nextUnprovenOffset = nextEntryOffset;
+        } catch (DbChecksumException e) {
+            Tracer.trace(Level.INFO, envImpl,
+                         "Found checksum exception while searching for the " +
+                         "end of the log. Last valid entry is at " +
+                         DbLsn.toString
+                         (DbLsn.makeLsn(readBufferFileNum, lastValidOffset)) +
+                         " Bad entry is at " +
+                         DbLsn.toString
+                         (DbLsn.makeLsn(readBufferFileNum,
+                                        nextUnprovenOffset)));
+        }
+        return foundEntry;
+    }
+}
diff --git a/src/com/sleepycat/je/log/LatchedLogManager.java b/src/com/sleepycat/je/log/LatchedLogManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..dfcd19ae1c0f22904cff84c7f4ece2b171c23544
--- /dev/null
+++ b/src/com/sleepycat/je/log/LatchedLogManager.java
@@ -0,0 +1,175 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchedLogManager.java,v 1.28.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.cleaner.TrackedFileSummary;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * The LatchedLogManager uses latches to implement its critical sections.
+ */
+public class LatchedLogManager extends LogManager {
+
+    /**
+     * There is a single log manager per database environment.
+     */
+    public LatchedLogManager(EnvironmentImpl envImpl,
+                             boolean readOnly)
+        throws DatabaseException {
+
+        super(envImpl, readOnly);
+    }
+
+    void serialLog(LogItem[] itemArray, LogContext context)
+        throws IOException, DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            serialLogInternal(itemArray, context);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    protected void flushInternal()
+        throws LogException, DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            logBufferPool.writeBufferToFile(0);
+        } catch (IOException e) {
+            throw new LogException(e.getMessage());
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#getUnflushableTrackedSummary
+     */
+    public TrackedFileSummary getUnflushableTrackedSummary(long file)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            return getUnflushableTrackedSummaryInternal(file);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#removeTrackedFile
+     */
+    public void removeTrackedFile(TrackedFileSummary tfs)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            removeTrackedFileInternal(tfs);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteNode
+     */
+    public void countObsoleteNode(long lsn,
+                                  LogEntryType type,
+                                  int size,
+                                  DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            countObsoleteNodeInternal(lsn, type, size, nodeDb);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#transferToUtilizationTracker
+     */
+    public void transferToUtilizationTracker(LocalUtilizationTracker
+                                             localTracker)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            transferToUtilizationTrackerInternal(localTracker);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteINs
+     */
+    public void countObsoleteINs(List<Long> lsnList, DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            countObsoleteINsInternal(lsnList, nodeDb);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteDb
+     */
+    public void countObsoleteDb(DatabaseImpl db)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            countObsoleteDbInternal(db);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#removeDbFileSummary
+     */
+    public boolean removeDbFileSummary(DatabaseImpl db, Long fileNum)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            return removeDbFileSummaryInternal(db, fileNum);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+
+    /**
+     * @see LogManager#loadEndOfLogStat
+     */
+    public void loadEndOfLogStat(EnvironmentStats stats)
+        throws DatabaseException {
+
+        logWriteLatch.acquire();
+        try {
+            loadEndOfLogStatInternal(stats);
+        } finally {
+            logWriteLatch.release();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogBuffer.java b/src/com/sleepycat/je/log/LogBuffer.java
new file mode 100644
index 0000000000000000000000000000000000000000..22fd3b741b8f51dfb62d175eede0a0b67247e819
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogBuffer.java
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogBuffer.java,v 1.47.2.3 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * A LogBuffer holds outgoing, newly written log entries.
+ */
+class LogBuffer implements LogSource {
+
+    private static final String DEBUG_NAME = LogBuffer.class.getName();
+
+    /* Storage */
+    private ByteBuffer buffer;
+
+    /* Information about what log entries are held here. */
+    private long firstLsn;
+    private long lastLsn;
+
+    /* The read latch serializes access to and modification of the LSN info. */
+    private Latch readLatch;
+
+    /*
+     * Buffer may be rewritten because an IOException previously occurred.
+     */
+    private boolean rewriteAllowed;
+
+    LogBuffer(int capacity, EnvironmentImpl env)
+	throws DatabaseException {
+
+        buffer = ByteBuffer.allocate(capacity);
+        readLatch = new Latch(DEBUG_NAME);
+        reinit();
+    }
+
+    /*
+     * Used by LogManager for the case when we have a temporary buffer in hand
+     * and no LogBuffers in the LogBufferPool are large enough to hold the
+     * current entry being written.  We just wrap the temporary ByteBuffer
+     * in a LogBuffer and pass it to FileManager. [#12674].
+     */
+    LogBuffer(ByteBuffer buffer, long firstLsn)
+	throws DatabaseException {
+
+	this.buffer = buffer;
+        this.firstLsn = firstLsn;
+        this.lastLsn = firstLsn;
+	rewriteAllowed = false;
+    }
+
+    void reinit()
+	throws DatabaseException {
+
+        readLatch.acquire();
+        buffer.clear();
+        firstLsn = DbLsn.NULL_LSN;
+        lastLsn = DbLsn.NULL_LSN;
+	rewriteAllowed = false;
+        readLatch.release();
+    }
+
+    /*
+     * Write support
+     */
+
+    /**
+     * Return first LSN held in this buffer. Assumes the log write latch is
+     * held.
+     */
+    long getFirstLsn() {
+        return firstLsn;
+    }
+
+    /**
+     * This LSN has been written to the log.
+     */
+    void registerLsn(long lsn)
+	throws DatabaseException {
+
+        readLatch.acquire();
+	try {
+	    if (lastLsn != DbLsn.NULL_LSN) {
+		assert (DbLsn.compareTo(lsn, lastLsn) > 0):
+                    "lsn=" + lsn + " lastlsn=" + lastLsn;
+	    }
+	    lastLsn = lsn;
+	    if (firstLsn == DbLsn.NULL_LSN) {
+		firstLsn = lsn;
+	    }
+	} finally {
+	    readLatch.release();
+	}
+    }
+
+    /**
+     * Check capacity of buffer. Assumes that the log write latch is held.
+     * @return true if this buffer can hold this many more bytes.
+     */
+    boolean hasRoom(int numBytes) {
+        return (numBytes <= (buffer.capacity() - buffer.position()));
+    }
+
+    /**
+     * @return the actual data buffer.
+     */
+    ByteBuffer getDataBuffer() {
+        return buffer;
+    }
+
+    /**
+     * @return capacity in bytes
+     */
+    int getCapacity() {
+        return buffer.capacity();
+    }
+
+    /*
+     * Read support
+     */
+
+    /**
+     * Support for reading a log entry out of a still-in-memory log
+     * @return true if this buffer holds the entry at this LSN. The
+     *         buffer will be latched for read. Returns false if
+     *         LSN is not here, and releases the read latch.
+     */
+    boolean containsLsn(long lsn)
+	throws DatabaseException {
+
+        /* Latch before we look at the LSNs. */
+        readLatch.acquire();
+        boolean found = false;
+        if ((firstLsn != DbLsn.NULL_LSN) &&
+            ((DbLsn.compareTo(firstLsn, lsn) <= 0) &&
+	     (DbLsn.compareTo(lastLsn, lsn) >= 0))) {
+            found = true;
+        }
+
+        if (found) {
+            return true;
+        } else {
+            readLatch.release();
+            return false;
+        }
+    }
+
+    /**
+     * When modifying the buffer, acquire the readLatch.  Call release() to
+     * release the latch.  Note that containsLsn() acquires the latch for
+     * reading.
+     */
+    public void latchForWrite()
+        throws DatabaseException  {
+
+        readLatch.acquire();
+    }
+
+    /*
+     * LogSource support
+     */
+
+    /**
+     * @see LogSource#release
+     */
+    public void release()
+	throws DatabaseException  {
+
+    	readLatch.releaseIfOwner();
+    }
+
+    boolean getRewriteAllowed() {
+	return rewriteAllowed;
+    }
+
+    void setRewriteAllowed() {
+	rewriteAllowed = true;
+    }
+
+    /**
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset) {
+
+        /*
+         * Make a copy of this buffer (doesn't copy data, only buffer state)
+         * and position it to read the requested data.
+	 *
+	 * Note that we catch Exception here because it is possible that
+	 * another thread is modifying the state of buffer simultaneously.
+	 * Specifically, this can happen if another thread is writing this log
+	 * buffer out and it does (e.g.) a flip operation on it.  The actual
+	 * mark/pos of the buffer may be caught in an unpredictable state.  We
+	 * could add another latch to protect this buffer, but that's heavier
+	 * weight than we need.  So the easiest thing to do is to just retry
+	 * the duplicate operation.  See [#9822].
+         */
+        ByteBuffer copy = null;
+	while (true) {
+	    try {
+		copy = buffer.duplicate();
+		copy.position((int)
+			      (fileOffset - DbLsn.getFileOffset(firstLsn)));
+		break;
+	    } catch (IllegalArgumentException IAE) {
+		continue;
+	    }
+	}
+        return copy;
+    }
+
+    /**
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset, int numBytes) {
+        ByteBuffer copy = getBytes(fileOffset);
+        /* Log Buffer should always hold a whole entry. */
+        assert (copy.remaining() >= numBytes) :
+            "copy.remaining=" + copy.remaining() +
+            " numBytes=" + numBytes;
+        return copy;
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogBufferPool.java b/src/com/sleepycat/je/log/LogBufferPool.java
new file mode 100644
index 0000000000000000000000000000000000000000..3e627e67d01dadd873df70f1ae9f377f6f3ec676
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogBufferPool.java
@@ -0,0 +1,350 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogBufferPool.java,v 1.77.2.3 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.LinkedList;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+
+/**
+ * LogBufferPool keeps a set of log buffers.
+ */
+class LogBufferPool {
+    private static final String DEBUG_NAME = LogBufferPool.class.getName();
+
+    private EnvironmentImpl envImpl = null;
+    private int logBufferSize;      // size of each log buffer
+    private LinkedList<LogBuffer> bufferPool;
+
+    /* Buffer that holds the current log end. All writes go to this buffer. */
+    private LogBuffer currentWriteBuffer;
+
+    private FileManager fileManager;
+
+    /* Stats */
+    private long nNotResident = 0;  // had to be instantiated from an lsn
+    private long nCacheMiss = 0;    // had to retrieve from disk
+    private boolean runInMemory;
+
+    /*
+     * bufferPoolLatch synchronizes access and changes to the buffer pool.
+     * Related latches are the log write latch in LogManager and the read
+     * latches in each log buffer. The log write latch is always taken before
+     * the bufferPoolLatch. The bufferPoolLatch is always taken before any
+     * logBuffer read latch. When faulting in an object from the log, the order
+     * of latching is:
+     *          bufferPoolLatch.acquire()
+     *          LogBuffer read latch acquire();
+     *          bufferPoolLatch.release();
+     *          LogBuffer read latch release()
+     * bufferPoolLatch is also used to protect assignment to the
+     * currentWriteBuffer field.
+     */
+    private Latch bufferPoolLatch;
+
+    LogBufferPool(FileManager fileManager,
+                  EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        this.fileManager = fileManager;
+        this.envImpl = envImpl;
+        bufferPoolLatch = new Latch(DEBUG_NAME + "_FullLatch");
+
+        /* Configure the pool. */
+        DbConfigManager configManager = envImpl.getConfigManager();
+        runInMemory = envImpl.isMemOnly();
+        reset(configManager);
+
+        /* Current buffer is the active buffer that writes go into. */
+        currentWriteBuffer = bufferPool.getFirst();
+    }
+
+    final int getLogBufferSize() {
+        return logBufferSize;
+    }
+
+    /**
+     * Initialize the pool at construction time and when the cache is resized.
+     * This method is called after the memory budget has been calculated.
+     */
+    void reset(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+         * When running in memory, we can't clear the existing pool and
+         * changing the buffer size is not very useful, so just return.
+         */
+        if (runInMemory && bufferPool != null) {
+            return;
+        }
+
+        /*
+         * Based on the log budget, figure the number and size of
+         * log buffers to use.
+         */
+        int numBuffers =
+            configManager.getInt(EnvironmentParams.NUM_LOG_BUFFERS);
+        long logBufferBudget = envImpl.getMemoryBudget().getLogBufferBudget();
+
+        /* Buffers must be int sized. */
+        int newBufferSize = (int) (logBufferBudget / numBuffers);
+
+        /* list of buffers that are available for log writing */
+        LinkedList<LogBuffer> newPool = new LinkedList<LogBuffer>();
+
+        /*
+         * If we're running in memory only, don't pre-allocate all the buffers.
+         * This case only occurs when called from the constructor.
+         */
+        if (runInMemory) {
+            numBuffers = 1;
+        }
+
+        for (int i = 0; i < numBuffers; i++) {
+            newPool.add(new LogBuffer(newBufferSize, envImpl));
+        }
+
+        /*
+         * The following applies when this method is called to reset the pool
+         * when an existing pool is in use:
+         * - The old pool will no longer be referenced.
+         * - Buffers being read in the old pool will be no longer referenced
+         * after the read operation is complete.
+         * - The currentWriteBuffer field is not changed here; it will be no
+         * longer referenced after it is written to the file and a new
+         * currentWriteBuffer is assigned.
+         * - The logBufferSize can be changed now because it is only used for
+         * allocating new buffers; it is not used as the size of the
+         * currentWriteBuffer.
+         */
+        bufferPoolLatch.acquire();
+        bufferPool = newPool;
+        logBufferSize = newBufferSize;
+        bufferPoolLatch.release();
+    }
+
+    /**
+     * Get a log buffer for writing sizeNeeded bytes. If currentWriteBuffer is
+     * too small or too full, flush currentWriteBuffer and get a new one.
+     * Called within the log write latch.
+     *
+     * @return a buffer that can hold sizeNeeded bytes.
+     */
+    LogBuffer getWriteBuffer(int sizeNeeded, boolean flippedFile)
+        throws IOException, DatabaseException {
+
+        /*
+         * We need a new log buffer either because this log buffer is full, or
+         * the LSN has marched along to the next file.  Each log buffer only
+         * holds entries that belong to a single file.  If we've flipped over
+         * into the next file, we'll need to get a new log buffer even if the
+         * current one has room.
+         */
+        if ((!currentWriteBuffer.hasRoom(sizeNeeded)) || flippedFile) {
+
+            /*
+             * Write the currentWriteBuffer to the file and reset
+             * currentWriteBuffer.
+             */
+            writeBufferToFile(sizeNeeded);
+        }
+
+        if (flippedFile) {
+            /* Now that the old buffer has been written to disk, fsync. */
+            if (!runInMemory) {
+                fileManager.syncLogEndAndFinishFile();
+            }
+        }
+
+        return currentWriteBuffer;
+    }
+
+    /**
+     * Write the contents of the currentWriteBuffer to disk.  Leave this buffer
+     * in memory to be available to would-be readers.  Set up a new
+     * currentWriteBuffer. Assumes the log write latch is held.
+     *
+     * @param sizeNeeded is the size of the next object we need to write to
+     * the log. May be 0 if this is called on behalf of LogManager.flush().
+     */
+    void writeBufferToFile(int sizeNeeded)
+        throws IOException, DatabaseException {
+
+        int bufferSize =
+            ((logBufferSize > sizeNeeded) ? logBufferSize : sizeNeeded);
+
+        /* We're done with the buffer, flip to make it readable. */
+        currentWriteBuffer.latchForWrite();
+        LogBuffer latchedBuffer = currentWriteBuffer;
+        try {
+            ByteBuffer currentByteBuffer = latchedBuffer.getDataBuffer();
+            int savePosition = currentByteBuffer.position();
+            int saveLimit = currentByteBuffer.limit();
+            currentByteBuffer.flip();
+
+            /* Dispose of it and get a new buffer for writing. */
+            if (runInMemory) {
+                /* We're done with the current buffer. */
+                latchedBuffer.release();
+                latchedBuffer = null;
+                /* We're supposed to run in-memory, allocate another buffer. */
+                bufferPoolLatch.acquire();
+                currentWriteBuffer = new LogBuffer(bufferSize, envImpl);
+                bufferPool.add(currentWriteBuffer);
+                bufferPoolLatch.release();
+            } else {
+
+                /*
+                 * If we're configured for writing (not memory-only situation),
+                 * write this buffer to disk and find a new buffer to use.
+                 */
+                try {
+                    fileManager.writeLogBuffer(latchedBuffer);
+
+                    /* Rewind so readers can see this. */
+                    latchedBuffer.getDataBuffer().rewind();
+
+                    /* We're done with the current buffer. */
+                    latchedBuffer.release();
+                    latchedBuffer = null;
+
+                    /*
+                     * Now look in the linked list for a buffer of the right
+                     * size.
+                     */
+                    LogBuffer nextToUse = null;
+                    try {
+                        bufferPoolLatch.acquire();
+                        Iterator<LogBuffer> iter = bufferPool.iterator();
+                        nextToUse = iter.next();
+
+                        boolean done = bufferPool.remove(nextToUse);
+                        assert done;
+                        nextToUse.reinit();
+
+                        /* Put the nextToUse buffer at the end of the queue. */
+                        bufferPool.add(nextToUse);
+
+                        /* Assign currentWriteBuffer with the latch held. */
+                        currentWriteBuffer = nextToUse;
+                    } finally {
+                        bufferPoolLatch.releaseIfOwner();
+                    }
+                } catch (DatabaseException DE) {
+                    currentByteBuffer.position(savePosition);
+                    currentByteBuffer.limit(saveLimit);
+                    throw DE;
+                }
+            }
+        } finally {
+            if (latchedBuffer != null) {
+                latchedBuffer.release();
+            }
+        }
+    }
+
+    /**
+     * A loggable object has been freshly marshalled into the write log buffer.
+     * 1. Update buffer so it knows what LSNs it contains.
+     * 2. If this object requires a flush, write this buffer out to the
+     * backing file.
+     * Assumes log write latch is held.
+     */
+    void writeCompleted(long lsn, boolean flushRequired)
+        throws DatabaseException, IOException  {
+
+        currentWriteBuffer.registerLsn(lsn);
+        if (flushRequired) {
+            writeBufferToFile(0);
+        }
+    }
+
+    /**
+     * Find a buffer that holds this LSN.
+     * @return the buffer that contains this LSN, latched and ready to
+     *         read, or null if no buffer holds this LSN.
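+     *
+     * An illustrative calling pattern; the caller is responsible for
+     * releasing the latched buffer:
+     * <pre>
+     *   LogBuffer logBuffer = getReadBuffer(lsn);
+     *   if (logBuffer != null) {
+     *       try {
+     *           ByteBuffer data =
+     *               logBuffer.getBytes(DbLsn.getFileOffset(lsn));
+     *           // read the entry from data
+     *       } finally {
+     *           logBuffer.release();
+     *       }
+     *   }
+     * </pre>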
+     */
+    LogBuffer getReadBuffer(long lsn)
+        throws DatabaseException {
+
+        LogBuffer foundBuffer = null;
+
+        bufferPoolLatch.acquire();
+        try {
+            nNotResident++;
+            Iterator<LogBuffer> iter = bufferPool.iterator();
+            while (iter.hasNext()) {
+                LogBuffer l = iter.next();
+                if (l.containsLsn(lsn)) {
+                    foundBuffer = l;
+                    break;
+                }
+            }
+
+            /*
+             * Check the currentWriteBuffer separately, since if the pool was
+             * recently reset it will not be in the pool.
+             */
+            if (foundBuffer == null &&
+                currentWriteBuffer.containsLsn(lsn)) {
+                foundBuffer = currentWriteBuffer;
+            }
+
+            if (foundBuffer == null) {
+                nCacheMiss++;
+            }
+
+        } finally {
+            bufferPoolLatch.releaseIfOwner();
+        }
+
+        return foundBuffer;
+    }
+
+    void loadStats(StatsConfig config, EnvironmentStats stats)
+        throws DatabaseException {
+
+        stats.setNCacheMiss(nCacheMiss);
+        stats.setNNotResident(nNotResident);
+        if (config.getClear()) {
+            nCacheMiss = 0;
+            nNotResident = 0;
+        }
+
+        /* Also return buffer pool memory usage */
+        bufferPoolLatch.acquire();
+        long bufferBytes = 0;
+        int nLogBuffers = 0;
+        try {
+            Iterator<LogBuffer> iter = bufferPool.iterator();
+            while (iter.hasNext()) {
+                LogBuffer l = iter.next();
+                nLogBuffers++;
+                bufferBytes += l.getCapacity();
+            }
+        } finally {
+            bufferPoolLatch.release();
+        }
+        stats.setNLogBuffers(nLogBuffers);
+        stats.setBufferBytes(bufferBytes);
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogContext.java b/src/com/sleepycat/je/log/LogContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..e5105675974b20ebbf8f7b8e934d47b8fe1a56a5
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogContext.java
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogContext.java,v 1.1.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+
+/**
+ * Context parameters that apply to all logged items when multiple items are
+ * logged in one log operation.  Passed to LogManager log methods and to
+ * beforeLog and afterLog methods.
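+ *
+ * An illustrative setup (field values here are examples only):
+ * <pre>
+ *   LogContext context = new LogContext();
+ *   context.nodeDb = databaseImpl;  // null if the entry is not a node
+ *   context.flushRequired = true;   // write buffers to the file system
+ *   context.backgroundIO = true;    // throttle as background IO
+ * </pre>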
+ */
+public class LogContext {
+
+    /**
+     * Database of the node(s), or null if entry is not a node.  Used for per-
+     * database utilization tracking.
+     *
+     * Set by caller.
+     */
+    public DatabaseImpl nodeDb = null;
+
+    /**
+     * Whether the log buffer(s) must be written to the file system.
+     *
+     * Set by caller.
+     */
+    public boolean flushRequired = false;
+
+    /**
+     * Whether a new log file must be created to contain the logged
+     * item(s).
+     *
+     * Set by caller.
+     */
+    public boolean forceNewLogFile = false;
+
+    /**
+     * Whether an fsync must be performed after writing the item(s) to the log.
+     *
+     * Set by caller.
+     */
+    public boolean fsyncRequired = false;
+
+    /**
+     * Whether the write should be counted as background IO when throttling of
+     * background IO is configured.
+     *
+     * Set by caller.
+     */
+    public boolean backgroundIO = false;
+
+    /* Fields used internally by log method. */
+    boolean wakeupCleaner = false;
+    int totalNewSize = 0;
+}
diff --git a/src/com/sleepycat/je/log/LogEntryHeader.java b/src/com/sleepycat/je/log/LogEntryHeader.java
new file mode 100644
index 0000000000000000000000000000000000000000..f6c8cf680fa43548f79e9fa139501bac97265589
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogEntryHeader.java
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntryHeader.java,v 1.26.2.2 2010/01/04 15:30:29 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.Adler32;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A LogEntryHeader embodies the header information at the beginning of each
+ * log entry.
+ */
+public class LogEntryHeader {
+
+    /**
+     * Persistent fields. Layout on disk is
+     * (invariant) checksum - 4 bytes
+     * (invariant) entry type - 1 byte
+     * (invariant) entry version and flags - 1 byte
+     * (invariant) offset of previous log entry - 4 bytes
+     * (invariant) item size (not counting header size) - 4 bytes
+     * (optional) vlsn - 8 bytes
+     *
+     * Flags:
+     * The provisional bit can be set for any log type in the log. It's an
+     * indication to recovery that the entry shouldn't be processed when
+     * rebuilding the tree. It's used to ensure the atomic logging of multiple
+     * entries.
+     *
+     * The replicated bit is set when this particular log entry is
+     * part of the replication stream and contains a VLSN in the header.
+     */
+
+    /* The invariant size of the log entry header. */
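+    /* (= checksum 4 + entry type 1 + version/flags 1 + prev offset 4 +
+        item size 4, per the layout described above) */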
+    static final int MIN_HEADER_SIZE = 14;
+
+    /* Only used for tests and asserts. */
+    public static final int MAX_HEADER_SIZE = MIN_HEADER_SIZE + VLSN.LOG_SIZE;
+
+    private static final int CHECKSUM_BYTES = 4;
+    private static final int ENTRYTYPE_OFFSET = 4;
+    private static final int PREV_OFFSET = 6;
+    private static final int ITEMSIZE_OFFSET = 10;
+    private static final int VLSN_OFFSET = MIN_HEADER_SIZE;
+
+    /* Flags stored in the version field. */
+    private static final byte PROVISIONAL_ALWAYS_MASK = (byte) 0x80;
+    private static final byte IGNORE_PROVISIONAL_ALWAYS =
+                             ~PROVISIONAL_ALWAYS_MASK;
+    private static final byte PROVISIONAL_BEFORE_CKPT_END_MASK = (byte) 0x40;
+    private static final byte IGNORE_PROVISIONAL_BEFORE_CKPT_END =
+                             ~PROVISIONAL_BEFORE_CKPT_END_MASK;
+    private static final byte REPLICATED_MASK = (byte) 0x20;
+    private static final byte IGNORE_REPLICATED = ~REPLICATED_MASK;
+
+    private long checksumVal;   // stored in 4 bytes as an unsigned int
+    private byte entryType;
+    private byte entryVersion;
+    private long prevOffset;
+    private int itemSize;
+    private VLSN vlsn;
+
+    /* Version flag fields */
+    private Provisional provisional;
+    private boolean replicated;
+
+    /**
+     * For reading a log entry.
+     * @param anticipateChecksumErrors if true, checksum errors are expected
+     * and the environment is not invalidated when the entry header is
+     * invalid.
+     * @throws DbChecksumException if the entry is invalid.
+     * If anticipateChecksumErrors is false and envImpl is not null, the
+     * environment is also invalidated.
+     */
+    public LogEntryHeader(EnvironmentImpl envImpl,
+                          ByteBuffer entryBuffer,
+                          boolean anticipateChecksumErrors)
+	throws DbChecksumException {
+
+        checksumVal = LogUtils.readUnsignedInt(entryBuffer);
+        entryType = entryBuffer.get();
+        if (!LogEntryType.isValidType(entryType)) {
+            throw new DbChecksumException
+		((anticipateChecksumErrors ? null : envImpl),
+                 "Read invalid log entry type: " +  entryType);
+        }
+
+        entryVersion = entryBuffer.get();
+        prevOffset = LogUtils.readUnsignedInt(entryBuffer);
+        itemSize = LogUtils.readInt(entryBuffer);
+
+        if ((entryVersion & PROVISIONAL_ALWAYS_MASK) != 0) {
+            provisional = Provisional.YES;
+        } else if ((entryVersion & PROVISIONAL_BEFORE_CKPT_END_MASK) != 0) {
+            provisional = Provisional.BEFORE_CKPT_END;
+        } else {
+            provisional = Provisional.NO;
+        }
+        replicated = ((entryVersion & REPLICATED_MASK) != 0);
+        entryVersion &= IGNORE_PROVISIONAL_ALWAYS;
+        entryVersion &= IGNORE_PROVISIONAL_BEFORE_CKPT_END;
+        entryVersion &= IGNORE_REPLICATED;
+    }
+
+    /**
+     * For writing a log header.  public for unit tests.
+     */
+    public LogEntryHeader(LogEntry entry,
+			  Provisional provisional,
+			  ReplicationContext repContext) {
+
+        LogEntryType logEntryType = entry.getLogType();
+        entryType = logEntryType.getTypeNum();
+        entryVersion = LogEntryType.LOG_VERSION;
+        this.itemSize = entry.getSize();
+        this.provisional = provisional;
+
+        assert (!((!logEntryType.isReplicationPossible()) &&
+                  repContext.inReplicationStream())) :
+            logEntryType + " should never be replicated.";
+
+        if (logEntryType.isReplicationPossible()) {
+            this.replicated = repContext.inReplicationStream();
+        } else {
+            this.replicated = false;
+        }
+    }
+
+    public long getChecksum() {
+        return checksumVal;
+    }
+
+    public byte getType() {
+        return entryType;
+    }
+
+    public byte getVersion() {
+        return entryVersion;
+    }
+
+    public long getPrevOffset() {
+        return prevOffset;
+    }
+
+    public int getItemSize() {
+        return itemSize;
+    }
+
+    public VLSN getVLSN() {
+        return vlsn;
+    }
+
+    public boolean getReplicated() {
+        return replicated;
+    }
+
+    public Provisional getProvisional() {
+        return provisional;
+    }
+
+    public int getVariablePortionSize() {
+        return VLSN.LOG_SIZE;
+    }
+
+    /**
+     * @return number of bytes used to store this header
+     */
+    public int getSize() {
+        if (replicated) {
+            return MIN_HEADER_SIZE + VLSN.LOG_SIZE;
+        } else {
+            return MIN_HEADER_SIZE;
+        }
+    }
+
+    /**
+     * @return the number of bytes used to store the header, excepting
+     * the checksum field.
+     */
+    int getSizeMinusChecksum() {
+        return getSize() - CHECKSUM_BYTES;
+    }
+
+    /**
+     * @return the invariant (minimum) number of bytes used to store the
+     * header, excepting the checksum field and any variable portion.
+     */
+    int getInvariantSizeMinusChecksum() {
+        return MIN_HEADER_SIZE - CHECKSUM_BYTES;
+    }
+
+    /**
+     * Assumes this is called directly after the constructor, and that the
+     * entryBuffer is positioned right before the VLSN.
+     */
+    public void readVariablePortion(ByteBuffer entryBuffer)
+        throws LogException {
+
+        if (replicated) {
+            vlsn = new VLSN();
+            vlsn.readFromLog(entryBuffer, entryVersion);
+        }
+    }
+
+    /**
+     * Serialize this object into the buffer and leave the buffer positioned in
+     * the right place to write the following item.  The checksum, prevEntry,
+     * and vlsn values will be filled in later on.
+     *
+     * public for unit tests.
+     */
+    public void writeToLog(ByteBuffer entryBuffer) {
+
+        /* Skip over the checksumVal, proceed to the entry type. */
+        entryBuffer.position(ENTRYTYPE_OFFSET);
+        entryBuffer.put(entryType);
+
+        /* version and flags */
+        byte versionFlags = entryVersion;
+        if (provisional == Provisional.YES) {
+            versionFlags |= PROVISIONAL_ALWAYS_MASK;
+        } else if (provisional == Provisional.BEFORE_CKPT_END) {
+            versionFlags |= PROVISIONAL_BEFORE_CKPT_END_MASK;
+        }
+        if (replicated) {
+            versionFlags |= REPLICATED_MASK;
+        }
+        entryBuffer.put(versionFlags);
+
+        /*
+         * Leave room for the prev offset, which must be added under
+         * the log write latch. Proceed to write the item size.
+         */
+        entryBuffer.position(ITEMSIZE_OFFSET);
+        LogUtils.writeInt(entryBuffer, itemSize);
+
+        /*
+         * Leave room for a VLSN if needed; it must also be generated
+         * under the log write latch.
+         */
+        if (replicated) {
+            entryBuffer.position(entryBuffer.position() + VLSN.LOG_SIZE);
+        }
+    }
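+
+    /*
+     * Sketch of the overall write protocol (see LogManager.multiLog and
+     * LogManager.serialLogInternal):
+     *
+     *   header = new LogEntryHeader(entry, provisional, repContext);
+     *   buffer = marshallIntoBuffer(header, entry);  // calls writeToLog
+     *   // ...later, under the log write latch...
+     *   buffer = header.addPostMarshallingInfo
+     *       (envImpl, buffer, fileManager.getPrevEntryOffset(), repContext);
+     */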
+
+    /**
+     * Add those parts of the header that must be calculated later to the
+     * entryBuffer, and also assign the fields in this class.
+     * That's
+     * - the prev offset, which must be done within the log write latch to
+     *   be sure what that lsn is
+     * - the VLSN, for the same reason
+     * - the checksumVal, which must be added last, after all other
+     *   fields are marshalled.
+     * (public for unit tests)
+     */
+    public ByteBuffer addPostMarshallingInfo(EnvironmentImpl envImpl,
+                                             ByteBuffer entryBuffer,
+                                             long lastOffset,
+                                             ReplicationContext repContext) {
+
+        /* Add the prev pointer */
+        prevOffset = lastOffset;
+        entryBuffer.position(PREV_OFFSET);
+        LogUtils.writeUnsignedInt(entryBuffer, prevOffset);
+
+        /* Add the optional VLSN */
+        if (repContext.inReplicationStream()) {
+            entryBuffer.position(VLSN_OFFSET);
+
+            if (repContext.mustGenerateVLSN()) {
+                vlsn = envImpl.getReplicator().bumpVLSN();
+            } else {
+                vlsn = repContext.getClientVLSN();
+            }
+            vlsn.writeToLog(entryBuffer);
+        }
+
+        /*
+         * Now calculate the checksumVal and write it into the buffer.  Be sure
+         * to set the field in this instance, for use later when printing or
+         * debugging the header.
+         */
+        Checksum checksum = Adler32.makeChecksum();
+        checksum.update(entryBuffer.array(),
+                        entryBuffer.arrayOffset() + CHECKSUM_BYTES,
+                        entryBuffer.limit() - CHECKSUM_BYTES);
+        entryBuffer.position(0);
+        checksumVal = checksum.getValue();
+        LogUtils.writeUnsignedInt(entryBuffer, checksumVal);
+
+        /* Leave this buffer ready for copying into another buffer. */
+        entryBuffer.position(0);
+
+        return entryBuffer;
+    }
+
+    /**
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<hdr ");
+        dumpLogNoTag(sb, verbose);
+        sb.append("\"/>");
+    }
+
+    /**
+     * Dump the header without an enclosing <hdr> tag. Used by DbPrintLog to
+     * fold the header attributes into the <entry> tag for a more compact
+     * rendering.
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    void dumpLogNoTag(StringBuffer sb, boolean verbose) {
+        LogEntryType lastEntryType = LogEntryType.findType(entryType);
+
+        sb.append("type=\"").append(lastEntryType.toStringNoVersion()).
+	    append("/").append((int) entryVersion);
+        if (provisional != Provisional.NO) {
+            sb.append("\" prov=\"");
+            sb.append(provisional);
+        }
+        if (replicated) {
+            sb.append("\" rep=\"true");
+        }
+        if (vlsn != null) {
+            sb.append("\" ");
+            vlsn.dumpLog(sb, verbose);
+        } else {
+            sb.append("\"");
+        }
+        sb.append(" prev=\"0x").append(Long.toHexString(prevOffset));
+        if (verbose) {
+            sb.append("\" size=\"").append(itemSize);
+            sb.append("\" cksum=\"").append(checksumVal);
+        }
+    }
+
+    /**
+     * For use in special case where commits are transformed to aborts because
+     * of i/o errors during a logBuffer flush. See [11271].
+     * Assumes that the entryBuffer is positioned at the start of the item.
+     * Return with the entryBuffer positioned to the end of the log entry.
+     */
+    void convertCommitToAbort(ByteBuffer entryBuffer) {
+        assert (entryType == LogEntryType.LOG_TXN_COMMIT.getTypeNum());
+
+        /* Remember the start of the entry item. */
+        int itemStart = entryBuffer.position();
+
+        /* Back up to where the type is stored and change the type. */
+        int entryTypePosition =
+            itemStart - (getSize() - ENTRYTYPE_OFFSET);
+        entryBuffer.position(entryTypePosition);
+        entryBuffer.put(LogEntryType.LOG_TXN_ABORT.getTypeNum());
+
+        /*
+         * Recalculate the checksum. This byte buffer could be large,
+         * so don't just turn the whole buffer into an array to pass
+         * into the checksum object.
+         */
+        Checksum checksum = Adler32.makeChecksum();
+        int checksumSize = itemSize + (getSize() - CHECKSUM_BYTES);
+        checksum.update(entryBuffer.array(),
+                        entryTypePosition + entryBuffer.arrayOffset(),
+                        checksumSize);
+        entryBuffer.position(itemStart - getSize());
+        checksumVal = checksum.getValue();
+        LogUtils.writeUnsignedInt(entryBuffer, checksumVal);
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        dumpLog(sb, true /* verbose */);
+        return sb.toString();
+    }
+
+    /**
+     * @return true if these two log headers are logically the same.
+     * Used for replication.
+     */
+    public boolean logicalEquals(LogEntryHeader other) {
+        /* 
+         * Note that item size is not part of the logical equality, because
+         * on-disk compression can make itemSize vary if the entry has VLSNs
+         * that were packed differently.
+         */
+        return ((getType() == other.getType()) &&
+                (getVersion() == other.getVersion()) &&
+                (getVLSN().equals(other.getVLSN())) &&
+                (getReplicated() == other.getReplicated()));
+
+    }
+
+    /**
+     * Return whether the log entry represented by this byte buffer is a
+     * replication sync possible type log entry. Leaves the byte buffer's
+     * position unchanged.
+     */
+    public static boolean isSyncPoint(ByteBuffer buffer) 
+        throws DbChecksumException {
+
+        buffer.mark();
+        LogEntryHeader header = 
+            new LogEntryHeader(null,  // envImpl, for checksum
+                               buffer,
+                               true); // anticipateChecksumErrors
+        buffer.reset();
+        return LogEntryType.isSyncPoint(header.getType());
+    }
+
+    /**
+     * Return the VLSN for the log entry header in this byte buffer. Leaves the
+     * byte buffer's position unchanged.
+     */
+    public static VLSN getVLSN(ByteBuffer buffer) 
+        throws DatabaseException {
+
+        buffer.mark();
+        LogEntryHeader header = 
+            new LogEntryHeader(null, // envImpl
+                               buffer,
+                               true); // anticipateChecksumErrors
+
+        header.readVariablePortion(buffer);
+        buffer.reset();
+        return header.getVLSN();
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogEntryType.java b/src/com/sleepycat/je/log/LogEntryType.java
new file mode 100644
index 0000000000000000000000000000000000000000..a601c5423963e838e396810ee9adf275ddf9f785
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogEntryType.java
@@ -0,0 +1,612 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntryType.java,v 1.99.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.entry.BINDeltaLogEntry;
+import com.sleepycat.je.log.entry.DeletedDupLNLogEntry;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.NameLNLogEntry;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+
+/**
+ * LogEntryType is an enumeration of all log entry types.
+ *
+ * <p>Log entries are versioned. When changing the persistent form of a log
+ * entry in any way that is incompatible with prior releases, make sure the
+ * LogEntry instance is capable of reading in older versions from the log and
+ * be sure to increment LOG_VERSION.  The LogEntry.readEntry and
+ * Loggable.readFromLog methods should check the actual version of the entry.
+ * If it is less than LOG_VERSION, the old version should be converted to the
+ * current version.
+ *
+ * <p>Prior to LOG_VERSION 6, each log entry type had a separate version number
+ * that was incremented only when that log version changed.  From LOG_VERSION 6
+ * onward, all types use the same version, the LOG_VERSION constant.  For
+ * versions prior to 6, the readEntry and readFromLog methods will be checking
+ * the old per-type version.  There is no overlap between the old per-type
+ * versions and the LOG_VERSION values, because the per-type values are all
+ * below 6. [#15365]</p>
+ *
+ * <p>The LogEntry instance must be sure that older versions are converted in
+ * memory into a correct instance of the newest version, so that when that
+ * LogEntry object is written again as the result of migration or eviction,
+ * the resulting new log entry conforms to the requirements of the new
+ * version.  If context objects are required for data conversion, the
+ * conversion can be done in the Node.postFetchInit method.</p>
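+ *
+ * <p>For example (a sketch only; the field names here are hypothetical), a
+ * Loggable.readFromLog implementation might convert on the fly:</p>
+ * <pre>
+ *    public void readFromLog(ByteBuffer buffer, byte entryVersion) {
+ *        oldField = LogUtils.readInt(buffer);
+ *        if (entryVersion >= 6) {
+ *            fieldAddedInV6 = LogUtils.readInt(buffer);
+ *        } else {
+ *            fieldAddedInV6 = DEFAULT_V6_VALUE;  // convert older entries
+ *        }
+ *    }
+ * </pre>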
+ */
+public class LogEntryType {
+
+    /**
+     * Version of the file header, which identifies the version of all entries
+     * in that file.
+     *
+     * Changes to log entries for each version are:
+     *
+     * Version 3
+     * ---------
+     * [12328] Add main and dupe tree fanout values for DatabaseImpl.
+     * [12557] Add IN LSN array compression.
+     * [11597] Add a change to FileSummaryLNs: obsolete offset tracking was
+     * added and multiple records are stored for a single file rather than a
+     * single record.  Each record contains the offsets that were tracked since
+     * the last record was written.
+     * [11597] Add the full obsolete LSN in LNLogEntry.
+     *
+     * Version 4
+     * ---------
+     * [#14422] Bump MapLN version from 1 to 2.  Instead of a String for the
+     * comparator class name, store either a serialized string or Comparator.
+     *
+     * Version 5
+     * ---------
+     * [#15195] FileSummaryLN version 3.  Add FileSummary.obsoleteLNSize and
+     * obsoleteLNSizeCounted fields.
+     *
+     * Version 6 (in JE 3.3.X)
+     * ---------
+     * [#15365] From this point onward, all log entries have the same version,
+     * LOG_VERSION, rather than using per-type versions.
+     * [#15365] DatabaseImpl stores a map of DbFileSummaries.
+     *
+     * [#13467] Convert duplicatesAllowed boolean to DUPS_ALLOWED_BIT flag in
+     * DatabaseImpl. Add REPLICATED_BIT flag to DatabaseImpl.
+     * [#13467] Add REPLICATED_BIT to DbTree.
+     * [#13467] Add ReplicatedDatabaseConfig to NameLN_TX to support
+     * replication of database operations.
+     *
+     * [#15581] Add lastAllocateReplicatedDbId to DbTree
+     * [#16083] Add replication master node id to txn commit/abort 
+     */
+    public static final byte LOG_VERSION = 6;
+
+    /*
+     * Collection of log entry type classes, used to read the log.  Note that
+     * this must be declared before any instances of LogEntryType, since the
+     * constructor uses this map. Each statically defined LogEntryType should
+     * register itself with this collection.
+     */
+    private static final int MAX_TYPE_NUM = 27;
+
+    private static LogEntryType[] LOG_TYPES = new LogEntryType[MAX_TYPE_NUM];
+
+    /*
+     * Enumeration of log entry types. The log entry type represents the 2
+     * byte field that starts every log entry. The top byte is the log type,
+     * the bottom byte holds the version value, provisional bit, and
+     * replicated bit.
+     *
+     *  Log type (8 bits)
+     *  Provisional (2 bits) | Replicated (1 bit) | Version (5 bits)
+     *
+     * The top byte (log type) identifies the type and can be used to
+     * lookup the LogEntryType object, while the bottom byte has
+     * information about the entry (instance) of this type.  The bottom
+     * byte is effectively entry header information that is common to
+     * all types and is managed by methods in LogEntryHeader. See
+     * LogEntryHeader.java
+     */
+
+    /*  Node types */
+    public static final LogEntryType LOG_LN_TRANSACTIONAL =
+        new LogEntryType((byte) 1, "LN_TX",
+                         new LNLogEntry(com.sleepycat.je.tree.LN.class),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_LN =
+        new LogEntryType((byte) 2, "LN",
+                         new LNLogEntry(com.sleepycat.je.tree.LN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_MAPLN_TRANSACTIONAL =
+        new LogEntryType((byte) 3, "MapLN_TX",
+                         new LNLogEntry(com.sleepycat.je.tree.MapLN.class),
+                         Txnal.TXNAL,
+                         Marshall.INSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_MAPLN =
+        new LogEntryType((byte) 4, "MapLN",
+                         new LNLogEntry(com.sleepycat.je.tree.MapLN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.INSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_NAMELN_TRANSACTIONAL =
+        new LogEntryType((byte) 5, "NameLN_TX",
+                         new NameLNLogEntry(),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_NAMELN =
+        new LogEntryType((byte) 6, "NameLN",
+                         new NameLNLogEntry(),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_DEL_DUPLN_TRANSACTIONAL =
+        new LogEntryType((byte) 7, "DelDupLN_TX",
+                         new DeletedDupLNLogEntry(),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_DEL_DUPLN =
+        new LogEntryType((byte) 8, "DelDupLN",
+                         new DeletedDupLNLogEntry(),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    public static final LogEntryType LOG_DUPCOUNTLN_TRANSACTIONAL =
+        new LogEntryType((byte) 9, "DupCountLN_TX",
+                 new LNLogEntry(com.sleepycat.je.tree.DupCountLN.class),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_DUPCOUNTLN =
+        new LogEntryType((byte) 10, "DupCountLN",
+                 new LNLogEntry(com.sleepycat.je.tree.DupCountLN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_FILESUMMARYLN =
+        new LogEntryType((byte) 11, "FileSummaryLN",
+              new LNLogEntry(com.sleepycat.je.tree.FileSummaryLN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.INSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_IN =
+        new LogEntryType((byte) 12, "IN",
+                         new INLogEntry(com.sleepycat.je.tree.IN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_BIN =
+        new LogEntryType((byte) 13, "BIN",
+                         new INLogEntry(com.sleepycat.je.tree.BIN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_DIN =
+        new LogEntryType((byte) 14, "DIN",
+                         new INLogEntry(com.sleepycat.je.tree.DIN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_DBIN =
+        new LogEntryType((byte) 15, "DBIN",
+                         new INLogEntry(com.sleepycat.je.tree.DBIN.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType[] IN_TYPES = {
+        LogEntryType.LOG_IN,
+        LogEntryType.LOG_BIN,
+        LogEntryType.LOG_DIN,
+        LogEntryType.LOG_DBIN,
+    };
+
+    /*** If you add new types, be sure to update MAX_TYPE_NUM at the top.***/
+
+    private static final int MAX_NODE_TYPE_NUM = 15;
+
+    public static boolean isNodeType(byte typeNum) {
+        return (typeNum <= MAX_NODE_TYPE_NUM);
+    }
+
+    /* Root */
+    public static final LogEntryType LOG_ROOT =
+        new LogEntryType((byte) 16, "Root",
+                         new SingleItemEntry
+                         (com.sleepycat.je.dbi.DbTree.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.INSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    /* Transactional entries */
+    public static final LogEntryType LOG_TXN_COMMIT =
+        new LogEntryType((byte) 17, "Commit",
+                         new SingleItemEntry
+                         (com.sleepycat.je.txn.TxnCommit.class),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_MATCH);
+
+    public static final LogEntryType LOG_TXN_ABORT =
+        new LogEntryType((byte) 18, "Abort",
+                         new SingleItemEntry
+                         (com.sleepycat.je.txn.TxnAbort.class),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_MATCH);
+
+    public static final LogEntryType LOG_CKPT_START =
+        new LogEntryType((byte) 19, "CkptStart",
+                         new SingleItemEntry
+                         (com.sleepycat.je.recovery.CheckpointStart.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_CKPT_END =
+        new LogEntryType((byte) 20, "CkptEnd",
+                         new SingleItemEntry
+                             (com.sleepycat.je.recovery.CheckpointEnd.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_IN_DELETE_INFO =
+        new LogEntryType((byte) 21, "INDelete",
+                         new SingleItemEntry
+                             (com.sleepycat.je.tree.INDeleteInfo.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_BIN_DELTA =
+        new LogEntryType((byte) 22, "BINDelta",
+                         new BINDeltaLogEntry
+                             (com.sleepycat.je.tree.BINDelta.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_DUP_BIN_DELTA =
+        new LogEntryType((byte) 23, "DupBINDelta",
+                         new BINDeltaLogEntry
+                         (com.sleepycat.je.tree.BINDelta.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    /* Administrative entries */
+    public static final LogEntryType LOG_TRACE =
+        new LogEntryType((byte) 24, "Trace",
+                         new SingleItemEntry
+                         (com.sleepycat.je.utilint.Tracer.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.REPLICABLE_NO_MATCH);
+
+    /* File header */
+    public static final LogEntryType LOG_FILE_HEADER =
+        new LogEntryType((byte) 25, "FileHeader",
+                         new SingleItemEntry
+                         (com.sleepycat.je.log.FileHeader.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_IN_DUPDELETE_INFO =
+        new LogEntryType((byte) 26, "INDupDelete",
+                         new SingleItemEntry
+                         (com.sleepycat.je.tree.INDupDeleteInfo.class),
+                         Txnal.NON_TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    public static final LogEntryType LOG_TXN_PREPARE =
+        new LogEntryType((byte) 27, "Prepare",
+                         new SingleItemEntry
+                         (com.sleepycat.je.txn.TxnPrepare.class),
+                         Txnal.TXNAL,
+                         Marshall.OUTSIDE_LATCH,
+                         Replicable.LOCAL);
+
+    /*** If you add new types, be sure to update MAX_TYPE_NUM at the top.***/
+
+    /* Persistent fields */
+    final private byte typeNum; // persistent value for this entry type
+
+    /* Transient fields */
+    final private String displayName;
+    final private LogEntry logEntry;
+
+    /*
+     * Attributes
+     */
+
+    /* Whether the log entry holds transactional information. */
+    private Txnal isTransactional;
+
+    /*
+     * Whether this log entry is marshalled outside or inside the log write
+     * latch.
+     */
+    private Marshall marshallBehavior;
+
+    /* Can this log entry be put in the replication stream? */
+    private Replicable replicationPossible;
+
+    /*
+     * Constructors
+     */
+
+    /*
+     * For base class support. This constructor is used only when the
+     * LogEntryType is being used as a key for a map. No log types can be
+     * defined outside this package.
+     */
+    LogEntryType(byte typeNum) {
+        this.typeNum = typeNum;
+        displayName = null;
+        logEntry = null;
+    }
+
+    /**
+     * Create the static log types.
+     * @param isTransactional TXNAL if this type of log entry holds data
+     * involved in a transaction. For example, transaction commit and LN data
+     * records are transactional, but INs are not.
+     * @param marshallBehavior OUTSIDE_LATCH if this type of log entry may be
+     * serialized outside the log write latch. This is true of the majority of
+     * types. Certain types like the FileSummaryLN rely on the log write latch
+     * to enforce serial semantics.
+     * @param replicationPossible REPLICABLE_MATCH or REPLICABLE_NO_MATCH if
+     * this type of log entry can be shared with a replication group.
+     */
+    private LogEntryType(byte typeNum,
+                         String displayName,
+                         LogEntry logEntry,
+                         Txnal isTransactional,
+                         Marshall marshallBehavior,
+                         Replicable replicationPossible) {
+
+        this.typeNum = typeNum;
+        this.displayName = displayName;
+        this.logEntry = logEntry;
+        this.isTransactional = isTransactional;
+        this.marshallBehavior = marshallBehavior;
+        this.replicationPossible = replicationPossible;
+        logEntry.setLogType(this);
+        LOG_TYPES[typeNum - 1] = this;
+    }
+
+    public boolean isNodeType() {
+        return (typeNum <= MAX_NODE_TYPE_NUM);
+    }
+
+    /**
+     * @return the static version of this type
+     */
+    public static LogEntryType findType(byte typeNum) {
+        if (typeNum <= 0 || typeNum > MAX_TYPE_NUM) {
+            return null;
+        }
+        return LOG_TYPES[typeNum - 1];
+    }
+
+    /**
+     * Get a copy of all types for unit testing.
+     */
+    public static Set<LogEntryType> getAllTypes() {
+        HashSet<LogEntryType> ret = new HashSet<LogEntryType>();
+
+        for (int i = 0; i < MAX_TYPE_NUM; i++) {
+            ret.add(LOG_TYPES[i]);
+        }
+        return ret;
+    }
+
+    /**
+     * @return the log entry type owned by the shared, static version
+     */
+    public LogEntry getSharedLogEntry() {
+        return logEntry;
+    }
+
+    /**
+     * @return a clone of the shared log entry for this log type.
+     */
+    public LogEntry getNewLogEntry()
+        throws DatabaseException {
+
+        try {
+            return (LogEntry) logEntry.clone();
+        } catch (CloneNotSupportedException e) {
+            throw new DatabaseException(e);
+        }
+    }
+
+    public byte getTypeNum() {
+        return typeNum;
+    }
+
+    /**
+     * @return true if type number is valid.
+     */
+    static boolean isValidType(byte typeNum) {
+        return typeNum > 0 && typeNum <= MAX_TYPE_NUM;
+    }
+
+    public String toStringNoVersion() {
+	return displayName;
+    }
+
+    @Override
+    public String toString() {
+        return displayName;
+    }
+
+    /**
+     * Check for equality without making a new object.
+     */
+    public boolean equalsType(byte typeNum) {
+        return (this.typeNum == typeNum);
+    }
+
+    /*
+     * Override Object.equals. Ignore provisional bit when checking for
+     * equality.
+     */
+    @Override
+    public boolean equals(Object obj) {
+        // Same instance?
+        if (this == obj) {
+            return true;
+        }
+
+        // Is it the right type of object?
+        if (!(obj instanceof LogEntryType)) {
+            return false;
+        }
+
+        return typeNum == ((LogEntryType) obj).typeNum;
+    }
+
+    /**
+     * This is used as a hash key.
+     */
+    @Override
+    public int hashCode() {
+        return typeNum;
+    }
+
+    static enum Txnal {
+        TXNAL(true),
+        NON_TXNAL(false);
+
+        private final boolean isTxnal;
+
+        Txnal(boolean isTxnal) {
+            this.isTxnal = isTxnal;
+        }
+
+        boolean isTransactional() {
+            return isTxnal;
+        }
+    }
+
+    /**
+     * Return true if this log entry has transactional information in it,
+     * like a commit or abort record, or a transactional LN.
+     */
+    public boolean isTransactional() {
+        return isTransactional.isTransactional();
+    }
+
+    static enum Marshall {
+        OUTSIDE_LATCH(true),
+        INSIDE_LATCH(false);
+
+        private final boolean marshallOutsideLatch;
+        
+        Marshall(boolean marshallOutsideLatch) {
+            this.marshallOutsideLatch = marshallOutsideLatch;
+        }
+
+        boolean marshallOutsideLatch() {
+            return marshallOutsideLatch;
+        }
+    }
+
+    /**
+     * Return true if this log entry should be marshalled into a buffer outside
+     * the log write latch. Currently, only the FileSummaryLN and MapLN (which
+     * contains DbFileSummary objects) need to be logged inside the log write
+     * latch.
+     */
+    public boolean marshallOutsideLatch() {
+        return marshallBehavior.marshallOutsideLatch();
+    }
+
+    /* 
+     * Indicates whether this type of log entry is shared in a replicated
+     * environment or not, and whether it can be used as a replication
+     * matchpoint.
+     */
+    static enum Replicable {
+        REPLICABLE_MATCH(true, true),
+        REPLICABLE_NO_MATCH(true, false),
+        LOCAL(false, false);
+
+        private final boolean isReplicable;
+        private final boolean isMatchable;
+        
+        Replicable(boolean isReplicable, boolean isMatchable) {
+            this.isReplicable = isReplicable;
+            this.isMatchable = isMatchable;
+        }
+
+        boolean isReplicable() {
+            return isReplicable;
+        }
+        
+        boolean isMatchable() {
+            return isMatchable;
+        }
+    }
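+
+    /*
+     * For example: LOG_TXN_COMMIT and LOG_TXN_ABORT are REPLICABLE_MATCH
+     * (they carry a replication node id and can serve as matchpoints),
+     * LOG_LN_TRANSACTIONAL is REPLICABLE_NO_MATCH, and INs are LOCAL.
+     */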
+
+    /**
+     * Return true if this type of log entry can be part of the replication
+     * stream. For example, INs can never be replicated, while LNs are
+     * replicated only if their owning database is replicated.
+     */
+    public boolean isReplicationPossible() {
+        return replicationPossible.isReplicable();
+    }
+
+    /**
+     * Return true if this type of log entry can serve as the synchronization
+     * matchpoint for the replication stream. That generally means that this
+     * log entry contains a replication node id.
+     */
+    public boolean isSyncPoint() {
+        return replicationPossible.isMatchable();
+    }
+
+    /**
+     * Return true if this type of log entry can serve as the synchronization
+     * matchpoint for the replication stream. 
+     */
+    public static boolean isSyncPoint(byte entryType) {
+        return findType(entryType).isSyncPoint();
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogException.java b/src/com/sleepycat/je/log/LogException.java
new file mode 100644
index 0000000000000000000000000000000000000000..19aa21d692dddafcc7b062d0eae5ed0ef531edba
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogException.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogException.java,v 1.16.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Log related exceptions.
+ */
+public class LogException extends DatabaseException {
+    public LogException(String message) {
+	super(message);
+    }
+
+    public LogException(String message, Exception e) {
+	super(message, e);
+    }
+}
+
diff --git a/src/com/sleepycat/je/log/LogFileNotFoundException.java b/src/com/sleepycat/je/log/LogFileNotFoundException.java
new file mode 100644
index 0000000000000000000000000000000000000000..0552b735796c5b61c7582f671d139d970765c68d
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogFileNotFoundException.java
@@ -0,0 +1,20 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogFileNotFoundException.java,v 1.13.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+/**
+ * Log file doesn't exist.
+ */
+public class LogFileNotFoundException extends LogException {
+
+    public LogFileNotFoundException(String message) {
+	super(message);
+    }
+}
+
diff --git a/src/com/sleepycat/je/log/LogItem.java b/src/com/sleepycat/je/log/LogItem.java
new file mode 100644
index 0000000000000000000000000000000000000000..12cb590c45c5b4ca765f276405769b773802623d
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogItem.java
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogItem.java,v 1.1.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Item parameters that apply to a single logged item.  Passed to LogManager
+ * log methods and to beforeLog and afterLog methods.
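+ *
+ * <p>A typical single-entry use (a sketch, mirroring LogManager.log):</p>
+ * <pre>
+ *    LogItem item = new LogItem();
+ *    item.entry = entry;
+ *    item.provisional = Provisional.NO;
+ *    item.oldLsn = DbLsn.NULL_LSN;
+ *    item.repContext = ReplicationContext.NO_REPLICATE;
+ *    LogContext context = new LogContext();
+ *    logManager.log(item, context);
+ *    long lsn = item.newLsn;  // filled in by the log method
+ * </pre>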
+ */
+public class LogItem {
+
+    /**
+     * Object to be marshalled and logged.
+     *
+     * Set by caller or beforeLog method.
+     */
+    public LogEntry entry = null;
+
+    /**
+     * The previous version of the node to be counted as obsolete, or NULL_LSN
+     * if the entry is not a node or has no old LSN.
+     *
+     * Set by caller or beforeLog method.
+     */
+    public long oldLsn = DbLsn.NULL_LSN;
+
+    /**
+     * LSN of the new log entry.  NULL_LSN if a BINDelta is logged.  When not
+     * NULL_LSN and the entry is a tree node, it is typically used to update
+     * the slot in the parent IN.
+     *
+     * Set by log or afterLog method.
+     */
+    public long newLsn = DbLsn.NULL_LSN;
+
+    /**
+     * Whether the logged entry should be processed during recovery.
+     *
+     * Set by caller or beforeLog method.
+     */
+    public Provisional provisional = null;
+
+    /**
+     * Whether the logged entry should be replicated.
+     *
+     * Set by caller or beforeLog method.
+     */
+    public ReplicationContext repContext = null;
+
+    /* Fields used internally by log method. */
+    LogEntryHeader header = null;  // created during marshalling
+    ByteBuffer buffer = null;      // holds the marshalled header and item
+    int oldSize = 0;               // last logged size, for obsolete counting
+}
diff --git a/src/com/sleepycat/je/log/LogManager.java b/src/com/sleepycat/je/log/LogManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..90cbbe8ce1c1df2b6c083316ccee0316fdb981e0
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogManager.java
@@ -0,0 +1,1079 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogManager.java,v 1.203.2.4 2010/02/08 17:19:42 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.cleaner.TrackedFileSummary;
+import com.sleepycat.je.cleaner.UtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * The LogManager supports reading and writing to the JE log.
+ */
+public abstract class LogManager {
+
+    /* Debug name for this class; used to identify the log write latch. */
+    private static final String DEBUG_NAME = LogManager.class.getName();
+
+    protected LogBufferPool logBufferPool; // log buffers
+    protected Latch logWriteLatch;           // synchronizes log writes
+    private boolean doChecksumOnRead;      // if true, do checksum on read
+    private FileManager fileManager;       // access to files
+    protected EnvironmentImpl envImpl;
+    private boolean readOnly;
+    private int readBufferSize; // how many bytes to read when faulting in.
+    /* The last LSN in the log during recovery. */
+    private long lastLsnAtRecovery = DbLsn.NULL_LSN;
+
+    /* Stats */
+
+    /*
+     * Number of times we have to repeat a read when we fault in an object
+     * because the initial read was too small.
+     */
+    private int nRepeatFaultReads;
+
+    /*
+     * Number of times we have to use the temporary marshalling buffer to
+     * write to the log.
+     */
+    private long nTempBufferWrites;
+
+    /* For unit tests */
+    private TestHook readHook; // used for generating exceptions on log reads
+
+    /**
+     * There is a single log manager per database environment.
+     */
+    public LogManager(EnvironmentImpl envImpl,
+                      boolean readOnly)
+        throws DatabaseException {
+
+        /* Set up log buffers. */
+        this.envImpl = envImpl;
+        this.fileManager = envImpl.getFileManager();
+        DbConfigManager configManager = envImpl.getConfigManager();
+        this.readOnly = readOnly;
+        logBufferPool = new LogBufferPool(fileManager, envImpl);
+
+        /* See if we're configured to do a checksum when reading in objects. */
+        doChecksumOnRead =
+            configManager.getBoolean(EnvironmentParams.LOG_CHECKSUM_READ);
+
+        logWriteLatch = new Latch(DEBUG_NAME);
+        readBufferSize =
+            configManager.getInt(EnvironmentParams.LOG_FAULT_READ_SIZE);
+    }
+
+    public boolean getChecksumOnRead() {
+        return doChecksumOnRead;
+    }
+
+    public long getLastLsnAtRecovery() {
+        return lastLsnAtRecovery;
+    }
+
+    public void setLastLsnAtRecovery(long lastLsnAtRecovery) {
+        this.lastLsnAtRecovery = lastLsnAtRecovery;
+    }
+
+    /**
+     * Reset the pool when the cache is resized.  This method is called after
+     * the memory budget has been calculated.
+     */
+    public void resetPool(DbConfigManager configManager)
+        throws DatabaseException {
+
+        logBufferPool.reset(configManager);
+    }
+
+    /*
+     * Writing to the log
+     */
+
+    /**
+     * Log this single object and force a write of the log files.
+     * @param entry object to be logged
+     * @param fsyncRequired if true, log files should also be fsynced.
+     * @return LSN of the new log entry
+     */
+    public long logForceFlush(LogEntry entry,
+                              boolean fsyncRequired,
+                              ReplicationContext repContext)
+        throws DatabaseException {
+
+        return log(entry,
+                   Provisional.NO,
+                   true,           // flush required
+                   fsyncRequired,
+                   false,          // forceNewLogFile
+                   false,          // backgroundIO
+                   DbLsn.NULL_LSN, // old lsn
+                   null,           // nodeDb
+                   repContext);    // repContext
+    }
+
+    /**
+     * Log this single object and force a flip of the log files.
+     * @param entry object to be logged
+     * @return LSN of the new log entry
+     */
+    public long logForceFlip(LogEntry entry)
+        throws DatabaseException {
+
+        return log(entry,
+                   Provisional.NO,
+                   true,           // flush required
+                   false,          // fsync required
+                   true,           // forceNewLogFile
+                   false,          // backgroundIO
+                   DbLsn.NULL_LSN, // old lsn
+                   null,           // nodeDb
+                   ReplicationContext.NO_REPLICATE);
+    }
+
+    /**
+     * Write a log entry.
+     * @param entry object to be logged
+     * @return LSN of the new log entry
+     */
+    public long log(LogEntry entry, ReplicationContext repContext)
+        throws DatabaseException {
+
+        return log(entry,
+                   Provisional.NO,
+                   false,           // flush required
+                   false,           // fsync required
+                   false,           // forceNewLogFile
+                   false,           // backgroundIO
+                   DbLsn.NULL_LSN,  // old lsn
+                   null,            // nodeDb
+                   repContext);
+    }
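+
+    /*
+     * For example (a sketch; this assumes SingleItemEntry's write-side
+     * constructor taking a LogEntryType and a Loggable), a debug trace
+     * record could be logged with:
+     *
+     *    long lsn = logManager.log
+     *        (new SingleItemEntry(LogEntryType.LOG_TRACE, traceRecord),
+     *         ReplicationContext.NO_REPLICATE);
+     */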
+
+    /**
+     * Write a log entry.
+     * @param entry object to be logged
+     * @param isProvisional true if this entry should not be read during
+     * recovery.
+     * @param backgroundIO if true, sleep when the backgroundIOLimit is
+     * exceeded.
+     * @param oldNodeLsn is the previous version of the node to be counted as
+     * obsolete, or NULL_LSN if the entry is not a node or has no old LSN.
+     * @param nodeDb database of the node, or null if entry is not a node.
+     * @return LSN of the new log entry
+     */
+    public long log(LogEntry entry,
+                    boolean isProvisional,
+                    boolean backgroundIO,
+                    long oldNodeLsn,
+                    DatabaseImpl nodeDb,
+                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        return log(entry,
+                   isProvisional ? Provisional.YES : Provisional.NO,
+                   false,          // flush required
+                   false,          // fsync required
+                   false,          // forceNewLogFile
+                   backgroundIO,
+                   oldNodeLsn,
+                   nodeDb,
+                   repContext);
+    }
+
+    /**
+     * Write a log entry.
+     * @param entry object to be logged
+     * @param provisional whether this entry should be processed during
+     * recovery.
+     * @param backgroundIO if true, sleep when the backgroundIOLimit is
+     * exceeded.
+     * @param oldNodeLsn is the previous version of the node to be counted as
+     * obsolete, or NULL_LSN if the entry is not a node or has no old LSN.
+     * @param nodeDb database of the node, or null if entry is not a node.
+     * @return LSN of the new log entry
+     */
+    public long log(LogEntry entry,
+                    Provisional provisional,
+                    boolean backgroundIO,
+                    long oldNodeLsn,
+                    DatabaseImpl nodeDb,
+                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        return log(entry,
+                   provisional,
+                   false,          // flush required
+                   false,          // fsync required
+                   false,          // forceNewLogFile
+                   backgroundIO,
+                   oldNodeLsn,
+                   nodeDb,
+                   repContext);
+    }
+
+    /**
+     * Translates individual log params to LogItem and LogContext fields.
+     */
+    private long log(LogEntry entry,
+                     Provisional provisional,
+                     boolean flushRequired,
+                     boolean fsyncRequired,
+                     boolean forceNewLogFile,
+                     boolean backgroundIO,
+                     long oldNodeLsn,
+                     DatabaseImpl nodeDb,
+                     ReplicationContext repContext)
+        throws DatabaseException {
+
+        LogItem item = new LogItem();
+        item.entry = entry;
+        item.provisional = provisional;
+        item.oldLsn = oldNodeLsn;
+        item.repContext = repContext;
+
+        LogContext context = new LogContext();
+        context.flushRequired = flushRequired;
+        context.fsyncRequired = fsyncRequired;
+        context.forceNewLogFile = forceNewLogFile;
+        context.backgroundIO = backgroundIO;
+        context.nodeDb = nodeDb;
+
+        log(item, context);
+
+        return item.newLsn;
+    }
+
+    /**
+     * Convenience method for logging a single entry.
+     */
+    public void log(LogItem item, LogContext context)
+        throws DatabaseException {
+
+        multiLog(new LogItem[] { item }, context);
+    }
+
+    public void multiLog(LogItem[] itemArray, LogContext context)
+        throws DatabaseException {
+
+        if (readOnly || itemArray.length == 0) {
+            return;
+        }
+
+        try {
+            for (LogItem item : itemArray) {
+                LogEntry logEntry = item.entry;
+
+                /*
+                 * Get the old size before marshaling, which updates it.
+                 * Holding the log write latch is not necessary, because the
+                 * parent IN latch prevents other threads from logging this
+                 * node.
+                 */
+                item.oldSize = logEntry.getLastLoggedSize();
+
+                /*
+                 * If possible, marshall this entry outside the log write latch
+                 * to allow greater concurrency by shortening the write
+                 * critical section.  Note that the header may only be created
+                 * during marshalling because it calls entry.getSize().
+                 */
+                if (logEntry.getLogType().marshallOutsideLatch()) {
+                    item.header = new LogEntryHeader
+                        (logEntry, item.provisional, item.repContext);
+                    item.buffer = marshallIntoBuffer(item.header, logEntry);
+                }
+            }
+
+            /*
+             * Perform the serial portion of the log operation, including
+             * appending to the log buffer.
+             */
+            serialLog(itemArray, context);
+
+        } catch (BufferOverflowException e) {
+
+            /*
+             * A BufferOverflowException may be seen when a thread is
+             * interrupted in the middle of logging and the nio direct buffer
+             * is mangled in some way by the NIO libraries. JE applications
+             * should refrain from using thread interrupt as a thread
+             * communications mechanism because nio behavior in the face of
+             * interrupts is uncertain. See SR [#10463].
+             *
+             * One way or another, this type of io exception leaves us in an
+             * unworkable state, so throw a run recovery exception.
+             */
+            throw new RunRecoveryException(envImpl, e);
+        } catch (IOException e) {
+
+            /*
+             * Other IOExceptions, such as out of disk conditions, should
+             * notify the application but leave the environment in workable
+             * condition.
+             */
+            throw new DatabaseException(Tracer.getStackTrace(e), e);
+        }
+
+        /*
+         * Finish up business outside of the log write latch critical section.
+         */
+
+        /*
+         * If this logged object needs to be fsynced, do so now using the group
+         * commit mechanism.
+         */
+        if (context.fsyncRequired) {
+            fileManager.groupSync();
+        }
+
+        for (LogItem item : itemArray) {
+
+            /*
+             * We've logged this log entry from the replication stream. Let
+             * the Replicator know, so this node can create a VLSN->LSN
+             * mapping.  Do this before the ckpt so we have a better chance
+             * of writing this mapping to disk.
+             */
+            if (item.repContext.inReplicationStream()) {
+                assert (item.header.getVLSN() != null) :
+                    "Unexpected null VLSN: " + item.header + " " +
+                    item.repContext;
+                envImpl.getReplicator().registerVLSN(item.newLsn, item.header);
+            }
+        }
+
+        /*
+         * Periodically, as a function of how much data is written, ask the
+         * checkpointer or the cleaner to wake up.
+         */
+        envImpl.getCheckpointer().wakeupAfterWrite();
+        if (context.wakeupCleaner) {
+            envImpl.getUtilizationTracker().activateCleaner();
+        }
+
+        /* Update background writes. */
+        if (context.backgroundIO) {
+            envImpl.updateBackgroundWrites
+                (context.totalNewSize, logBufferPool.getLogBufferSize());
+        }
+    }
+
+    /**
+     * Log one or more items while latched or synchronized in order to
+     * serialize log output.  Implementations of this method call
+     * serialLogInternal.
+     */
+    abstract void serialLog(LogItem[] itemArray, LogContext context)
+        throws IOException, DatabaseException;
+
+    /**
+     * Called within the log write critical section.
+     */
+    void serialLogInternal(LogItem[] itemArray, LogContext context)
+        throws IOException, DatabaseException {
+
+        UtilizationTracker tracker = envImpl.getUtilizationTracker();
+        LogItem firstItem = itemArray[0];
+        LogItem lastItem = itemArray[itemArray.length - 1];
+
+        for (LogItem item : itemArray) {
+            boolean marshallOutsideLatch = (item.buffer != null);
+            boolean isFirstItem = (item == firstItem);
+            boolean isLastItem = (item == lastItem);
+
+            /*
+             * Do obsolete tracking before marshalling a FileSummaryLN into the
+             * log buffer so that a FileSummaryLN counts itself.
+             * countObsoleteNode must be called before computing the entry
+             * size, since it can change the size of a FileSummaryLN entry that
+             * we're logging.
+             */
+            LogEntryType entryType = item.entry.getLogType();
+            if (item.oldLsn != DbLsn.NULL_LSN) {
+                tracker.countObsoleteNode
+                    (item.oldLsn, entryType, item.oldSize, context.nodeDb);
+            }
+
+            /*
+             * If an entry must be protected within the log write latch for
+             * marshalling, take care to also calculate its size in the
+             * protected section. Note that we have to get the size *before*
+             * marshalling so that the currentLsn and size are correct for
+             * utilization tracking.
+             */
+            int entrySize;
+            if (marshallOutsideLatch) {
+                entrySize = item.buffer.limit();
+                assert item.header != null;
+            } else {
+                assert item.header == null;
+                item.header = new LogEntryHeader
+                    (item.entry, item.provisional, item.repContext);
+                entrySize = item.header.getSize() + item.header.getItemSize();
+            }
+
+            /*
+             * Get the next free slot in the log, under the log write latch.
+             * Bump the LSN values, which gives us a valid previous pointer,
+             * which is part of the log entry header. That's why doing the
+             * checksum must be in the log write latch -- we need to bump the
+             * LSN first, and bumping the LSN must be done within the log write
+             * latch.
+             */
+            if (isFirstItem && context.forceNewLogFile) {
+                fileManager.forceNewLogFile();
+            }
+
+            boolean flippedFile = fileManager.bumpLsn(entrySize);
+            long currentLsn = DbLsn.NULL_LSN;
+            boolean usedTemporaryBuffer = false;
+            boolean success = false;
+            try {
+                currentLsn = fileManager.getLastUsedLsn();
+
+                /*
+                 * countNewLogEntry and countObsoleteNodeInexact cannot change
+                 * a FileSummaryLN size, so they are safe to call after
+                 * getSizeForWrite.
+                 */
+                if (tracker.countNewLogEntry
+                    (currentLsn, entryType, entrySize, context.nodeDb)) {
+                    context.wakeupCleaner = true;
+                }
+
+                /*
+                 * LN deletions are obsolete immediately.  Inexact counting is
+                 * used to save resources because the cleaner knows that all
+                 * deleted LNs are obsolete.
+                 */
+                if (item.entry.countAsObsoleteWhenLogged()) {
+                    tracker.countObsoleteNodeInexact
+                        (currentLsn, entryType, entrySize, context.nodeDb);
+                }
+
+                /*
+                 * This entry must be marshalled within the log write latch.
+                 */
+                if (!marshallOutsideLatch) {
+                    assert item.buffer == null;
+                    item.buffer = marshallIntoBuffer(item.header, item.entry);
+                }
+
+                /* Sanity check */
+                if (entrySize != item.buffer.limit()) {
+                    throw new DatabaseException(
+                     "Logged entry entrySize= " + entrySize +
+                     " but marshalledSize=" + item.buffer.limit() +
+                     " type=" + entryType + " currentLsn=" +
+                     DbLsn.getNoFormatString(currentLsn));
+                }
+
+                /*
+                 * Ask for a log buffer suitable for holding this new entry.
+                 * If the current log buffer is full, or if we flipped into a
+                 * new file, write it to disk and get a new, empty log buffer
+                 * to use. The returned buffer will be latched for write.
+                 */
+                LogBuffer useLogBuffer =
+                    logBufferPool.getWriteBuffer(entrySize, flippedFile);
+
+                /* Add checksum, prev offset, and VLSN to the entry. */
+                item.buffer = item.header.addPostMarshallingInfo
+                    (envImpl, item.buffer, fileManager.getPrevEntryOffset(),
+                     item.repContext);
+
+                /*
+                 * If the LogBufferPool buffer (useBuffer) doesn't have
+                 * sufficient space (the pool buffers are fixed size), just
+                 * use the temporary buffer and throw it away when we're done,
+                 * so that we don't grow the LogBuffers in the pool
+                 * permanently. We risk an OOME from this temporary
+                 * allocation, but accept that risk. [#12674]
+                 */
+                useLogBuffer.latchForWrite();
+                try {
+                    ByteBuffer useBuffer = useLogBuffer.getDataBuffer();
+                    if (useBuffer.capacity() - useBuffer.position() <
+                        entrySize) {
+                        fileManager.writeLogBuffer
+                            (new LogBuffer(item.buffer, currentLsn));
+                        usedTemporaryBuffer = true;
+                        assert useBuffer.position() == 0;
+                        nTempBufferWrites++;
+                    } else {
+                        /* Copy marshalled object into write buffer. */
+                        useBuffer.put(item.buffer);
+                    }
+                } finally {
+                    useLogBuffer.release();
+                }
+
+                success = true;
+            } finally {
+                if (!success) {
+
+                    /*
+                     * The LSN pointer, log buffer position, and corresponding
+                     * file position march in lockstep.
+                     *
+                     * 1. We bump the LSN.
+                     * 2. We copy the loggable entry into the log buffer.
+                     * 3. We may try to write the log buffer.
+                     *
+                     * If we've failed to put the entry into the log buffer
+                     * (2), we need to restore the old LSN state so that the log
+                     * buffer doesn't have a hole. [SR #12638] If we fail after
+                     * (2), we don't need to restore state, because log buffers
+                     * will still match file positions.
+                     *
+                     * This assumes that the last possible activity was the
+                     * write of the log buffers.
+                     */
+                    fileManager.restoreLastPosition();
+
+                    /*
+                     * If the entry was not written to the log, it will not be
+                     * part of the replication stream, and we should reuse the
+                     * VLSN.
+                     */
+                    if (item.header.getVLSN() != null) {
+                        envImpl.getReplicator().decrementVLSN();
+                    }
+                }
+            }
+
+            /*
+             * Tell the log buffer pool that we finished the write.  Record the
+             * LSN against this log buffer, and write the buffer to disk if
+             * needed.
+             */
+            if (!usedTemporaryBuffer) {
+                logBufferPool.writeCompleted
+                    (currentLsn, isLastItem && context.flushRequired);
+            }
+
+            /*
+             * If the txn is not null, the first entry is an LN. Update the txn
+             * with info about the latest LSN. Note that this has to happen
+             * within the log write latch.
+             */
+            item.entry.postLogWork(currentLsn);
+
+            item.newLsn = currentLsn;
+            context.totalNewSize += entrySize;
+        }
+    }
+
+    /**
+     * Serialize a loggable object into a newly allocated buffer.
+     */
+    private ByteBuffer marshallIntoBuffer(LogEntryHeader header, LogEntry entry)
+        throws DatabaseException {
+
+        int entrySize = header.getSize() + header.getItemSize();
+
+        ByteBuffer destBuffer = ByteBuffer.allocate(entrySize);
+        header.writeToLog(destBuffer);
+
+        /* Put the entry in. */
+        entry.writeEntry(header, destBuffer);
+
+        /* Set the limit so it can be used as the size of the entry. */
+        destBuffer.flip();
+
+        return destBuffer;
+    }
+
+    /**
+     * Serialize a log entry into a new buffer with a proper entry header.
+     * Return it ready for copying.
+     */
+    ByteBuffer putIntoBuffer(LogEntry entry,
+                             long prevLogEntryOffset)
+        throws DatabaseException {
+
+        LogEntryHeader header = new LogEntryHeader
+            (entry, Provisional.NO, ReplicationContext.NO_REPLICATE);
+
+        /* Assert that we're not in a situation demanding the Txn mutex. */
+        assert entry.getLogType() != LogEntryType.LOG_LN_TRANSACTIONAL;
+
+        ByteBuffer destBuffer = marshallIntoBuffer(header, entry);
+
+        return header.addPostMarshallingInfo(envImpl,
+                                             destBuffer,
+                                             prevLogEntryOffset,
+                                             ReplicationContext.NO_REPLICATE);
+    }
+
+    /*
+     * Reading from the log.
+     */
+
+    /**
+     * Instantiate all the objects in the log entry at this LSN.
+     * @param lsn location of entry in log.
+     * @return log entry that embodies all the objects in the log entry.
+     */
+    public LogEntry getLogEntry(long lsn)
+        throws DatabaseException {
+
+        /*
+         * Fail loudly if the environment is invalid.  A RunRecoveryException
+         * must have occurred.
+         */
+        envImpl.checkIfInvalid();
+
+        /*
+         * Get a log source for the log entry which provides an abstraction
+         * that hides whether the entry is in a buffer or on disk. Will
+         * register as a reader for the buffer or the file, which will take a
+         * latch if necessary.
+         */
+        LogSource logSource = getLogSource(lsn);
+
+        /* Read the log entry from the log source. */
+        return getLogEntryFromLogSource(lsn, logSource);
+    }
+
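+    /**
+     * Instantiate all the objects in the log entry at this LSN, reading the
+     * entry from the given open file rather than through the log buffer
+     * pool.
+     */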
+    LogEntry getLogEntry(long lsn, RandomAccessFile file)
+        throws DatabaseException {
+
+        return getLogEntryFromLogSource
+            (lsn, new FileSource(file, readBufferSize, fileManager,
+                                 DbLsn.getFileNumber(lsn)));
+    }
+
+    /**
+     * Instantiate all the objects in the log entry at this LSN. This will
+     * release the log source at the first opportunity.
+     *
+     * @param lsn location of entry in log
+     * @return log entry that embodies all the objects in the log entry
+     */
+    private LogEntry getLogEntryFromLogSource(long lsn,
+                                              LogSource logSource)
+        throws DatabaseException {
+
+        try {
+
+            /*
+             * Read the log entry header into a byte buffer. This assumes
+             * that the minimum size of this byte buffer (determined by
+             * je.log.faultReadSize) is always >= the maximum log entry header.
+             */
+            long fileOffset = DbLsn.getFileOffset(lsn);
+            ByteBuffer entryBuffer = logSource.getBytes(fileOffset);
+            assert ((entryBuffer.limit() - entryBuffer.position()) >=
+                    LogEntryHeader.MAX_HEADER_SIZE);
+
+            /* Read the header */
+            LogEntryHeader header =
+                new LogEntryHeader(envImpl,
+                                   entryBuffer,
+                                   false); //anticipateChecksumErrors
+            header.readVariablePortion(entryBuffer);
+
+            ChecksumValidator validator = null;
+            if (doChecksumOnRead) {
+                /* Add header to checksum bytes */
+                validator = new ChecksumValidator();
+                int headerSizeMinusChecksum = header.getSizeMinusChecksum();
+                int itemStart = entryBuffer.position();
+                entryBuffer.position(itemStart -
+                                     headerSizeMinusChecksum);
+                validator.update(envImpl,
+                                 entryBuffer,
+                                 headerSizeMinusChecksum,
+                                 false); // anticipateChecksumErrors
+                entryBuffer.position(itemStart);
+            }
+
+            /*
+             * Now that we know the size, read the rest of the entry
+             * if the first read didn't get enough.
+             */
+            int itemSize = header.getItemSize();
+            if (entryBuffer.remaining() < itemSize) {
+                entryBuffer = logSource.getBytes(fileOffset + header.getSize(),
+                                                 itemSize);
+                nRepeatFaultReads++;
+            }
+
+            /*
+             * Do entry validation. Run the checksum before checking the
+             * entry type, since a checksum failure is the more encompassing
+             * error.
+             */
+            if (doChecksumOnRead) {
+                /* Check the checksum first. */
+                validator.update(envImpl, entryBuffer, itemSize, false);
+                validator.validate(envImpl, header.getChecksum(), lsn);
+            }
+
+            assert LogEntryType.isValidType(header.getType()):
+                "Read non-valid log entry type: " + header.getType();
+
+            /* Read the entry. */
+            LogEntry logEntry =
+                LogEntryType.findType(header.getType()).getNewLogEntry();
+            logEntry.readEntry(header,
+                               entryBuffer,
+                               true);  // readFullItem
+
+            /* For testing only; generate a read io exception. */
+            if (readHook != null) {
+                readHook.doIOHook();
+            }
+
+            /*
+             * Done with the log source; it is released in the finally clause.  Note
+             * that the buffer we get back from logSource is just a duplicated
+             * buffer, where the position and state are copied but not the
+             * actual data. So we must not release the logSource until we are
+             * done marshalling the data from the buffer into the object
+             * itself.
+             */
+            return logEntry;
+        } catch (DbChecksumException e) {
+            /* Add information on location and type of log source. */
+            e.addErrorMessage(" lsn= " + DbLsn.getNoFormatString(lsn) +
+                              " logSource=" + logSource);
+            throw e;
+        } catch (DatabaseException e) {
+
+            /*
+             * Propagate DatabaseExceptions, we want to preserve any subtypes
+             * for downstream handling.
+             */
+            throw e;
+        } catch (ClosedChannelException e) {
+
+            /*
+             * The channel should never be closed by JE; it may have been
+             * closed as a result of an interrupt received by another thread.
+             * See SR [#10463].
+             */
+            throw new RunRecoveryException(envImpl,
+                                           "Channel closed, may be "+
+                                           "due to thread interrupt",
+                                           e);
+        } catch (Exception e) {
+            throw new DatabaseException(e);
+        } finally {
+            if (logSource != null) {
+                logSource.release();
+            }
+        }
+    }
+
+    /**
+     * Return a ByteBuffer holding the log entry at this LSN. The log entry
+     * must begin at position 0, to mimic the marshalledBuffer used in
+     * logInternal().
+     *
+     * @param lsn location of entry in log
+     * @return a ByteBuffer, positioned at 0, that holds the entire log entry
+     */
+    public ByteBuffer getByteBufferFromLog(long lsn)
+        throws DatabaseException {
+
+        /*
+         * Fail loudly if the environment is invalid.  A RunRecoveryException
+         * must have occurred.
+         */
+        envImpl.checkIfInvalid();
+
+        /*
+         * Get a log source for the log entry which provides an abstraction
+         * that hides whether the entry is in a buffer or on disk. Will
+         * register as a reader for the buffer or the file, which will take a
+         * latch if necessary.
+         */
+        LogSource logSource = getLogSource(lsn);
+        ByteBuffer entryBuffer = null;
+        try {
+
+            /*
+             * Read the log entry header into a byte buffer. This assumes
+             * that the minimum size of this byte buffer (determined by
+             * je.log.faultReadSize) is always >= the maximum log entry header.
+             */
+            long fileOffset = DbLsn.getFileOffset(lsn);
+            entryBuffer = logSource.getBytes(fileOffset);
+            int startingPosition = entryBuffer.position();
+            int amountRemaining = entryBuffer.remaining();
+            assert (amountRemaining >= LogEntryHeader.MAX_HEADER_SIZE);
+
+            /* Read the header, find out how large this buffer needs to be */
+            LogEntryHeader header =
+                new LogEntryHeader(envImpl,
+                                   entryBuffer,
+                                   false); //anticipateChecksumErrors
+            int totalSize = header.getSize() + header.getItemSize();
+
+            /*
+             * Now that we know the size, read in the rest of the entry
+             * if the first read didn't get enough.
+             */
+            if (amountRemaining < totalSize) {
+                entryBuffer = logSource.getBytes(fileOffset, totalSize);
+                nRepeatFaultReads++;
+            }
+
+            /*
+             * The log entry must be positioned at the start of the returned
+             * buffer, to mimic the normal logging path.
+             */
+            entryBuffer.position(startingPosition);
+            ByteBuffer singleEntryBuffer = ByteBuffer.allocate(totalSize);
+            entryBuffer.limit(startingPosition + totalSize);
+            singleEntryBuffer.put(entryBuffer);
+            singleEntryBuffer.position(0);
+            return singleEntryBuffer;
+        } catch (DatabaseException e) {
+
+            /*
+             * Propagate DatabaseExceptions, we want to preserve any subtypes
+             * for downstream handling.
+             */
+            throw e;
+        } catch (ClosedChannelException e) {
+
+            /*
+             * The channel should never be closed by JE; it may have been
+             * closed as a result of an interrupt received by another thread.
+             * See SR [#10463].
+             */
+            throw new RunRecoveryException(envImpl,
+                                           "Channel closed, may be "+
+                                           "due to thread interrupt",
+                                           e);
+        } catch (Exception e) {
+            throw new DatabaseException(e);
+        } finally {
+            if (logSource != null) {
+                logSource.release();
+            }
+        }
+    }
+
+    /**
+     * Fault in the first object in the log entry at this LSN.
+     * @param lsn location of object in log
+     * @return the object in the log
+     */
+    public Object get(long lsn)
+        throws DatabaseException {
+
+        LogEntry entry = getLogEntry(lsn);
+        return entry.getMainItem();
+    }
+
+    /**
+     * Return a log source for this LSN, whether the entry is still in the
+     * log buffers or already in a file. Public for unit testing.
+     */
+    public LogSource getLogSource(long lsn)
+        throws DatabaseException {
+
+        /*
+         * First look in log to see if this LSN is still in memory.
+         */
+        LogBuffer logBuffer = logBufferPool.getReadBuffer(lsn);
+
+        if (logBuffer == null) {
+            try {
+                /* Not in the in-memory log -- read it off disk. */
+                long fileNum = DbLsn.getFileNumber(lsn);
+                return new FileHandleSource
+                    (fileManager.getFileHandle(fileNum),
+                     readBufferSize, fileManager);
+            } catch (LogFileNotFoundException e) {
+                /* Add LSN to exception message. */
+                throw new LogFileNotFoundException
+                    (DbLsn.getNoFormatString(lsn) + ' ' + e.getMessage());
+            }
+        } else {
+            return logBuffer;
+        }
+    }
+
+    /**
+     * Flush all log entries and fsync the log file.
+     */
+    public void flush()
+        throws DatabaseException {
+
+        if (!readOnly) {
+            flushInternal();
+            fileManager.syncLogEnd();
+        }
+    }
+
+    /**
+     * Flush without an fsync; may be used to speed unit tests.
+     */
+    public void flushNoSync()
+        throws DatabaseException {
+
+        if (!readOnly) {
+            flushInternal();
+        }
+    }
+
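+    /**
+     * Write the log buffers to the file system. Called by flush(), which
+     * follows it with an fsync, and by flushNoSync().
+     */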
+    abstract void flushInternal()
+        throws LogException, DatabaseException;
+
+
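+    /**
+     * Load log manager statistics: repeat fault reads, temp buffer writes,
+     * the log buffer pool and file manager stats, and (unless fast stats are
+     * requested) the end-of-log LSN. Clears the local counters when the
+     * config requests it.
+     */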
+    public void loadStats(StatsConfig config, EnvironmentStats stats)
+        throws DatabaseException {
+
+        stats.setNRepeatFaultReads(nRepeatFaultReads);
+        stats.setNTempBufferWrites(nTempBufferWrites);
+        if (config.getClear()) {
+            nRepeatFaultReads = 0;
+            nTempBufferWrites = 0;
+        }
+
+        logBufferPool.loadStats(config, stats);
+        fileManager.loadStats(config, stats);
+	if (!config.getFast()) {
+	    loadEndOfLogStat(stats);
+	}
+    }
+
+    /**
+     * Returns a tracked summary for the given file which will not be flushed.
+     * Used for watching changes that occur while a file is being cleaned.
+     */
+    public abstract TrackedFileSummary getUnflushableTrackedSummary(long file)
+        throws DatabaseException;
+
+    TrackedFileSummary getUnflushableTrackedSummaryInternal(long file)
+        throws DatabaseException {
+
+        return envImpl.getUtilizationTracker().
+                       getUnflushableTrackedSummary(file);
+    }
+
+    /**
+     * Removes the tracked summary for the given file.
+     */
+    abstract public void removeTrackedFile(TrackedFileSummary tfs)
+        throws DatabaseException;
+
+    protected void removeTrackedFileInternal(TrackedFileSummary tfs) {
+        tfs.reset();
+    }
+
+    /**
+     * Count node as obsolete under the log write latch.  This is done here
+     * because the log write latch is managed here, and all utilization
+     * counting must be performed under the log write latch.
+     */
+    public abstract void countObsoleteNode(long lsn,
+                                           LogEntryType type,
+                                           int size,
+                                           DatabaseImpl nodeDb)
+        throws DatabaseException;
+
+    void countObsoleteNodeInternal(long lsn,
+                                   LogEntryType type,
+                                   int size,
+                                   DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        UtilizationTracker tracker = envImpl.getUtilizationTracker();
+        tracker.countObsoleteNode(lsn, type, size, nodeDb);
+    }
+
+    /**
+     * @see LocalUtilizationTracker#transferToUtilizationTracker
+     */
+    public abstract void transferToUtilizationTracker(LocalUtilizationTracker
+                                                      localTracker)
+        throws DatabaseException;
+
+    void transferToUtilizationTrackerInternal(LocalUtilizationTracker
+                                              localTracker)
+        throws DatabaseException {
+
+        UtilizationTracker tracker = envImpl.getUtilizationTracker();
+        localTracker.transferToUtilizationTracker(tracker);
+    }
+
+    /**
+     * Counts the given obsolete IN LSNs under the log write latch.
+     */
+    public abstract void countObsoleteINs(List<Long> lsnList,
+                                          DatabaseImpl nodeDb)
+        throws DatabaseException;
+
+    void countObsoleteINsInternal(List<Long> lsnList, DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        UtilizationTracker tracker = envImpl.getUtilizationTracker();
+
+        for (int i = 0; i < lsnList.size(); i += 1) {
+            Long lsn = lsnList.get(i);
+            tracker.countObsoleteNode
+                (lsn.longValue(), LogEntryType.LOG_IN, 0, nodeDb);
+        }
+    }
+
+    /**
+     * @see DatabaseImpl#countObsoleteDb
+     */
+    public abstract void countObsoleteDb(DatabaseImpl db)
+        throws DatabaseException;
+
+    void countObsoleteDbInternal(DatabaseImpl db) {
+        db.countObsoleteDb(envImpl.getUtilizationTracker(),
+                           DbLsn.NULL_LSN /*mapLnLsn*/);
+    }
+
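+    /**
+     * Removes the DbFileSummary for the given file from the given database;
+     * see DatabaseImpl.removeDbFileSummary.
+     */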
+    public abstract boolean removeDbFileSummary(DatabaseImpl db, Long fileNum)
+        throws DatabaseException;
+
+    boolean removeDbFileSummaryInternal(DatabaseImpl db, Long fileNum) {
+        return db.removeDbFileSummary(fileNum);
+    }
+
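+    /**
+     * Records the LSN of the last entry written to the log in the given
+     * stats.
+     */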
+    public abstract void loadEndOfLogStat(EnvironmentStats stats)
+	throws DatabaseException;
+
+    void loadEndOfLogStatInternal(EnvironmentStats stats) {
+	stats.setEndOfLog(fileManager.getLastUsedLsn());
+    }
+
+    /* For unit testing only. */
+    public void setReadHook(TestHook hook) {
+        readHook = hook;
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogSource.java b/src/com/sleepycat/je/log/LogSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..bea53a95c2566326147ed4a765868bdbc69c76d0
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogSource.java
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogSource.java,v 1.26.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * A class that implements LogSource can return portions of the log.
+ * Public for unit testing.
+ */
+public interface LogSource {
+
+    /**
+     * We're done with this log source.
+     */
+    void release() throws DatabaseException;
+
+    /**
+     * Return a buffer of bytes starting at the given offset. The offset
+     * indicates the absolute log file position.
+     */
+    ByteBuffer getBytes(long fileOffset) throws DatabaseException, IOException;
+
+    /**
+     * Return a buffer holding the requested number of bytes, starting at the
+     * given offset. The offset indicates the absolute position in the log
+     * file.
+     */
+    ByteBuffer getBytes(long fileOffset, int numBytes)
+        throws DatabaseException, IOException;
+}
diff --git a/src/com/sleepycat/je/log/LogUtils.java b/src/com/sleepycat/je/log/LogUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..9f30af0681a6504f36bf37493784f1f7db9ef453
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogUtils.java
@@ -0,0 +1,682 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogUtils.java,v 1.62.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.util.PackedInteger;
+
+/**
+ * This class holds convenience methods for marshalling internal JE data to and
+ * from the log.
+ */
+public class LogUtils {
+    /* Storage sizes for short, int, and long values in the log. */
+    public static final int SHORT_BYTES = 2;
+    public static final int INT_BYTES = 4;
+    public static final int LONG_BYTES = 8;
+    public static final int UNSIGNED_INT_BYTES = 4;
+
+    private static final boolean DEBUG = false;
+
+    /*
+     * We can return the same byte[] for 0 length arrays.
+     */
+    public static final byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];
+
+    /**
+     * Marshall a long into the next 4 bytes in this buffer. Necessary when the
+     * long is used to hold an unsigned int.
+     */
+    public static void writeUnsignedInt(ByteBuffer buf, long value) {
+        buf.put((byte) (value >>> 0));
+        buf.put((byte) (value >>> 8));
+        buf.put((byte) (value >>> 16));
+        buf.put((byte) (value >>> 24));
+    }
+
+    /**
+     * Unmarshall the next four bytes which hold an unsigned int into a long.
+     */
+    public static long readUnsignedInt(ByteBuffer buf) {
+	long ret = (buf.get() & 0xFFL) << 0;
+	ret += (buf.get() & 0xFFL) << 8;
+	ret += (buf.get() & 0xFFL) << 16;
+	ret += (buf.get() & 0xFFL) << 24;
+	return ret;
+    }
+
+    /*
+     * Marshall objects.
+     */
+
+    /**
+     * Write a short into the log.
+     */
+    public static void writeShort(ByteBuffer logBuf, short i) {
+        byte b = (byte) ((i >> 0) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 8) & 0xff);
+        logBuf.put(b);
+    }
+
+    /**
+     * Read a short from the log.
+     */
+    public static short readShort(ByteBuffer logBuf) {
+        return (short) (((logBuf.get() & 0xFF) << 0) +
+                        ((logBuf.get() & 0xFF) << 8));
+    }
+
+    /**
+     * Read an int from the log in either packed or unpacked format.
+     */
+    public static int readInt(ByteBuffer logBuf, boolean unpacked) {
+        if (unpacked) {
+            return readInt(logBuf);
+        } else {
+            return readPackedInt(logBuf);
+        }
+    }
+
+    /**
+     * Write an int into the log.
+     */
+    public static void writeInt(ByteBuffer logBuf, int i) {
+        byte b = (byte) ((i >> 0) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 8) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 16) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 24) & 0xff);
+        logBuf.put(b);
+    }
+
+    /**
+     * Read an int from the log.
+     */
+    public static int readInt(ByteBuffer logBuf) {
+	int ret = (logBuf.get() & 0xFF) << 0;
+	ret += (logBuf.get() & 0xFF) << 8;
+	ret += (logBuf.get() & 0xFF) << 16;
+	ret += (logBuf.get() & 0xFF) << 24;
+	return ret;
+    }
+
+    /**
+     * @return log storage size for an int.
+     */
+    public static int getIntLogSize() {
+        return INT_BYTES;
+    }
+
+    /**
+     * Write a packed int into the log.
+     */
+    public static void writePackedInt(ByteBuffer logBuf, int i) {
+        if (logBuf.hasArray()) {
+            int off = logBuf.arrayOffset();
+            int newPos =
+                PackedInteger.writeInt(logBuf.array(),
+                                       logBuf.position() + off, i);
+            logBuf.position(newPos - off);
+        } else {
+            byte[] a = new byte[PackedInteger.MAX_LENGTH];
+            int len = PackedInteger.writeInt(a, 0, i);
+            logBuf.put(a, 0, len);
+        }
+    }
+
+    /**
+     * Read a packed int from the log.
+     */
+    public static int readPackedInt(ByteBuffer logBuf) {
+        int val;
+        if (logBuf.hasArray()) {
+            byte a[] = logBuf.array();
+            int oldPos = logBuf.position();
+            int off = logBuf.arrayOffset() + oldPos;
+            int len = PackedInteger.getReadIntLength(a, off);
+            val = PackedInteger.readInt(a, off);
+            logBuf.position(oldPos + len);
+        } else {
+            byte[] a = new byte[PackedInteger.MAX_LENGTH];
+            a[0] = logBuf.get();
+            int len = PackedInteger.getReadIntLength(a, 0);
+            logBuf.get(a, 1, len - 1);
+            val = PackedInteger.readInt(a, 0);
+        }
+        return val;
+    }
+
+    /**
+     * @return log storage size for a packed int.
+     */
+    public static int getPackedIntLogSize(int i) {
+        return PackedInteger.getWriteIntLength(i);
+    }
+
+    /**
+     * Write an int into the log in MSB order.  Used for ordered keys.
+     */
+    public static void writeIntMSB(ByteBuffer logBuf, int i) {
+        byte b = (byte) ((i >> 24) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 16) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 8) & 0xff);
+        logBuf.put(b);
+        b = (byte) ((i >> 0) & 0xff);
+        logBuf.put(b);
+    }
+
+    /**
+     * Read an int from the log in MSB order.  Used for ordered keys.
+     */
+    public static int readIntMSB(ByteBuffer logBuf) {
+	int ret = (logBuf.get() & 0xFF) << 24;
+	ret += (logBuf.get() & 0xFF) << 16;
+	ret += (logBuf.get() & 0xFF) << 8;
+	ret += (logBuf.get() & 0xFF) << 0;
+	return ret;
+    }
+
+    /**
+     * Write a long into the log.
+     */
+    public static void writeLong(ByteBuffer logBuf, long l) {
+        byte b =(byte) (l >>> 0);
+        logBuf.put(b);
+        b =(byte) (l >>> 8);
+        logBuf.put(b);
+        b =(byte) (l >>> 16);
+        logBuf.put(b);
+        b =(byte) (l >>> 24);
+        logBuf.put(b);
+        b =(byte) (l >>> 32);
+        logBuf.put(b);
+        b =(byte) (l >>> 40);
+        logBuf.put(b);
+        b =(byte) (l >>> 48);
+        logBuf.put(b);
+        b =(byte) (l >>> 56);
+        logBuf.put(b);
+    }
+
+    /**
+     * Read a long from the log in either packed or unpacked format.
+     */
+    public static long readLong(ByteBuffer logBuf, boolean unpacked) {
+        if (unpacked) {
+            return readLong(logBuf);
+        } else {
+            return readPackedLong(logBuf);
+        }
+    }
+
+    /**
+     * Read a long from the log.
+     */
+    public static long readLong(ByteBuffer logBuf) {
+	long ret = (logBuf.get() & 0xFFL) << 0;
+	ret += (logBuf.get() & 0xFFL) << 8;
+	ret += (logBuf.get() & 0xFFL) << 16;
+	ret += (logBuf.get() & 0xFFL) << 24;
+	ret += (logBuf.get() & 0xFFL) << 32;
+	ret += (logBuf.get() & 0xFFL) << 40;
+	ret += (logBuf.get() & 0xFFL) << 48;
+	ret += (logBuf.get() & 0xFFL) << 56;
+	return ret;
+    }
+
+    /**
+     * @return log storage size for a long.
+     */
+    public static int getLongLogSize() {
+        return LONG_BYTES;
+    }
+
+    /**
+     * Write a packed long into the log.
+     */
+    public static void writePackedLong(ByteBuffer logBuf, long l) {
+        if (logBuf.hasArray()) {
+            int off = logBuf.arrayOffset();
+            int newPos =
+                PackedInteger.writeLong(logBuf.array(),
+                                        logBuf.position() + off, l);
+            logBuf.position(newPos - off);
+        } else {
+            byte[] a = new byte[PackedInteger.MAX_LONG_LENGTH];
+            int len = PackedInteger.writeLong(a, 0, l);
+            logBuf.put(a, 0, len);
+        }
+    }
+
+    /**
+     * Read a packed long from the log.
+     */
+    public static long readPackedLong(ByteBuffer logBuf) {
+        long val;
+        if (logBuf.hasArray()) {
+            byte a[] = logBuf.array();
+            int oldPos = logBuf.position();
+            int off = logBuf.arrayOffset() + oldPos;
+            int len = PackedInteger.getReadLongLength(a, off);
+            val = PackedInteger.readLong(a, off);
+            logBuf.position(oldPos + len);
+        } else {
+            byte[] a = new byte[PackedInteger.MAX_LONG_LENGTH];
+            a[0] = logBuf.get();
+            int len = PackedInteger.getReadLongLength(a, 0);
+            logBuf.get(a, 1, len - 1);
+            val = PackedInteger.readLong(a, 0);
+        }
+        return val;
+    }
+
+    /**
+     * @return log storage size for a packed long.
+     */
+    public static int getPackedLongLogSize(long l) {
+        return PackedInteger.getWriteLongLength(l);
+    }
+
+    /**
+     * Write a byte array into the log. The length is stored first as a packed integer.
+     */
+    public static void writeByteArray(ByteBuffer logBuf, byte[] b) {
+
+        if (b == null) {
+            writePackedInt(logBuf, -1);
+            return;
+        }
+
+        /* Write the length. */
+        writePackedInt(logBuf, b.length);
+
+        /* Add the data itself. */
+        logBuf.put(b);                     // data
+    }
+
+    /**
+     * Read a byte array from the log. The size is stored first as an integer.
+     */
+    public static byte[] readByteArray(ByteBuffer logBuf, boolean unpacked) {
+        int size = readInt(logBuf, unpacked);
+        if (DEBUG) {
+            System.out.println("pos = " + logBuf.position() +
+                               " byteArray is " + size + " on read");
+        }
+
+        if (size < 0) {
+            return null;
+        }
+
+	if (size == 0) {
+	    return ZERO_LENGTH_BYTE_ARRAY;
+	}
+
+        byte[] b = new byte[size];
+        logBuf.get(b);               // read it out
+        return b;
+    }
+
+    /**
+     * @return log storage size for a byteArray
+     */
+    public static int getByteArrayLogSize(byte[] b) {
+        if (b == null) {
+            return LogUtils.getPackedIntLogSize(-1);
+        } else {
+            int len = b.length;
+            return LogUtils.getPackedIntLogSize(len) + len;
+        }
+    }
+
+    /**
+     * Write a byte array into the log. No size is stored.
+     */
+    public static void writeBytesNoLength(ByteBuffer logBuf, byte[] b) {
+
+        /* Add the data itself. */
+        logBuf.put(b);
+    }
+
+    /**
+     * Read a byte array from the log.  The size is not stored.
+     */
+    public static byte[] readBytesNoLength(ByteBuffer logBuf, int size) {
+        if (DEBUG) {
+            System.out.println("pos = " + logBuf.position() +
+                               " byteArray is " + size + " on read");
+        }
+
+	if (size == 0) {
+	    return ZERO_LENGTH_BYTE_ARRAY;
+	}
+
+        byte[] b = new byte[size];
+        logBuf.get(b);               // read it out
+        return b;
+    }
+
+    /**
+     * writeString and readString should both use something other than the
+     * default character encoding (e.g. UTF-8).  But since switching now might
+     * cause incompatibilities with existing databases, we need to do something
+     * more complicated than just add "UTF-8" to the getBytes and "new
+     * String()" calls below.  User-defined character strings are encoded using
+     * these methods when XA is used.  See [#15293].
+     */
+
+    /**
+     * Write a string into the log. The size is stored first as an integer.
+     */
+    public static void writeString(ByteBuffer logBuf,
+                                   String stringVal) {
+        writeByteArray(logBuf, stringVal.getBytes());
+    }
+
+    /**
+     * Read a string from the log. The size is stored first as an integer.
+     */
+    public static String readString(ByteBuffer logBuf, boolean unpacked) {
+        return new String(readByteArray(logBuf, unpacked));
+    }
+
+    /**
+     * @return log storage size for a string
+     */
+    public static int getStringLogSize(String s) {
+        return getByteArrayLogSize(s.getBytes());
+    }
+
+    /**
+     * Write a timestamp into the log.
+     */
+    public static void writeTimestamp(ByteBuffer logBuf, Timestamp time) {
+        writePackedLong(logBuf, time.getTime());
+    }
+
+    /**
+     * Read a timestamp from the log.
+     */
+    public static Timestamp readTimestamp(ByteBuffer logBuf,
+                                          boolean unpacked) {
+        long millis = readLong(logBuf, unpacked);
+        return new Timestamp(millis);
+    }
+
+    /**
+     * @return log storage size for a timestamp
+     */
+    public static int getTimestampLogSize(Timestamp time) {
+        return PackedInteger.getWriteLongLength(time.getTime());
+    }
+
+    /**
+     * Write a boolean into the log.
+     */
+    public static void writeBoolean(ByteBuffer logBuf, boolean bool) {
+        byte val = bool ? (byte) 1 : (byte) 0;
+        logBuf.put(val);
+    }
+
+    /**
+     * Read a boolean from the log.
+     */
+    public static boolean readBoolean(ByteBuffer logBuf) {
+        byte val = logBuf.get();
+        return val == (byte) 1;
+    }
+
+    /**
+     * @return log storage size for a boolean.
+     */
+    public static int getBooleanLogSize() {
+        return 1;
+    }
+
+    /*
+     * Dumping support.
+     */
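+    /**
+     * Dump a boolean "exists" attribute as the start of an XML element.
+     * The element is closed immediately if the field does not exist, and the
+     * flag is returned so the caller knows whether to dump the element body
+     * and close the tag itself.
+     */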
+    public static boolean dumpBoolean(ByteBuffer itemBuffer,
+                                      StringBuffer sb,
+                                      String tag) {
+        sb.append("<");
+        sb.append(tag);
+        sb.append(" exists = \"");
+        boolean exists = readBoolean(itemBuffer);
+        sb.append(exists);
+        if (exists) {
+            sb.append("\">");
+        } else {
+            /* Close off the tag, we're done. */
+            sb.append("\"/>");
+        }
+        return exists;
+    }
+
+    /**
+     * The byte arrays in Xids are known to be 255 bytes or less in length.
+     * So instead of using read/writeByteArray(), we can save 6 bytes per
+     * record (3 bytes for each of the two length fields) by making the
+     * byte[] length field 1 byte instead of 4.
+     */
+    public static int getXidSize(Xid xid) {
+	byte[] gid = xid.getGlobalTransactionId();
+	byte[] bqual = xid.getBranchQualifier();
+	return
+	    INT_BYTES +                         // FormatId
+	    1 +                                 // gxid length byte
+	    1 +                                 // bqual length byte
+	    (gid == null ? 0 : gid.length) +    // gid bytes
+	    (bqual == null ? 0 : bqual.length); // bqual bytes
+    }
+
+    /*
+     * Xid.gid[] and bqual[] can't be longer than 64 bytes so we can get away
+     * with writing the length in one byte, rather than 4.
+     */
+    public static void writeXid(ByteBuffer logBuf, Xid xid) {
+	byte[] gid = xid.getGlobalTransactionId();
+	byte[] bqual = xid.getBranchQualifier();
+
+	writeInt(logBuf, xid.getFormatId());
+
+	if (gid == null) {
+	    logBuf.put((byte) -1);
+	} else {
+	    logBuf.put((byte) (gid.length));
+	    logBuf.put(gid);
+	}
+
+	if (bqual == null) {
+	    logBuf.put((byte) -1);
+	} else {
+	    logBuf.put((byte) (bqual.length));
+	    logBuf.put(bqual);
+	}
+    }
+
+    /*
+     * Xid.gid[] and bqual[] can't be longer than 64 bytes so we can get away
+     * with writing the length in one byte, rather than 4.
+     */
+    public static Xid readXid(ByteBuffer logBuf) {
+	int formatId = readInt(logBuf);
+
+	int gidLen = logBuf.get();
+	byte[] gid = null;
+	if (gidLen >= 0) {
+	    gid = new byte[gidLen];
+	    logBuf.get(gid);
+	}
+
+	int bqualLen = logBuf.get();
+	byte[] bqual = null;
+	if (bqualLen >= 0) {
+	    bqual = new byte[bqualLen];
+	    logBuf.get(bqual);
+	}
+
+	return new XidImpl(formatId, gid, bqual);
+    }
+
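+    /**
+     * A simple Xid implementation used to materialize Xids read from the log
+     * by readXid.
+     */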
+    public static class XidImpl implements Xid {
+	private int formatId;
+	private byte[] gid;
+	private byte[] bqual;
+
+	/* public for unit tests. */
+	public XidImpl(int formatId, byte[] gid, byte[] bqual) {
+	    this.formatId = formatId;
+	    this.gid = gid;
+	    this.bqual = bqual;
+	}
+
+	public int getFormatId() {
+	    return formatId;
+	}
+
+	public byte[] getGlobalTransactionId() {
+	    return gid;
+	}
+
+	public byte[] getBranchQualifier() {
+	    return bqual;
+	}
+
+        @Override
+	public boolean equals(Object o) {
+	    if (!(o instanceof XidImpl)) {
+		return false;
+	    }
+
+	    XidImpl xid = (XidImpl) o;
+	    if (xid.getFormatId() != formatId) {
+		return false;
+	    }
+	    if (compareByteArrays(xid.getGlobalTransactionId(), gid) &&
+		compareByteArrays(xid.getBranchQualifier(), bqual)) {
+		return true;
+	    }
+
+	    return false;
+	}
+
+        @Override
+	public int hashCode() {
+	    int code = formatId;
+	    if (gid != null) {
+		for (int i = 0; i < gid.length; i++) {
+		    code += gid[i];
+		}
+	    }
+	    if (bqual != null) {
+		for (int i = 0; i < bqual.length; i++) {
+		    code += bqual[i];
+		}
+	    }
+	    return code;
+	}
+
+	private boolean compareByteArrays(byte[] b1, byte[] b2) {
+	    if (b1 == null ||
+		b2 == null) {
+		return b1 == b2;
+	    }
+
+	    if (b1.length != b2.length) {
+		return false;
+	    }
+
+	    for (int i = 0; i < b1.length; i++) {
+		if (b1[i] != b2[i]) {
+		    return false;
+		}
+	    }
+
+	    return true;
+	}
+
+        @Override
+	public String toString() {
+	    StringBuffer sb = new StringBuffer();
+	    sb.append("<Xid formatId=\"").append(formatId);
+	    sb.append("\" gTxnId=\"");
+	    if (gid == null) {
+		sb.append("null");
+	    } else {
+		sb.append(new String(gid));
+	    }
+	    sb.append("\" bqual=\"");
+	    if (bqual == null) {
+		sb.append("null");
+	    } else {
+		sb.append(new String(bqual));
+	    }
+	    sb.append("\"/>");
+	    return sb.toString();
+	}
+    }
+
+    /**
+     * Convenience method for marshalling a header and log entry
+     * out of a byte buffer read directly out of the log.
+     * @throws DatabaseException
+     */
+    public static HeaderAndEntry
+        readHeaderAndEntry(ByteBuffer bytesFromLog,
+                           EnvironmentImpl envImpl,
+                           boolean anticipateChecksumErrors,
+                           boolean readFullItem)
+        throws DatabaseException {
+
+        HeaderAndEntry ret = new HeaderAndEntry();
+        ret.header = new LogEntryHeader(envImpl,
+                                        bytesFromLog,
+                                        anticipateChecksumErrors);
+        ret.header.readVariablePortion(bytesFromLog);
+
+        ret.entry =
+            LogEntryType.findType(ret.header.getType()).getNewLogEntry();
+
+        ret.entry.readEntry(ret.header,
+                            bytesFromLog,
+                            readFullItem);
+        return ret;
+    }
+
+    public static class HeaderAndEntry {
+        public LogEntryHeader header;
+        public LogEntry entry;
+
+        /* Get a HeaderAndEntry from LogUtils.readHeaderAndEntry. */
+        private HeaderAndEntry() {
+        }
+
+        public boolean logicalEquals(HeaderAndEntry other) {
+            return (header.logicalEquals(other.header) &&
+                    entry.logicalEquals(other.entry));
+        }
+
+        @Override
+        public String toString() {
+            return header + " " + entry;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/Loggable.java b/src/com/sleepycat/je/log/Loggable.java
new file mode 100644
index 0000000000000000000000000000000000000000..ecbce49b633e2998292e9de125e8640fc1772faf
--- /dev/null
+++ b/src/com/sleepycat/je/log/Loggable.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Loggable.java,v 1.8.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A class that implements Loggable knows how to read and write itself into
+ * a ByteBuffer in a format suitable for the JE log or JE replication messages.
+ */
+public interface Loggable {
+
+    /*
+     * Writing to a byte buffer
+     */
+
+    /**
+     * @return number of bytes used to store this object.
+     */
+    public int getLogSize();
+
+    /**
+     * Serialize this object into the buffer.
+     * @param logBuffer is the destination buffer
+     */
+    public void writeToLog(ByteBuffer logBuffer);
+
+    /*
+     *  Reading from a byte buffer
+     */
+
+    /**
+     * Initialize this object from the data in itemBuffer.
+     * @param itemBuffer the source buffer
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+	throws LogException;
+
+    /**
+     * Write the object into the string buffer for log dumping. Each object
+     * should be dumped without indentation or new lines and should be valid
+     * XML.
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose);
+
+    /**
+     * @return the transaction id embedded within this loggable object. Objects
+     * that have no transaction id should return 0.
+     */
+    public long getTransactionId();
+
+    /**
+     * @return true if these two loggable items are logically the same.
+     * Used for replication testing.
+     */
+    public boolean logicalEquals(Loggable other);
+}
diff --git a/src/com/sleepycat/je/log/PrintFileReader.java b/src/com/sleepycat/je/log/PrintFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..8fff8e6d0b6de6b5ee17fd91af0a77133765d1dc
--- /dev/null
+++ b/src/com/sleepycat/je/log/PrintFileReader.java
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrintFileReader.java,v 1.22.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+
+/**
+ * The PrintFileReader prints out the target log entries.
+ */
+public class PrintFileReader extends DumpFileReader {
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public PrintFileReader(EnvironmentImpl env,
+			   int readBufferSize,
+			   long startLsn,
+			   long finishLsn,
+			   String entryTypes,
+			   String txnIds,
+			   boolean verbose)
+	throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              startLsn,
+              finishLsn,
+              entryTypes,
+              txnIds,
+              verbose);
+    }
+
+    /**
+     * This reader prints the log entry item.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        /* Figure out what kind of log entry this is */
+	byte curType = currentEntryHeader.getType();
+        LogEntryType lastEntryType = LogEntryType.findType(curType);
+
+        /* Print out a common header for each log item */
+        StringBuffer sb = new StringBuffer();
+        sb.append("<entry lsn=\"0x").append
+            (Long.toHexString(readBufferFileNum));
+        sb.append("/0x").append(Long.toHexString(currentEntryOffset));
+        sb.append("\" ");
+        currentEntryHeader.dumpLogNoTag(sb, verbose);
+        sb.append("\">");
+
+        /* Read the entry and dump it into a string buffer. */
+	LogEntry entry = lastEntryType.getSharedLogEntry();
+        entry.readEntry(currentEntryHeader, entryBuffer, true); // readFullItem
+	boolean dumpIt = true;
+	if (targetTxnIds.size() > 0) {
+	    if (lastEntryType.isTransactional()) {
+		if (!targetTxnIds.contains
+		    (Long.valueOf(entry.getTransactionId()))) {
+		    /* Not in the list of txn ids. */
+		    dumpIt = false;
+		}
+	    } else {
+		/* If -tx spec'd and not a transactional entry, don't dump. */
+		dumpIt = false;
+	    }
+	}
+
+	if (dumpIt) {
+	    entry.dumpEntry(sb, verbose);
+	    sb.append("</entry>");
+	    System.out.println(sb.toString());
+	}
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/log/Provisional.java b/src/com/sleepycat/je/log/Provisional.java
new file mode 100644
index 0000000000000000000000000000000000000000..53d055800fa216d3b76b32622a35e5ad1ea2c9fb
--- /dev/null
+++ b/src/com/sleepycat/je/log/Provisional.java
@@ -0,0 +1,149 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Provisional.java,v 1.2.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Specifies whether to log an entry provisionally.
+ *
+ * Provisional log entries:
+ * 
+ * What are provisional log entries?
+ *
+ *    Provisional log entries are those tagged with the provisional attribute
+ *    in the log entry header. The provisional attribute can be applied to any
+ *    type of log entry, and is implemented in
+ *    com.sleepycat.je.log.LogEntryHeader as two stolen bits in the 8 bit
+ *    version field.
+ *
+ * When is the provisional attribute used?
+ * 
+ *    The provisional attribute is used only during recovery. It very simply
+ *    indicates that recovery will ignore and skip over this log entry.
+ * 
+ * When is the provisional attribute set?
+ * 
+ *    The provisional attribute started out as a way to create atomicity among
+ *    different log entries. Child pointers in the JE Btree are physical LSNs,
+ *    so each Btree node's children must be logged before it in the log. On
+ *    the other hand, one fundamental assumption of the JE log is that each
+ *    Btree node found in the log can be replayed and placed in the in-memory
+ *    tree. To do so, each Btree node must have a parent node that will house
+ *    it. The grouping of multiple log entries into one atomic group is often
+ *    used to fulfill this requirement.
+ * 
+ *     * Atomic log entries:
+ *
+ *           + When a btree is first created, we bootstrap tree creation by
+ *           logging the first BIN provisionally, then creating a parent IN
+ *           which is the Btree root IN, which points to this first BIN.
+ *
+ *           + When we split a Btree node, we create a new IN, which is the
+ *           sibling of the split node. We log the old sibling and the new
+ *           sibling provisionally, and then log the parent, so that any
+ *           crashes in the middle of this triumvirate which result in the
+ *           failure to log the parent will skip over the orphaned siblings.
+ *
+ *           + Splitting the Btree root is just a special case of a split.
+ *
+ *           + Creating a duplicate subtree to hang in the middle of a btree is
+ *           just a special case of a split and btree first creation.
+ *
+ *     * Entries not meant to be recovered
+ *
+ *           Temp DBs are not meant to be recovered and we log their Btree
+ *           nodes in a very lax fashion, purely as a way of evicting them out
+ *           of the cache temporarily. There is no guarantee that a consistent
+ *           set has been logged to disk. We skip over them for both recovery
+ *           performance and the "each-node-must-have-a-parent" rule.
+ *
+ *     * Checkpoint performance
+ *
+ *           When we flush a series of nodes, it's a waste to replay nodes
+ *           which are referenced by higher levels. For example, if we
+ *           checkpoint this btree:
+ * 
+ *           INa -> INb -> BINc (dirty) -> LNd
+ * 
+ *           we log them in this order:
+ * 
+ *           BINc
+ *           INb
+ * 
+ *           And there's no point to replaying BINc, because it's referenced by
+ *           INb.  We skip over BINc, which we do by logging it provisionally.
+ * 
+ *     * Log cleaning - removing references to deleted files.
+ * 
+ *       When we delete a file for log cleaning we guarantee that no active log
+ *       entries refer to any log entry in the deleted file. Suppose our
+ *       checkpoint looks like this:
+ * 
+ *         5/100 LNa
+ *         5/200 Ckpt start
+ *         5/300 INs
+ *         ...
+ *         5/500 Ckpt end
+ *         ...
+ *         5/800 last entry in log
+ * 
+ *       Because we do not delete a file until the Ckpt end after processing
+ *       (cleaning) it, nothing from 5/500 to 5/800 can refer to a file deleted
+ *       due to the Ckpt end in 5/500.
+ *
+ *       BEFORE_CKPT_END is motivated by the fact that while log entries
+ *       between 5/100 (first active lsn) and 5/500 (ckpt end) will not, in
+ *       and of themselves, contain an LSN for a cleaned, deleted file, the act of
+ *       processing them during recovery could require fetching a node from a
+ *       deleted file. For example, the IN at 5/300 could have an in-memory
+ *       parent which has a reference to an older, cleaned version of that IN.
+ *       Treating the span between 5/200 and 5/500 as provisional is both
+ *       optimal, because only the high level INs need to be processed, and
+ *       required, in order not to fetch from a cleaned file. See [#16037].
+ */
+public enum Provisional {
+
+    /**
+     * The entry is non-provisional and is always processed by recovery.
+     */
+    NO,
+    
+    /**
+     * The entry is provisional and is never processed by recovery.
+     */
+    YES,
+    
+    /**
+     * The entry is provisional (not processed by recovery) if it occurs before
+     * the CkptEnd in the recovery interval, or is non-provisional (is
+     * processed) if it occurs after CkptEnd.
+     */
+    BEFORE_CKPT_END;
+
+    /**
+     * Determines whether a given log entry should be processed during
+     * recovery.
+     */
+    public boolean isProvisional(long logEntryLsn, long ckptEndLsn) {
+        assert logEntryLsn != DbLsn.NULL_LSN;
+        switch (this) {
+        case NO:
+            return false;
+        case YES:
+            return true;
+        case BEFORE_CKPT_END:
+            return ckptEndLsn != DbLsn.NULL_LSN &&
+                   DbLsn.compareTo(logEntryLsn, ckptEndLsn) < 0;
+        default:
+            assert false;
+            return false;
+        }
+    }
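+
+    /*
+     * For illustration: with the checkpoint layout sketched in the class
+     * comment, where CkptEnd is at 5/500, BEFORE_CKPT_END.isProvisional
+     * returns true for the IN at 5/300 (it precedes CkptEnd, so recovery
+     * skips it) and false for an entry logged at 5/800 (it follows CkptEnd,
+     * so recovery processes it).
+     */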
+}
diff --git a/src/com/sleepycat/je/log/ReplicationContext.java b/src/com/sleepycat/je/log/ReplicationContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..661a01c8560f263d3f17ad7f7478a50fcb487ac5
--- /dev/null
+++ b/src/com/sleepycat/je/log/ReplicationContext.java
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReplicationContext.java,v 1.9.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.log.entry.DbOperationType;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * ReplicationContext provides context about high-level operations so that the
+ * logging level can determine which replication-related actions are required
+ * for a given Loggable item.
+ *
+ * Those lower level actions are:
+ * - does a log entry need to be logged with a VLSN generated by this
+ * (master) node?
+ * - does the log entry need to be logged with the VLSN which accompanied a
+ *   replication message?
+ * - do we need to wait for PERM acks after logging an entry?
+ * - do we need to record the client VLSN that was just written to the log?
+ *
+ * ReplicationContext subclasses may hold additional information about the
+ * logical operation which instigated logging, so that this can be added
+ * to the log entry.
+ *
+ * All LogEntryType(s) have a "replicationPossible" attribute. For example,
+ * INs will never be replicated, but LN_TX's may or may not be replicated,
+ * depending on whether the owning database is replicated.
+ *
+ * If a LogEntryType will never be replicated, it should be logged with
+ * the static ReplicationContext.NO_REPLICATE instance.
+ * If replication is possible, the replication context may be:
+ *   - one allocated for this operation, as the result of client apply
+ *   - the static instance MASTER, if this node is the replication master
+ *   - the static instance NO_REPLICATE, if this is a local operation
+ *
+ */
+public class ReplicationContext {
+
+    /*
+     * Convenience static instance used when you know this operation is
+     * executing on a replication master node.
+     */
+    public static final ReplicationContext MASTER =
+        new ReplicationContext(true /* inReplicationStream */);
+
+    /*
+     * Convenience static instance used when you know this operation will not
+     * be replicated, either because it is executing on a non-replicated node,
+     * because it is a local operation on a local database, or because this
+     * loggable item is of a type that is never replicated.
+     */
+    public static final ReplicationContext NO_REPLICATE =
+        new ReplicationContext(false /* inReplicationStream */);
+
+    /*
+     * If true, this Loggable item is part of the replication stream, and
+     * needs to be logged with a VLSN.
+     */
+    private boolean inReplicationStream;
+
+    /*
+     * The VLSN value passed in from a replication message directed at
+     * this replication client.
+     */
+    private VLSN clientVLSN;
+
+    protected ReplicationContext(boolean inReplicationStream) {
+        this.inReplicationStream = inReplicationStream;
+        clientVLSN = null;
+    }
+
+    /**
+     * Used to pass the VLSN held in an arriving message down to the logging
+     * levels.
+     */
+    public ReplicationContext(VLSN clientVLSN) {
+        this.inReplicationStream = true;
+        this.clientVLSN = clientVLSN;
+    }
+
+    /**
+     * @return the VLSN that arrived in the replication message which
+     * instigated this Loggable item.
+     */
+    VLSN getClientVLSN() {
+        return clientVLSN;
+    }
+
+    /**
+     * @return true if this loggable item is part of the replication stream
+     */
+    boolean inReplicationStream() {
+        return inReplicationStream;
+    }
+
+    /**
+     * @return true if this node is the master, and should
+     * generate a VLSN for this log entry
+     */
+    boolean mustGenerateVLSN() {
+        return (inReplicationStream && (clientVLSN == null));
+    }
+
+    /**
+     * @return the type of database operation in progress. For the default
+     * case, we return DbOperationType.NONE.
+     */
+    public DbOperationType getDbOperationType() {
+        return DbOperationType.NONE;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("inRepStream=").append(inReplicationStream);
+        sb.append("clientVLSN=").append(clientVLSN);
+        return sb.toString();
+    }
+}
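
A sketch of how logging code might branch on a context follows.  It is
placed in the same package because the accessors above are package-private;
the VLSN(long) constructor usage is an assumption about that class.

    package com.sleepycat.je.log;

    import com.sleepycat.je.utilint.VLSN;

    class ReplicationContextDemo {

        static void describe(ReplicationContext context) {
            if (!context.inReplicationStream()) {
                System.out.println("local entry, no VLSN work");
            } else if (context.mustGenerateVLSN()) {
                System.out.println("master: generate a fresh VLSN");
            } else {
                System.out.println("replica: reuse VLSN " +
                                   context.getClientVLSN());
            }
        }

        public static void main(String[] args) {
            describe(ReplicationContext.NO_REPLICATE);      // local operation
            describe(ReplicationContext.MASTER);            // master side
            describe(new ReplicationContext(new VLSN(42))); // client apply
        }
    }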
diff --git a/src/com/sleepycat/je/log/ScavengerFileReader.java b/src/com/sleepycat/je/log/ScavengerFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..6085038b9438141db2dcc8e10c86ce210bf60aca
--- /dev/null
+++ b/src/com/sleepycat/je/log/ScavengerFileReader.java
@@ -0,0 +1,210 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ScavengerFileReader.java,v 1.23.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * A ScavengerFileReader reads the log backwards.  If it encounters a checksum
+ * error, it goes to the start of that log file and reads forward until it
+ * re-encounters the checksum error, thereby locating the far side of the
+ * corrupted area.  It then resumes reading backwards in the log.
+ *
+ * The caller may set "dumpCorruptedBounds" to true if information about the
+ * start and finish of the corrupted portion should be displayed on stderr.
+ *
+ * The caller is expected to implement processEntryCallback. This method is
+ * called once for each entry that the ScavengerFileReader finds in the log.
+ */
+abstract public class ScavengerFileReader extends FileReader {
+
+    /* A Set of the entry type numbers that this FileReader should dump. */
+    private Set<Byte> targetEntryTypes;
+
+    private int readBufferSize;
+
+    /* True if reader should write corrupted boundaries to System.err. */
+    private boolean dumpCorruptedBounds;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public ScavengerFileReader(EnvironmentImpl env,
+                               int readBufferSize,
+                               long startLsn,
+                               long finishLsn,
+                               long endOfFileLsn)
+        throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              false,
+              startLsn,
+              null, // single file number
+              endOfFileLsn,
+              finishLsn);
+
+        this.readBufferSize = readBufferSize;
+
+        /*
+         * Indicate that a checksum error should not shut down the whole
+         * environment.
+         */
+        anticipateChecksumErrors = true;
+        targetEntryTypes = new HashSet<Byte>();
+        dumpCorruptedBounds = false;
+    }
+
+    /**
+     * Set to true if corrupted boundaries should be dumped to stderr.
+     */
+    public void setDumpCorruptedBounds(boolean dumpCorruptedBounds) {
+        this.dumpCorruptedBounds = dumpCorruptedBounds;
+    }
+
+    /**
+     * Tell the reader that we are interested in this kind of entry.
+     */
+    public void setTargetType(LogEntryType type) {
+        targetEntryTypes.add(Byte.valueOf(type.getTypeNum()));
+    }
+
+    /*
+     * For each entry that is selected, just call processEntryCallback.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        LogEntryType lastEntryType =
+            LogEntryType.findType(currentEntryHeader.getType());
+        LogEntry entry = lastEntryType.getSharedLogEntry();
+        entry.readEntry(currentEntryHeader, entryBuffer, true); // readFullItem
+        processEntryCallback(entry, lastEntryType);
+        return true;
+    }
+
+    /*
+     * Method overridden by the subclass.  Each entry of the selected types
+     * is passed to this method.
+     */
+    abstract protected void processEntryCallback(LogEntry entry,
+                                                 LogEntryType entryType)
+        throws DatabaseException;
+
+    /*
+     * Read the next entry.  If a checksum exception is encountered, attempt
+     * to find the other side of the corrupted area and try to re-read this
+     * file.
+     */
+    @Override
+    public boolean readNextEntry()
+        throws DatabaseException, IOException {
+
+        long saveCurrentEntryOffset = currentEntryOffset;
+        try {
+            return super.readNextEntry();
+        } catch (DbChecksumException DCE) {
+            resyncReader(DbLsn.makeLsn(readBufferFileNum,
+                                       saveCurrentEntryOffset),
+                         dumpCorruptedBounds);
+            return super.readNextEntry();
+        }
+    }
+
+    /*
+     * A checksum error has been encountered.  Go to the start of this log file
+     * and read forward until the lower side of the corrupted area has been
+     * found.
+     */
+    @Override
+    protected boolean resyncReader(long nextGoodRecordPostCorruption,
+                                   boolean showCorruptedBounds)
+        throws DatabaseException, IOException {
+
+        LastFileReader reader = null;
+        long tryReadBufferFileNum =
+            DbLsn.getFileNumber(nextGoodRecordPostCorruption);
+
+        while (tryReadBufferFileNum >= 0) {
+            try {
+                reader =
+                    new LastFileReader(envImpl, readBufferSize,
+                                       Long.valueOf(tryReadBufferFileNum));
+                break;
+            } catch (DbChecksumException DCE) {
+
+                /*
+                 * We found a checksum error reading the header of this file,
+                 * so skip to an earlier file.
+                 */
+                tryReadBufferFileNum--;
+                continue;
+            }
+        }
+
+        boolean switchedFiles = tryReadBufferFileNum !=
+            DbLsn.getFileNumber(nextGoodRecordPostCorruption);
+
+        if (!switchedFiles) {
+
+            /*
+             * This reader will not throw an exception if a checksum error is
+             * hit -- it will just exit.
+             */
+            while (reader.readNextEntry()) {
+            }
+        }
+
+        long lastUsedLsn = reader.getLastValidLsn();
+        long nextAvailableLsn = reader.getEndOfLog();
+        if (showCorruptedBounds) {
+            System.err.println("A checksum error was found in the log.");
+            System.err.println
+                ("Corruption begins at LSN:\n   " +
+                 DbLsn.toString(nextAvailableLsn));
+            System.err.println
+                ("Last known good record before corruption is at LSN:\n   " +
+                 DbLsn.toString(lastUsedLsn));
+            System.err.println
+                ("Next known good record after corruption is at LSN:\n   " +
+                 DbLsn.toString(nextGoodRecordPostCorruption));
+        }
+
+        startLsn = lastUsedLsn;
+        initStartingPosition(nextAvailableLsn, null);
+        if (switchedFiles) {
+            currentEntryPrevOffset = 0;
+        }
+        /* Indicate resync is permitted so don't throw exception. */
+        return true;
+    }
+
+    /**
+     * @return true if this reader should process this entry, or false if it
+     * should just skip over it.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+        if (targetEntryTypes.size() == 0) {
+            /* We want to dump all entry types. */
+            return true;
+        } else {
+            return targetEntryTypes.contains
+                (Byte.valueOf(currentEntryHeader.getType()));
+        }
+    }
+}
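
For example, a salvage-style subclass might collect the LSNs of plain LNs
while tolerating corruption.  This is a hedged sketch: the environment and
LSN bounds are assumed to come from the caller, and only methods defined
above or inherited from FileReader (getLastLsn) are used.

    class LsnCollectingReader extends ScavengerFileReader {

        private final java.util.List<Long> lsns =
            new java.util.ArrayList<Long>();

        LsnCollectingReader(EnvironmentImpl env,
                            int readBufferSize,
                            long startLsn,
                            long finishLsn,
                            long endOfFileLsn)
            throws java.io.IOException, DatabaseException {

            super(env, readBufferSize, startLsn, finishLsn, endOfFileLsn);
            setTargetType(LogEntryType.LOG_LN);  // only process plain LNs
            setDumpCorruptedBounds(true);        // report corrupt spans
        }

        @Override
        protected void processEntryCallback(LogEntry entry,
                                            LogEntryType entryType) {
            lsns.add(Long.valueOf(getLastLsn()));  // remember where it was
        }
    }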
diff --git a/src/com/sleepycat/je/log/SearchFileReader.java b/src/com/sleepycat/je/log/SearchFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..e542a560c2768b8b7da9534aa8919b1a40003ab6
--- /dev/null
+++ b/src/com/sleepycat/je/log/SearchFileReader.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SearchFileReader.java,v 1.47.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * SearchFileReader searches the log for entries of a given type.
+ */
+public class SearchFileReader extends FileReader {
+
+    private LogEntryType targetType;
+    private LogEntry logEntry;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public SearchFileReader(EnvironmentImpl env,
+                            int readBufferSize,
+                            boolean forward,
+                            long startLsn,
+                            long endOfFileLsn,
+                            LogEntryType targetType)
+	throws IOException, DatabaseException {
+
+        super(env, readBufferSize, forward, startLsn, null,
+	      endOfFileLsn, DbLsn.NULL_LSN);
+
+        this.targetType = targetType;
+        logEntry = targetType.getNewLogEntry();
+    }
+
+    /**
+     * @return true if this is a targeted entry.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+        return (targetType.equalsType(currentEntryHeader.getType()));
+    }
+
+    /**
+     * This reader instantiates the main item of a given log entry.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        logEntry.readEntry
+            (currentEntryHeader, entryBuffer, true); // readFullItem
+        return true;
+    }
+
+    /**
+     * @return the last object read.
+     */
+    public Object getLastObject() {
+        return logEntry.getMainItem();
+    }
+}
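
Usage sketch, assuming envImpl and the LSN bounds are supplied by the
caller (as recovery does when it hunts for a checkpoint record), and run
inside a method that declares the reader's IOException/DatabaseException:

    SearchFileReader searcher = new SearchFileReader(
        envImpl, 8192 /* read buffer */, true /* forward */,
        startLsn, endOfFileLsn, LogEntryType.LOG_CKPT_END);

    if (searcher.readNextEntry()) {
        Object ckptEnd = searcher.getLastObject(); // the CheckpointEnd item
        long lsn = searcher.getLastLsn();          // where it was found
    }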
diff --git a/src/com/sleepycat/je/log/StatsFileReader.java b/src/com/sleepycat/je/log/StatsFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..acb01dce2a3aa8248de7ec4b7bd741d90d8f9c8f
--- /dev/null
+++ b/src/com/sleepycat/je/log/StatsFileReader.java
@@ -0,0 +1,467 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: StatsFileReader.java,v 1.25.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * The StatsFileReader generates stats about the log entries read, such as the
+ * count of each entry type, the number of bytes, and the minimum and maximum
+ * log entry sizes.
+ */
+public class StatsFileReader extends DumpFileReader {
+
+    private Map<LogEntryType,EntryInfo> entryInfoMap;
+    private long totalLogBytes;
+    private long totalCount;
+
+    /* Keep stats on log composition in terms of ckpt intervals. */
+    private ArrayList<CheckpointCounter> ckptList;
+    private CheckpointCounter ckptCounter;
+    private long firstLsnRead;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public StatsFileReader(EnvironmentImpl envImpl,
+                           int readBufferSize,
+                           long startLsn,
+                           long finishLsn,
+                           String entryTypes,
+                           String txnIds,
+                           boolean verbose)
+        throws IOException, DatabaseException {
+
+        super(envImpl, readBufferSize, startLsn, finishLsn,
+              entryTypes, txnIds, verbose);
+        entryInfoMap = 
+            new TreeMap<LogEntryType, EntryInfo>(new LogEntryTypeComparator());
+
+        totalLogBytes = 0;
+        totalCount = 0;
+
+        ckptCounter = new CheckpointCounter();
+        ckptList = new ArrayList<CheckpointCounter>();
+        if (verbose) {
+            ckptList.add(ckptCounter);
+        }
+    }
+
+    /**
+     * This reader collects stats about the log entry.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        byte currentType = currentEntryHeader.getType();
+        int itemSize = currentEntryHeader.getItemSize();
+        int headerSize = currentEntryHeader.getSize();
+
+        /*
+         * Record various stats based on the entry header, then move the buffer
+         * forward to skip ahead.
+         */
+        LogEntryType lastEntryType = LogEntryType.findType(currentType);
+        entryBuffer.position(entryBuffer.position() + itemSize);
+
+        /*
+         * Get the info object for this entry type; if this is the first time
+         * the type is seen, create an info object and insert it.
+         */
+        EntryInfo info = entryInfoMap.get(lastEntryType);
+        if (info == null) {
+            info = new EntryInfo();
+            entryInfoMap.put(lastEntryType, info);
+        }
+
+        /* Update counts. */
+        info.count++;
+        totalCount++;
+        if (currentEntryHeader.getProvisional() == Provisional.YES) {
+            info.provisionalCount++;
+        }
+        int size = itemSize + headerSize;
+        info.totalBytes += size;
+        info.headerBytes += headerSize;
+        totalLogBytes += size;
+
+        if ((info.minBytes==0) || (info.minBytes > size)) {
+            info.minBytes = size;
+        }
+        if (info.maxBytes < size) {
+            info.maxBytes = size;
+        }
+
+        if (verbose) {
+            if (firstLsnRead == DbLsn.NULL_LSN) {
+                firstLsnRead = getLastLsn();
+            }
+
+            if (currentType == LogEntryType.LOG_CKPT_END.getTypeNum()) {
+                /* start counting a new interval */
+                ckptCounter.endCkptLsn = getLastLsn();
+                ckptCounter = new CheckpointCounter();
+                ckptList.add(ckptCounter);
+            } else {
+                ckptCounter.increment(this, currentType);
+            }
+        }
+
+        return true;
+    }
+
+    @Override
+    public void summarize() {
+        System.out.println("Log statistics:");
+        Iterator<Map.Entry<LogEntryType,EntryInfo>> iter = 
+            entryInfoMap.entrySet().iterator();
+
+        NumberFormat form = NumberFormat.getIntegerInstance();
+        NumberFormat percentForm = NumberFormat.getInstance();
+        percentForm.setMaximumFractionDigits(1);
+        System.out.println(pad("type") +
+                           pad("total") +
+                           pad("provisional") +
+                           pad("total") +
+                           pad("min") +
+                           pad("max") +
+                           pad("avg") +
+                           pad("entries"));
+
+        System.out.println(pad("") +
+                           pad("count") +
+                           pad("count") +
+                           pad("bytes") +
+                           pad("bytes") +
+                           pad("bytes") +
+                           pad("bytes") +
+                           pad("as % of log"));
+
+        long realTotalBytes = 0;
+
+        while (iter.hasNext()) {
+            Map.Entry<LogEntryType,EntryInfo> m = iter.next();
+            EntryInfo info = m.getValue();
+            StringBuffer sb = new StringBuffer();
+            LogEntryType entryType = m.getKey();
+            sb.append(pad(entryType.toString()));
+            sb.append(pad(form.format(info.count)));
+            sb.append(pad(form.format(info.provisionalCount)));
+            sb.append(pad(form.format(info.totalBytes)));
+            sb.append(pad(form.format(info.minBytes)));
+            sb.append(pad(form.format(info.maxBytes)));
+            sb.append(pad(form.format((long)(info.totalBytes/info.count))));
+            double entryPercent =
+                ((double)(info.totalBytes *100)/totalLogBytes);
+            sb.append(pad(percentForm.format(entryPercent)));
+            System.out.println(sb.toString());
+
+            /* Calculate key/data size for transactional LN */
+            if (entryType == LogEntryType.LOG_LN_TRANSACTIONAL) {
+                /*
+                 * LN_TX entry overhead (46 bytes):
+                 *   8 bytes node id
+                 *   1 byte boolean (whether data exists or is null)
+                 *   4 bytes data size
+                 *   4 bytes key size
+                 *   4 bytes database id
+                 *   8 bytes abort LSN
+                 *   1 byte abortKnownDeleted
+                 *   8 bytes txn id
+                 *   8 bytes lastlogged LSN (backpointer for txn)
+                 */
+
+                int overhead = (info.count * 46) + info.headerBytes;
+                realTotalBytes += (info.totalBytes-overhead);
+            }
+
+            /* Calculate key/data size for non-transactional LN */
+            if (entryType == LogEntryType.LOG_LN) {
+                /*
+                 * LN entry overhead (21 bytes):
+                 *   8 bytes node id
+                 *   1 byte boolean (whether data exists or is null)
+                 *   4 bytes data size
+                 *   4 bytes key size
+                 *   4 bytes database id
+                 */
+                int overhead = (info.count * 21) + info.headerBytes;
+                realTotalBytes += (info.totalBytes-overhead);
+            }
+        }
+
+        /* Print special line for key/data */
+        StringBuffer sb = new StringBuffer();
+        sb.append(pad("key/data"));
+        sb.append(pad(""));
+        sb.append(pad(""));
+        sb.append(pad(form.format(realTotalBytes)));
+        sb.append(pad(""));
+        sb.append(pad(""));
+        sb.append(pad(""));
+        String realSize = "(" +
+            percentForm.format((double)(realTotalBytes*100)/
+                               totalLogBytes) +
+            ")";
+        sb.append(pad(realSize));
+        System.out.println(sb.toString());
+
+        System.out.println("\nTotal bytes in portion of log read: " +
+                           form.format(totalLogBytes));
+        System.out.println("Total number of entries: " +
+                           form.format(totalCount));
+
+        if (verbose) {
+            summarizeCheckpointInfo();
+        }
+    }
+
+    private String pad(String result) {
+        int spaces = 15 - result.length();
+        StringBuffer sb = new StringBuffer();
+        for (int i = 0; i < spaces; i++) {
+            sb.append(" ");
+        }
+        sb.append(result);
+        return sb.toString();
+    }
+
+    private void summarizeCheckpointInfo() {
+        System.out.println("\nPer checkpoint interval info:");
+
+        /*
+         * Print out checkpoint interval info.
+         * If the log looks like this:
+         *
+         * start of log
+         * ckpt1 start
+         * ckpt1 end
+         * ckpt2 start
+         * ckpt2 end
+         * end of log
+         *
+         * There are 3 ckpt intervals
+         * start of log->ckpt1 end
+         * ckpt1 end -> ckpt2 end
+         * ckpt2 end -> end of log
+         */
+        System.out.println(pad("lnTxn") +
+                           pad("ln") +
+                           pad("mapLNTxn") +
+                           pad("mapLN") +
+                           pad("end-end") +    // ckpt n-1 end -> ckpt n end
+                           pad("end-start") +  // ckpt n-1 end -> ckpt n start
+                           pad("start-end") +  // ckpt n start -> ckpt n end
+                           pad("maxLNReplay") +
+                           pad("ckptEnd"));
+
+        long logFileMax;
+        try {
+            logFileMax = envImpl.getConfigManager().getLong(
+                                   EnvironmentParams.LOG_FILE_MAX);
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            return;
+        }
+
+        Iterator<CheckpointCounter> iter = ckptList.iterator();
+        CheckpointCounter prevCounter = null;
+        NumberFormat form = NumberFormat.getInstance();
+        while (iter.hasNext()) {
+            CheckpointCounter c = iter.next();
+            StringBuffer sb = new StringBuffer();
+
+            /* Entry type counts. */
+            int maxTxnLNs = c.preStartLNTxnCount + c.postStartLNTxnCount;
+            sb.append(pad(form.format(maxTxnLNs)));
+            int maxLNs = c.preStartLNCount + c.postStartLNCount;
+            sb.append(pad(form.format(maxLNs)));
+            sb.append(pad(form.format(c.preStartMapLNTxnCount +
+                                      c.postStartMapLNTxnCount)));
+            sb.append(pad(form.format(c.preStartMapLNCount +
+                                      c.postStartMapLNCount)));
+
+            /* Checkpoint interval distance. */
+            long end = (c.endCkptLsn == DbLsn.NULL_LSN) ? getLastLsn() :
+                c.endCkptLsn;
+            long endToEndDistance = 0;
+
+            FileManager fileMgr = envImpl.getFileManager();
+            if (prevCounter == null) {
+                endToEndDistance =
+                    DbLsn.getWithCleaningDistance(end,
+						  fileMgr,
+						  firstLsnRead,
+						  logFileMax);
+            } else {
+                endToEndDistance =
+                    DbLsn.getWithCleaningDistance(end,
+						  fileMgr,
+						  prevCounter.endCkptLsn,
+						  logFileMax);
+            }
+            sb.append(pad(form.format(endToEndDistance)));
+
+            /*
+             * Interval between last checkpoint end and
+             * this checkpoint start.
+             */
+            long start = (c.startCkptLsn == DbLsn.NULL_LSN) ? getLastLsn() :
+                c.startCkptLsn;
+            long endToStartDistance = 0;
+
+            if (prevCounter == null) {
+                endToStartDistance =
+                    DbLsn.getWithCleaningDistance(start,
+						  fileMgr,
+                                                  firstLsnRead,
+                                                  logFileMax);
+            } else {
+                endToStartDistance =
+                    DbLsn.getWithCleaningDistance(start,
+						  fileMgr,
+                                                  prevCounter.endCkptLsn,
+                                                  logFileMax);
+            }
+            sb.append(pad(form.format(endToStartDistance)));
+
+            /*
+             * Interval between ckpt start and ckpt end.
+             */
+            long startToEndDistance = 0;
+            if ((c.startCkptLsn != DbLsn.NULL_LSN)  &&
+                (c.endCkptLsn != DbLsn.NULL_LSN)) {
+                startToEndDistance =
+                    DbLsn.getWithCleaningDistance(c.endCkptLsn,
+						  fileMgr,
+						  c.startCkptLsn,
+						  logFileMax);
+            }
+            sb.append(pad(form.format(startToEndDistance)));
+
+            /*
+             * The maximum number of LNs to replay includes the portion of LNs
+             * from checkpoint start to checkpoint end of the previous
+             * interval.
+             */
+            int maxReplay = maxLNs + maxTxnLNs;
+            if (prevCounter != null) {
+                maxReplay += prevCounter.postStartLNTxnCount;
+                maxReplay += prevCounter.postStartLNCount;
+            }
+            sb.append(pad(form.format(maxReplay)));
+
+            if (c.endCkptLsn == DbLsn.NULL_LSN) {
+                sb.append("   ").append(DbLsn.getNoFormatString(getLastLsn()));
+            } else {
+                sb.append("   ").append(DbLsn.getNoFormatString(c.endCkptLsn));
+            }
+
+            System.out.println(sb.toString());
+            prevCounter = c;
+        }
+    }
+
+    static class EntryInfo {
+        public int count;
+        public int provisionalCount;
+        public long totalBytes;
+        public int headerBytes;
+        public int minBytes;
+        public int maxBytes;
+
+        EntryInfo() {
+            count = 0;
+            provisionalCount = 0;
+            totalBytes = 0;
+            headerBytes = 0;
+            minBytes = 0;
+            maxBytes = 0;
+        }
+    }
+
+    static class LogEntryTypeComparator implements Comparator<LogEntryType> {
+        public int compare(LogEntryType o1, LogEntryType o2) {
+            if (o1 == null) {
+                return -1;
+            }
+
+            if (o2 == null) {
+                return 1;
+            }
+
+            Byte t1 = Byte.valueOf(o1.getTypeNum());
+            Byte t2 = Byte.valueOf(o2.getTypeNum());
+            return t1.compareTo(t2);
+        }
+    }
+
+    /*
+     * Accumulate the count of items from checkpoint end->checkpoint end.
+     */
+    static class CheckpointCounter {
+        public long startCkptLsn = DbLsn.NULL_LSN;
+        public long endCkptLsn = DbLsn.NULL_LSN;
+        public int preStartLNTxnCount;
+        public int preStartLNCount;
+        public int preStartMapLNTxnCount;
+        public int preStartMapLNCount;
+        public int postStartLNTxnCount;
+        public int postStartLNCount;
+        public int postStartMapLNTxnCount;
+        public int postStartMapLNCount;
+
+        public void increment(FileReader reader,  byte currentEntryTypeNum) {
+            if (currentEntryTypeNum ==
+                LogEntryType.LOG_CKPT_START.getTypeNum()) {
+                startCkptLsn = reader.getLastLsn();
+            } else if (currentEntryTypeNum ==
+                       LogEntryType.LOG_LN_TRANSACTIONAL.getTypeNum()) {
+                if (startCkptLsn == DbLsn.NULL_LSN) {
+                    preStartLNTxnCount++;
+                } else {
+                    postStartLNTxnCount++;
+                }
+            } else if (currentEntryTypeNum ==
+                       LogEntryType.LOG_LN.getTypeNum()) {
+                if (startCkptLsn == DbLsn.NULL_LSN) {
+                    preStartLNCount++;
+                } else {
+                    postStartLNCount++;
+                }
+            } else if (currentEntryTypeNum ==
+                       LogEntryType.LOG_MAPLN.getTypeNum()) {
+                if (startCkptLsn == DbLsn.NULL_LSN) {
+                    preStartMapLNCount++;
+                } else {
+                    postStartMapLNCount++;
+                }
+            } else if (currentEntryTypeNum ==
+                       LogEntryType.LOG_MAPLN_TRANSACTIONAL.getTypeNum()) {
+                if (startCkptLsn == DbLsn.NULL_LSN) {
+                    preStartMapLNTxnCount++;
+                } else {
+                    postStartMapLNTxnCount++;
+                }
+            }
+        }
+    }
+}
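
Driver sketch, mirroring how a DbPrintLog-style utility might use this
reader.  envImpl and the LSN bounds are assumptions, and passing null for
the entry-type and txn-id filters is assumed (per DumpFileReader) to mean
no filtering.

    StatsFileReader reader = new StatsFileReader(
        envImpl, 8192, startLsn, finishLsn,
        null /* all entry types */, null /* all txn ids */,
        true /* verbose: per-checkpoint stats */);

    while (reader.readNextEntry()) {
        /* processEntry accumulates the per-type and per-ckpt counters. */
    }
    reader.summarize();  // prints the table and checkpoint interval info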
diff --git a/src/com/sleepycat/je/log/SyncedLogManager.java b/src/com/sleepycat/je/log/SyncedLogManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..dd55bb7a7b17b671ee305657082a8eb094799b24
--- /dev/null
+++ b/src/com/sleepycat/je/log/SyncedLogManager.java
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SyncedLogManager.java,v 1.28.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.cleaner.TrackedFileSummary;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * The SyncedLogManager uses the synchronized keyword to implement protected
+ * regions.
+ */
+public class SyncedLogManager extends LogManager {
+
+    /**
+     * There is a single log manager per database environment.
+     */
+    public SyncedLogManager(EnvironmentImpl envImpl,
+                            boolean readOnly)
+        throws DatabaseException {
+
+        super(envImpl, readOnly);
+    }
+
+    void serialLog(LogItem[] itemArray, LogContext context)
+        throws IOException, DatabaseException {
+
+        synchronized (logWriteLatch) {
+            serialLogInternal(itemArray, context);
+        }
+    }
+
+    protected void flushInternal()
+        throws LogException, DatabaseException {
+
+        try {
+            synchronized (logWriteLatch) {
+                logBufferPool.writeBufferToFile(0);
+            }
+        } catch (IOException e) {
+            throw new LogException(e.getMessage());
+        }
+    }
+
+    /**
+     * @see LogManager#getUnflushableTrackedSummary
+     */
+    public TrackedFileSummary getUnflushableTrackedSummary(long file)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            return getUnflushableTrackedSummaryInternal(file);
+        }
+    }
+
+    /**
+     * @see LogManager#removeTrackedFile
+     */
+    public void removeTrackedFile(TrackedFileSummary tfs)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            removeTrackedFileInternal(tfs);
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteNode
+     */
+    public void countObsoleteNode(long lsn,
+                                  LogEntryType type,
+                                  int size,
+                                  DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            countObsoleteNodeInternal(lsn, type, size, nodeDb);
+        }
+    }
+
+    /**
+     * @see LogManager#transferToUtilizationTracker
+     */
+    public void transferToUtilizationTracker(LocalUtilizationTracker
+                                             localTracker)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            transferToUtilizationTrackerInternal(localTracker);
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteINs
+     */
+    public void countObsoleteINs(List<Long> lsnList, DatabaseImpl nodeDb)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            countObsoleteINsInternal(lsnList, nodeDb);
+        }
+    }
+
+    /**
+     * @see LogManager#countObsoleteDb
+     */
+    public void countObsoleteDb(DatabaseImpl db)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            countObsoleteDbInternal(db);
+        }
+    }
+
+    /**
+     * @see LogManager#removeDbFileSummary
+     */
+    public boolean removeDbFileSummary(DatabaseImpl db, Long fileNum)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            return removeDbFileSummaryInternal(db, fileNum);
+        }
+    }
+
+    /**
+     * @see LogManager#loadEndOfLogStat
+     */
+    public void loadEndOfLogStat(EnvironmentStats stats)
+        throws DatabaseException {
+
+        synchronized (logWriteLatch) {
+            loadEndOfLogStatInternal(stats);
+        }
+    }
+}
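
The pattern in miniature, as a standalone sketch: every public operation
takes the same monitor, so the *Internal methods inherited from LogManager
can assume they run single-threaded.  The counter here is purely
illustrative.

    class SyncedCounter {
        private final Object latch = new Object(); // stands in for logWriteLatch
        private long value;

        long incrementAndGet() {
            synchronized (latch) {
                return ++value;  // the protected region
            }
        }
    }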
diff --git a/src/com/sleepycat/je/log/TraceLogHandler.java b/src/com/sleepycat/je/log/TraceLogHandler.java
new file mode 100644
index 0000000000000000000000000000000000000000..22f3a1fc01cbd38d2a29eb7a1ebb520959349089
--- /dev/null
+++ b/src/com/sleepycat/je/log/TraceLogHandler.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TraceLogHandler.java,v 1.34.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.util.logging.Handler;
+import java.util.logging.LogRecord;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Handler for java.util.logging. Takes logging records and publishes them into
+ * the database log.
+ */
+public class TraceLogHandler extends Handler {
+
+    private EnvironmentImpl env;
+
+    public TraceLogHandler(EnvironmentImpl env) {
+        this.env = env;
+    }
+
+    public void close() {
+    }
+
+    public void flush() {
+    }
+
+    public void publish(LogRecord l) {
+        if (!env.isReadOnly() &&
+	    !env.mayNotWrite()) {
+            try {
+                Tracer trace = new Tracer(l.getMessage());
+                trace.log(env.getLogManager());
+            } catch (DatabaseException e) {
+		throw new IllegalStateException(e);
+            }
+        }
+    }
+}
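
Wiring sketch: attaching the handler to a java.util.logging Logger.  The
logger name is illustrative (not the one JE actually uses) and envImpl is
assumed to be an open EnvironmentImpl.

    import java.util.logging.Logger;

    Logger logger = Logger.getLogger("com.sleepycat.je.example");
    logger.addHandler(new TraceLogHandler(envImpl));
    logger.info("this record becomes a Tracer entry in the database log");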
diff --git a/src/com/sleepycat/je/log/UtilizationFileReader.java b/src/com/sleepycat/je/log/UtilizationFileReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..417fa7b5885929c116f9cd3b3ccb13b60f71a3e0
--- /dev/null
+++ b/src/com/sleepycat/je/log/UtilizationFileReader.java
@@ -0,0 +1,310 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: UtilizationFileReader.java,v 1.19.2.2 2010/01/04 15:30:30 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.FileSummary;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.tree.INDeleteInfo;
+import com.sleepycat.je.tree.INDupDeleteInfo;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.txn.TxnEnd;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Summarizes the utilized and unutilized portion of each log file by examining
+ * each log entry.  Does not use the Cleaner UtilizationProfile information,
+ * in order to provide a second measure against which to evaluate the
+ * UtilizationProfile's accuracy.
+ */
+public class UtilizationFileReader extends FileReader {
+
+    private static final boolean DEBUG = true;
+
+    private Map<Long,FileSummary> summaries;   // Long file -> FileSummary
+    private Map<Long,NodeInfo> activeNodes;    // Long node ID -> NodeInfo
+    private Map<Long,List<Object>> txns;       // Long txn ID -> list of
+                                               // [ExtendedFileSummary,
+                                               //  LNLogEntry] pairs
+
+    /* holds one [ExtendedFileSummary, LNLogEntry] */
+    private List<Object> twoEntryList;
+
+    private UtilizationFileReader(EnvironmentImpl env, int readBufferSize)
+        throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              true,            // read forward
+              DbLsn.NULL_LSN,  // start LSN
+              null,            // single file number
+              DbLsn.NULL_LSN,  // end of file LSN
+              DbLsn.NULL_LSN); // finish LSN
+
+        summaries = new HashMap<Long,FileSummary>();
+        activeNodes = new HashMap<Long,NodeInfo>();
+        txns = new HashMap<Long,List<Object>>();
+
+        twoEntryList = new ArrayList<Object>();
+        twoEntryList.add(null);
+        twoEntryList.add(null);
+    }
+
+    @Override
+    protected boolean isTargetEntry() {
+        /* UtilizationTracker does not count the file header. */
+        return currentEntryHeader.getType() !=
+               LogEntryType.LOG_FILE_HEADER.getTypeNum();
+    }
+
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        LogEntryType lastEntryType =
+            LogEntryType.findType(currentEntryHeader.getType());
+        LogEntry entry = lastEntryType.getNewLogEntry();
+        entry.readEntry(currentEntryHeader, entryBuffer, true); // readFullItem
+
+        ExtendedFileSummary summary =
+            (ExtendedFileSummary) summaries.get(readBufferFileNum);
+        if (summary == null) {
+            summary = new ExtendedFileSummary();
+            summaries.put(readBufferFileNum, summary);
+        }
+
+        int size = getLastEntrySize();
+
+        summary.totalCount += 1;
+        summary.totalSize += size;
+
+        if (entry instanceof LNLogEntry) {
+            LNLogEntry lnEntry = (LNLogEntry) entry;
+            if (DEBUG) {
+                int otherSize = lnEntry.getLN().getLastLoggedSize();
+                if (size != otherSize) {
+                    System.out.println
+                        ("LogReader.getLastEntrySize=" + size +
+                         " LN.getLastLoggedSize=" + otherSize +
+                         " " + lnEntry.getLogType());
+                }
+            }
+            if (lastEntryType.isTransactional()) {
+                Long txnId = Long.valueOf(lnEntry.getTransactionId());
+                List<Object> txnEntries = txns.get(txnId);
+                if (txnEntries == null) {
+                    txnEntries = new ArrayList<Object>();
+                    txns.put(txnId, txnEntries);
+                }
+                txnEntries.add(summary);
+                txnEntries.add(lnEntry);
+            } else {
+                twoEntryList.set(0, summary);
+                twoEntryList.set(1, lnEntry);
+                applyTxn(twoEntryList, true);
+            }
+        } else if (entry instanceof INLogEntry) {
+            INLogEntry inEntry = (INLogEntry) entry;
+            Long nodeId = Long.valueOf(inEntry.getNodeId());
+            summary.totalINCount += 1;
+            summary.totalINSize += size;
+            countObsoleteNode(nodeId);
+            putActiveNode(nodeId, size, summary,
+                          inEntry.getDbId().getId(),
+                          false);
+        } else if (entry instanceof SingleItemEntry) {
+            Object item = ((SingleItemEntry) entry).getMainItem();
+            long deletedNodeId = -1;
+            if (item instanceof INDeleteInfo) {
+                deletedNodeId = ((INDeleteInfo) item).getDeletedNodeId();
+            } else if (item instanceof INDupDeleteInfo) {
+                deletedNodeId = ((INDupDeleteInfo) item).getDeletedNodeId();
+            }
+            if (deletedNodeId != -1) {
+                Long nodeId = Long.valueOf(deletedNodeId);
+                countObsoleteNode(nodeId);
+                activeNodes.remove(nodeId);
+            }
+            if (item instanceof TxnEnd) {
+                Long txnId = Long.valueOf(((TxnEnd) item).getTransactionId());
+                List<Object> txnEntries = txns.remove(txnId);
+                if (txnEntries != null) {
+                    applyTxn(txnEntries, item instanceof TxnCommit);
+                }
+            }
+        }
+
+        return true;
+    }
+
+    private void applyTxn(List<Object> entries, boolean commit) {
+        for (int i = 0; i < entries.size(); i += 2) {
+            ExtendedFileSummary summary = (ExtendedFileSummary) entries.get(i);
+            LNLogEntry lnEntry = (LNLogEntry) entries.get(i + 1);
+            LN ln = lnEntry.getLN();
+            int size = ln.getLastLoggedSize();
+
+            summary.totalLNCount += 1;
+            summary.totalLNSize += size;
+
+            if (!commit || ln.isDeleted()) {
+                summary.obsoleteLNCount += 1;
+                summary.recalcObsoleteLNSize += size;
+            }
+
+            if (commit) {
+                Long nodeId = Long.valueOf(lnEntry.getNodeId());
+                countObsoleteNode(nodeId);
+                if (ln.isDeleted()) {
+                    activeNodes.remove(nodeId);
+                } else {
+                    putActiveNode(nodeId, size, summary,
+                                  lnEntry.getDbId().getId(),
+                                  true);
+                }
+            }
+
+            /* Process Database truncate or remove. */
+            if (commit && ln.isDeleted() && ln instanceof MapLN) {
+                int dbId = ((MapLN) ln).getDatabase().getId().getId();
+                Iterator<Map.Entry<Long,NodeInfo>> iter = 
+                    activeNodes.entrySet().iterator();
+                while (iter.hasNext()) {
+                    Map.Entry<Long,NodeInfo> iEntry = iter.next();
+                    NodeInfo info = iEntry.getValue();
+                    if (info.dbId == dbId) {
+                        Long nodeId = iEntry.getKey();
+                        countObsoleteNode(nodeId);
+                        iter.remove();
+                    }
+                }
+            }
+        }
+    }
+
+    private void finishProcessing() {
+
+        /* Apply uncommitted transactions; they are treated as aborted. */
+        Iterator<List<Object>> txnIter = txns.values().iterator();
+        while (txnIter.hasNext()) {
+            List<Object> txnEntries = txnIter.next();
+            applyTxn(txnEntries, false);
+        }
+    }
+
+    private void putActiveNode(Long nodeId,
+                               int size,
+                               ExtendedFileSummary summary,
+                               int dbId,
+                               boolean isLN) {
+        NodeInfo info = activeNodes.get(nodeId);
+        if (info == null) {
+            info = new NodeInfo();
+            activeNodes.put(nodeId, info);
+        }
+        info.size = size;
+        info.summary = summary;
+        info.dbId = dbId;
+        info.isLN = isLN;
+    }
+
+    private void countObsoleteNode(Long nodeId) {
+        NodeInfo info = activeNodes.get(nodeId);
+        if (info != null) {
+            ExtendedFileSummary summary = info.summary;
+            if (info.isLN) {
+                summary.obsoleteLNCount += 1;
+                summary.recalcObsoleteLNSize += info.size;
+            } else {
+                summary.obsoleteINCount += 1;
+                summary.recalcObsoleteINSize += info.size;
+            }
+        }
+    }
+
+    /**
+     * Creates a UtilizationFileReader, reads the log, and returns the
+     * resulting map of Long file number to FileSummary.
+     */
+    public static Map<Long,FileSummary> calcFileSummaryMap(EnvironmentImpl env)
+        throws IOException, DatabaseException {
+
+        int readBufferSize = env.getConfigManager().getInt
+            (EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+
+        UtilizationFileReader reader =
+            new UtilizationFileReader(env, readBufferSize);
+        while (reader.readNextEntry()) {
+            /* All the work is done in processEntry. */
+        }
+
+        reader.finishProcessing();
+
+        return reader.summaries;
+    }
+
+    private static class ExtendedFileSummary extends FileSummary {
+        private int recalcObsoleteINSize;
+        private int recalcObsoleteLNSize;
+
+        /**
+         * Overrides the LN size calculation to return the recalculated number
+         * of obsolete LN bytes.
+         */
+        @Override
+        public int getObsoleteLNSize() {
+            return recalcObsoleteLNSize;
+        }
+
+        /**
+         * Overrides the IN size calculation to return the recalculated number
+         * of obsolete IN bytes.
+         */
+        @Override
+        public int getObsoleteINSize() {
+            return recalcObsoleteINSize;
+        }
+
+        /**
+         * Overrides to add the extended data fields.
+         */
+        @Override
+        public String toString() {
+            StringBuffer buf = new StringBuffer();
+            buf.append(super.toString());
+            buf.append("<extended-info recalcObosleteINSize=\"");
+            buf.append(recalcObsoleteINSize);
+            buf.append("\" recalcObosletedLNSize=\"");
+            buf.append(recalcObsoleteLNSize);
+            buf.append("\"/>");
+            return buf.toString();
+        }
+    }
+
+    private static class NodeInfo {
+        ExtendedFileSummary summary;
+        int size;
+        int dbId;
+        boolean isLN;
+    }
+}
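
Usage sketch: recompute utilization from the log and print it, for example
to compare against the cleaner's profile.  envImpl is assumed to be an open
EnvironmentImpl, and the snippet is run where the declared exceptions can
propagate.

    Map<Long,FileSummary> recalc =
        UtilizationFileReader.calcFileSummaryMap(envImpl);
    for (Map.Entry<Long,FileSummary> e : recalc.entrySet()) {
        System.out.println("file 0x" + Long.toHexString(e.getKey()) +
                           " -> " + e.getValue());
    }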
diff --git a/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java b/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..7d61be34ac96eba28b116d160bb08093d01a5de1
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java
@@ -0,0 +1,58 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BINDeltaLogEntry.java,v 1.27.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.BINDelta;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * A BINDeltaLogEntry knows how to create a whole BIN from a delta entry.
+ */
+public class BINDeltaLogEntry extends SingleItemEntry
+    implements INContainingEntry {
+
+    /**
+     * @param logClass the class used to instantiate the BINDelta item when
+     * this entry is read from the log.
+     */
+    public BINDeltaLogEntry(Class<BINDelta> logClass) {
+        super(logClass);
+    }
+
+    /*
+     * @see com.sleepycat.je.log.entry.INContainingEntry#getIN()
+     */
+    public IN getIN(EnvironmentImpl env)
+    	throws DatabaseException {
+
+        BINDelta delta = (BINDelta) getMainItem();
+        return delta.reconstituteBIN(env);
+    }
+
+    /*
+     * @see com.sleepycat.je.log.entry.INContainingEntry#getDbId()
+     */
+    public DatabaseId getDbId() {
+
+        BINDelta delta = (BINDelta) getMainItem();
+        return delta.getDbId();	
+    }
+
+    /**
+     * @return the LSN that represents this IN. For this BINDelta, it's
+     * the last full version.
+     */
+    public long getLsnOfIN(long lastReadLsn) {
+
+        BINDelta delta = (BINDelta) getMainItem();
+        return delta.getLastFullLsn();
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/BaseEntry.java b/src/com/sleepycat/je/log/entry/BaseEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..9de2d3926b6b4d99c8ab02cfd6808d4a6909fe16
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/BaseEntry.java
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BaseEntry.java,v 1.9.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * A log entry knows how to read, write and dump itself.  Each entry may be
+ * made up of one or more loggable items.
+ *
+ * The log entry on disk consists of
+ *  a. a log header defined by LogManager
+ *  b. a VLSN, if this entry type requires it, and replication is on.
+ *  c. the specific contents of the log entry.
+ *
+ * This class encompasses (b & c).
+ */
+abstract class BaseEntry {
+
+    /*
+     * These fields are transient and are not persisted to the log.
+     */
+
+    /* Used to instantiate the item from on-disk bytes. */
+    Class<?> logClass;
+
+    /*
+     * Attributes of the entry type may be used to conditionalize the reading
+     * and writing of the entry.
+     */
+    LogEntryType entryType;
+
+    /**
+     * Constructor to read an entry. The logEntryType must be set
+     * later, through setLogType().
+     */
+    BaseEntry(Class<?> logClass) {
+        this.logClass = logClass;
+    }
+
+    /**
+     * Constructor to write an entry.
+     */
+    BaseEntry() {
+    }
+
+    /**
+     * Inform a BaseEntry instance of its corresponding LogEntryType.
+     */
+    public void setLogType(LogEntryType entryType) {
+        this.entryType = entryType;
+    }
+
+    /**
+     * @return the type of log entry
+     */
+    public LogEntryType getLogType() {
+        return entryType;
+    }
+
+    /**
+     * By default, return zero because the last logged size is unknown.  This
+     * method is overridden by LNLogEntry.
+     */
+    public int getLastLoggedSize() {
+        return 0;
+    }
+
+    /**
+     * Returns true if this item should be counted as obsoleted when logged.
+     * This currently applies to deleted LNs only.
+     */
+    public boolean countAsObsoleteWhenLogged() {
+        return false;
+    }
+
+    /**
+     * Do any processing we need to do after logging, while under the logging
+     * latch.
+     */
+    public void postLogWork(long justLoggedLsn)
+        throws DatabaseException {
+        /* by default, do nothing. */
+    }
+
+    public abstract StringBuffer dumpEntry(StringBuffer sb, boolean verbose);
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        dumpEntry(sb, true);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/DbOperationType.java b/src/com/sleepycat/je/log/entry/DbOperationType.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6765d72445b37ef82b72706865d3d04847a64ed
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/DbOperationType.java
@@ -0,0 +1,90 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbOperationType.java,v 1.3.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * DbOperationType is a persistent enum used in NameLNLogEntries.  It supports
+ * replication of database operations by documenting the type of API operation
+ * that instigated the logging of a NameLN.
+ */
+public enum DbOperationType implements Loggable {
+
+    NONE((byte) 0),
+    CREATE((byte) 1),
+    REMOVE((byte) 2),
+    TRUNCATE((byte) 3),
+    RENAME((byte) 4);
+	
+    private byte value;
+
+    private DbOperationType(byte value) {
+        this.value = value;
+    }
+	
+    public static DbOperationType readTypeFromLog(ByteBuffer entryBuffer,
+                                                  byte entryVersion) {
+        byte opVal = entryBuffer.get();
+        switch (opVal) {
+        case 1:
+            return CREATE;
+
+        case 2:
+            return REMOVE;
+
+        case 3:
+            return TRUNCATE;
+
+        case 4:
+            return RENAME;
+
+        case 0:
+        default:
+            return NONE;
+
+        }
+    }
+	
+    /** @see Loggable#getLogSize */
+    public int getLogSize() {
+        return 1;
+    }
+	
+    /** @see Loggable#writeToLog */
+    public void writeToLog(ByteBuffer logBuffer) {
+        logBuffer.put(value);
+    }
+	
+    /** @see Loggable#readFromLog */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) {
+        value = itemBuffer.get();
+    }
+	
+    /** @see Loggable#dumpLog */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<DbOp val=\"").append(this).append("\"/>");
+    }
+	
+    /** @see Loggable#getTransactionId */
+    public long getTransactionId() {
+        return 0;
+    }
+	
+    /** @see Loggable#logicalEquals */
+    public boolean logicalEquals(Loggable other) {
+        if (!(other instanceof DbOperationType))
+            return false;
+	
+        return value == ((DbOperationType) other).value;
+    }
+}
+
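Round-trip sketch for the persistent enum: exactly one byte is written, and
readTypeFromLog maps it back.  The entry version value passed here is
arbitrary, since the method above ignores it.

    import java.nio.ByteBuffer;

    ByteBuffer buf = ByteBuffer.allocate(DbOperationType.CREATE.getLogSize());
    DbOperationType.CREATE.writeToLog(buf);
    buf.flip();
    DbOperationType roundTrip =
        DbOperationType.readTypeFromLog(buf, (byte) 6 /* entry version */);
    assert roundTrip == DbOperationType.CREATE;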
diff --git a/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java b/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..7d0c5a46087782fd708a16e667239e821ee6fa4a
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeletedDupLNLogEntry.java,v 1.33.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.txn.Txn;
+
+/**
+ * DeletedDupLNLogEntry encapsulates a deleted duplicate LN entry.  It contains
+ * all the regular transactional LN log entry fields plus an extra key, which
+ * is the nulled-out data field of the LN (the field that serves as the key in
+ * the duplicate tree).
+ */
+public class DeletedDupLNLogEntry extends LNLogEntry {
+
+    /*
+     * Deleted duplicate LNs must log an extra key in their log entries,
+     * because the data field that serves as the "key" in a dup tree is
+     * nulled out when the LN is deleted.
+     */
+    private byte[] dataAsKey;
+
+    /**
+     * Constructor to read an entry.
+     */
+    public DeletedDupLNLogEntry() {
+        super(com.sleepycat.je.tree.LN.class);
+    }
+
+    /**
+     * Constructor to make an object that can write this entry.
+     */
+    public DeletedDupLNLogEntry(LogEntryType entryType,
+                                LN ln,
+                                DatabaseId dbId,
+                                byte[] key,
+                                byte[] dataAsKey,
+                                long abortLsn,
+                                boolean abortKnownDeleted,
+                                Txn txn) {
+        super(entryType, ln, dbId, key, abortLsn, abortKnownDeleted, txn);
+        this.dataAsKey = dataAsKey;
+    }
+
+    /**
+     * Extends its super class to read in the extra dup key.
+     * @see LNLogEntry#readEntry
+     */
+    @Override
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException {
+
+        super.readEntry(header, entryBuffer, readFullItem);
+
+        /* Key */
+        if (readFullItem) {
+            byte logVersion = header.getVersion();
+            dataAsKey = LogUtils.readByteArray(entryBuffer, (logVersion < 6));
+        } else {
+            /* The LNLogEntry base class has already positioned to the end. */
+            dataAsKey = null;
+        }
+    }
+
+    /**
+     * Extends super class to dump out extra key.
+     * @see LNLogEntry#dumpEntry
+     */
+    @Override
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose) {
+        super.dumpEntry(sb, verbose);
+        sb.append(Key.dumpString(dataAsKey, 0));
+        return sb;
+    }
+
+    /*
+     * Writing support
+     */
+
+    /**
+     * Extends its super class to add in the extra key.
+     * @see LNLogEntry#getSize
+     */
+    @Override
+    public int getSize() {
+        return super.getSize() +
+            LogUtils.getByteArrayLogSize(dataAsKey);
+    }
+
+    /**
+     * @see LNLogEntry#writeEntry
+     */
+    @Override
+    public void writeEntry(LogEntryHeader header,
+                           ByteBuffer destBuffer) {
+        super.writeEntry(header, destBuffer);
+        LogUtils.writeByteArray(destBuffer, dataAsKey);
+    }
+
+    /*
+     * Accessors
+     */
+
+    /**
+     * Get the data-as-key out of the entry.
+     */
+    @Override
+    public byte[] getDupKey() {
+        return dataAsKey;
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/INContainingEntry.java b/src/com/sleepycat/je/log/entry/INContainingEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..3a5d49daa00a14e2b2c9a9f454a478e3e3f6189a
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/INContainingEntry.java
@@ -0,0 +1,36 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INContainingEntry.java,v 1.19.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * An INContainingEntry is a log entry that contains internal nodes.
+ */
+public interface INContainingEntry {
+
+    /**
+     * @return the IN held within this log entry.
+     */
+    public IN getIN(EnvironmentImpl env)
+        throws DatabaseException;
+
+    /**
+     * @return the database id held within this log entry.
+     */
+    public DatabaseId getDbId();
+
+    /**
+     * @return the LSN that represents this IN.
+     */
+    public long getLsnOfIN(long lastReadLsn);
+}
diff --git a/src/com/sleepycat/je/log/entry/INLogEntry.java b/src/com/sleepycat/je/log/entry/INLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..a7d4f62e0b4ce8eb6fdb008e5077a721b5960fc1
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/INLogEntry.java
@@ -0,0 +1,250 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INLogEntry.java,v 1.50.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * INLogEntry embodies all IN log entries.
+ * On disk, an IN log entry contains (before version 6):
+ * <pre>
+ *        IN
+ *        database id
+ *        obsolete LSN  -- file number only in version 1, full LSN in version 2
+ * </pre>
+ * (version 6)
+ * <pre>
+ *        database id
+ *        obsolete LSN
+ *        IN
+ * </pre>
+ */
+public class INLogEntry extends BaseEntry
+    implements LogEntry, NodeLogEntry, INContainingEntry {
+
+    /*
+     * Persistent fields in an IN entry.
+     */
+    private IN in;
+    private DatabaseId dbId;
+    /*
+     * obsoleteFile was added in version 1, and changed to obsoleteLsn in
+     * version 2.  If the offset is zero in the LSN, we read a version 1 entry
+     * since only the file number was stored.
+     */
+    private long obsoleteLsn;
+
+    /*
+     * Transient fields
+     *
+     * Save the node id when we read the log entry from disk. Do so explicitly
+     * instead of merely returning in.getNodeId(), because we don't always
+     * instantiate the IN.
+     */
+    private long nodeId;
+
+    /**
+     * Construct a log entry for reading.
+     */
+    public INLogEntry(Class<? extends IN> INClass) {
+        super(INClass);
+    }
+
+    /**
+     * Construct a log entry for writing to the log.
+     */
+    public INLogEntry(IN in) {
+        setLogType(in.getLogType());
+        this.in = in;
+        this.dbId = in.getDatabase().getId();
+        this.nodeId = in.getNodeId();
+        this.obsoleteLsn = in.getLastFullVersion();
+    }
+
+    /*
+     * Read support
+     */
+
+    /**
+     * Read in an IN entry.
+     */
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException {
+
+        byte logVersion = header.getVersion();
+        boolean version6OrLater = (logVersion >= 6);
+        try {
+            if (version6OrLater) {
+                dbId = new DatabaseId();
+                dbId.readFromLog(entryBuffer, logVersion);
+                obsoleteLsn =
+                    LogUtils.readLong(entryBuffer, false/*unpacked*/);
+            }
+            if (readFullItem) {
+                /* Read IN and get node ID. */
+                in = (IN) logClass.newInstance();
+                in.readFromLog(entryBuffer, logVersion);
+                nodeId = in.getNodeId();
+            } else {
+                /*
+                 * Calculate the position following the IN without
+                 * materializing it: the current position plus itemSize,
+                 * minus the sizes of the non-IN fields, lands just after
+                 * the IN in both the pre-6 and version 6 layouts.
+                 */
+                int position = entryBuffer.position() + header.getItemSize();
+                if (logVersion == 1) {
+                    /* Subtract size of obsoleteLsn */
+                    position -= LogUtils.UNSIGNED_INT_BYTES;
+                } else if (logVersion >= 2) {
+                    /* Subtract size of obsoleteLsn */
+                    if (version6OrLater) {
+                        position -= LogUtils.getPackedLongLogSize(obsoleteLsn);
+                    } else {
+                        position -= LogUtils.LONG_BYTES;
+                    }
+                }
+                /* Subtract size of dbId */
+                if (!version6OrLater) {
+                    position -= LogUtils.INT_BYTES;
+                } else {
+                    position -= LogUtils.getPackedIntLogSize(dbId.getId());
+                }
+                /* Read node ID and position after IN. */
+                nodeId = LogUtils.readLong(entryBuffer, !version6OrLater);
+                entryBuffer.position(position);
+                in = null;
+            }
+            if (!version6OrLater) {
+                dbId = new DatabaseId();
+                dbId.readFromLog(entryBuffer, logVersion);
+            }
+            if (logVersion < 1) {
+                obsoleteLsn = DbLsn.NULL_LSN;
+            } else if (logVersion == 1) {
+                long fileNum = LogUtils.readUnsignedInt(entryBuffer);
+                if (fileNum == 0xffffffffL) {
+                    obsoleteLsn = DbLsn.NULL_LSN;
+                } else {
+                    obsoleteLsn = DbLsn.makeLsn(fileNum, 0);
+                }
+            } else if (!version6OrLater) {
+                obsoleteLsn = LogUtils.readLong(entryBuffer, true/*unpacked*/);
+            }
+        } catch (IllegalAccessException e) {
+            throw new DatabaseException(e);
+        } catch (InstantiationException e) {
+            throw new DatabaseException(e);
+        }
+    }
+
+    /**
+     * Returns the LSN of the prior version of this node.  Used for counting
+     * the prior version as obsolete.  If the offset of the LSN is zero, only
+     * the file number is known because we read a version 1 log entry.
+     */
+    public long getObsoleteLsn() {
+
+        return obsoleteLsn;
+    }
+
+    /**
+     * Print out the contents of an entry.
+     */
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose) {
+        in.dumpLog(sb, verbose);
+        dbId.dumpLog(sb, verbose);
+        return sb;
+    }
+
+    /**
+     * @return the item in the log entry
+     */
+    public Object getMainItem() {
+        return in;
+    }
+
+    @Override
+    public Object clone()
+        throws CloneNotSupportedException {
+
+        return super.clone();
+    }
+
+    /**
+     * @see LogEntry#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /*
+     * Writing support
+     */
+
+    /**
+     * @see LogEntry#getSize
+     */
+    public int getSize() {
+        return (in.getLogSize() +
+                dbId.getLogSize() +
+                LogUtils.getPackedLongLogSize(obsoleteLsn));
+    }
+
+    /**
+     * @see LogEntry#writeEntry
+     */
+    public void writeEntry(LogEntryHeader header,
+                           ByteBuffer destBuffer) {
+        dbId.writeToLog(destBuffer);
+        LogUtils.writePackedLong(destBuffer, obsoleteLsn);
+        in.writeToLog(destBuffer);
+    }
+
+    /**
+     * Access the IN held within the entry.
+     * @see INContainingEntry#getIN
+     */
+    public IN getIN(EnvironmentImpl env)
+        throws DatabaseException {
+
+        return in;
+    }
+
+    /**
+     * @see NodeLogEntry#getNodeId
+     */
+    public long getNodeId() {
+        return nodeId;
+    }
+
+    /**
+     * @see INContainingEntry#getDbId()
+     */
+    public DatabaseId getDbId() {
+
+        return dbId;
+    }
+
+    /**
+     * @return the LSN that represents this IN. For a vanilla IN entry, it's
+     * the last LSN read by the log reader.
+     */
+    public long getLsnOfIN(long lastReadLsn) {
+        return lastReadLsn;
+    }
+
+    /**
+     * @see LogEntry#logicalEquals
+     *
+     * INs from two different environments are never considered equal,
+     * because they have LSNs that are environment-specific.
+     */
+    public boolean logicalEquals(LogEntry other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/LNLogEntry.java b/src/com/sleepycat/je/log/entry/LNLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..6972a8372c6bc1813a15df6c8d9a839076732c4b
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/LNLogEntry.java
@@ -0,0 +1,430 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LNLogEntry.java,v 1.56.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * LNLogEntry embodies all LN transactional log entries.
+ * On disk, an LN log entry contains (pre version 6)
+ * <pre>
+ *   LN
+ *   databaseid
+ *   key
+ *   abortLsn          -- if transactional
+ *   abortKnownDeleted -- if transactional
+ *   txn               -- if transactional
+ *
+ * (version 6)
+ *   databaseid
+ *   abortLsn          -- if transactional
+ *   abortKnownDeleted -- if transactional
+ *   txn               -- if transactional
+ *   LN
+ *   key
+ * </pre>
+ * Before version 6, a non-full-item read of a log entry only retrieved
+ * the node id. In version 6 and later, the database id, transaction id and
+ * node id are all available.
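+ * <p>
+ * In version 6 and later the key size for LN and LN_TX entries is not
+ * stored; readEntry derives it from the item size and the bytes consumed
+ * by the preceding fields.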
+ */
+public class LNLogEntry extends BaseEntry implements LogEntry, NodeLogEntry {
+    private static final byte ABORT_KNOWN_DELETED_MASK = (byte) 1;
+
+    /*
+     * Persistent fields in an LN entry
+     */
+    private LN ln;
+    private DatabaseId dbId;
+    private byte[] key;
+    private long abortLsn = DbLsn.NULL_LSN;
+    private boolean abortKnownDeleted;
+    private Txn txn;     // conditional
+
+    /*
+     * Transient fields used by the entry.
+     *
+     * Save the node id when we read the log entry from disk. Do so explicitly
+     * instead of merely returning ln.getNodeId(), because we don't always
+     * instantiate the LN.
+     */
+    private long nodeId;
+
+    /* Constructor to read an entry. */
+    public LNLogEntry(Class<? extends LN> LNClass) {
+        super(LNClass);
+    }
+
+    /* Constructor to write an entry. */
+    public LNLogEntry(LogEntryType entryType,
+                      LN ln,
+                      DatabaseId dbId,
+                      byte[] key,
+                      long abortLsn,
+                      boolean abortKnownDeleted,
+                      Txn txn) {
+        setLogType(entryType);
+        this.ln = ln;
+        this.dbId = dbId;
+        this.key = key;
+        this.abortLsn = abortLsn;
+        this.abortKnownDeleted = abortKnownDeleted;
+        this.txn = txn;
+        this.nodeId = ln.getNodeId();
+
+        /* A txn should only be provided for transactional entry types */
+        assert(entryType.isTransactional() == (txn != null));
+    }
+
+    /**
+     * @see LogEntry#readEntry
+     */
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException {
+
+        byte logVersion = header.getVersion();
+        boolean unpacked = (logVersion < 6);
+        int recStartPosition = entryBuffer.position();
+        try {
+
+            /*
+             * For log version 6 and above we store the key last so that we
+             * can avoid storing the key size; instead we derive it from the
+             * LN size and the total entry size. The DatabaseId is also
+             * packed. For older log versions the LN comes first, which let
+             * us optimize reading the node id in a partial read, but didn't
+             * let us save on the key size.
+             *
+             * Since log version 6 requires reading the database id and
+             * transaction id before the node id, readFullItem == false now
+             * assumes those fields are available. This helps because we'd
+             * like to do utilization tracking with partial log entry reads.
+             * For entries older than version 6, we always do a full read.
+             */
+            if (unpacked) {
+                /* LN is first for log versions prior to 6. */
+                ln = (LN) logClass.newInstance();
+                ln.readFromLog(entryBuffer, logVersion);
+                nodeId = ln.getNodeId();
+            }
+
+            /* Database id */
+            dbId = new DatabaseId();
+            dbId.readFromLog(entryBuffer, logVersion);
+
+            /* Key */
+            if (unpacked) {
+                key = LogUtils.readByteArray(entryBuffer, true/*unpacked*/);
+            } else {
+                /* The key is read later, after the LN. */
+            }
+
+            if (entryType.isTransactional()) {
+
+                /*
+                 * AbortLsn. If it was a marker LSN that was used to fill
+                 * in a create, mark it null.
+                 */
+                abortLsn = LogUtils.readLong(entryBuffer, unpacked);
+                if (DbLsn.getFileNumber(abortLsn) ==
+                    DbLsn.getFileNumber(DbLsn.NULL_LSN)) {
+                    abortLsn = DbLsn.NULL_LSN;
+                }
+
+                abortKnownDeleted =
+                    (entryBuffer.get() & ABORT_KNOWN_DELETED_MASK) != 0;
+
+                /* Locker */
+                txn = new Txn();
+                txn.readFromLog(entryBuffer, logVersion);
+            }
+
+            if (unpacked) {
+                if (!readFullItem) {
+                    /*
+                     * Position this buffer at its end, for the sake of any
+                     * subclasses.
+                     */
+                    int endPosition = recStartPosition + header.getItemSize();
+                    entryBuffer.position(endPosition);
+                }
+            } else {
+                if (readFullItem) {
+                    /* LN is next for log version 6 and above. */
+                    ln = (LN) logClass.newInstance();
+                    ln.readFromLog(entryBuffer, logVersion);
+                    nodeId = ln.getNodeId();
+                    int bytesRead =
+                        entryBuffer.position() - recStartPosition;
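+                    /*
+                     * For LN and LN_TX entries the key size is implicit:
+                     * whatever remains of the item after the fields read
+                     * so far.
+                     */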
+                    if (isLNType()) {
+                        int keySize = header.getItemSize() - bytesRead;
+                        key = LogUtils.readBytesNoLength(entryBuffer, keySize);
+                    } else {
+                        int keySize =
+                            LogUtils.readInt(entryBuffer, false/*unpacked*/);
+                        key = LogUtils.readBytesNoLength(entryBuffer, keySize);
+                    }
+                } else {
+
+                    /*
+                     * Read node ID and then set buffer position to end. This
+                     * takes advantage of the fact that the node id is in a
+                     * known spot, at the beginning of the LN.  We currently do
+                     * not support getting the db and txn ID in this mode, and
+                     * we may want to change the log format to do that
+                     * efficiently.
+                     */
+                    int endPosition = recStartPosition + header.getItemSize();
+                    nodeId = LogUtils.readPackedLong(entryBuffer);
+                    entryBuffer.position(endPosition);
+                    ln = null;
+                }
+            }
+
+            /* LNs save the last logged size. */
+            if (ln != null) {
+                ln.setLastLoggedSize(header.getSize() + header.getItemSize());
+            }
+        } catch (IllegalAccessException e) {
+            throw new DatabaseException(e);
+        } catch (InstantiationException e) {
+            throw new DatabaseException(e);
+        }
+    }
+
+    /**
+     * @see LogEntry#dumpEntry
+     */
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose) {
+        ln.dumpLog(sb, verbose);
+        dbId.dumpLog(sb, verbose);
+        sb.append(Key.dumpString(key, 0));
+        if (entryType.isTransactional()) {
+            if (abortLsn != DbLsn.NULL_LSN) {
+                sb.append(DbLsn.toString(abortLsn));
+            }
+            sb.append("<knownDeleted val=\"");
+            sb.append(abortKnownDeleted ? "true" : "false");
+            sb.append("\"/>");
+            txn.dumpLog(sb, verbose);
+        }
+        return sb;
+    }
+
+    /**
+     * @see LogEntry#getMainItem
+     */
+    public Object getMainItem() {
+        return ln;
+    }
+
+    /**
+     * @see LogEntry#clone
+     */
+    public Object clone() throws CloneNotSupportedException {
+        return super.clone();
+    }
+
+    /**
+     * @see LogEntry#getTransactionId
+     */
+    public long getTransactionId() {
+        if (entryType.isTransactional()) {
+            return txn.getId();
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * @see NodeLogEntry#getNodeId
+     */
+    public long getNodeId() {
+        return nodeId;
+    }
+
+    /*
+     * Writing support
+     */
+
+    /**
+     * @see LogEntry#getSize
+     */
+    public int getSize() {
+        int len = key.length;
+        int size = ln.getLogSize() +
+            dbId.getLogSize() +
+            len;
+        if (!isLNType()) {
+            size += LogUtils.getPackedIntLogSize(len);
+        }
+        if (entryType.isTransactional()) {
+            size += LogUtils.getPackedLongLogSize(abortLsn);
+            size++;   // abortKnownDeleted
+            size += txn.getLogSize();
+        }
+        return size;
+    }
+
+    /**
+     * Returns the last logged size, saved by readEntry and writeEntry.
+     */
+    @Override
+    public int getLastLoggedSize() {
+        return ln.getLastLoggedSize();
+    }
+
+    private boolean isLNType() {
+        return entryType == LogEntryType.LOG_LN ||
+            entryType == LogEntryType.LOG_LN_TRANSACTIONAL;
+    }
+
+    /**
+     * @see LogEntry#writeEntry
+     */
+    public void writeEntry(LogEntryHeader header, ByteBuffer destBuffer) {
+        dbId.writeToLog(destBuffer);
+
+        if (entryType.isTransactional()) {
+            LogUtils.writePackedLong(destBuffer, abortLsn);
+            byte aKD = 0;
+            if (abortKnownDeleted) {
+                aKD |= ABORT_KNOWN_DELETED_MASK;
+            }
+            destBuffer.put(aKD);
+            txn.writeToLog(destBuffer);
+        }
+
+        ln.writeToLog(destBuffer);
+        if (isLNType()) {
+            LogUtils.writeBytesNoLength(destBuffer, key);
+        } else {
+            LogUtils.writePackedInt(destBuffer, key.length);
+            LogUtils.writeBytesNoLength(destBuffer, key);
+        }
+
+        /* LNs save the last logged size. */
+        ln.setLastLoggedSize(header.getSize() + header.getItemSize());
+    }
+
+    /**
+     * Returns true for a deleted LN to count it immediately as obsolete.
+     * @see LogEntry#countAsObsoleteWhenLogged
+     */
+    @Override
+    public boolean countAsObsoleteWhenLogged() {
+        return ln.isDeleted();
+    }
+
+    /**
+     * For LN entries, we need to record the latest LSN for that node with the
+     * owning transaction, within the protection of the log latch. This is a
+     * callback for the log manager to do that recording.
+     *
+     * @see LogEntry#postLogWork
+     */
+    @Override
+    public void postLogWork(long justLoggedLsn)
+        throws DatabaseException {
+
+        if (entryType.isTransactional()) {
+            txn.addLogInfo(justLoggedLsn);
+        }
+    }
+
+    /*
+     * Accessors
+     */
+    public LN getLN() {
+        return ln;
+    }
+
+    public DatabaseId getDbId() {
+        return dbId;
+    }
+
+    public byte[] getKey() {
+        return key;
+    }
+
+    public byte[] getDupKey() {
+        if (ln.isDeleted()) {
+            return null;
+        } else {
+            return ln.getData();
+        }
+    }
+
+    public long getAbortLsn() {
+        return abortLsn;
+    }
+
+    public boolean getAbortKnownDeleted() {
+        return abortKnownDeleted;
+    }
+
+    public Long getTxnId() {
+        if (entryType.isTransactional()) {
+            return Long.valueOf(txn.getId());
+        } else {
+            return null;
+        }
+    }
+
+    public Txn getUserTxn() {
+        if (entryType.isTransactional()) {
+            return txn;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * @see LogEntry#logicalEquals
+     */
+    public boolean logicalEquals(LogEntry other) {
+        if (!(other instanceof LNLogEntry))
+            return false;
+
+        LNLogEntry otherEntry = (LNLogEntry) other;
+
+        if (!dbId.logicalEquals(otherEntry.dbId))
+            return false;
+
+        if (txn != null) {
+            if (!txn.logicalEquals(otherEntry.txn))
+                return false;
+        } else {
+            if (otherEntry.txn != null)
+                return false;
+        }
+
+        if (!Arrays.equals(key, otherEntry.key))
+            return false;
+
+        if (!ln.logicalEquals(otherEntry.ln))
+            return false;
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/LogEntry.java b/src/com/sleepycat/je/log/entry/LogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..27a9827bfc27140f64e9f75064eadf90addf7d01
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/LogEntry.java
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntry.java,v 1.27.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * A LogEntry allows you to read, write and dump a database log entry.  Each
+ * entry may be made up of one or more loggable items.
+ *
+ * The log entry on disk consists of a log header (defined by LogManager)
+ * and the specific contents of the log entry.
+ */
+public interface LogEntry extends Cloneable {
+
+    /**
+     * Inform a LogEntry instance of its corresponding LogEntryType.
+     */
+    public void setLogType(LogEntryType entryType);
+
+    /**
+     * @return the type of log entry
+     */
+    public LogEntryType getLogType();
+
+    /**
+     * Read in a log entry.
+     */
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException;
+
+    /**
+     * Print out the contents of an entry.
+     */
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose);
+
+    /**
+     * @return the first item of the log entry
+     */
+    public Object getMainItem();
+
+    /**
+     * @return the transaction id if this log entry is transactional,
+     * 0 otherwise.
+     */
+    public long getTransactionId();
+
+    /**
+     * @return size of byte buffer needed to store this entry.
+     */
+    public int getSize();
+
+    /**
+     * @return total size of last logged entry, or zero if unknown.  The last
+     * logged size is known for LNs, and is used for obsolete size counting.
+     */
+    public int getLastLoggedSize();
+
+    /**
+     * Serialize this object into the buffer.
+     * @param logBuffer is the destination buffer
+     */
+    public void writeEntry(LogEntryHeader header,
+                           ByteBuffer logBuffer);
+
+    /**
+     * Returns true if this item should be counted as obsoleted when logged.
+     * This currently applies to deleted LNs only.
+     */
+    public boolean countAsObsoleteWhenLogged();
+
+    /**
+     * Do any processing we need to do after logging, while under the logging
+     * latch.
+     */
+    public void postLogWork(long justLoggedLsn)
+        throws DatabaseException;
+
+    /**
+     * @return a shallow clone.
+     */
+    public Object clone() throws CloneNotSupportedException;
+
+    /**
+     * @return true if these two log entries are logically the same.
+     * Used for replication.
+     */
+    public boolean logicalEquals(LogEntry other);
+}
diff --git a/src/com/sleepycat/je/log/entry/NameLNLogEntry.java b/src/com/sleepycat/je/log/entry/NameLNLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..7eefdf3a12cda7dcef87888cf1c2f54ecc323164
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/NameLNLogEntry.java
@@ -0,0 +1,259 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NameLNLogEntry.java,v 1.5.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.ReplicatedDatabaseConfig;
+import com.sleepycat.je.log.DbOpReplicationContext;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.tree.NameLN;
+import com.sleepycat.je.txn.Txn;
+
+/**
+ * NameLNLogEntry contains all the regular LNLogEntry fields and additional
+ * information about the database operation which instigated the logging of
+ * this NameLN. This additional information is used to support replication of
+ * database operations in a replication group.
+ *
+ * Database operations pose a special problem for replication because unlike
+ * data record put and get calls, they can result in multiple log entries that
+ * are not all members of a single transaction.  Create and truncate are the
+ * problem operations because they end up logging new MapLNs, and our
+ * implementation does not treat MapLNs as transactional.  Database operations
+ * challenge two replication assumptions: (a) that all logical operations can
+ * be repeated on the client node based on the contents of a single log entry,
+ * and (b) that non-transactional log entries like MapLNs need not be
+ * replicated.
+ *
+ * Specifically, here's what is logged for database operations.
+ *
+ * create:
+ *
+ *  1. new NameLN_TX
+ *  2. new MapLN, which has the database config info.
+ *  3. txn commit of autocommit or user txn.
+ *
+ * rename:
+ *
+ *  1. deleted NameLN_TX
+ *  2. new NameLN_TX
+ *  3. txn commit from autocommit or user txn
+ *
+ * truncate:
+ *
+ *  1. new MapLN w/new id
+ *  2. modify the existing NameLN with new id (old database is deleted by
+ *     usual commit-time processing)
+ *  3. txn commit from autocommit or user txn
+ *
+ * delete:
+ *
+ *  1. deleted NameLN_TX (old database gets deleted by usual commit-time
+ *     processing)
+ *  2. txn commit from autocommit or user txn
+ *
+ * Extra information is needed for create and truncate, which both log
+ * information within the MapLN. Rename and delete only log NameLNs, so they
+ * can be replicated on the client using the normal replication messages.  The
+ * extra fields which follow the usual LNLogEntry fields are:
+ *
+ * operationType - the type of database operation. In a single node system,
+ *                 this is local information implicit in the code path.
+ * databaseConfig (optional) - For creates, database configuration info
+ * databaseId (optional) - For truncates, the old db id, so we know which
+ *                        MapLN to delete.
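+ *
+ * <p>On disk (version 6 and later), the extra fields follow the usual
+ * LNLogEntry fields, as written by writeEntry:
+ * <pre>
+ *     operationType
+ *     replicatedCreateConfig  -- if operationType is CREATE
+ *     truncateOldDbId         -- if operationType is TRUNCATE
+ * </pre>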
+ */
+public class NameLNLogEntry extends LNLogEntry {
+
+    /*
+     * operationType, truncateOldDbId and replicatedCreateConfig are
+     * logged as part of the entry.
+     */
+    private DbOperationType operationType;
+    private DatabaseId truncateOldDbId;
+    private ReplicatedDatabaseConfig replicatedCreateConfig;
+
+    /**
+     * Constructor to read an entry.
+     */
+    public NameLNLogEntry() {
+        super(NameLN.class);
+    }
+
+    /**
+     * Constructor to write this entry.
+     */
+    public NameLNLogEntry(LogEntryType entryType,
+                          NameLN nameLN,
+                          DatabaseId dbId,
+                          byte[] key,
+                          long abortLsn,
+                          boolean abortKnownDeleted,
+                          Txn txn,
+                          ReplicationContext repContext) {
+
+        super(entryType, nameLN, dbId, key, abortLsn, abortKnownDeleted, txn);
+        operationType = repContext.getDbOperationType();
+        if (operationType == DbOperationType.CREATE) {
+            replicatedCreateConfig =
+                ((DbOpReplicationContext) repContext).getCreateConfig();
+        } else if (operationType == DbOperationType.TRUNCATE) {
+            truncateOldDbId =
+                ((DbOpReplicationContext) repContext).getTruncateOldDbId();
+        }
+    }
+
+    /**
+     * Extends its super class to read in database operation information.
+     * @see LNLogEntry#readEntry
+     */
+    @Override
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException {
+
+        super.readEntry(header, entryBuffer, readFullItem);
+
+        /*
+         * The NameLNLogEntry was introduced in version 6. Before, a LNLogEntry
+         * was used for NameLNs, and there is no extra information in the log
+         * entry.
+         */
+        byte version = header.getVersion();
+        if (version >= 6) {
+            if (readFullItem) {
+                operationType = DbOperationType.readTypeFromLog(entryBuffer,
+                                                                version);
+                if (operationType == DbOperationType.CREATE) {
+                    replicatedCreateConfig = new ReplicatedDatabaseConfig();
+                    replicatedCreateConfig.readFromLog(entryBuffer, version);
+                } else if (operationType == DbOperationType.TRUNCATE) {
+                    truncateOldDbId = new DatabaseId();
+                    truncateOldDbId.readFromLog(entryBuffer, version);
+                }
+            }
+
+            /*
+             * If readFullItem is false, the LNLogEntry base class has already
+             * positioned to the end, so nothing more to do.
+             */
+        } else {
+            operationType = DbOperationType.NONE;
+        }
+    }
+
+    /**
+     * Extends its super class to dump database operation information.
+     * @see LNLogEntry#dumpEntry
+     */
+    @Override
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose) {
+
+        super.dumpEntry(sb, verbose);
+
+        operationType.dumpLog(sb, verbose);
+        if (replicatedCreateConfig != null) {
+            replicatedCreateConfig.dumpLog(sb, verbose);
+        }
+        if (truncateOldDbId != null) {
+            truncateOldDbId.dumpLog(sb, verbose);
+        }
+
+        return sb;
+    }
+
+    /**
+     * Extends its super class to add in database operation information.
+     * @see LNLogEntry#getSize
+     */
+    @Override
+    public int getSize() {
+        int size = super.getSize() + operationType.getLogSize();
+
+        if (operationType == DbOperationType.CREATE) {
+            size += replicatedCreateConfig.getLogSize();
+        }
+        if (operationType == DbOperationType.TRUNCATE) {
+            size += truncateOldDbId.getLogSize();
+        }
+        return size;
+    }
+
+    /**
+     * Extends its super class to add in database operation information.
+     * @see LNLogEntry#writeEntry
+     */
+    @Override
+    public void writeEntry(LogEntryHeader header, ByteBuffer destBuffer) {
+
+        super.writeEntry(header, destBuffer);
+
+        operationType.writeToLog(destBuffer);
+        if (operationType == DbOperationType.CREATE) {
+            replicatedCreateConfig.writeToLog(destBuffer);
+        } else if (operationType == DbOperationType.TRUNCATE) {
+            truncateOldDbId.writeToLog(destBuffer);
+        }
+    }
+
+    /**
+     * @see LogEntry#logicalEquals
+     */
+    @Override
+    public boolean logicalEquals(LogEntry other) {
+
+        if (!super.logicalEquals(other))
+            return false;
+
+        NameLNLogEntry otherEntry = (NameLNLogEntry) other;
+        if (!operationType.logicalEquals(otherEntry.operationType)) {
+            return false;
+        }
+
+        if ((truncateOldDbId != null) &&
+            (!truncateOldDbId.logicalEquals(otherEntry.truncateOldDbId))) {
+            return false;
+        }
+
+        if (replicatedCreateConfig != null) {
+            if (!replicatedCreateConfig.logicalEquals(
+                    otherEntry.replicatedCreateConfig)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * @return the operationType
+     */
+    public DbOperationType getOperationType() {
+        return operationType;
+    }
+
+    /**
+     * @return the replicatedCreateConfig
+     */
+    public ReplicatedDatabaseConfig getReplicatedCreateConfig() {
+        return replicatedCreateConfig;
+    }
+
+    /**
+     * @return the truncateOldDbId
+     */
+    public DatabaseId getTruncateOldDbId() {
+        return truncateOldDbId;
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/NodeLogEntry.java b/src/com/sleepycat/je/log/entry/NodeLogEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..e664e628822b277a71b94541c7b186d1db958641
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/NodeLogEntry.java
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NodeLogEntry.java,v 1.8.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import com.sleepycat.je.dbi.DatabaseId;
+
+/**
+ * Implemented by all LogEntry classes that provide a node ID.
+ */
+public interface NodeLogEntry extends LogEntry {
+
+    /**
+     * Returns the node ID.  This value is redundant with the main item (Node)
+     * of a log entry.  It is returned separately so that it can be obtained
+     * when the entry's main item (Node) is not loaded.  Partial loading is an
+     * optimization for recovery.
+     */
+    long getNodeId();
+
+    /**
+     * All node entries have a database ID.
+     */
+    DatabaseId getDbId();
+}
diff --git a/src/com/sleepycat/je/log/entry/SingleItemEntry.java b/src/com/sleepycat/je/log/entry/SingleItemEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..65a443f8b756ca42dbe31a8c4976e2ee1ae01032
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/SingleItemEntry.java
@@ -0,0 +1,120 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SingleItemEntry.java,v 1.9.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * This class embodies log entries that have a single loggable item.
+ * On disk, an entry contains:
+ * <pre>
+ *     the Loggable item
+ * </pre>
+ */
+public class SingleItemEntry extends BaseEntry implements LogEntry {
+
+    /*
+     * Persistent fields in a SingleItemEntry.
+     */
+    private Loggable item;
+
+    /**
+     * Construct a log entry for reading.
+     */
+    public SingleItemEntry(Class<?> logClass) {
+        super(logClass);
+    }
+
+    /**
+     * Construct a log entry for writing.
+     */
+    public SingleItemEntry(LogEntryType entryType, Loggable item) {
+        setLogType(entryType);
+        this.item = item;
+    }
+
+    /**
+     * @see LogEntry#readEntry
+     */
+    public void readEntry(LogEntryHeader header,
+                          ByteBuffer entryBuffer,
+                          boolean readFullItem)
+        throws DatabaseException {
+
+        try {
+            item = (Loggable) logClass.newInstance();
+            item.readFromLog(entryBuffer, header.getVersion());
+
+        } catch (IllegalAccessException e) {
+            throw new DatabaseException(e);
+        } catch (InstantiationException e) {
+            throw new DatabaseException(e);
+        }
+    }
+
+    /**
+     * @see LogEntry#dumpEntry
+     */
+    public StringBuffer dumpEntry(StringBuffer sb, boolean verbose) {
+        item.dumpLog(sb, verbose);
+        return sb;
+    }
+
+    /**
+     * @see LogEntry#getMainItem
+     */
+    public Object getMainItem() {
+        return item;
+    }
+
+    /**
+     * @see LogEntry#clone
+     */
+    @Override
+    public Object clone()
+        throws CloneNotSupportedException {
+
+        return super.clone();
+    }
+
+    /**
+     * @see LogEntry#getTransactionId
+     */
+    public long getTransactionId() {
+        return item.getTransactionId();
+    }
+
+    /*
+     * Writing support
+     */
+
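+    /**
+     * @see LogEntry#getSize
+     */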
+    public int getSize() {
+        return item.getLogSize();
+    }
+
+    /**
+     * @see LogEntry#writeEntry
+     */
+    public void writeEntry(LogEntryHeader header, ByteBuffer destBuffer) {
+        item.writeToLog(destBuffer);
+    }
+
+    /**
+     * @see LogEntry#logicalEquals
+     */
+    public boolean logicalEquals(LogEntry other) {
+        return item.logicalEquals((Loggable) other.getMainItem());
+    }
+}
diff --git a/src/com/sleepycat/je/log/package.html b/src/com/sleepycat/je/log/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..ca82c60d08db96722c03cc0bc0a975b0b8e01995
--- /dev/null
+++ b/src/com/sleepycat/je/log/package.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.8.2.2 2010/01/04 15:30:30 cwl Exp $
+
+-->
+</head>
+<body bgcolor="white">
+
+Provides classes and interfaces for the JE log subsystem.
+
+
+<h2>Package Specification</h2>
+
+(None)
+
+<!-- Put @see and @since tags down here. -->
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/package.html b/src/com/sleepycat/je/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..2e5707bd3c93f02ce6f5d682fdb30569ae6e9c36
--- /dev/null
+++ b/src/com/sleepycat/je/package.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.16.2.2 2010/01/04 15:30:27 cwl Exp $
+
+-->
+</head>
+<body>
+Foundation for creating environments, databases and transactions; provides
+cursor-based data access.
+
+<h2>Package Specification</h2>
+This package constitutes the base public API for Berkeley DB, Java
+Edition. The classes here are used to create database
+objects, and insert and retrieve data.
+<p>
+This package provides a key/data pair model of a database
+record. Databases and database cursors are the key objects used to
+access data. An alternative collections-based API is available through
+com.sleepycat.collections.
+<p>
+The Environment class embodies the database environment and is the starting
+point for the application. Databases and transaction objects are
+created through the Environment class.
+<p>
+Data can be inserted and retrieved directly through the Database
+object, or through a Cursor obtained from the Database. A database record
+consists of a key/data pair, where key and data are each individually
+represented by a DatabaseEntry object. Classes in com.sleepycat.bind
+provide optional support for mapping a Java object to a DatabaseEntry.
+<p>
+Configuration classes are used to specify the attributes of particular
+operations. For example, the attributes of a database environment are
+specified in the EnvironmentConfig class. An instance of that class is
+required for Environment construction. Likewise, the attributes of a
+database are described in DatabaseConfig, which is a parameter to the
+Environment.openDatabase() method.
+
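+<p>
+A minimal usage sketch follows. The environment home directory, database
+name, and record values below are illustrative only (the home directory
+must already exist):
+<pre>
+import java.io.File;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
+public class HelloJE {
+    public static void main(String[] args) throws Exception {
+        /* Open (creating if necessary) an environment and a database. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(new File("/tmp/jeEnv"), envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "helloDb", dbConfig);
+
+        /* A record is a key/data pair, each wrapped in a DatabaseEntry. */
+        DatabaseEntry key = new DatabaseEntry("hello".getBytes("UTF-8"));
+        DatabaseEntry data = new DatabaseEntry("world".getBytes("UTF-8"));
+        db.put(null, key, data);
+
+        /* Read the record back. */
+        DatabaseEntry found = new DatabaseEntry();
+        OperationStatus status = db.get(null, key, found, LockMode.DEFAULT);
+        if (status == OperationStatus.SUCCESS) {
+            System.out.println(new String(found.getData(), "UTF-8"));
+        }
+
+        db.close();
+        env.close();
+    }
+}
+</pre>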
+@see <a href="{@docRoot}/../GettingStartedGuide/index.html"
+        target="_top">[Getting Started Guide]</a>
+</body>
+</html>
diff --git a/src/com/sleepycat/je/recovery/CheckpointEnd.java b/src/com/sleepycat/je/recovery/CheckpointEnd.java
new file mode 100644
index 0000000000000000000000000000000000000000..2c0dddb115542634d649944c28f8bf4ffb7d850e
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/CheckpointEnd.java
@@ -0,0 +1,295 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CheckpointEnd.java,v 1.38.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.util.Calendar;
+
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * CheckpointEnd encapsulates the information needed by a checkpoint end
+ * log entry.
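+ *
+ * <p>On disk, as written by writeToLog, it contains:
+ * <pre>
+ *     invoker
+ *     endTime
+ *     checkpointStartLsn
+ *     rootLsnExists flag
+ *     rootLsn             -- if rootLsnExists
+ *     firstActiveLsn
+ *     lastLocalNodeId, lastReplicatedNodeId
+ *     lastLocalDbId, lastReplicatedDbId
+ *     lastLocalTxnId, lastReplicatedTxnId
+ *     id
+ * </pre>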
+ */
+public class CheckpointEnd implements Loggable {
+
+    /*
+     * invoker is just a way to tag each checkpoint in the log for easier
+     * log-based debugging. It will tell us whether the checkpoint was
+     * invoked by recovery, the daemon, the api, or the cleaner.
+     */
+    private String invoker;
+
+    private Timestamp endTime;
+    private long checkpointStartLsn;
+    private boolean rootLsnExists;
+    private long rootLsn;
+    private long firstActiveLsn;
+    private long lastLocalNodeId;
+    private long lastReplicatedNodeId;
+    private int lastLocalDbId;
+    private int lastReplicatedDbId;
+    private long lastLocalTxnId;
+    private long lastReplicatedTxnId;
+    private long id;
+
+    public CheckpointEnd(String invoker,
+                         long checkpointStartLsn,
+                         long rootLsn,
+                         long firstActiveLsn,
+                         long lastLocalNodeId,
+                         long lastReplicatedNodeId,
+                         int lastLocalDbId,
+                         int lastReplicatedDbId,
+                         long lastLocalTxnId,
+                         long lastReplicatedTxnId,
+                         long id) {
+        if (invoker == null) {
+            this.invoker = "";
+        } else {
+            this.invoker = invoker;
+        }
+
+        Calendar cal = Calendar.getInstance();
+        this.endTime = new Timestamp(cal.getTime().getTime());
+        this.checkpointStartLsn = checkpointStartLsn;
+        this.rootLsn = rootLsn;
+        if (rootLsn == DbLsn.NULL_LSN) {
+            rootLsnExists = false;
+        } else {
+            rootLsnExists = true;
+        }
+        if (firstActiveLsn == DbLsn.NULL_LSN) {
+            this.firstActiveLsn = checkpointStartLsn;
+        } else {
+            this.firstActiveLsn = firstActiveLsn;
+        }
+        this.lastLocalNodeId = lastLocalNodeId;
+        this.lastReplicatedNodeId = lastReplicatedNodeId;
+        this.lastLocalDbId = lastLocalDbId;
+        this.lastReplicatedDbId = lastReplicatedDbId;
+        this.lastLocalTxnId = lastLocalTxnId;
+        this.lastReplicatedTxnId = lastReplicatedTxnId;
+        this.id = id;
+    }
+
+    /* For logging only */
+    public CheckpointEnd() {
+        checkpointStartLsn = DbLsn.NULL_LSN;
+        rootLsn = DbLsn.NULL_LSN;
+        firstActiveLsn = DbLsn.NULL_LSN;
+    }
+
+    /*
+     * Logging support for writing to the log
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        int size =
+            LogUtils.getStringLogSize(invoker) +    // invoker
+            LogUtils.getTimestampLogSize(endTime) + // endTime
+            LogUtils.getPackedLongLogSize(checkpointStartLsn) +
+            1 +                                     // rootLsnExists
+            LogUtils.getPackedLongLogSize(firstActiveLsn) +
+            LogUtils.getPackedLongLogSize(lastLocalNodeId) +
+            LogUtils.getPackedLongLogSize(lastReplicatedNodeId) +
+            LogUtils.getPackedIntLogSize(lastLocalDbId) +
+            LogUtils.getPackedIntLogSize(lastReplicatedDbId) +
+            LogUtils.getPackedLongLogSize(lastLocalTxnId) +
+            LogUtils.getPackedLongLogSize(lastReplicatedTxnId) +
+            LogUtils.getPackedLongLogSize(id);
+
+        if (rootLsnExists) {
+            size += LogUtils.getPackedLongLogSize(rootLsn);
+        }
+        return size;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeString(logBuffer, invoker);
+        LogUtils.writeTimestamp(logBuffer, endTime);
+        LogUtils.writePackedLong(logBuffer, checkpointStartLsn);
+        byte booleans = (byte) (rootLsnExists ? 1 : 0);
+        logBuffer.put(booleans);
+        if (rootLsnExists) {
+            LogUtils.writePackedLong(logBuffer, rootLsn);
+        }
+        LogUtils.writePackedLong(logBuffer, firstActiveLsn);
+
+        LogUtils.writePackedLong(logBuffer, lastLocalNodeId);
+        LogUtils.writePackedLong(logBuffer, lastReplicatedNodeId);
+
+        LogUtils.writePackedInt(logBuffer, lastLocalDbId);
+        LogUtils.writePackedInt(logBuffer, lastReplicatedDbId);
+
+        LogUtils.writePackedLong(logBuffer, lastLocalTxnId);
+        LogUtils.writePackedLong(logBuffer, lastReplicatedTxnId);
+
+        LogUtils.writePackedLong(logBuffer, id);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean version6OrLater = (entryVersion >= 6);
+        invoker = LogUtils.readString(logBuffer, !version6OrLater);
+        endTime = LogUtils.readTimestamp(logBuffer, !version6OrLater);
+        checkpointStartLsn = LogUtils.readLong(logBuffer, !version6OrLater);
+        byte booleans = logBuffer.get();
+        rootLsnExists = (booleans & 1) != 0;
+        if (rootLsnExists) {
+            rootLsn = LogUtils.readLong(logBuffer, !version6OrLater);
+        }
+        firstActiveLsn = LogUtils.readLong(logBuffer, !version6OrLater);
+
+        lastLocalNodeId = LogUtils.readLong(logBuffer, !version6OrLater);
+        if (version6OrLater) {
+            lastReplicatedNodeId = LogUtils.readLong(logBuffer,
+                                                     false/*unpacked*/);
+        }
+
+        lastLocalDbId = LogUtils.readInt(logBuffer, !version6OrLater);
+        if (version6OrLater) {
+            lastReplicatedDbId = LogUtils.readInt(logBuffer,
+                                                  false/*unpacked*/);
+        }
+
+        lastLocalTxnId = LogUtils.readLong(logBuffer, !version6OrLater);
+        if (version6OrLater) {
+            lastReplicatedTxnId = LogUtils.readLong(logBuffer,
+                                                    false/*unpacked*/);
+        }
+
+        id = LogUtils.readLong(logBuffer, !version6OrLater);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<CkptEnd invoker=\"").append(invoker);
+        sb.append("\" time=\"").append(endTime);
+        sb.append("\" lastLocalNodeId=\"").append(lastLocalNodeId);
+        sb.append("\" lastReplicatedNodeId=\"").append(lastReplicatedNodeId);
+        sb.append("\" lastLocalDbId=\"").append(lastLocalDbId);
+        sb.append("\" lastReplicatedDbId=\"").append(lastReplicatedDbId);
+        sb.append("\" lastLocalTxnId=\"").append(lastLocalTxnId);
+        sb.append("\" lastReplicatedTxnId=\"").append(lastReplicatedTxnId);
+        sb.append("\" id=\"").append(id);
+        sb.append("\" rootExists=\"").append(rootLsnExists);
+        sb.append("\">");
+        sb.append("<ckptStart>");
+        sb.append(DbLsn.toString(checkpointStartLsn));
+        sb.append("</ckptStart>");
+
+        if (rootLsnExists) {
+            sb.append("<root>");
+            sb.append(DbLsn.toString(rootLsn));
+            sb.append("</root>");
+        }
+        sb.append("<firstActive>");
+        sb.append(DbLsn.toString(firstActiveLsn));
+        sb.append("</firstActive>");
+        sb.append("</CkptEnd>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("time=").append(endTime);
+        sb.append(" lastLocalNodeId=").append(lastLocalNodeId);
+        sb.append(" lastReplicatedNodeId=").append(lastReplicatedNodeId);
+        sb.append(" lastLocalDbId=").append(lastLocalDbId);
+        sb.append(" lastReplicatedDbId=").append(lastReplicatedDbId);
+        sb.append(" lastLocalTxnId=").append(lastLocalTxnId);
+        sb.append(" lastReplicatedTxnId=").append(lastReplicatedTxnId);
+        sb.append(" id=").append(id);
+        sb.append(" rootExists=").append(rootLsnExists);
+        sb.append(" ckptStartLsn=").append
+            (DbLsn.getNoFormatString(checkpointStartLsn));
+        if (rootLsnExists) {
+            sb.append(" root=").append(DbLsn.getNoFormatString(rootLsn));
+        }
+        sb.append(" firstActive=").
+	    append(DbLsn.getNoFormatString(firstActiveLsn));
+        return sb.toString();
+    }
+
+    /*
+     * Accessors
+     */
+    long getCheckpointStartLsn() {
+        return checkpointStartLsn;
+    }
+
+    long getRootLsn() {
+        return rootLsn;
+    }
+
+    long getFirstActiveLsn() {
+        return firstActiveLsn;
+    }
+
+    long getLastLocalNodeId() {
+        return lastLocalNodeId;
+    }
+
+    long getLastReplicatedNodeId() {
+        return lastReplicatedNodeId;
+    }
+
+    int getLastLocalDbId() {
+        return lastLocalDbId;
+    }
+
+    int getLastReplicatedDbId() {
+        return lastReplicatedDbId;
+    }
+
+    long getLastLocalTxnId() {
+        return lastLocalTxnId;
+    }
+
+    long getLastReplicatedTxnId() {
+        return lastReplicatedTxnId;
+    }
+
+    long getId() {
+        return id;
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/CheckpointStart.java b/src/com/sleepycat/je/recovery/CheckpointStart.java
new file mode 100644
index 0000000000000000000000000000000000000000..419c7aaca1ca1e7ab9ea84a90ef70b348714c235
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/CheckpointStart.java
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CheckpointStart.java,v 1.34.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.util.Calendar;
+
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * CheckpointStart creates a log entry that marks the beginning of a
+ * checkpoint.
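+ *
+ * <p>On disk, as written by writeToLog, it contains:
+ * <pre>
+ *     startTime
+ *     id
+ *     invoker
+ * </pre>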
+ */
+public class CheckpointStart implements Loggable {
+
+    private Timestamp startTime;
+    private long id;
+
+    /*
+     * invoker is just a way to tag each checkpoint in the log for easier
+     * log-based debugging. It will tell us whether the checkpoint was
+     * invoked by recovery, the daemon, the api, or the cleaner.
+     */
+    private String invoker;
+
+    public CheckpointStart(long id, String invoker) {
+        Calendar cal = Calendar.getInstance();
+        this.startTime = new Timestamp(cal.getTime().getTime());
+        this.id = id;
+        if (invoker == null) {
+            this.invoker = "";
+        } else {
+            this.invoker = invoker;
+        }
+    }
+
+    /* For logging only. */
+    public CheckpointStart() {
+    }
+
+    /*
+     * Logging support for writing.
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getTimestampLogSize(startTime) +
+            LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getStringLogSize(invoker);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeTimestamp(logBuffer, startTime);
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writeString(logBuffer, invoker);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean unpacked = (entryVersion < 6);
+        startTime = LogUtils.readTimestamp(logBuffer, unpacked);
+        id = LogUtils.readLong(logBuffer, unpacked);
+        invoker = LogUtils.readString(logBuffer, unpacked);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<CkptStart invoker=\"").append(invoker);
+        sb.append("\" time=\"").append(startTime);
+        sb.append("\" id=\"").append(id);
+        sb.append("\"/>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/Checkpointer.java b/src/com/sleepycat/je/recovery/Checkpointer.java
new file mode 100644
index 0000000000000000000000000000000000000000..dc27a1148a66ea2067b85c319cfb4efa63dae49e
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/Checkpointer.java
@@ -0,0 +1,1491 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Checkpointer.java,v 1.170.2.5 2010/03/26 01:52:07 mark Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.logging.Level;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.cleaner.Cleaner;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.cleaner.FileSelector.CheckpointStartCleanerState;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogItem;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.Provisional;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.INLogContext;
+import com.sleepycat.je.tree.INLogItem;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.utilint.DaemonThread;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.PropUtil;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * The Checkpointer looks through the tree for internal nodes that must be
+ * flushed to the log. Checkpoint flushes must be done in ascending order from
+ * the bottom of the tree up.
+ */
+public class Checkpointer extends DaemonThread implements EnvConfigObserver {
+
+    /*
+     * We currently use multi-logging whenever practical, but we're keeping an
+     * option open to disable it, perhaps via a config param.
+     */
+    private static final boolean MULTI_LOG = true;
+
+    /**
+     * For unit testing only.  Called before we flush the max level.  This
+     * field is static because it is called from the static flushIN method.
+     */
+    public static TestHook maxFlushLevelHook = null;
+
+    public static TestHook beforeFlushHook = null;
+
+    private EnvironmentImpl envImpl;
+
+    /* Checkpoint sequence, initialized at recovery. */
+    private long checkpointId;
+
+    /*
+     * How much the log should grow between checkpoints. If 0, we're using time
+     * based checkpointing.
+     */
+    private long logSizeBytesInterval;
+    private long logFileMax;
+    private long timeInterval;
+    private long lastCheckpointMillis;
+
+    /* Configured to true to minimize checkpoint duration. */
+    private boolean highPriority;
+
+    private volatile Map<DatabaseImpl,Integer> highestFlushLevels;
+
+    private long nCheckpoints;
+    private long lastCheckpointStart;
+    private long lastCheckpointEnd;
+    private FlushStats flushStats;
+
+    public Checkpointer(EnvironmentImpl envImpl,
+                        long waitTime,
+                        String name)
+        throws DatabaseException {
+
+        super(waitTime, name, envImpl);
+        this.envImpl = envImpl;
+        logSizeBytesInterval =
+            envImpl.getConfigManager().getLong
+                (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL);
+        logFileMax =
+            envImpl.getConfigManager().getLong(EnvironmentParams.LOG_FILE_MAX);
+        timeInterval = waitTime;
+        lastCheckpointMillis = 0;
+
+        nCheckpoints = 0;
+        flushStats = new FlushStats();
+
+        highestFlushLevels = Collections.emptyMap();
+
+        /* Initialize mutable properties and register for notifications. */
+        envConfigUpdate(envImpl.getConfigManager(), null);
+        envImpl.addConfigObserver(this);
+    }
+
+    /**
+     * Process notifications of mutable property changes.
+     */
+    public void envConfigUpdate(DbConfigManager cm,
+                                EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        highPriority = cm.getBoolean
+            (EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY);
+    }
+
+    /**
+     * Initializes the checkpoint intervals when no checkpoint is performed
+     * while opening the environment.
+     */
+    public void initIntervals(long lastCheckpointEnd,
+                              long lastCheckpointMillis) {
+        this.lastCheckpointEnd = lastCheckpointEnd;
+        this.lastCheckpointMillis = lastCheckpointMillis;
+    }
+
+    /**
+     * Returns the highest flush level for a database that is part of a
+     * checkpoint currently in progress.  Used by the evictor to determine
+     * whether to log INs provisionally.  If an IN's level is less than the
+     * level returned, it should be logged provisionally by the evictor.
+     * IN.MIN_LEVEL is returned if no checkpoint is in progress or the given
+     * database is not part of the checkpoint.
+     */
+    public int getHighestFlushLevel(DatabaseImpl db) {
+        return getHighestFlushLevelInternal(db, highestFlushLevels);
+    }
+
+    private static int
+        getHighestFlushLevelInternal(DatabaseImpl db,
+                                     Map<DatabaseImpl,Integer>
+                                         highestFlushLevels) {
+
+        Integer val = highestFlushLevels.get(db);
+        return (val != null) ? val.intValue() : IN.MIN_LEVEL;
+    }
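+
+    /*
+     * A minimal sketch (hypothetical caller) of the evictor-side use
+     * described above: an IN whose level is below the returned level is
+     * logged provisionally.
+     *
+     *   boolean logProvisionally =
+     *       in.getLevel() < checkpointer.getHighestFlushLevel(db);
+     */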
+
+    /**
+     * Figure out the wakeup period. Supplied through this static method
+     * because we need to pass the wakeup period to the superclass and need to
+     * do the calculation outside this constructor.
+     */
+    public static long getWakeupPeriod(DbConfigManager configManager)
+        throws IllegalArgumentException, DatabaseException {
+
+        long wakeupPeriod = PropUtil.microsToMillis
+            (configManager.getLong
+                (EnvironmentParams.CHECKPOINTER_WAKEUP_INTERVAL));
+        long bytePeriod = configManager.getLong
+            (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL);
+
+        /* Checkpointing period must be set either by time or by log size. */
+        if ((wakeupPeriod == 0) && (bytePeriod == 0)) {
+            throw new IllegalArgumentException
+                (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName() +
+                 " and " +
+                 EnvironmentParams.CHECKPOINTER_WAKEUP_INTERVAL.getName() +
+                 " cannot both be 0. ");
+        }
+
+        /*
+         * Checkpointing by log size takes precedence over the time-based period.
+         */
+        if (bytePeriod == 0) {
+            return wakeupPeriod;
+        } else {
+            return 0;
+        }
+    }
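+
+    /*
+     * Worked example (illustrative values): with CHECKPOINTER_BYTES_INTERVAL
+     * set to 0 and CHECKPOINTER_WAKEUP_INTERVAL set to 300000000
+     * microseconds, this returns 300000 ms and the daemon wakes on a timer.
+     * With a nonzero bytes interval, 0 is returned and wakeups are instead
+     * triggered by wakeupAfterWrite() as the log grows.
+     */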
+
+    /**
+     * Set checkpoint id -- can only be done after recovery.
+     */
+    public synchronized void setCheckpointId(long lastCheckpointId) {
+        checkpointId = lastCheckpointId;
+    }
+
+    /**
+     * Load stats.
+     */
+    public void loadStats(StatsConfig config, EnvironmentStats stat)
+        throws DatabaseException {
+
+        stat.setNCheckpoints(nCheckpoints);
+        stat.setLastCheckpointStart(lastCheckpointStart);
+        stat.setLastCheckpointEnd(lastCheckpointEnd);
+        stat.setLastCheckpointId(checkpointId);
+        stat.setNFullINFlush(flushStats.nFullINFlush);
+        stat.setNFullBINFlush(flushStats.nFullBINFlush);
+        stat.setNDeltaINFlush(flushStats.nDeltaINFlush);
+
+        if (config.getClear()) {
+            nCheckpoints = 0;
+            flushStats.nFullINFlush = 0;
+            flushStats.nFullBINFlush = 0;
+            flushStats.nDeltaINFlush = 0;
+        }
+    }
+
+    public synchronized void clearEnv() {
+        envImpl = null;
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries()
+        throws DatabaseException {
+
+        return envImpl.getConfigManager().getInt
+            (EnvironmentParams.CHECKPOINTER_RETRY);
+    }
+
+    /**
+     * Called whenever the DaemonThread wakes up from a sleep.
+     */
+    protected void onWakeup()
+        throws DatabaseException {
+
+        if (envImpl.isClosed()) {
+            return;
+        }
+
+        doCheckpoint(CheckpointConfig.DEFAULT,
+                     false, // flushAll
+                     "daemon");
+    }
+
+    /**
+     * Wakes up the checkpointer if a checkpoint log interval is configured
+     * and the number of bytes written since the last checkpoint exceeds the
+     * size of the interval.
+     */
+    public void wakeupAfterWrite() {
+        if (logSizeBytesInterval != 0) {
+            long nextLsn = envImpl.getFileManager().getNextLsn();
+            if (DbLsn.getNoCleaningDistance
+                    (nextLsn, lastCheckpointEnd, logFileMax) >=
+                    logSizeBytesInterval) {
+                wakeup();
+            }
+        }
+    }
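+
+    /*
+     * Note: DbLsn.getNoCleaningDistance, used here and in isRunnable,
+     * estimates the number of log bytes between two LSNs by treating log
+     * files as contiguous, i.e. ignoring gaps left by cleaner-deleted files.
+     */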
+
+    /**
+     * Determine whether a checkpoint should be run.
+     *
+     * 1. If the force parameter is specified, always checkpoint.
+     *
+     * 2. If the config object specifies time or log size, use that.
+     *
+     * 3. If the environment is configured to use log-size-based
+     * checkpointing, check the log.
+     *
+     * 4. Lastly, use time-based checking.
+     */
+    private boolean isRunnable(CheckpointConfig config)
+        throws DatabaseException {
+
+        /* Figure out if we're using log size or time to determine interval. */
+        long useBytesInterval = 0;
+        long useTimeInterval = 0;
+        long nextLsn = DbLsn.NULL_LSN;
+        boolean runnable = false;
+        try {
+            if (config.getForce()) {
+                runnable = true;
+                return runnable;
+            } else if (config.getKBytes() != 0) {
+                useBytesInterval = config.getKBytes() << 10;
+            } else if (config.getMinutes() != 0) {
+                // convert to millis
+                useTimeInterval = config.getMinutes() * 60 * 1000;
+            } else if (logSizeBytesInterval != 0) {
+                useBytesInterval = logSizeBytesInterval;
+            } else {
+                useTimeInterval = timeInterval;
+            }
+
+            /*
+             * If our checkpoint interval is defined by log size, check on how
+             * much log has grown since the last checkpoint.
+             */
+            if (useBytesInterval != 0) {
+                nextLsn = envImpl.getFileManager().getNextLsn();
+                if (DbLsn.getNoCleaningDistance(nextLsn, lastCheckpointEnd,
+                                                logFileMax) >=
+                    useBytesInterval) {
+                    runnable = true;
+                }
+            } else if (useTimeInterval != 0) {
+
+                /*
+                 * Our checkpoint is determined by time.  If enough time has
+                 * passed and some log data has been written, do a checkpoint.
+                 */
+                long lastUsedLsn = envImpl.getFileManager().getLastUsedLsn();
+                if (((System.currentTimeMillis() - lastCheckpointMillis) >=
+                     useTimeInterval) &&
+                    (DbLsn.compareTo(lastUsedLsn, lastCheckpointEnd) != 0)) {
+                    runnable = true;
+                }
+            }
+            return runnable;
+        } finally {
+            StringBuffer sb = new StringBuffer();
+            sb.append("size interval=").append(useBytesInterval);
+            if (nextLsn != DbLsn.NULL_LSN) {
+                sb.append(" nextLsn=").
+                    append(DbLsn.getNoFormatString(nextLsn));
+            }
+            if (lastCheckpointEnd != DbLsn.NULL_LSN) {
+                sb.append(" lastCkpt=");
+                sb.append(DbLsn.getNoFormatString(lastCheckpointEnd));
+            }
+            sb.append(" time interval=").append(useTimeInterval);
+            sb.append(" force=").append(config.getForce());
+            sb.append(" runnable=").append(runnable);
+
+            Tracer.trace(Level.FINEST,
+                         envImpl,
+                         sb.toString());
+        }
+    }
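+
+    /*
+     * Worked example (illustrative values) for the interval selection above:
+     * config.getKBytes() == 1024 gives useBytesInterval = 1024 << 10 =
+     * 1,048,576 bytes; config.getMinutes() == 5 gives useTimeInterval =
+     * 5 * 60 * 1000 = 300,000 ms.
+     */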
+
+    /**
+     * The real work to do a checkpoint. This may be called by the checkpoint
+     * thread when waking up, or it may be invoked programmatically through
+     * the API.
+     *
+     * @param flushAll if true, this checkpoint must flush all the way to
+     *       the top of the dbtree, instead of stopping at the highest level
+     *       last modified.
+     *
+     * @param invokingSource a debug aid, to indicate who invoked this
+     *       checkpoint. (i.e. recovery, the checkpointer daemon, the cleaner,
+     *       programmatically)
+     */
+    public synchronized void doCheckpoint(CheckpointConfig config,
+                                          boolean flushAll,
+                                          String invokingSource)
+        throws DatabaseException {
+
+        if (envImpl.isReadOnly()) {
+            return;
+        }
+
+        if (!isRunnable(config)) {
+            return;
+        }
+
+        /*
+         * If there are cleaned files to be deleted, flush an extra level to
+         * write out the parents of cleaned nodes.  This ensures that no node
+         * will contain the LSN of a cleaned file.
+         */
+        boolean flushExtraLevel = false;
+        Cleaner cleaner = envImpl.getCleaner();
+        CheckpointStartCleanerState cleanerState =
+            cleaner.getFilesAtCheckpointStart();
+        if (!cleanerState.isEmpty()) {
+            flushExtraLevel = true;
+        }
+
+        lastCheckpointMillis = System.currentTimeMillis();
+        flushStats.resetPerRunCounters();
+
+        /* Get the next checkpoint id. */
+        checkpointId++;
+        nCheckpoints++;
+
+        boolean success = false;
+        boolean traced = false;
+
+        LogManager logManager = envImpl.getLogManager();
+
+        /* dirtyMap keeps track of the INs to be written out by the ckpt. */
+        DirtyINMap dirtyMap = new DirtyINMap(envImpl);
+        try {
+
+            /*
+             * Eviction can run during checkpoint as long as it follows the
+             * same rules for using provisional logging and for propagating
+             * logging of the checkpoint dirty set up the tree. We have to lock
+             * out the evictor after the logging of checkpoint start until
+             * we've selected the dirty set and decided on the highest level to
+             * be flushed. See SR 11163, 11349.
+             */
+            long checkpointStart = DbLsn.NULL_LSN;
+            long firstActiveLsn = DbLsn.NULL_LSN;
+
+            synchronized (envImpl.getEvictor()) {
+
+                /* Log the checkpoint start. */
+                SingleItemEntry startEntry =
+                    new SingleItemEntry(LogEntryType.LOG_CKPT_START,
+                                        new CheckpointStart(checkpointId,
+                                                            invokingSource));
+                checkpointStart =
+                    logManager.log(startEntry,
+                                   ReplicationContext.NO_REPLICATE);
+
+                /*
+                 * Note the first active LSN point. The definition of
+                 * firstActiveLsn is that all log entries for active
+                 * transactions are equal to or after that LSN.
+                 */
+                firstActiveLsn = envImpl.getTxnManager().getFirstActiveLsn();
+
+                if (firstActiveLsn == DbLsn.NULL_LSN) {
+                    firstActiveLsn = checkpointStart;
+                } else {
+                    if (DbLsn.compareTo(checkpointStart, firstActiveLsn) < 0) {
+                        firstActiveLsn = checkpointStart;
+                    }
+                }
+
+                /*
+                 * Flush replication information if necessary. Do this before
+                 * the dirty set is selected.
+                 */
+                if (envImpl.isReplicated()) {
+                    envImpl.getReplicator().preCheckpointEndFlush();
+                }
+
+                /* 
+                 * Find the set of dirty INs that must be logged.  Update the
+                 * highestFlushLevels volatile field so it will be seen by the
+                 * evictor, before starting to flush dirty nodes.
+                 */
+                highestFlushLevels = dirtyMap.selectDirtyINsForCheckpoint
+                    (flushAll, flushExtraLevel);
+            }
+
+            /*
+             * Add the dirty map to the memory budget, outside the evictor
+             * synchronization section.
+             */
+            dirtyMap.addCostToMemoryBudget();
+
+            /* Call hook after dirty map creation and before flushing. */
+            TestHookExecute.doHookIfSet(beforeFlushHook);
+
+            /* Flush IN nodes. */
+            boolean allowDeltas = !config.getMinimizeRecoveryTime();
+            flushDirtyNodes(envImpl, dirtyMap, highestFlushLevels, allowDeltas,
+                            checkpointStart, highPriority, flushStats);
+
+            /*
+             * Flush MapLNs if not already done by flushDirtyNodes.  Only flush
+             * a database if it has not already been flushed since checkpoint
+             * start.  Lastly, flush the DB mapping tree root.
+             */
+            dirtyMap.flushMapLNs(checkpointStart);
+            dirtyMap.flushRoot(checkpointStart);
+
+            /*
+             * Flush utilization info AFTER flushing IN nodes to reduce the
+             * inaccuracies caused by the sequence FileSummaryLN-LN-BIN.
+             */
+            envImpl.getUtilizationProfile().flushFileUtilization
+                (envImpl.getUtilizationTracker().getTrackedFiles());
+
+            DbTree dbTree = envImpl.getDbTree();
+            CheckpointEnd ckptEnd = new CheckpointEnd
+                (invokingSource, checkpointStart, envImpl.getRootLsn(),
+                 firstActiveLsn,
+                 envImpl.getNodeSequence().getLastLocalNodeId(),
+                 envImpl.getNodeSequence().getLastReplicatedNodeId(),
+                 dbTree.getLastLocalDbId(), dbTree.getLastReplicatedDbId(),
+                 envImpl.getTxnManager().getLastLocalTxnId(),
+                 envImpl.getTxnManager().getLastReplicatedTxnId(),
+                 checkpointId);
+
+            SingleItemEntry endEntry =
+                new SingleItemEntry(LogEntryType.LOG_CKPT_END, ckptEnd);
+
+            /*
+             * Log checkpoint end and update state kept about the last
+             * checkpoint location. Send a trace message *before* the
+             * checkpoint end log entry. This is done so that the normal trace
+             * message doesn't affect the time-based isRunnable() calculation,
+             * which only issues a checkpoint if a log record has been written
+             * since the last checkpoint.
+             */
+            trace(envImpl, invokingSource, true);
+            traced = true;
+
+            /*
+             * Always flush to ensure that cleaned files are not referenced,
+             * and to ensure that this checkpoint is not wasted if we crash.
+             */
+            lastCheckpointEnd =
+                logManager.logForceFlush(endEntry,
+                                         true /*fsyncRequired*/,
+                                         ReplicationContext.NO_REPLICATE);
+
+            lastCheckpointStart = checkpointStart;
+
+            success = true;
+            cleaner.updateFilesAtCheckpointEnd(cleanerState);
+
+        } catch (DatabaseException e) {
+            Tracer.trace(envImpl, "Checkpointer", "doCheckpoint",
+                         "checkpointId=" + checkpointId, e);
+            throw e;
+        } finally {
+            dirtyMap.removeCostFromMemoryBudget();
+
+            /*
+             * Reset the highestFlushLevels so evictor activity knows there's
+             * no further requirement for provisional logging. SR 11163.
+             */
+            highestFlushLevels = Collections.emptyMap();
+
+            if (!traced) {
+                trace(envImpl, invokingSource, success);
+            }
+        }
+    }
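+
+    /*
+     * In outline, a successful call to doCheckpoint writes, in order: a
+     * CheckpointStart entry, the dirty INs from the bottom level up, any
+     * remaining MapLNs and the mapping tree root, file utilization info, and
+     * finally a force-flushed (fsynced) CheckpointEnd entry.
+     */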
+
+    private void trace(EnvironmentImpl envImpl,
+                       String invokingSource,
+                       boolean success) {
+        StringBuffer sb = new StringBuffer();
+        sb.append("Checkpoint ").append(checkpointId);
+        sb.append(": source=" ).append(invokingSource);
+        sb.append(" success=").append(success);
+        sb.append(" nFullINFlushThisRun=");
+        sb.append(flushStats.nFullINFlushThisRun);
+        sb.append(" nDeltaINFlushThisRun=");
+        sb.append(flushStats.nDeltaINFlushThisRun);
+        Tracer.trace(Level.CONFIG, envImpl, sb.toString());
+    }
+
+    /**
+     * Flush a given database to disk. Like checkpoint, log from the bottom
+     * up so that parents properly represent their children.
+     */
+    public static void syncDatabase(EnvironmentImpl envImpl,
+                                    DatabaseImpl dbImpl,
+                                    boolean flushLog)
+        throws DatabaseException {
+
+        if (envImpl.isReadOnly()) {
+            return;
+        }
+
+        DirtyINMap dirtyMap = new DirtyINMap(envImpl);
+        FlushStats fstats = new FlushStats();
+        try {
+
+            /*
+             * Lock out eviction and other checkpointing during the selection
+             * of a dirty set.
+             */
+            Map<DatabaseImpl,Integer> highestFlushLevels;
+            synchronized (envImpl.getEvictor()) {
+                /* Find the dirty set. */
+                highestFlushLevels = dirtyMap.selectDirtyINsForDbSync(dbImpl);
+            }
+
+            dirtyMap.addCostToMemoryBudget();
+
+            /* Write all dirty INs out. */
+            flushDirtyNodes(envImpl,
+                            dirtyMap,
+                            highestFlushLevels,
+                            false, /*allowDeltas*/
+                            0,     /*ckptStart, only needed for allowDeltas*/
+                            false, /*highPriority*/
+                            fstats);
+
+            /* Make changes durable. [#15254] */
+            if (flushLog) {
+                envImpl.getLogManager().flush();
+            }
+        } catch (DatabaseException e) {
+            Tracer.trace(envImpl, "Checkpointer", "syncDatabase",
+                         "of " + dbImpl.getDebugName(), e);
+            throw e;
+        } finally {
+            dirtyMap.removeCostFromMemoryBudget();
+        }
+    }
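+
+    /*
+     * Note that syncDatabase reuses the checkpoint machinery (DirtyINMap and
+     * flushDirtyNodes) but restricts the dirty set to a single database and
+     * never logs deltas, so the flushed tree is self-contained.
+     */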
+
+    /* For unit testing only. */
+    public static void setMaxFlushLevelHook(TestHook hook) {
+        maxFlushLevelHook = hook;
+    }
+
+    /* For unit testing only. */
+    public static void setBeforeFlushHook(TestHook hook) {
+        beforeFlushHook = hook;
+    }
+
+    /**
+     * Flush the nodes in order, from the lowest level to highest level.  As a
+     * flush dirties its parent, add it to the dirty map, thereby cascading the
+     * writes up the tree. If flushAll wasn't specified, we need only cascade
+     * up to the highest level set at the start of checkpointing.
+     *
+     * Note that all but the top-level INs and the BINDeltas are logged
+     * provisionally. That's because lower INs need not be processed during
+     * recovery; the higher INs will end up pointing at them.
+     */
+    private static void flushDirtyNodes(EnvironmentImpl envImpl,
+                                        DirtyINMap dirtyMap,
+                                        Map<DatabaseImpl,Integer>
+                                            highestFlushLevels,
+                                        boolean allowDeltas,
+                                        long checkpointStart,
+                                        boolean highPriority,
+                                        FlushStats fstats)
+        throws DatabaseException {
+
+        LogManager logManager = envImpl.getLogManager();
+        DbTree dbTree = envImpl.getDbTree();
+
+        /*
+         * Use a tracker to count lazily compressed, deferred-write LNs as
+         * obsolete.  A local tracker is used to accumulate tracked obsolete
+         * info so it can be added in a single call under the log write latch.
+         * [#15365]
+         */
+        LocalUtilizationTracker localTracker =
+            new LocalUtilizationTracker(envImpl);
+
+        while (dirtyMap.getNumLevels() > 0) {
+
+            /* Work on one level's worth of nodes in ascending level order. */
+            Integer currentLevel = dirtyMap.getLowestLevelSet();
+            int currentLevelVal = currentLevel.intValue();
+
+            /*
+             * Flush MapLNs just prior to flushing the first level of the
+             * mapping tree.  Only flush a database if it has not already been
+             * flushed since checkpoint start.
+             */
+            if (currentLevelVal == IN.DBMAP_LEVEL) {
+                dirtyMap.flushMapLNs(checkpointStart);
+            }
+
+            /* Flush the nodes at the current level. */
+            while (true) {
+                CheckpointReference targetRef =
+                    dirtyMap.removeNextNode(currentLevel);
+                if (targetRef == null) {
+                    break;
+                }
+
+                /*
+                 * Check to make sure the DB was not deleted after putting it
+                 * in the dirty map, and prevent the DB from being deleted
+                 * while we're working with it.
+                 */
+                DatabaseImpl db = null;
+                try {
+                    db = dbTree.getDb(targetRef.dbId);
+                    if (db != null && !db.isDeleted()) {
+
+                        /* Flush if we're below maxFlushLevel. */
+                        int maxFlushLevel = getHighestFlushLevelInternal
+                            (db, highestFlushLevels);
+                        if (currentLevelVal <= maxFlushLevel) {
+
+                            /* Evict before each operation. */
+                            envImpl.getEvictor().doCriticalEviction
+                                (true); // backgroundIO
+
+                            flushIN
+                                (envImpl, db, logManager, targetRef, dirtyMap,
+                                 currentLevelVal, maxFlushLevel, allowDeltas,
+                                 checkpointStart, highPriority,
+                                 fstats, localTracker,
+                                 true /*allowLogSubtree*/);
+
+                            /*
+                             * Sleep if background read/write limit was
+                             * exceeded.
+                             */
+                            envImpl.sleepAfterBackgroundIO();
+                        }
+                    }
+                } finally {
+                    dbTree.releaseDb(db);
+                }
+            }
+
+            /* We're done with this level. */
+            dirtyMap.removeLevel(currentLevel);
+        }
+
+        /*
+         * Count obsolete nodes tracked doing lazy compression.  All latches
+         * must have been released. [#15365]
+         *
+         * Do not flush FileSummaryLNs/MapLNs (do not call
+         * UtilizationProfile.flushLocalTracker) here because that flushing is
+         * already done by the checkpoint.
+         */
+        logManager.transferToUtilizationTracker(localTracker);
+    }
+
+    /**
+     * Flush the target IN.
+     *
+     * Where applicable, also attempt to flush the subtree that houses this
+     * target, which means we flush the siblings of this target to promote
+     * better cleaning throughput. The problem lies in the fact that
+     * provisionally logged nodes are not available for log cleaning until
+     * their parent is logged non-provisionally.  On the other hand, we want to
+     * log nodes in provisional mode as much as possible, both for recovery
+     * performance, and for correctness to avoid fetches against cleaned log
+     * files. (See [#16037].) These conflicting goals are reconciled by
+     * flushing nodes in subtree grouping, because writing the non-provisional
+     * parent of a set of provisionally written nodes frees the cleaner to work
+     * on that set of provisional nodes as soon as possible. For example, if a
+     * tree consists of:
+     *
+     *             INa
+     *       +------+-------+
+     *      INb            INc
+     * +-----+----+         +-----+
+     * BINd BINe BINf      BINg BINh
+     *
+     * It is more efficient for cleaning throughput to log in this order:
+     *       BINd, BINe, BINf, INb, BINg, BINh, INc, INa
+     * rather than:
+     *       BINd, BINe, BINf, BINg, BINh, INb, INc, INa
+     * 
+     * Suppose the subtree in question is INb->{BINd, BINe, BINf}
+     *
+     * Suppose we see BINd in the dirty map first, before BINe and BINf.
+     *  - flushIN(BINd) is called
+     *  - we fetch and latch its parent, INb
+     *
+     * If this is a high priority checkpoint, we'll hold the INb latch across
+     * the time it takes to flush all three children.  In flushIN(BINd), we
+     * walk through INb, create a local map of all the siblings that can be
+     * found in the dirty map, and then call logSiblings with that local map.
+     * Then we'll write out INb.
+     *
+     * If high priority is false, we will not hold the INb latch across
+     * multiple IOs. Instead, we 
+     *  - write BINd out, using logSiblings
+     *  - while still holding the INb latch, we create a local map of dirty
+     *    siblings
+     *  - release the INb latch
+     *  - call flushIN() recursively on each entry in the local sibling map, 
+     *    which will result in a search and write of each sibling.  These
+     *    recursive calls to flushIN are called with the allowLogSubtree
+     *    parameter of false to halt the recursion and prevent a repeat of the
+     *    sibling examination.
+     *  - write INb
+     */
+    private static void flushIN(EnvironmentImpl envImpl,
+                                DatabaseImpl db,
+                                LogManager logManager,
+                                CheckpointReference targetRef,
+                                DirtyINMap dirtyMap,
+                                int currentLevel,
+                                int maxFlushLevel,
+                                boolean allowDeltas,
+                                long checkpointStart,
+                                boolean highPriority,
+                                FlushStats fstats,
+                                LocalUtilizationTracker localTracker,
+                                boolean allowLogSubtree)
+        throws DatabaseException {
+
+        /* Call test hook when we reach the max level. */
+        assert (currentLevel < maxFlushLevel) ||
+            TestHookExecute.doHookIfSet(maxFlushLevelHook);
+
+        Tree tree = db.getTree();
+        boolean targetWasRoot = false;
+        if (targetRef.isDbRoot) {
+
+            /* We're trying to flush the root. */
+            RootFlusher flusher =
+                new RootFlusher(db, logManager, targetRef.nodeId);
+            tree.withRootLatchedExclusive(flusher);
+            boolean flushed = flusher.getFlushed();
+
+            /*
+             * If this target isn't the root anymore, we'll have to handle it
+             * like a regular node.
+             */
+            targetWasRoot = flusher.stillRoot();
+
+            /*
+             * Update the tree's owner, whether it's the env root or the
+             * dbmapping tree.
+             */
+            if (flushed) {
+                DbTree dbTree = envImpl.getDbTree();
+                dbTree.modifyDbRoot(db);
+                fstats.nFullINFlushThisRun++;
+                fstats.nFullINFlush++;
+            }
+        }
+
+        /*
+         * The following attempt to flush applies to two cases:
+         *
+         * (1) the target was not ever the root
+         *
+         * (2) the target was the root, when the checkpoint dirty set was
+         * assembled but is not the root now.
+         */
+        if (!targetWasRoot) {
+
+            /*
+             * The "isRoot" param is used to stop a search in
+             * BIN.descendOnParentSearch and is passed as false (never stop).
+             */
+            SearchResult result =
+                tree.getParentINForChildIN(targetRef.nodeId,
+                                           targetRef.containsDuplicates,
+                                           false,  // isRoot
+                                           targetRef.mainTreeKey,
+                                           targetRef.dupTreeKey,
+                                           false,  // requireExactMatch
+                                           CacheMode.UNCHANGED,
+                                           -1,     // targetLevel
+                                           null,   // trackingList
+                                           false); // doFetch
+
+            /*
+             * We must make sure that every IN that was selected for the
+             * checkpointer's dirty IN set at the beginning of checkpoint is
+             * written into the log and can be properly accessed from
+             * ancestors. However, we have to take care for cases where the
+             * evictor has written out a member of this dirty set before the
+             * checkpointer got to it. See SR 10249.
+             *
+             * If no possible parent is found, the compressor may have deleted
+             * this item before we got to processing it.
+             */
+            if (result.parent != null) {
+                IN parent = result.parent;
+                int parentLevel = parent.getLevel();
+                boolean mustLogParent = false;
+
+                /*
+                 * If bottomLevelTarget is true, the parent IN contains bottom
+                 * level BINs -- either DBINs or BINs depending on whether dups
+                 * are configured or not.  If dups are configured we cannot
+                 * mask the level, since we do not want to select the parent of
+                 * a BIN in the upper part of the tree.  The masking is used to
+                 * normalize the level for ordinary non-dup DBs and the mapping
+                 * tree DB.
+                 */
+                boolean bottomLevelTarget = db.getSortedDuplicates() ?
+                    (parentLevel == 2) :
+                    ((parentLevel & IN.LEVEL_MASK) == 2);
+
+                /*
+                 * INs at the max flush level are always non-provisional and
+                 * INs at the bottom level (when this is not also the max flush
+                 * level) are always provisional.  In between INs are
+                 * provisional BEFORE_CKPT_END (see Provisional).
+                 *
+                 * Note that to determine whether this IN is at the
+                 * maxFlushLevel, we check (parentLevel > maxFlushLevel)
+                 * instead of (currentLevel >= maxFlushLevel).  This handles
+                 * the case where this IN is a DIN root, and the parent is a
+                 * BIN that will not be flushed because the maxFlushLevel is
+                 * less than IN.MAIN_LEVEL (0x10000).  For example, this IN is
+                 * a DIN root at level 2 and the maxFlushLevel is 3.  [#16712]
+                 */
+                Provisional provisional;
+                if (parentLevel > maxFlushLevel) {
+                    provisional = Provisional.NO;
+                } else if (bottomLevelTarget) {
+                    provisional = Provisional.YES;
+                } else {
+                    provisional = Provisional.BEFORE_CKPT_END;
+                }
+
+                /*
+                 * Log a sub-tree when the target is at the bottom level and
+                 * this is not a recursive call to flushIN during sub-tree
+                 * logging.
+                 */
+                boolean logSubtree = bottomLevelTarget && allowLogSubtree;
+
+                /*
+                 * Log sub-tree siblings with the latch held when highPriority
+                 * is configured and this is not a DW DB.  For a DW DB, dirty
+                 * LNs are logged for each BIN.  If we were to log a DW
+                 * sub-tree with the parent latch held, the amount of logging
+                 * may cause the latch to be held for too long a period.
+                 */
+                boolean logSiblingsWithParentLatchHeld =
+                    logSubtree &&
+                    highPriority &&
+                    !db.isDurableDeferredWrite();
+
+                /*
+                 * If we log siblings with the parent latch held, we log the
+                 * target along with other siblings so we can perform a single
+                 * multi-log call for all siblings.
+                 */
+                boolean logTargetWithOtherSiblings = false;
+
+                /*
+                 * Map of node ID to parent index for each sibling to log.  We
+                 * must process the siblings in node ID order during multi-log,
+                 * so that latching order is deterministic and only in one
+                 * direction.
+                 */
+                SortedMap<Long,Integer> siblingsToLog = null;
+
+                try {
+                    if (result.exactParentFound) {
+
+                        /*
+                         * If the child has already been evicted, don't
+                         * refetch it.
+                         */
+                        IN renewedTarget = (IN) parent.getTarget(result.index);
+
+                        if (renewedTarget == null) {
+                            /* nAlreadyEvictedThisRun++;  -- for future */
+                            mustLogParent |= true;
+                        } else {
+                            if (logSiblingsWithParentLatchHeld) {
+                                logTargetWithOtherSiblings = true;
+                            } else {
+                                mustLogParent |= logSiblings
+                                    (envImpl, dirtyMap, parent,
+                                     Collections.singleton(result.index),
+                                     allowDeltas, checkpointStart,
+                                     highPriority, provisional, fstats,
+                                     localTracker);
+                            }
+                        }
+                    } else {
+                        /* result.exactParentFound was false. */
+
+                        /* Do not flush children of the inexact parent. */
+                        logSubtree = false;
+
+                        if (result.childNotResident) {
+
+                            /*
+                             * But it was because the child wasn't resident.
+                             * To be on the safe side, we'll put the parent
+                             * into the dirty set to be logged when that level
+                             * is processed.
+                             *
+                             * Only do this if the parent we found is at a
+                             * higher level than the child.  This ensures that
+                             * the non-exact search does not find a sibling
+                             * rather than a parent. [#11555]
+                             */
+                            if (parentLevel > currentLevel) {
+                                mustLogParent |= true;
+                            }
+                            /* nAlreadyEvictedThisRun++; -- for future. */
+                        }
+                    }
+
+                    if (logSubtree) {
+
+                        /*
+                         * Create a map of node ID to parent index for each
+                         * sibling we intend to log.  Note that the dirty map
+                         * does not contain targetRef (the sibling we're
+                         * processing) because it was removed before calling
+                         * this method, but it is added to the map below.
+                         *
+                         * A TreeMap (sorted map) is used so that siblings are
+                         * latched in node ID order.  A deterministic order is
+                         * needed to avoid deadlocks, if siblings are latched
+                         * in multiple threads in the future.
+                         */
+                        siblingsToLog = new TreeMap<Long,Integer>();
+                        for (int index = 0;
+                             index < parent.getNEntries();
+                             index += 1) {
+                            Node child = parent.getTarget(index);
+                            if (child != null) {
+                                Long childId = child.getNodeId();
+                                if ((logTargetWithOtherSiblings &&
+                                     targetRef.nodeId ==
+                                     childId.longValue()) ||
+                                    dirtyMap.containsNode
+                                        (child.getLevel(), childId)) {
+                                    siblingsToLog.put(childId, index);
+                                }
+                            }
+                        }
+
+                        if (logSiblingsWithParentLatchHeld) {
+                            if (MULTI_LOG) {
+                                mustLogParent |= logSiblings
+                                    (envImpl, dirtyMap, parent,
+                                     siblingsToLog.values(), allowDeltas,
+                                     checkpointStart, highPriority,
+                                     provisional, fstats, localTracker);
+                            } else {
+                                for (int index : siblingsToLog.values()) {
+                                    IN child = (IN) parent.getTarget(index);
+                                    CheckpointReference childRef =
+                                        (targetRef.nodeId ==
+                                         child.getNodeId()) ? targetRef :
+                                        dirtyMap.removeNode(child.getLevel(),
+                                                            child.getNodeId());
+                                    assert childRef != null;
+                                    mustLogParent |= logSiblings
+                                        (envImpl, dirtyMap, parent,
+                                         Collections.singleton(index),
+                                         allowDeltas, checkpointStart,
+                                         highPriority, provisional, fstats,
+                                         localTracker);
+                                }
+                            }
+                            /* Siblings have been logged, do not log below. */
+                            siblingsToLog = null;
+                        }
+                    }
+
+                    if (mustLogParent) {
+                        assert checkParentChildRelationship(result,
+                                                            currentLevel) :
+                               dumpParentChildInfo(result, parent,
+                                                   targetRef.nodeId,
+                                                   currentLevel, tree);
+                        /*
+                         * Add the parent IN to the dirty map unconditionally,
+                         * even if not dirty, to cause changes to propagate
+                         * upward even when a node has been evicted and
+                         * refetched and is no longer dirty. [#16523]
+                         */
+                        dirtyMap.addIN(parent, true /*updateMemoryBudget*/);
+                    }
+                } finally {
+                    parent.releaseLatch();
+                }
+
+                /*
+                 * If highPriority is false, we don't hold the latch while
+                 * logging the bottom level siblings.  We log them here with
+                 * flushIN, performing a separate search for each one, after
+                 * releasing the parent latch above.
+                 */
+                if (siblingsToLog != null) {
+                    assert logSubtree;
+                    assert !logSiblingsWithParentLatchHeld;
+                    for (long childId : siblingsToLog.keySet()) {
+                        assert targetRef.nodeId != childId;
+                        CheckpointReference childRef =
+                            dirtyMap.removeNode(currentLevel, childId);
+                        if (childRef != null) {
+                            flushIN
+                                (envImpl, db, logManager, childRef,
+                                 dirtyMap, currentLevel, maxFlushLevel,
+                                 allowDeltas, checkpointStart,
+                                 highPriority, fstats, localTracker,
+                                 false /*allowLogSubtree*/);
+                        }
+                    }
+                }
+
+                /*
+                 * Log the sub-tree parent, which will be logged
+                 * non-provisionally, in order to update cleaner utilization.
+                 * This must be done with flushIN after releasing the parent
+                 * latch above, since we must search and acquire the
+                 * grandparent latch.
+                 */
+                if (logSubtree && parentLevel <= maxFlushLevel) {
+                    CheckpointReference parentRef = dirtyMap.removeNode
+                        (parentLevel, parent.getNodeId());
+                    if (parentRef != null) {
+                        flushIN
+                            (envImpl, db, logManager, parentRef, dirtyMap,
+                             parentLevel, maxFlushLevel, allowDeltas,
+                             checkpointStart, highPriority, fstats,
+                             localTracker, false /*allowLogSubtree*/);
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * @return true if this parent is appropriately 1 level above the child.
+     */
+    private static boolean checkParentChildRelationship(SearchResult result,
+                                                        int childLevel) {
+
+        if (result.childNotResident && !result.exactParentFound) {
+
+            /*
+             * This might be coming from the #11555 clause, in which case we
+             * are logging over-cautiously, but intentionally, and the levels
+             * might not pass the test below.
+             */
+            return true;
+        }
+
+        /*
+         * In the main tree or mapping tree, your parent must be in the same
+         * number space, and must be 1 more than the child.  In the dup tree,
+         * the parent might be a BIN from the main tree.
+         */
+        int parentLevel = result.parent.getLevel();
+        boolean isMapTree = (childLevel & IN.DBMAP_LEVEL) != 0;
+        boolean isMainTree = (childLevel & IN.MAIN_LEVEL) != 0;
+
+        boolean checkOk = false;
+        if (isMapTree || isMainTree) {
+            /* The parent must be child level + 1 */
+            if (parentLevel == (childLevel + 1)) {
+                checkOk = true;
+            }
+        } else {
+            if (childLevel == 1) {
+                /* A DBIN must have a level 2 DIN parent. */
+                if (parentLevel == 2) {
+                    checkOk = true;
+                }
+            } else {
+                /* A DIN must have either a BIN or DIN parent. */
+                if ((parentLevel == IN.BIN_LEVEL) ||
+                    (parentLevel == childLevel + 1)) {
+                    checkOk = true;
+                }
+            }
+        }
+        return checkOk;
+    }
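+
+    /*
+     * Examples of the level arithmetic above: a main-tree child at level
+     * (IN.MAIN_LEVEL | 1) requires a parent at (IN.MAIN_LEVEL | 2); a DBIN
+     * (level 1) requires a level 2 DIN parent; a DIN may have either a DIN
+     * parent one level up or a main-tree BIN parent at IN.BIN_LEVEL.
+     */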
+
+    private static String dumpParentChildInfo(SearchResult result,
+                                              IN parent,
+                                              long childNodeId,
+                                              int currentLevel,
+                                              Tree tree)
+        throws DatabaseException {
+
+        StringBuffer sb = new StringBuffer();
+        /*        sb.append("ckptId=").append(checkpointId); */
+        sb.append(" result=").append(result);
+        sb.append(" parent node=").append(parent.getNodeId());
+        sb.append(" level=").append(parent.getLevel());
+        sb.append(" child node=").append(childNodeId);
+        sb.append(" level=").append(currentLevel);
+        return sb.toString();
+    }
+
+    private static boolean logSiblings(EnvironmentImpl envImpl,
+                                       DirtyINMap dirtyMap,
+                                       IN parent,
+                                       Collection<Integer> indicesToLog,
+                                       boolean allowDeltas,
+                                       long checkpointStart,
+                                       boolean highPriority,
+                                       Provisional provisional,
+                                       FlushStats fstats,
+                                       LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        LogManager logManager = envImpl.getLogManager();
+
+        INLogContext context = new INLogContext();
+        context.nodeDb = parent.getDatabase();
+        context.backgroundIO = true;
+        context.allowDeltas = allowDeltas;
+        context.proactiveMigration = !highPriority;
+
+        boolean mustLogParent = false;
+        List<INLogItem> itemList = new ArrayList<INLogItem>();
+
+        try {
+            for (int index : indicesToLog) {
+                IN child = (IN) parent.getTarget(index);
+
+                /* Remove it from dirty map if it is present. */
+                dirtyMap.removeNode(child.getLevel(), child.getNodeId());
+
+                /*
+                 * Latch and add item with valid parentIndex, so we will
+                 * release the latch in the finally statement.
+                 */
+                child.latch(CacheMode.UNCHANGED);
+                INLogItem item = new INLogItem();
+                item.parentIndex = index;
+                itemList.add(item);
+
+                /*
+                 * Compress this node if necessary. Note that this may dirty
+                 * the node.
+                 */
+                envImpl.lazyCompress(child, localTracker);
+
+                if (child.getDirty()) {
+
+                    if (child.getDatabase().isDurableDeferredWrite()) {
+
+                        /*
+                         * Find dirty descendants to avoid logging nodes with
+                         * never-logged children. See [#13936] and
+                         * IN.logDirtyChildren for description of the case.
+                         *
+                         * Note that we must log both dirty and never-logged
+                         * descendants to be sure to have a consistent view of
+                         * the split. If we didn't, we could end up with the
+                         * post-split version of a new sibling and the
+                         * pre-split version of a split sibling in the log,
+                         * which could result in a recovery where descendants
+                         * are incorrectly duplicated, because they are in both
+                         * the pre-split sibling and the post-split version of
+                         * the new sibling.
+                         */
+                        child.logDirtyChildren();
+                    }
+
+                    /* Set default params. */
+                    item.provisional = provisional;
+                    item.repContext = ReplicationContext.NO_REPLICATE;
+                    item.parent = parent;
+
+                    /*
+                     * Allow child to perform "before log" processing.  Note
+                     * that child decides whether to log a delta. Only BINs
+                     * that fall into the required percentages and have not
+                     * been cleaned will be logged with a delta.
+                     */
+                    child.beforeLog(logManager, item, context);
+                } else {
+                    /* Do not process if not dirty.  Unlatch now. */
+                    itemList.remove(itemList.size() - 1);
+                    child.releaseLatch();
+
+                    /* Log parent if child has already been flushed. */
+                    mustLogParent = true;
+                }
+            }
+
+            /*
+             * Log all siblings at once.  Limitations of Java generics prevent
+             * conversion from List<INLogItem> to List<LogItem> even by
+             * casting, so we convert to an array instead.
+             */
+            LogItem[] itemArray = new LogItem[itemList.size()];
+            logManager.multiLog(itemList.toArray(itemArray), context);
+
+            for (INLogItem item : itemList) {
+                IN child = (IN) parent.getTarget(item.parentIndex);
+
+                /* Allow child to perform "after log" processing. */
+                child.afterLog(logManager, item, context);
+
+                /*
+                 * When logging a delta, if the BIN was already logged after
+                 * checkpoint start and before this point (i.e. by an
+                 * eviction), we must make sure that the last full version is
+                 * accessible from ancestors. We can skip logging parents only
+                 * if the last full version was not logged in this checkpoint
+                 * interval.
+                 */
+                boolean logThisParent = true;
+                if (allowDeltas && (item.newLsn == DbLsn.NULL_LSN)) {
+                    fstats.nDeltaINFlushThisRun++;
+                    fstats.nDeltaINFlush++;
+                    if (DbLsn.compareTo(child.getLastFullVersion(),
+                                        checkpointStart) < 0) {
+                        logThisParent = false;
+                    }
+                }
+                if (logThisParent) {
+                    mustLogParent = true;
+                }
+
+                /* Update the parent if a full version was logged. */
+                if (item.newLsn != DbLsn.NULL_LSN) {
+                    fstats.nFullINFlushThisRun++;
+                    fstats.nFullINFlush++;
+                    if (child instanceof BIN) {
+                        fstats.nFullBINFlush++;
+                        fstats.nFullBINFlushThisRun++;
+                    }
+                    parent.updateEntry(item.parentIndex, item.newLsn);
+                }
+            }
+            return mustLogParent;
+        } finally {
+            for (INLogItem item : itemList) {
+                IN child = (IN) parent.getTarget(item.parentIndex);
+                child.releaseLatch();
+            }
+        }
+    }
+
+    /*
+     * RootFlusher lets us write out the root IN within the root latch.
+     */
+    private static class RootFlusher implements WithRootLatched {
+        private DatabaseImpl db;
+        private boolean flushed;
+        private boolean stillRoot;
+        private LogManager logManager;
+        private long targetNodeId;
+
+        RootFlusher(DatabaseImpl db,
+                    LogManager logManager,
+                    long targetNodeId) {
+            this.db = db;
+            flushed = false;
+            this.logManager = logManager;
+            this.targetNodeId = targetNodeId;
+            stillRoot = false;
+        }
+
+        /**
+         * Flush the rootIN if dirty.
+         */
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            if (root == null) {
+                return null;
+            }
+            IN rootIN = (IN) root.fetchTarget(db, null);
+            rootIN.latch(CacheMode.UNCHANGED);
+            try {
+                if (rootIN.getNodeId() == targetNodeId) {
+
+                    /*
+                     * Find dirty descendants to avoid logging nodes with
+                     * never-logged children. See [#13936]
+                     */
+                    if (rootIN.getDatabase().isDurableDeferredWrite()) {
+                        rootIN.logDirtyChildren();
+                    }
+
+                    /*
+                     * stillRoot handles the situation where the root was split
+                     * after it was placed in the checkpointer's dirty set.
+                     */
+                    stillRoot = true;
+                    if (rootIN.getDirty()) {
+                        long newLsn = rootIN.log(logManager);
+                        root.setLsn(newLsn);
+                        flushed = true;
+                    }
+                }
+            } finally {
+                rootIN.releaseLatch();
+            }
+            return null;
+        }
+
+        boolean getFlushed() {
+            return flushed;
+        }
+
+        boolean stillRoot() {
+            return stillRoot;
+        }
+    }
+
+    /*
+     * CheckpointReferences are used to identify nodes that must be flushed as
+     * part of the checkpoint. We don't keep an actual reference to the node
+     * because that would prevent nodes from being GC'ed during the
+     * checkpoint.
+     *
+     * Using a CheckpointReference introduces a window between the point when
+     * the checkpoint dirty set is created and when the node is flushed. Some
+     * of the fields saved in the reference are immutable: db, nodeId,
+     * containsDuplicates. The others are not and we have to handle potential
+     * change:
+     *
+     * isDbRoot: it's possible for isDbRoot to go from true->false, but not
+     *         false->true. True->false is handled by the flushIN method
+     *         by finding the root and checking if it is the target.
+     * mainTreeKey, dupTreeKey: These can change only in the event of a
+     *         split. If they do, there is the chance that the checkpointer
+     *         will find the wrong node to flush, but that's okay because
+     *         the split guarantees flushing to the root, so the target will
+     *         be properly logged within the checkpoint period.
+     *
+     * The class and ctor are public for the Sizeof program.
+     */
+    public static class CheckpointReference {
+        DatabaseId dbId;
+        long nodeId;
+        boolean containsDuplicates;
+        boolean isDbRoot;
+        byte[] mainTreeKey;
+        byte[] dupTreeKey;
+
+        public CheckpointReference(DatabaseId dbId,
+                                   long nodeId,
+                                   boolean containsDuplicates,
+                                   boolean isDbRoot,
+                                   byte[] mainTreeKey,
+                                   byte[] dupTreeKey) {
+            this.dbId = dbId;
+            this.nodeId = nodeId;
+            this.containsDuplicates = containsDuplicates;
+            this.isDbRoot = isDbRoot;
+            this.mainTreeKey = mainTreeKey;
+            this.dupTreeKey = dupTreeKey;
+        }
+
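+        /* References are equal iff they identify the same node. */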
+        @Override
+        public boolean equals(Object o) {
+            if (!(o instanceof CheckpointReference)) {
+                return false;
+            }
+
+            CheckpointReference other = (CheckpointReference) o;
+            return nodeId == other.nodeId;
+        }
+
+        @Override
+        public int hashCode() {
+            return (int) nodeId;
+        }
+
+        @Override
+        public String toString() {
+            StringBuffer sb = new StringBuffer();
+            sb.append("db=").append(dbId);
+            sb.append(" nodeId=").append(nodeId);
+            return sb.toString();
+        }
+    }
+
+    /**
+     * A struct to hold log flushing stats for checkpoint and database sync.
+     */
+    public static class FlushStats {
+
+        public long nFullINFlush;
+        public long nFullBINFlush;
+        public long nDeltaINFlush;
+        public long nFullINFlushThisRun;
+        public long nFullBINFlushThisRun;
+        public long nDeltaINFlushThisRun;
+
+        /* For future addition to stats:
+           private int nAlreadyEvictedThisRun;
+        */
+
+        /* Reset per-run counters. */
+        void resetPerRunCounters() {
+            nFullINFlushThisRun = 0;
+            nFullBINFlushThisRun = 0;
+            nDeltaINFlushThisRun = 0;
+            /* nAlreadyEvictedThisRun = 0; -- for future */
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/DirtyINMap.java b/src/com/sleepycat/je/recovery/DirtyINMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..4ac273b4d754e63be38a56a66944a3d1001908ca
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/DirtyINMap.java
@@ -0,0 +1,388 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DirtyINMap.java,v 1.17.2.3 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.recovery.Checkpointer.CheckpointReference;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.MapLN;
+
+/**
+ * Map of level (Integer) to a map of node ID (Long) to CheckpointReference:
+ * the dirty nodes at each tree level, keyed by node ID.
+ */
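+
+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only): the
+ * checkpointer drains the map level by level, lowest level first, so that
+ * children are flushed before their parents.
+ *
+ *     DirtyINMap dirtyMap = new DirtyINMap(envImpl);
+ *     dirtyMap.selectDirtyINsForCheckpoint(flushAll, flushExtraLevel);
+ *     while (dirtyMap.getNumLevels() > 0) {
+ *         Integer level = dirtyMap.getLowestLevelSet();
+ *         CheckpointReference ref;
+ *         while ((ref = dirtyMap.removeNextNode(level)) != null) {
+ *             // flush the IN identified by ref
+ *         }
+ *         dirtyMap.removeLevel(level);
+ *     }
+ */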
+class DirtyINMap {
+
+    private EnvironmentImpl envImpl;
+    private SortedMap<Integer,Map<Long,CheckpointReference>> levelMap;
+    private int numEntries;
+    private Set<DatabaseId> mapLNsToFlush;
+
+    DirtyINMap(EnvironmentImpl envImpl) {
+        this.envImpl = envImpl;
+        levelMap = new TreeMap<Integer,Map<Long,CheckpointReference>>();
+        numEntries = 0;
+        mapLNsToFlush = new HashSet<DatabaseId>();
+    }
+
+    /**
+     * Scan the INList for all dirty INs, excluding temp DB INs.  Save them in
+     * a tree-level-ordered map for level-ordered flushing.
+     *
+     * Take this opportunity to reset the memory budget tree value.
+     *
+     * @return highestFlushLevels, map of DatabaseImpl to Integer.
+     */
+    Map<DatabaseImpl,Integer> 
+        selectDirtyINsForCheckpoint(boolean flushAll, boolean flushExtraLevel)
+        throws DatabaseException {
+
+        Map<DatabaseImpl,Integer> highestLevelSeenMap =
+            new IdentityHashMap<DatabaseImpl,Integer>();
+        DbTree dbTree = envImpl.getDbTree();
+
+        INList inMemINs = envImpl.getInMemoryINs();
+
+        /*
+         * Opportunistically recalculate the INList memory budget while
+         * traversing the entire INList.
+         */
+        inMemINs.memRecalcBegin();
+        boolean completed = false;
+
+        try {
+            for (IN in : inMemINs) {
+                in.latch(CacheMode.UNCHANGED);
+                DatabaseImpl db = in.getDatabase();
+
+                try {
+                    inMemINs.memRecalcIterate(in);
+
+                    /* Do not checkpoint temporary databases. */
+                    if (db.isTemporary()) {
+                        continue;
+                    }
+
+                    Integer level =
+                        addDirtyIN(in, false /*updateMemoryBudget*/);
+                    if (level != null) {
+
+                        /*
+                         * IN was added to the dirty map.  Update the highest
+                         * level seen for the database.  Use one level higher
+                         * when flushExtraLevel is set.  When flushAll is set,
+                         * use the maximum level for the database.  Durable
+                         * deferred-write databases must be synced, so also use
+                         * the maximum level.
+                         */
+                        if (flushAll || db.isDurableDeferredWrite()) {
+                            if (!highestLevelSeenMap.containsKey(db)) {
+
+                                /*
+                                 * Null is used as an indicator that
+                                 * getHighestLevel should be called below, when
+                                 * no latches are held.
+                                 */
+                                highestLevelSeenMap.put(db, null);
+                            }
+                        } else {
+                            int levelVal = level.intValue();
+                            if (flushExtraLevel) {
+                                if (in.isRoot()) {
+                                    /* No reason to go above DB root. */
+                                    if (!in.isDbRoot()) {
+                                        /* The level above DIN root is BIN. */
+                                        levelVal = IN.BIN_LEVEL;
+                                    }
+                                } else {
+                                    /* Next level up in the same tree. */
+                                    levelVal += 1;
+                                }
+                            }
+                            Integer highestLevelSeen =
+                                highestLevelSeenMap.get(db);
+                            if (highestLevelSeen == null ||
+                                levelVal > highestLevelSeen.intValue()) {
+                                if (flushExtraLevel) {
+                                    level = Integer.valueOf(levelVal);
+                                }
+                                highestLevelSeenMap.put(db, level);
+                            }
+                        }
+                    }
+
+                    /* Save dirty/temp DBs for later. */
+                    saveMapLNsToFlush(in);
+                } finally {
+                    in.releaseLatch();
+                }
+            }
+            completed = true;
+        } finally {
+            inMemINs.memRecalcEnd(completed);
+        }
+
+        /* Call getHighestLevel only when no latches are held. */
+        for (Map.Entry<DatabaseImpl,Integer> entry :
+             highestLevelSeenMap.entrySet()) {
+            if (entry.getValue() == null) {
+                DatabaseImpl db = entry.getKey();
+                entry.setValue(Integer.valueOf(dbTree.getHighestLevel(db)));
+            }
+        }
+
+        return highestLevelSeenMap;
+    }
+
+    /**
+     * Scan the INList for all dirty INs for a given database.  Arrange them
+     * in a level-sorted map for level-ordered flushing.
+     *
+     * @return highestFlushLevels, map of DatabaseImpl to Integer.
+     */
+    Map<DatabaseImpl, Integer> selectDirtyINsForDbSync(DatabaseImpl dbImpl)
+        throws DatabaseException {
+
+        DatabaseId dbId = dbImpl.getId();
+
+        for (IN in : envImpl.getInMemoryINs()) {
+            if (in.getDatabaseId().equals(dbId)) {
+                in.latch(CacheMode.UNCHANGED);
+                try {
+                    addDirtyIN(in, false /*updateMemoryBudget*/);
+                } finally {
+                    in.releaseLatch();
+                }
+            }
+        }
+
+        /*
+         * Create a single entry map that forces all levels of this DB to
+         * be flushed.
+         */
+        Map<DatabaseImpl, Integer> highestFlushLevels = 
+            new IdentityHashMap<DatabaseImpl, Integer>();
+        highestFlushLevels.put
+            (dbImpl,
+             Integer.valueOf(envImpl.getDbTree().getHighestLevel(dbImpl)));
+        return highestFlushLevels;
+    }
+
+    int getNumLevels() {
+        return levelMap.size();
+    }
+
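+    /**
+     * Account for the checkpoint references in the admin memory budget.
+     * Called after the map has been populated via addIN with
+     * updateMemoryBudget=false.
+     */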
+    void addCostToMemoryBudget() {
+        MemoryBudget mb = envImpl.getMemoryBudget();
+        int cost = numEntries * MemoryBudget.CHECKPOINT_REFERENCE_SIZE;
+        mb.updateAdminMemoryUsage(cost);
+    }
+
+    void removeCostFromMemoryBudget() {
+        MemoryBudget mb = envImpl.getMemoryBudget();
+        int cost = numEntries * MemoryBudget.CHECKPOINT_REFERENCE_SIZE;
+        mb.updateAdminMemoryUsage(0 - cost);
+    }
+
+    /**
+     * Adds the IN if dirty, otherwise returns null.  See addIN.
+     */
+    private Integer addDirtyIN(IN in, boolean updateMemoryBudget) {
+
+        if (in.getDirty()) {
+            return addIN(in, updateMemoryBudget);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Add a node unconditionally to the dirty map. The dirty map is keyed by
+     * level (Integer) and holds, per level, a map of node ID to
+     * CheckpointReference.
+     *
+     * @param updateMemoryBudget if true then update the memory budget as the
+     * map is changed; if false then addCostToMemoryBudget must be called
+     * later.
+     *
+     * @return level of IN added to the dirty map.  The level is returned
+     * rather than a boolean simply to avoid allocating another Integer in the
+     * caller.
+     */
+    Integer addIN(IN in, boolean updateMemoryBudget) {
+
+        Integer level = Integer.valueOf(in.getLevel());
+        Map<Long,CheckpointReference> nodeMap;
+        if (levelMap.containsKey(level)) {
+            nodeMap = levelMap.get(level);
+        } else {
+            nodeMap = new HashMap<Long,CheckpointReference>();
+            levelMap.put(level, nodeMap);
+        }
+
+        nodeMap.put(in.getNodeId(),
+                    new CheckpointReference(in.getDatabase().getId(),
+                                            in.getNodeId(),
+                                            in.containsDuplicates(),
+                                            in.isDbRoot(),
+                                            in.getMainTreeKey(),
+                                            in.getDupTreeKey()));
+        numEntries++;
+
+        if (updateMemoryBudget) {
+            MemoryBudget mb = envImpl.getMemoryBudget();
+            mb.updateAdminMemoryUsage
+                (MemoryBudget.CHECKPOINT_REFERENCE_SIZE);
+        }
+
+        return level;
+    }
+
+    /**
+     * Get the lowest level currently stored in the map.
+     */
+    Integer getLowestLevelSet() {
+        return levelMap.firstKey();
+    }
+
+    /**
+     * Get an iterator over the references corresponding to the given level.
+     */
+    Iterator<CheckpointReference> getIterator(Integer level) {
+        return levelMap.get(level).values().iterator();
+    }
+
+    /**
+     * Removes the set corresponding to the given level.
+     */
+    void removeLevel(Integer level) {
+        levelMap.remove(level);
+    }
+
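+    /**
+     * Returns true if a reference for the given node is present at the
+     * given level.
+     */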
+    boolean containsNode(Integer level, Long nodeId) {
+        Map<Long,CheckpointReference> nodeMap = levelMap.get(level);
+        if (nodeMap != null) {
+            return nodeMap.containsKey(nodeId);
+        }
+        return false;
+    }
+
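+    /**
+     * Removes and returns the reference for the given node at the given
+     * level, or returns null if it is not present.
+     */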
+    CheckpointReference removeNode(Integer level, Long nodeId) {
+        Map<Long,CheckpointReference> nodeMap = levelMap.get(level);
+        if (nodeMap != null) {
+            return nodeMap.remove(nodeId);
+        }
+        return null;
+    }
+
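+    /**
+     * Removes and returns an arbitrary reference from the given level, or
+     * returns null if the level is empty or not present.
+     */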
+    CheckpointReference removeNextNode(Integer level) {
+        Map<Long,CheckpointReference> nodeMap = levelMap.get(level);
+        if (nodeMap != null) {
+            Iterator<Map.Entry<Long,CheckpointReference>> iter =
+                nodeMap.entrySet().iterator();
+            if (iter.hasNext()) {
+                CheckpointReference ref = iter.next().getValue();
+                iter.remove();
+                return ref;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * If the given IN is a BIN for the ID mapping database, saves all
+     * dirty/temp MapLNs contained in it.
+     */
+    private void saveMapLNsToFlush(IN in) {
+        if (in instanceof BIN &&
+            in.getDatabase().getId().equals(DbTree.ID_DB_ID)) {
+            for (int i = 0; i < in.getNEntries(); i += 1) {
+                MapLN ln = (MapLN) in.getTarget(i);
+                if (ln != null && ln.getDatabase().isCheckpointNeeded()) {
+                    mapLNsToFlush.add(ln.getDatabase().getId());
+                }
+            }
+        }
+    }
+
+    /**
+     * Flushes all saved dirty/temp MapLNs and clears the saved set.
+     *
+     * <p>If dirty, a MapLN must be flushed at each checkpoint to record
+     * updated utilization info in the checkpoint interval.  If it is a
+     * temporary DB, the MapLN must be flushed because all temp DBs must be
+     * encountered by recovery so they can be removed if they were not closed
+     * (and removed) by the user.</p>
+     *
+     * @param checkpointStart start LSN of the checkpoint in progress.  To
+     * reduce unnecessary logging, the MapLN is only flushed if it has not been
+     * written since that LSN.
+     */
+    void flushMapLNs(long checkpointStart)
+        throws DatabaseException {
+
+        if (!mapLNsToFlush.isEmpty()) {
+            DbTree dbTree = envImpl.getDbTree();
+            Iterator<DatabaseId> i = mapLNsToFlush.iterator();
+            while (i.hasNext()) {
+                DatabaseId dbId = i.next();
+                DatabaseImpl db = null;
+                try {
+                    db = dbTree.getDb(dbId);
+                    if (db != null &&
+                        !db.isDeleted() &&
+                        db.isCheckpointNeeded()) {
+                        dbTree.modifyDbRoot
+                            (db, checkpointStart /*ifBeforeLsn*/,
+                             true /*mustExist*/);
+                    }
+                } finally {
+                    dbTree.releaseDb(db);
+                }
+            }
+            mapLNsToFlush.clear();
+        }
+    }
+
+    /**
+     * Flushes the DB mapping tree root at the end of the checkpoint, if either
+     * mapping DB is dirty and the root was not flushed previously during the
+     * checkpoint.
+     *
+     * @param checkpointStart start LSN of the checkpoint in progress.  To
+     * reduce unnecessary logging, the Root is only flushed if it has not been
+     * written since that LSN.
+     */
+    void flushRoot(long checkpointStart)
+        throws DatabaseException {
+
+        DbTree dbTree = envImpl.getDbTree();
+        if (dbTree.getDb(DbTree.ID_DB_ID).isCheckpointNeeded() ||
+            dbTree.getDb(DbTree.NAME_DB_ID).isCheckpointNeeded()) {
+            envImpl.logMapTreeRoot(checkpointStart);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/LevelRecorder.java b/src/com/sleepycat/je/recovery/LevelRecorder.java
new file mode 100644
index 0000000000000000000000000000000000000000..2b649d2634df2dcae0f79d24a65ecac8c67b5db3
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/LevelRecorder.java
@@ -0,0 +1,139 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LevelRecorder.java,v 1.7.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.dbi.DatabaseId;
+
+/**
+ * LevelRecorder is used to determine when an extra read-IN pass is needed
+ * during recovery. See SR [#14424]
+ *
+ * Splits and compression require logging up to the root of the tree, to ensure
+ * that all INs are properly returned to the correct position at recovery. In
+ * other words, splits and compression ensure that the creation and deletion of
+ * all nodes is promptly logged.
+ *
+ * However, checkpoints are not propagated to the top of the tree, in order to
+ * conserve on logging. Because of that, a great-aunt situation can occur,
+ * where an ancestor of a given node can be logged without referring to the
+ * latest on-disk position of the node, because that ancestor was part of a
+ * split or compression.
+ *
+ * Take this scenario:
+ *     Root-A
+ *      /    \
+ *    IN-B   IN-C
+ *   /      / | \
+ *  BIN-D
+ *  /
+ * LN-E
+ *
+ * 1) LN-E  is logged, BIN-D is dirtied
+ * 2) BIN-D is logged during a checkpoint, IN-B is dirtied
+ * 3) IN-C  is split and Root-A is logged
+ * 4) We recover using Root-A and the BIN-D logged at (2) is lost
+ *
+ * At (3), when Root-A is logged, it points to an IN-B on disk that does not
+ * point to the most recent BIN-D.
+ *
+ * At (4), when we recover, although we will process the BIN-D logged at (2)
+ * and splice it into the tree, the Root-A logged at (3) is processed last and
+ * overrides the entire subtree containing BIN-D.
+ *
+ * This could be addressed by always logging to the root at every checkpoint.
+ * Barring that, we address it by replaying INs a second time at recovery
+ * if we can detect that a split or compression occurred -- that is, if there
+ * are multiple levels of non-provisional IN entries for the given database.
+ *
+ * In the ideal case, this would occur infrequently. If there were a checkpoint
+ * and no splits/compressions, only the checkpoint's top level should appear
+ * as non-provisional log entries.
+ *
+ * One issue that may make this extra read pass occur more often than it
+ * should is that cascading updates for splits and compressions log all
+ * entries non-provisionally, when in theory only the root needs to be logged
+ * non-provisionally. This makes splits and compressions more expensive to
+ * process at recovery than they should be, and may cause unnecessary extra
+ * passes. We'll leave that implementation as is for now, for stability, and
+ * will return to this optimization later.
+ */
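+
+/*
+ * A minimal usage sketch (hypothetical, for illustration only; the actual
+ * caller is RecoveryManager.buildINs):
+ *
+ *     LevelRecorder recorder = new LevelRecorder();
+ *     // during the IN read pass, record each IN's level:
+ *     recorder.record(in.getDatabaseId(), in.getLevel());
+ *     // afterwards, redo INs for any DB that saw more than one level:
+ *     Set<DatabaseId> redoSet = recorder.getDbsWithDifferentLevels();
+ */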
+class LevelRecorder {
+
+    private Map<DatabaseId,LevelInfo> dbLevels;
+
+    LevelRecorder() {
+        dbLevels = new HashMap<DatabaseId,LevelInfo>();
+    }
+
+    /*
+     * Record the level seen for the current IN, tracking the highest and
+     * lowest level seen for each database.
+     */
+    void record(DatabaseId dbId, int level) {
+        LevelInfo info = dbLevels.get(dbId);
+        if (info == null) {
+            info = new LevelInfo();
+            dbLevels.put(dbId, info);
+        }
+        info.recordLevel(level);
+    }
+
+    /*
+     * Get the set of databases that were logged non-provisionally with
+     * different levels in the ckpt set. These databases must be redone.
+     */
+    Set<DatabaseId> getDbsWithDifferentLevels() {
+        Set<DatabaseId> reprocessDbs = new HashSet<DatabaseId>();
+        Iterator<Map.Entry<DatabaseId,LevelInfo>> iter = 
+            dbLevels.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<DatabaseId,LevelInfo> oneEntry = iter.next();
+            LevelInfo levelInfo = oneEntry.getValue();
+            if (levelInfo.getDifferenceSeen()) {
+                reprocessDbs.add(oneEntry.getKey());
+            }
+        }
+        return reprocessDbs;
+    }
+
+    /**
+     * Remember the highest and lowest level seen for a given database.
+     */
+    private static class LevelInfo {
+        private int highest = Integer.MIN_VALUE;
+        private int lowest = Integer.MAX_VALUE;
+        private boolean differenceSeen = false;
+
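+        /*
+         * Track the lowest and highest levels seen; once a difference has
+         * been observed, no further bookkeeping is needed.
+         */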
+        void recordLevel(int level) {
+            if (!differenceSeen) {
+                if (level < lowest) {
+                    lowest = level;
+                }
+                if (level > highest) {
+                    highest = level;
+                }
+                differenceSeen = highest > lowest;
+            }
+        }
+
+        /**
+         * @return true if there is a difference between the highest and
+         * lowest level seen.
+         */
+        boolean getDifferenceSeen() {
+            return differenceSeen;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/NoRootException.java b/src/com/sleepycat/je/recovery/NoRootException.java
new file mode 100644
index 0000000000000000000000000000000000000000..04a07181b217450a0f1efaa7c6d25b70e6f06748
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/NoRootException.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NoRootException.java,v 1.3.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Thrown by recovery when no root of the DB mapping tree can be found in the
+ * log.
+ */
+public class NoRootException extends RecoveryException {
+
+    public NoRootException(EnvironmentImpl env,
+                           String message) {
+        super(env, message);
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/RecoveryException.java b/src/com/sleepycat/je/recovery/RecoveryException.java
new file mode 100644
index 0000000000000000000000000000000000000000..5a70dd3f03810f536a6de671db8c599e90d24216
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/RecoveryException.java
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryException.java,v 1.19.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Base class for recovery related exceptions.
+ */
+public class RecoveryException extends RunRecoveryException {
+
+    public RecoveryException(EnvironmentImpl env,
+                             String message,
+                             Throwable t) {
+        super(env, message, t);
+    }
+
+    public RecoveryException(EnvironmentImpl env,
+                             String message) {
+        super(env, message);
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/RecoveryInfo.java b/src/com/sleepycat/je/recovery/RecoveryInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..f53747a83babc07b5a9fad59adcd2b28428af069
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/RecoveryInfo.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryInfo.java,v 1.31.2.2 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.Collection;
+import java.util.HashSet;
+
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.FileMapper;
+
+/**
+ * RecoveryInfo keeps information about recovery processing.
+ */
+public class RecoveryInfo {
+
+    /* Locations found during recovery. */
+    public long lastUsedLsn = DbLsn.NULL_LSN;      // location of last entry
+    /*  EOF, location of first unused spot. */
+    public long nextAvailableLsn = DbLsn.NULL_LSN;
+    public long firstActiveLsn = DbLsn.NULL_LSN;
+    public long checkpointStartLsn = DbLsn.NULL_LSN;
+    public long checkpointEndLsn = DbLsn.NULL_LSN;
+    public long useRootLsn = DbLsn.NULL_LSN;
+
+    /*
+     * Represents the first CkptStart following the CkptEnd.  It is a CkptStart
+     * with no CkptEnd, and is used for counting provisional INs as obsolete.
+     */
+    public long partialCheckpointStartLsn = DbLsn.NULL_LSN;
+
+    /* Checkpoint record used for this recovery. */
+    public CheckpointEnd checkpointEnd;
+
+    /* Ids */
+    public long useMinReplicatedNodeId;
+    public long useMaxNodeId;
+    public int useMinReplicatedDbId;
+    public int useMaxDbId;
+    public long useMinReplicatedTxnId;
+    public long useMaxTxnId;
+
+    /* num nodes read */
+    public int numMapINs;
+    public int numOtherINs;
+    public int numBinDeltas;
+    public int numDuplicateINs;
+
+    /* ln processing */
+    public int lnFound;
+    public int lnNotFound;
+    public int lnInserted;
+    public int lnReplaced;
+
+    /* FileReader behavior. */
+    public int nRepeatIteratorReads;
+
+    /* VLSN mappings seen during recovery processing, for replication. */
+    public Collection<FileMapper> fileMappers = new HashSet<FileMapper>();
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("Recovery Info");
+        appendLsn(sb, " lastUsed=", lastUsedLsn);
+        appendLsn(sb, " nextAvail=", nextAvailableLsn);
+        appendLsn(sb, " ckptStart=", checkpointStartLsn);
+        appendLsn(sb, " firstActive=", firstActiveLsn);
+        appendLsn(sb, " ckptEnd=", checkpointEndLsn);
+        appendLsn(sb, " useRoot=", useRootLsn);
+        sb.append(" ckptEnd=<").append(checkpointEnd).append(">");
+        sb.append(" useMinReplicatedNodeId=").append(useMinReplicatedNodeId);
+        sb.append(" useMaxNodeId=").append(useMaxNodeId);
+        sb.append(" useMinReplicatedDbId=").append(useMinReplicatedDbId);
+        sb.append(" useMaxDbId=").append(useMaxDbId);
+        sb.append(" useMinReplicatedTxnId=").append(useMinReplicatedTxnId);
+        sb.append(" useMaxTxnId=").append(useMaxTxnId);
+        sb.append(" numMapINs=").append(numMapINs);
+        sb.append(" numOtherINs=").append(numOtherINs);
+        sb.append(" numBinDeltas=").append(numBinDeltas);
+        sb.append(" numDuplicateINs=").append(numDuplicateINs);
+        sb.append(" lnFound=").append(lnFound);
+        sb.append(" lnNotFound=").append(lnNotFound);
+        sb.append(" lnInserted=").append(lnInserted);
+        sb.append(" lnReplaced=").append(lnReplaced);
+        sb.append(" nRepeatIteratorReads=").append(nRepeatIteratorReads);
+        return sb.toString();
+    }
+
+    private void appendLsn(StringBuffer sb, String name, long lsn) {
+        if (lsn != DbLsn.NULL_LSN) {
+            sb.append(name).append(DbLsn.getNoFormatString(lsn));
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/RecoveryManager.java b/src/com/sleepycat/je/recovery/RecoveryManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..88fc95476a3ddecb3b1ef463efd446cb62df4c8c
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/RecoveryManager.java
@@ -0,0 +1,2554 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryManager.java,v 1.242.2.5 2010/01/04 15:30:32 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.cleaner.RecoveryUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.CheckpointFileReader;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.INFileReader;
+import com.sleepycat.je.log.LNFileReader;
+import com.sleepycat.je.log.LastFileReader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.TrackingInfo;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.PreparedTxn;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class RecoveryManager {
+    private static final String TRACE_DUP_ROOT_REPLACE =
+        "DupRootRecover:";
+    private static final String TRACE_LN_REDO = "LNRedo:";
+    private static final String TRACE_LN_UNDO = "LNUndo";
+    private static final String TRACE_IN_REPLACE = "INRecover:";
+    private static final String TRACE_ROOT_REPLACE = "RootRecover:";
+    private static final String TRACE_IN_DEL_REPLAY = "INDelReplay:";
+    private static final String TRACE_IN_DUPDEL_REPLAY = "INDupDelReplay:";
+    private static final String TRACE_ROOT_DELETE = "RootDelete:";
+
+    private EnvironmentImpl env;
+    private int readBufferSize;
+    private RecoveryInfo info;       // stat info
+    private Map<Long, Long> committedTxnIds;  // committed txn ID to Commit LSN
+    private Set<Long> abortedTxnIds;          // aborted txns
+    private Map<Long, Txn> preparedTxns;     // txnid -> prepared Txn
+
+    /* dbs for which we have to build the in memory IN list. */
+    private Set<DatabaseId> inListBuildDbIds;
+
+    private Set<DatabaseId> tempDbIds;       // temp DBs to be removed
+
+    private Level detailedTraceLevel; // level value for detailed trace msgs
+    private RecoveryUtilizationTracker tracker;
+
+    /**
+     * Make a recovery manager
+     */
+    public RecoveryManager(EnvironmentImpl env)
+        throws DatabaseException {
+
+        this.env = env;
+        DbConfigManager cm = env.getConfigManager();
+        readBufferSize =
+            cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+        committedTxnIds = new HashMap<Long, Long>();
+        abortedTxnIds = new HashSet<Long>();
+        preparedTxns = new HashMap<Long, Txn>();
+        inListBuildDbIds = new HashSet<DatabaseId>();
+        tempDbIds = new HashSet<DatabaseId>();
+        tracker = new RecoveryUtilizationTracker(env);
+
+        /*
+         * Figure out the level to use for detailed trace messages, by choosing
+         * the more verbose of the recovery manager's trace setting and the
+         * general trace setting.
+         */
+        detailedTraceLevel =
+            Tracer.parseLevel(env,
+                              EnvironmentParams.JE_LOGGING_LEVEL_RECOVERY);
+    }
+
+    /**
+     * Look for an existing log and use it to create an in memory structure for
+     * accessing existing databases. The file manager and logging system are
+     * only available after recovery.
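+     *
+     * The overall flow is: find the end of the log, locate the last
+     * checkpoint, read the mapping tree root, rebuild the in-memory tree
+     * from the log, and write a final checkpoint if needed.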
+     * @return RecoveryInfo statistics about the recovery process.
+     */
+    public RecoveryInfo recover(boolean readOnly,
+                                boolean replicationIntended)
+        throws DatabaseException {
+
+        info = new RecoveryInfo();
+
+        try {
+            FileManager fileManager = env.getFileManager();
+            DbConfigManager configManager = env.getConfigManager();
+            boolean forceCheckpoint =
+                configManager.getBoolean
+                (EnvironmentParams.ENV_RECOVERY_FORCE_CHECKPOINT);
+            if (fileManager.filesExist()) {
+
+                /*
+                 * Establish the location of the end of the log. After this, we
+                 * can write to the log. No Tracer calls are allowed until
+                 * after this point is established in the log.
+                 */
+                findEndOfLog(readOnly);
+                Tracer.trace(Level.CONFIG, env,
+                             "Recovery underway, found end of log");
+
+                /*
+                 * Establish the location of the root, the last checkpoint, and
+                 * the first active LSN by finding the last checkpoint.
+                 */
+                findLastCheckpoint();
+                env.getLogManager().setLastLsnAtRecovery
+                    (fileManager.getLastUsedLsn());
+                Tracer.trace(Level.CONFIG, env,
+                             "Recovery checkpoint search, " +
+                             info);
+
+                /* Read in the root. */
+                env.readMapTreeFromLog(info.useRootLsn, replicationIntended);
+
+                /* Build the in memory tree from the log. */
+                buildTree();
+            } else {
+
+                /*
+                 * Nothing more to be done. Enable publishing of debug log
+                 * messages to the database log.
+                 */
+                env.enableDebugLoggingToDbLog();
+                Tracer.trace(Level.CONFIG, env, "Recovery w/no files.");
+
+                /* Enable the INList and log the root of the mapping tree. */
+                env.getInMemoryINs().enable();
+                env.logMapTreeRoot();
+
+                /* Add shared cache environment when buildTree is not used. */
+                if (env.getSharedCache()) {
+                    env.getEvictor().addEnvironment(env);
+                }
+
+                /*
+                 * Always force a checkpoint during creation.
+                 */
+                forceCheckpoint = true;
+            }
+
+            int ptSize = preparedTxns.size();
+            if (ptSize > 0) {
+                boolean singular = (ptSize == 1);
+                Tracer.trace(Level.INFO, env,
+                             "There " + (singular ? "is " : "are ") +
+                             ptSize + " prepared but unfinished " +
+                             (singular ? "txn." : "txns."));
+
+                /*
+                 * We don't need this set any more since these are all
+                 * registered with the TxnManager now.
+                 */
+                preparedTxns = null;
+            }
+
+            /*
+             * Open the UP database and populate the cache before the first
+             * checkpoint so that the checkpoint may flush file summary
+             * information.  May be disabled for unittests.
+             */
+            if (DbInternal.getCreateUP
+                (env.getConfigManager().getEnvironmentConfig())) {
+                env.getUtilizationProfile().populateCache();
+            }
+
+            /* Transfer recovery utilization info to the global tracker. */
+            tracker.transferToUtilizationTracker(env.getUtilizationTracker());
+
+            /*
+             * After utilization info is complete and prior to the checkpoint,
+             * remove all temporary databases encountered during recovery.
+             */
+            removeTempDbs();
+
+            /*
+             * At this point, we've recovered (or there were no log files at
+             * all).  Write a checkpoint into the log.
+             *
+             * NOTE: The discussion of deltas below may be obsolete now that
+             * we use dirty bits to determine what to include in a delta.
+             * However, we still want to disallow deltas to flush full versions
+             * after a crash.
+             *
+             * Don't allow deltas, because the delta-determining scheme that
+             * compares child entries to the last full LSN doesn't work in
+             * recovery land. New child entries may have an earlier LSN than
+             * the owning BIN's last full, because of the act of splicing in
+             * LNs during recovery.
+             *
+             * For example, suppose that during LN redo, bin 10 was split into
+             * bin 10 and bin 12. That splitting causes a full log.  Then later
+             * on, the redo splices LN x, which is from before the last full of
+             * bin 10, into bin 10. If we checkpoint allowing deltas after
+             * recovery finishes, we won't pick up the LNx diff, because that
+             * LN is an earlier LSN than the split-induced full log entry of
+             * bin 10.
+             */
+            if (!readOnly &&
+                (env.getLogManager().getLastLsnAtRecovery() !=
+                 info.checkpointEndLsn ||
+                 forceCheckpoint)) {
+                CheckpointConfig config = new CheckpointConfig();
+                config.setForce(true);
+                config.setMinimizeRecoveryTime(true);
+                env.invokeCheckpoint(config, false /*flushAll*/, "recovery");
+            } else {
+                /* Initialize intervals when there is no initial checkpoint. */
+                env.getCheckpointer().initIntervals
+                    (info.checkpointEndLsn, System.currentTimeMillis());
+            }
+
+        } catch (IOException e) {
+            Tracer.trace(env, "RecoveryManager", "recover",
+                         "Couldn't recover", e);
+            throw new RecoveryException(env, "Couldn't recover: " +
+                                        e.getMessage(), e);
+        } finally {
+            Tracer.trace(Level.CONFIG, env, "Recovery finished: " + info);
+        }
+
+        return info;
+    }
+
+    /**
+     * Find the end of the log and initialize the FileManager. While we're
+     * perusing the log, remember the last checkpoint LSN if we happen to see
+     * it.
+     */
+    private void findEndOfLog(boolean readOnly)
+        throws IOException, DatabaseException {
+
+        LastFileReader reader = new LastFileReader(env, readBufferSize);
+
+        /*
+         * Tell the reader to iterate through the log file until we hit the end
+         * of the log or an invalid entry.  Remember the last seen CkptEnd, and
+         * the first CkptStart with no following CkptEnd.
+         */
+        while (reader.readNextEntry()) {
+            LogEntryType type = reader.getEntryType();
+            if (LogEntryType.LOG_CKPT_END.equals(type)) {
+                info.checkpointEndLsn = reader.getLastLsn();
+                info.partialCheckpointStartLsn = DbLsn.NULL_LSN;
+            } else if (LogEntryType.LOG_CKPT_START.equals(type)) {
+                if (info.partialCheckpointStartLsn == DbLsn.NULL_LSN) {
+                    info.partialCheckpointStartLsn = reader.getLastLsn();
+                }
+            } else if (LogEntryType.LOG_ROOT.equals(type)) {
+                info.useRootLsn = reader.getLastLsn();
+            }
+        }
+
+        /*
+         * The last valid LSN should point to the start of the last valid log
+         * entry, while the end of the log should point to the first byte of
+         * blank space, so these two should not be the same.
+         */
+        assert (reader.getLastValidLsn() != reader.getEndOfLog()):
+            "lastUsed=" + DbLsn.getNoFormatString(reader.getLastValidLsn()) +
+            " end=" + DbLsn.getNoFormatString(reader.getEndOfLog());
+
+        /* Now truncate if necessary. */
+        if (!readOnly) {
+            reader.setEndOfFile();
+        }
+
+        /* Tell the fileManager where the end of the log is. */
+        info.lastUsedLsn = reader.getLastValidLsn();
+        info.nextAvailableLsn = reader.getEndOfLog();
+        info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+        env.getFileManager().setLastPosition(info.nextAvailableLsn,
+                                             info.lastUsedLsn,
+                                             reader.getPrevOffset());
+
+        /*
+         * Now the logging system is initialized and can do more
+         * logging. Enable publishing of debug log messages to the database
+         * log.
+         */
+        env.enableDebugLoggingToDbLog();
+    }
+
+    /**
+     * Find the last checkpoint and establish the firstActiveLsn point,
+     * checkpoint start, and checkpoint end.
+     */
+    private void findLastCheckpoint()
+        throws IOException, DatabaseException {
+
+        /*
+         * The checkpointLsn might have been already found when establishing
+         * the end of the log.  If it was found, then partialCheckpointStartLsn
+         * was also found.  If it was not found, search backwards for it now
+         * and also set partialCheckpointStartLsn.
+         */
+        if (info.checkpointEndLsn == DbLsn.NULL_LSN) {
+
+            /*
+             * Search backwards though the log for a checkpoint end entry and a
+             * root entry.
+             */
+            CheckpointFileReader searcher =
+                new CheckpointFileReader(env, readBufferSize, false,
+                                         info.lastUsedLsn, DbLsn.NULL_LSN,
+                                         info.nextAvailableLsn);
+
+            while (searcher.readNextEntry()) {
+
+                /*
+                 * Continue iterating until we find a checkpoint end entry.
+                 * While we're at it, remember the last root seen in case we
+                 * don't find a checkpoint end entry.
+                 */
+                if (searcher.isCheckpointEnd()) {
+
+                    /*
+                     * We're done, the checkpoint end will tell us where the
+                     * root is.
+                     */
+                    info.checkpointEndLsn = searcher.getLastLsn();
+                    break;
+                } else if (searcher.isCheckpointStart()) {
+
+                    /*
+                     * Remember the first CkptStart following the CkptEnd.
+                     */
+                    info.partialCheckpointStartLsn = searcher.getLastLsn();
+
+                } else if (searcher.isRoot()) {
+
+                    /*
+                     * Save the last root that was found in the log in case we
+                     * don't see a checkpoint.
+                     */
+                    if (info.useRootLsn == DbLsn.NULL_LSN) {
+                        info.useRootLsn = searcher.getLastLsn();
+                    }
+                }
+            }
+            info.nRepeatIteratorReads += searcher.getNRepeatIteratorReads();
+        }
+
+        /*
+         * If we haven't found a checkpoint, we'll have to recover without
+         * one. At a minimum, we must have found a root.
+         */
+        if (info.checkpointEndLsn == DbLsn.NULL_LSN) {
+            info.checkpointStartLsn = DbLsn.NULL_LSN;
+            info.firstActiveLsn = DbLsn.NULL_LSN;
+        } else {
+            /* Read in the checkpoint entry. */
+            CheckpointEnd checkpointEnd = (CheckpointEnd)
+                (env.getLogManager().get(info.checkpointEndLsn));
+            info.checkpointEnd = checkpointEnd;
+            info.checkpointStartLsn = checkpointEnd.getCheckpointStartLsn();
+            info.firstActiveLsn = checkpointEnd.getFirstActiveLsn();
+
+            /*
+             * Use the last checkpoint root only if there is no later root.
+             * The latest root has the correct per-DB utilization info.
+             */
+            if (checkpointEnd.getRootLsn() != DbLsn.NULL_LSN &&
+                info.useRootLsn == DbLsn.NULL_LSN) {
+                info.useRootLsn = checkpointEnd.getRootLsn();
+            }
+
+            /* Init the checkpointer's id sequence.*/
+            env.getCheckpointer().setCheckpointId(checkpointEnd.getId());
+        }
+        if (info.useRootLsn == DbLsn.NULL_LSN) {
+            throw new NoRootException
+                (env,
+                 "This environment's log file has no root. Since the root " +
+                 "is the first entry written into a log at environment " +
+                 "creation, this should only happen if the initial creation " +
+                 "of the environment was never checkpointed or synced. " +
+                 "Please move aside the existing log files to allow the " +
+                 "creation of a new environment");
+        }
+    }
+
+    /**
+     * Use the log to recreate an in memory tree.
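+     *
+     * The passes are: read mapping tree INs, undo map LNs, redo map LNs,
+     * read main tree INs, read dup tree INs, undo LNs, and finally redo LNs.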
+     */
+    private void buildTree()
+        throws IOException, DatabaseException {
+
+        /*
+         * Read all map database INs, find the largest node id before any
+         * possibility of splits, and find the largest txn id before any need
+         * for a root update (which would use an AutoTxn).
+         */
+        int passNum = buildINs(1,
+                               true,   /* mapping tree */
+                               false); /* dup tree */
+
+        /*
+         * Undo all aborted map LNs. Read and remember all committed
+         * transaction ids.
+         */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(passNum) +
+                     "undo map LNs");
+        long start = System.currentTimeMillis();
+        Set<LogEntryType> mapLNSet = new HashSet<LogEntryType>();
+        mapLNSet.add(LogEntryType.LOG_MAPLN_TRANSACTIONAL);
+        mapLNSet.add(LogEntryType.LOG_TXN_COMMIT);
+        mapLNSet.add(LogEntryType.LOG_TXN_ABORT);
+        mapLNSet.add(LogEntryType.LOG_TXN_PREPARE);
+        undoLNs(info, mapLNSet);
+        long end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env, passEndHeader(passNum, start, end) +
+                     info.toString());
+        passNum++;
+
+        /*
+         * Replay all mapLNs, mapping tree in place now. Use the set of
+         * committed txns found from the undo pass.
+         */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(passNum) +
+                     "redo map LNs");
+        start = System.currentTimeMillis();
+        mapLNSet.add(LogEntryType.LOG_MAPLN);
+        redoLNs(info, mapLNSet);
+        end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env, passEndHeader(passNum, start, end) +
+                     info.toString());
+        passNum++;
+
+        /*
+         * Reconstruct the internal nodes for the main level trees.
+         */
+        passNum = buildINs(passNum,
+                           false,   /* mapping tree */
+                           false);  /* dup tree */
+
+        /*
+         * Reconstruct the internal nodes for the duplicate level trees.
+         */
+        passNum = buildINs(passNum,
+                           false,   /* mapping tree */
+                           true);  /* dup tree */
+
+        /*
+         * Build the in memory IN list. Before this point, the INList is not
+         * enabled, and fetched INs have not been put on the list.  Once the
+         * tree is complete we can add the environment to the evictor (for a
+         * shared cache) and invoke the evictor.  The evictor will also be
+         * invoked during the undo and redo passes.
+         */
+        buildINList();
+        if (env.getSharedCache()) {
+            env.getEvictor().addEnvironment(env);
+        }
+        env.invokeEvictor();
+
+        /*
+         * Undo aborted LNs. No need to collect committed txn ids again, was
+         * done in the undo of all aborted MapLNs.
+         */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(9) + "undo LNs");
+        start = System.currentTimeMillis();
+        Set<LogEntryType> lnSet = new HashSet<LogEntryType>();
+        lnSet.add(LogEntryType.LOG_LN_TRANSACTIONAL);
+        lnSet.add(LogEntryType.LOG_NAMELN_TRANSACTIONAL);
+        lnSet.add(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        lnSet.add(LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL);
+
+        undoLNs(info, lnSet);
+        end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env, passEndHeader(9, start, end) +
+                     info.toString());
+
+        /* Replay LNs. Also read non-transactional LNs. */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(10) + "redo LNs");
+        start = System.currentTimeMillis();
+        lnSet.add(LogEntryType.LOG_LN);
+        lnSet.add(LogEntryType.LOG_NAMELN);
+        lnSet.add(LogEntryType.LOG_DEL_DUPLN);
+        lnSet.add(LogEntryType.LOG_DUPCOUNTLN);
+        lnSet.add(LogEntryType.LOG_FILESUMMARYLN);
+        redoLNs(info, lnSet);
+        end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env, passEndHeader(10, start, end) +
+                     info.toString());
+    }
+
+    /*
+     * Perform up to three passes over the INs of a given tree: mapping,
+     * main, or dup.
+     * @param mappingTree if true, we're building the mapping tree
+     * @param dupTree if true, we're building the dup tree.
+     */
+    private int buildINs(int passNum,
+                         boolean mappingTree,
+                         boolean dupTree)
+        throws IOException, DatabaseException {
+
+        Set<LogEntryType> targetEntries = new HashSet<LogEntryType>();
+        Set<LogEntryType> deltaType = new HashSet<LogEntryType>();
+        String passADesc = null;
+        String passBDesc = null;
+        String passCDesc = null;
+
+        if (mappingTree) {
+            passADesc = "read mapping INs";
+            passBDesc = "redo mapping INs";
+            passCDesc = "read mapping BINDeltas";
+        } else if (dupTree) {
+            passADesc = "read dup INs";
+            passBDesc = "redo dup INs";
+            passCDesc = "read dup BINDeltas";
+        } else {
+            passADesc = "read main INs";
+            passBDesc = "redo main INs";
+            passCDesc = "read main BINDeltas";
+        }
+
+        if (dupTree) {
+            /* Duplicate trees read these entries. */
+            targetEntries.add(LogEntryType.LOG_DIN);
+            targetEntries.add(LogEntryType.LOG_DBIN);
+            targetEntries.add(LogEntryType.LOG_IN_DUPDELETE_INFO);
+            deltaType.add(LogEntryType.LOG_DUP_BIN_DELTA);
+        } else {
+            /* Main tree and mapping tree read these types of entries. */
+            targetEntries.add(LogEntryType.LOG_IN);
+            targetEntries.add(LogEntryType.LOG_BIN);
+            targetEntries.add(LogEntryType.LOG_IN_DELETE_INFO);
+            deltaType.add(LogEntryType.LOG_BIN_DELTA);
+        }
+
+        /*
+         * Pass a: Read all INs and place into the proper location.
+         */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(passNum) + passADesc);
+        LevelRecorder recorder = new LevelRecorder();
+        long start = System.currentTimeMillis();
+        if (mappingTree) {
+            readINsAndTrackIds(info.checkpointStartLsn, recorder);
+        } else {
+            int numINsSeen = readINs(info.checkpointStartLsn,
+                                     false,  // mapping tree only
+                                     targetEntries,
+
+                                     /*
+                                      * requireExactMatch -- why is it true for
+                                      * dups, false for main tree?  Keeping
+                                      * logic the same for now.
+                                      */
+                                     dupTree,
+                                     recorder);
+            if (dupTree) {
+                info.numDuplicateINs += numINsSeen;
+            } else {
+                info.numOtherINs += numINsSeen;
+            }
+        }
+        long end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env, passEndHeader(passNum, start, end) +
+                     info.toString());
+        passNum++;
+
+        /*
+         * Pass b: Redo INs if the LevelRecorder detects a split/compression
+         * was done after ckpt [#14424]
+         */
+        Set<DatabaseId> redoSet = recorder.getDbsWithDifferentLevels();
+        if (redoSet.size() > 0) {
+            Tracer.trace(Level.CONFIG, env,
+                         passStartHeader(passNum) + passBDesc);
+            start = System.currentTimeMillis();
+            repeatReadINs(info.checkpointStartLsn,
+                          targetEntries,
+                          redoSet);
+            end = System.currentTimeMillis();
+            Tracer.trace(Level.CONFIG, env,
+                         passEndHeader(passNum, start, end) + info.toString());
+            passNum++;
+        }
+
+        /*
+         * Pass c: Read BIN Deltas.
+         * BINDeltas must be processed after all INs so the delta is properly
+         * applied to the last version. For example, suppose BINDeltas were not
+         * done in a later pass, the tree is INa->BINb, and the log has
+         *       INa
+         *       BINDelta for BINb
+         *       INa
+         * the splicing in of the second INa would override the BINDelta.
+         */
+        Tracer.trace(Level.CONFIG, env, passStartHeader(passNum) + passCDesc);
+        start = System.currentTimeMillis();
+        info.numBinDeltas += readINs(info.checkpointStartLsn,
+                                     mappingTree,
+                                     deltaType,
+                                     true,   // requireExactMatch
+                                     null);  // LevelRecorder
+        end = System.currentTimeMillis();
+        Tracer.trace(Level.CONFIG, env,
+                     passEndHeader(passNum, start, end) + info.toString());
+        passNum++;
+
+        return passNum;
+    }
+
+    /*
+     * Read every internal node and IN DeleteInfo in the mapping tree and
+     * place them in the in-memory tree. Also peruse all pertinent log entries
+     * in order to update our knowledge of the last used database, transaction
+     * and node ids, and to track the utilization profile and vlsn->lsn
+     * mappings.
+     */
+    private void readINsAndTrackIds(long rollForwardLsn,
+                                    LevelRecorder recorder)
+        throws IOException, DatabaseException {
+
+        INFileReader reader =
+            new INFileReader(env,
+                             readBufferSize,
+                             rollForwardLsn,        // start lsn
+                             info.nextAvailableLsn, // end lsn
+                             true,   // track node and db ids
+                             false,  // map db only
+                             info.partialCheckpointStartLsn,
+                             info.checkpointEndLsn,
+                             tracker);
+        reader.addTargetType(LogEntryType.LOG_IN);
+        reader.addTargetType(LogEntryType.LOG_BIN);
+        reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO);
+
+        /* Validate all entries in at least one full recovery pass. */
+        reader.setAlwaysValidateChecksum(true);
+
+        try {
+            info.numMapINs = 0;
+            DbTree dbMapTree = env.getDbTree();
+
+            /* Process every IN, BIN and INDeleteInfo in the mapping tree. */
+            while (reader.readNextEntry()) {
+                DatabaseId dbId = reader.getDatabaseId();
+                if (dbId.equals(DbTree.ID_DB_ID)) {
+                    DatabaseImpl db = dbMapTree.getDb(dbId);
+                    try {
+                        replayOneIN(reader, db, false, recorder);
+                        info.numMapINs++;
+                    } finally {
+                        dbMapTree.releaseDb(db);
+                    }
+                }
+            }
+
+            /*
+             * Update the node id, database id, and txn id sequences. Use the
+             * maximum of the ids seen by the reader and the ids stored in
+             * the checkpoint.
+             */
+            info.useMinReplicatedNodeId = reader.getMinReplicatedNodeId();
+            info.useMaxNodeId = reader.getMaxNodeId();
+
+            info.useMinReplicatedDbId = reader.getMinReplicatedDbId();
+            info.useMaxDbId = reader.getMaxDbId();
+
+            info.useMinReplicatedTxnId = reader.getMinReplicatedTxnId();
+            info.useMaxTxnId = reader.getMaxTxnId();
+
+            if (info.checkpointEnd != null) {
+                CheckpointEnd ckptEnd = info.checkpointEnd;
+
+                if (info.useMinReplicatedNodeId >
+                    ckptEnd.getLastReplicatedNodeId()) {
+                    info.useMinReplicatedNodeId =
+                        ckptEnd.getLastReplicatedNodeId();
+                }
+                if (info.useMaxNodeId < ckptEnd.getLastLocalNodeId()) {
+                    info.useMaxNodeId = ckptEnd.getLastLocalNodeId();
+                }
+
+                if (info.useMinReplicatedDbId >
+                    ckptEnd.getLastReplicatedDbId()) {
+                    info.useMinReplicatedDbId =
+                        ckptEnd.getLastReplicatedDbId();
+                }
+                if (info.useMaxDbId < ckptEnd.getLastLocalDbId()) {
+                    info.useMaxDbId = ckptEnd.getLastLocalDbId();
+                }
+
+                if (info.useMinReplicatedTxnId >
+                    ckptEnd.getLastReplicatedTxnId()) {
+                    info.useMinReplicatedTxnId =
+                        ckptEnd.getLastReplicatedTxnId();
+                }
+                if (info.useMaxTxnId < ckptEnd.getLastLocalTxnId()) {
+                    info.useMaxTxnId = ckptEnd.getLastLocalTxnId();
+                }
+            }
+
+            env.getNodeSequence().
+                setLastNodeId(info.useMinReplicatedNodeId, info.useMaxNodeId);
+            env.getDbTree().setLastDbId(info.useMinReplicatedDbId,
+                                        info.useMaxDbId);
+            env.getTxnManager().setLastTxnId(info.useMinReplicatedTxnId,
+                                             info.useMaxTxnId);
+
+            info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+
+            info.fileMappers = reader.getFileMappers();
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "readMapIns", e);
+        }
+    }
+
+    /**
+     * Read INs of the target types and replay them; return the number seen.
+     */
+    private int readINs(long rollForwardLsn,
+                        boolean mapDbOnly,
+                        Set<LogEntryType> targetLogEntryTypes,
+                        boolean requireExactMatch,
+                        LevelRecorder recorder)
+        throws IOException, DatabaseException {
+
+        /* Don't need to track NodeIds. */
+        INFileReader reader =
+            new INFileReader(env,
+                             readBufferSize,
+                             rollForwardLsn,                 // start lsn
+                             info.nextAvailableLsn,          // end lsn
+                             false,                          // track ids
+                             mapDbOnly,
+                             info.partialCheckpointStartLsn,
+                             info.checkpointEndLsn,
+                             tracker);
+
+        Iterator<LogEntryType> iter = targetLogEntryTypes.iterator();
+        while (iter.hasNext()) {
+            reader.addTargetType(iter.next());
+        }
+
+        int numINsSeen = 0;
+        try {
+
+            /*
+             * Read all non-provisional INs, and process if they don't belong
+             * to the mapping tree.
+             */
+            DbTree dbMapTree = env.getDbTree();
+            while (reader.readNextEntry()) {
+                DatabaseId dbId = reader.getDatabaseId();
+                boolean isMapDb = dbId.equals(DbTree.ID_DB_ID);
+                /* Target INs in the map db or not, per mapDbOnly. */
+                boolean isTarget = (mapDbOnly == isMapDb);
+                if (isTarget) {
+                    DatabaseImpl db = dbMapTree.getDb(dbId);
+                    try {
+                        /* The db is null if it has been deleted; skip it. */
+                        if (db != null) {
+                            replayOneIN(reader, db, requireExactMatch,
+                                        recorder);
+                            numINsSeen++;
+
+                            /*
+                             * Add any db that we encounter INs for, because
+                             * they'll be part of the in-memory tree and
+                             * therefore should be included in the INList
+                             * build.
+                             */
+                            inListBuildDbIds.add(dbId);
+                        }
+                    } finally {
+                        dbMapTree.releaseDb(db);
+                    }
+                }
+            }
+
+            info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+            return numINsSeen;
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "readNonMapIns", e);
+            return 0;
+        }
+    }
+
+    /**
+     * Re-read INs for the databases that need a repeat pass and replay them.
+     */
+    private void repeatReadINs(long rollForwardLsn,
+                               Set<LogEntryType> targetLogEntryTypes,
+                               Set<DatabaseId> targetDbs)
+        throws IOException, DatabaseException {
+
+        /* Don't need to track NodeIds. */
+        INFileReader reader =
+            new INFileReader(env,
+                             readBufferSize,
+                             rollForwardLsn,                 // start lsn
+                             info.nextAvailableLsn,          // end lsn
+                             false,                          // track ids
+                             false,                          // mapDbOnly
+                             info.partialCheckpointStartLsn,
+                             info.checkpointEndLsn,
+                             tracker);
+
+        Iterator<LogEntryType> iter = targetLogEntryTypes.iterator();
+        while (iter.hasNext()) {
+            reader.addTargetType(iter.next());
+        }
+
+        try {
+
+            /* Read all non-provisional INs that are in the repeat set. */
+            DbTree dbMapTree = env.getDbTree();
+            while (reader.readNextEntry()) {
+                DatabaseId dbId = reader.getDatabaseId();
+                if (targetDbs.contains(dbId)) {
+                    DatabaseImpl db = dbMapTree.getDb(dbId);
+                    try {
+                        /* The db is null if it has been deleted; skip it. */
+                        if (db != null) {
+                            replayOneIN(reader,
+                                        db,
+                                        true,  // requireExactMatch
+                                        null); // level recorder
+                        }
+                    } finally {
+                        dbMapTree.releaseDb(db);
+                    }
+                }
+            }
+
+            info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "repeatReadINs", e);
+        }
+    }
+
+    /**
+     * Get an IN from the reader, set its database, and fit into tree.
+     */
+    private void replayOneIN(INFileReader reader,
+                             DatabaseImpl db,
+                             boolean requireExactMatch,
+                             LevelRecorder recorder)
+        throws DatabaseException {
+
+        if (reader.isDeleteInfo()) {
+            /* Last entry is a delete, replay it. */
+            replayINDelete(db,
+                           reader.getDeletedNodeId(),
+                           false,
+                           reader.getDeletedIdKey(),
+                           null,
+                           reader.getLastLsn());
+        } else if (reader.isDupDeleteInfo()) {
+            /* Last entry is a dup delete, replay it. */
+            replayINDelete(db,
+                           reader.getDupDeletedNodeId(),
+                           true,
+                           reader.getDupDeletedMainKey(),
+                           reader.getDupDeletedDupKey(),
+                           reader.getLastLsn());
+        } else {
+
+            /*
+             * The last entry is a node; replay it. We would normally call
+             * IN.postFetchInit, but we want to do something different from
+             * the faulting-in-a-node path: we don't want to put the IN on the
+             * in-memory list, and we don't want to search the db map tree, so
+             * we have an IN.postRecoveryInit.  Note also that we have to pass
+             * the LSN of the current log entry and also the LSN of the IN in
+             * question. The only time these differ is when the log entry is a
+             * BINDelta -- then the IN's LSN is the last full version LSN,
+             * and the log LSN is the current log entry.
+             */
+            IN in = reader.getIN();
+            long inLsn = reader.getLsnOfIN();
+            in.postRecoveryInit(db, inLsn);
+            in.latch();
+
+            /*
+             * Track the levels, in case we need an extra pass to reconcile
+             * splits vs. the checkpoint.
+             */
+            if (recorder != null) {
+                recorder.record(db.getId(), in.getLevel());
+            }
+            replaceOrInsert(db, in, reader.getLastLsn(), inLsn,
+                            requireExactMatch);
+        }
+    }
+
+    /**
+     * Undo all aborted LNs. To do so, walk the log backwards, keeping a
+     * collection of committed txns. If we see a log entry whose txn is not
+     * committed (and not prepared), undo it. For example, with a log of
+     * [LN-a(txn1), LN-b(txn2), commit(txn2)], the backward walk sees the
+     * commit first, so LN-b is left alone, while LN-a is undone because
+     * txn1 never committed.
+     */
+    private void undoLNs(RecoveryInfo info, Set<LogEntryType> lnTypes)
+        throws IOException, DatabaseException {
+
+        long firstActiveLsn = info.firstActiveLsn;
+        long lastUsedLsn = info.lastUsedLsn;
+        long endOfFileLsn = info.nextAvailableLsn;
+        /* Set up a reader to pick up target log entries from the log. */
+        LNFileReader reader =
+            new LNFileReader(env, readBufferSize, lastUsedLsn,
+                             false, endOfFileLsn, firstActiveLsn, null,
+                             info.checkpointEndLsn);
+
+        Iterator<LogEntryType> iter = lnTypes.iterator();
+        while (iter.hasNext()) {
+            LogEntryType lnType = iter.next();
+            reader.addTargetType(lnType);
+        }
+
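+        /*
+         * These sets guard against double-counting utilization info while
+         * undoing: countedFileSummaries holds the last file counted per
+         * txn/node, and countedAbortLsnNodes holds nodes whose abort LSN
+         * has already been counted (see undoUtilizationInfo).
+         */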
+        Map<TxnNodeId,Long> countedFileSummaries =
+            new HashMap<TxnNodeId,Long>(); // TxnNodeId -> file number
+        Set<TxnNodeId> countedAbortLsnNodes = new HashSet<TxnNodeId>();
+
+        DbTree dbMapTree = env.getDbTree();
+        TreeLocation location = new TreeLocation();
+        try {
+
+            /*
+             * Iterate over the target LNs and commit records, constructing
+             * the tree.
+             */
+            while (reader.readNextEntry()) {
+                if (reader.isLN()) {
+
+                    /* Get the txnId from the log entry. */
+                    Long txnId = reader.getTxnId();
+
+                    /*
+                     * If this node is not in a committed or prepared txn,
+                     * examine it to see if it should be undone.
+                     */
+                    if (txnId != null &&
+                        !committedTxnIds.containsKey(txnId) &&
+                        preparedTxns.get(txnId) == null) {
+
+                        /* Invoke the evictor to reduce memory consumption. */
+                        env.invokeEvictor();
+
+                        LN ln = reader.getLN();
+                        long logLsn = reader.getLastLsn();
+                        long abortLsn = reader.getAbortLsn();
+                        boolean abortKnownDeleted =
+                            reader.getAbortKnownDeleted();
+                        DatabaseId dbId = reader.getDatabaseId();
+                        DatabaseImpl db = dbMapTree.getDb(dbId);
+                        try {
+                            /* Database may be null if it's been deleted. */
+                            if (db != null) {
+                                ln.postFetchInit(db, logLsn);
+                                undo(detailedTraceLevel, db, location, ln,
+                                     reader.getKey(), reader.getDupTreeKey(),
+                                     logLsn, abortLsn, abortKnownDeleted,
+                                     info, true);
+
+                                /* Undo utilization info. */
+                                TxnNodeId txnNodeId =
+                                    new TxnNodeId(reader.getNodeId(),
+                                                  txnId.longValue());
+                                undoUtilizationInfo(ln, db, logLsn, abortLsn,
+                                                    abortKnownDeleted,
+                                                    reader.getLastEntrySize(),
+                                                    txnNodeId,
+                                                    countedFileSummaries,
+                                                    countedAbortLsnNodes);
+
+                                /*
+                                 * Add any db that we encounter LNs for
+                                 * because they'll be part of the in-memory
+                                 * tree and therefore should be included in the
+                                 * INList build.
+                                 */
+                                inListBuildDbIds.add(dbId);
+
+                                /*
+                                 * For temporary DBs that are encountered as
+                                 * MapLNs, add them to the set of databases to
+                                 * be removed.
+                                 */
+                                MapLN mapLN = reader.getMapLN();
+                                if (mapLN != null &&
+                                    mapLN.getDatabase().isTemporary()) {
+                                    tempDbIds.add(mapLN.getDatabase().getId());
+                                }
+                            }
+                        } finally {
+                            dbMapTree.releaseDb(db);
+                        }
+                    }
+                } else if (reader.isPrepare()) {
+
+                    /*
+                     * The entry just read is a prepare record.  There should
+                     * be no lock conflicts during recovery, but just in case
+                     * there are, we set the lock timeout to 0.
+                     */
+                    long prepareId = reader.getTxnPrepareId();
+                    Long prepareIdL = Long.valueOf(prepareId);
+                    if (!committedTxnIds.containsKey(prepareIdL) &&
+                        !abortedTxnIds.contains(prepareIdL)) {
+                        TransactionConfig txnConf = new TransactionConfig();
+                        Txn preparedTxn = PreparedTxn.createPreparedTxn
+                            (env, txnConf, prepareId);
+                        preparedTxn.setLockTimeout(0);
+                        preparedTxns.put(prepareIdL, preparedTxn);
+                        preparedTxn.setPrepared(true);
+                        env.getTxnManager().registerXATxn
+                            (reader.getTxnPrepareXid(), preparedTxn, true);
+                        Tracer.trace(Level.INFO, env,
+                                     "Found unfinished prepare record: id: " +
+                                     reader.getTxnPrepareId() +
+                                     " Xid: " + reader.getTxnPrepareXid());
+                    }
+                } else if (reader.isAbort()) {
+                    /* The entry just read is an abort record. */
+                    abortedTxnIds.add(Long.valueOf(reader.getTxnAbortId()));
+                } else {
+                    /* The entry just read is a commit record. */
+                    committedTxnIds.put(Long.valueOf(reader.getTxnCommitId()),
+                                        Long.valueOf(reader.getLastLsn()));
+                }
+            }
+            info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+        } catch (RuntimeException e) {
+            traceAndThrowException(reader.getLastLsn(), "undoLNs", e);
+        }
+    }
+
+    /**
+     * Apply all committed LNs.
+     * @param info recovery state; redo starts at the checkpoint start LSN
+     * (or at the first active LSN, to pick up prepared txns)
+     * @param lnTypes the targeted LN entry types
+     */
+    private void redoLNs(RecoveryInfo info, Set<LogEntryType> lnTypes)
+        throws IOException, DatabaseException {
+
+        long endOfFileLsn = info.nextAvailableLsn;
+        long rollForwardLsn = info.checkpointStartLsn;
+        long firstActiveLsn = info.firstActiveLsn;
+
+        /*
+         * Set up a reader to pick up target log entries from the log.  For
+         * most LNs, we should only redo LNs starting at the checkpoint start
+         * LSN.  However, LNs that are prepared but not committed (i.e., LNs
+         * that belong to 2PC txns that have been prepared but not yet
+         * committed) still need to be processed, and they can live in the
+         * log between the firstActive LSN and the checkpointStart LSN.  So we
+         * start the LNFileReader at the first active LSN.
+         */
+        LNFileReader reader =
+            new LNFileReader(env, readBufferSize, firstActiveLsn,
+                             true, DbLsn.NULL_LSN, endOfFileLsn, null,
+                             info.checkpointEndLsn);
+
+        Iterator<LogEntryType> iter = lnTypes.iterator();
+        while (iter.hasNext()) {
+            LogEntryType lnType = iter.next();
+            reader.addTargetType(lnType);
+        }
+
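+        /* Guards against double-counting abort LSNs in redoUtilizationInfo. */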
+        Set<TxnNodeId> countedAbortLsnNodes = new HashSet<TxnNodeId>();
+
+        DbTree dbMapTree = env.getDbTree();
+        TreeLocation location = new TreeLocation();
+        try {
+
+            /*
+             * Iterate over the target LNs and construct the in-memory tree.
+             */
+            while (reader.readNextEntry()) {
+                long lastLsn = reader.getLastLsn();
+
+                /*
+                 * preparedOnlyLNs indicates that we're processing LSNs between
+                 * the First Active LSN and the Checkpoint Start LSN.  In this
+                 * range, only LNs which are prepared, but not committed,
+                 * should be processed. If there is no checkpoint, the
+                 * beginning of the log is really the First Active LSN, and all
+                 * prepared LNs should be processed.
+                 */
+                boolean preparedOnlyLNs =
+                    (rollForwardLsn == DbLsn.NULL_LSN) ? 
+                    true : 
+                    (DbLsn.compareTo(lastLsn, rollForwardLsn) < 0);
+
+                if (reader.isLN()) {
+
+                    /* Get the txnId from the log entry. */
+                    Long txnId = reader.getTxnId();
+
+                    /*
+                     * If this LN is in a committed txn, or if it's a
+                     * non-transactional LN between the checkpoint start and
+                     * the end of the log, then redo it.  If it's a prepared LN
+                     * in the log between the first active LSN and the end of
+                     * the log, resurrect it.
+                     */
+                    boolean processThisLN = false;
+                    boolean lnIsCommitted = false;
+                    boolean lnIsPrepared = false;
+                    long commitLsn = DbLsn.NULL_LSN;
+                    Txn preparedTxn = null;
+                    if (txnId == null &&
+                        !preparedOnlyLNs) {
+                        processThisLN = true;
+                    } else {
+                        lnIsCommitted = committedTxnIds.containsKey(txnId);
+                        if (lnIsCommitted) {
+                            commitLsn = committedTxnIds.get(txnId).
+                                longValue();
+                        } else {
+                            preparedTxn = preparedTxns.get(txnId);
+                            lnIsPrepared = preparedTxn != null;
+                        }
+                        if ((lnIsCommitted && !preparedOnlyLNs) ||
+                            lnIsPrepared) {
+                            processThisLN = true;
+                        }
+                    }
+                    if (processThisLN) {
+
+                        /* Invoke the evictor to reduce memory consumption. */
+                        env.invokeEvictor();
+
+                        LN ln = reader.getLN();
+                        DatabaseId dbId = reader.getDatabaseId();
+                        DatabaseImpl db = dbMapTree.getDb(dbId);
+                        try {
+                            long logLsn = reader.getLastLsn();
+                            long treeLsn = DbLsn.NULL_LSN;
+
+                            /* Database may be null if it's been deleted. */
+                            if (db != null) {
+                                ln.postFetchInit(db, logLsn);
+
+                                if (preparedTxn != null) {
+                                    preparedTxn.addLogInfo(logLsn);
+
+                                    /*
+                                     * We're reconstructing a prepared, but not
+                                     * finished, transaction.  We know that
+                                     * there was a write lock on this LN since
+                                     * it exists in the log under this txnId.
+                                     */
+                                    LockResult lockResult = preparedTxn.lock
+                                        (ln.getNodeId(), LockType.WRITE,
+                                         false /*noWait*/, db);
+                                    lockResult.setAbortLsn
+                                        (reader.getAbortLsn(),
+                                         reader.getAbortKnownDeleted()); 
+                                }
+
+                                treeLsn = redo(db,
+                                               location,
+                                               ln,
+                                               reader.getKey(),
+                                               reader.getDupTreeKey(),
+                                               logLsn,
+                                               info);
+
+                                /*
+                                 * Add any db that we encounter LNs for
+                                 * because they'll be part of the in-memory
+                                 * tree and therefore should be included in the
+                                 * INList build.
+                                 */
+                                inListBuildDbIds.add(dbId);
+
+                                /*
+                                 * For temporary DBs that are encountered as
+                                 * MapLNs, add them to the set of databases to
+                                 * be removed.
+                                 */
+                                MapLN mapLN = reader.getMapLN();
+                                if (mapLN != null &&
+                                    mapLN.getDatabase().isTemporary()) {
+                                    tempDbIds.add(mapLN.getDatabase().getId());
+                                }
+
+                                /*
+                                 * For deleted MapLNs (truncated or removed
+                                 * DBs), redo utilization counting by counting
+                                 * the entire database as obsolete.
+                                 */
+                                if (mapLN != null && mapLN.isDeleted()) {
+                                    mapLN.getDatabase().countObsoleteDb
+                                        (tracker, logLsn);
+                                }
+
+                                /* Redo utilization info. */
+                                TxnNodeId txnNodeId = null;
+                                if (txnId != null) {
+                                    txnNodeId = new TxnNodeId
+                                        (reader.getNodeId(),
+                                         txnId.longValue());
+                                }
+                                redoUtilizationInfo
+                                    (logLsn, treeLsn, commitLsn,
+                                     reader.getAbortLsn(),
+                                     reader.getAbortKnownDeleted(),
+                                     reader.getLastEntrySize(),
+                                     reader.getKey(),
+                                     ln, db, txnNodeId, countedAbortLsnNodes);
+                            }
+                        } finally {
+                            dbMapTree.releaseDb(db);
+                        }
+                    }
+                }
+            }
+            info.nRepeatIteratorReads += reader.getNRepeatIteratorReads();
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "redoLns", e);
+        }
+    }
+
+    /**
+     * Build the in-memory INList with INs that have been made resident by the
+     * recovery process.
+     */
+    private void buildINList()
+        throws DatabaseException {
+
+        env.getInMemoryINs().enable();           // enable INList
+        env.getDbTree().rebuildINListMapDb();    // scan map db
+
+        /* For all the dbs that we read in recovery, scan for resident INs. */
+        Iterator<DatabaseId> iter = inListBuildDbIds.iterator();
+        while (iter.hasNext()) {
+            DatabaseId dbId = iter.next();
+            /* We already did the map tree; don't do it again. */
+            if (!dbId.equals(DbTree.ID_DB_ID)) {
+                DatabaseImpl db = env.getDbTree().getDb(dbId);
+                try {
+                    if (db != null) {
+                        /* Temp DBs will be removed, skip build. */
+                        if (!db.isTemporary()) {
+                            db.getTree().rebuildINList();
+                        }
+                    }
+                } finally {
+                    env.getDbTree().releaseDb(db);
+                }
+            }
+        }
+    }
+
+    /* Struct to hold a nodeId/txnId tuple */
+    private static class TxnNodeId {
+        long nodeId;
+        long txnId;
+
+        TxnNodeId(long nodeId, long txnId) {
+            this.nodeId = nodeId;
+            this.txnId = txnId;
+        }
+
+        /**
+         * Compare two TxnNodeId objects
+         */
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) {
+                return true;
+            }
+
+            if (!(obj instanceof TxnNodeId)) {
+                return false;
+            }
+
+            return ((((TxnNodeId) obj).txnId == txnId) &&
+                    (((TxnNodeId) obj).nodeId == nodeId));
+        }
+
+        @Override
+        public int hashCode() {
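+            /* A cheap combining hash; collisions only slow set lookups. */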
+            return (int) (txnId + nodeId);
+        }
+
+        @Override
+        public String toString() {
+            return "txnId=" + txnId + "/nodeId=" + nodeId;
+        }
+    }
+
+    /*
+     * Tree manipulation methods.
+     */
+
+    /**
+     * Recover an internal node. If inFromLog is:
+     *       - not found, insert it in the appropriate location.
+     *       - if found and there is a physical match (LSNs are the same)
+     *         do nothing.
+     *       - if found and there is a logical match (LSNs are different;
+     *         another version of this IN is in place), replace the found
+     *         node with the node read from the log only if the log version's
+     *         LSN is greater.
+     * InFromLog should be latched upon entering this method and it will
+     * not be latched upon exiting.
+     *
+     * @param inFromLog - the new node to put in the tree.  The identifier key
+     * and node id are used to find the existing version of the node.
+     * @param logLsn - the location of the log entry in the log.
+     * @param inLsn LSN of this IN -- may not be the same as the log LSN if
+     * the current entry is a BINDelta
+     * @param requireExactMatch - true if we won't place this node in the tree
+     * unless we find exactly that parent. Used for BINDeltas, where we want
+     * to only apply the BINDelta to that exact node.
+     */
+    private void replaceOrInsert(DatabaseImpl db,
+                                 IN inFromLog,
+                                 long logLsn,
+                                 long inLsn,
+                                 boolean requireExactMatch)
+        throws DatabaseException {
+
+        List<TrackingInfo> trackingList = null;
+        boolean inFromLogLatchReleased = false;
+        try {
+
+            /*
+             * We must know a priori if this node is the root. We can't infer
+             * that status from a search of the existing tree, because
+             * splitting the root is done by putting a node above the old root.
+             * A search downward would incorrectly place the new root below the
+             * existing tree.
+             */
+            if (inFromLog.isRoot()) {
+                if (inFromLog.containsDuplicates()) {
+                    replaceOrInsertDuplicateRoot(db, (DIN) inFromLog, logLsn);
+                } else {
+                    replaceOrInsertRoot(db, inFromLog, logLsn);
+                    inFromLogLatchReleased = true;
+                }
+            } else {
+
+                /*
+                 * Look for a parent. The call to getParentNode unlatches node.
+                 * Then place inFromLog in the tree if appropriate.
+                 */
+                trackingList = new ArrayList<TrackingInfo>();
+                replaceOrInsertChild(db, inFromLog, logLsn, inLsn,
+                                     trackingList, requireExactMatch);
+                inFromLogLatchReleased = true;
+            }
+        } catch (Exception e) {
+            String trace = printTrackList(trackingList);
+            Tracer.trace(db.getDbEnvironment(), "RecoveryManager",
+                         "replaceOrInsert", " lsnFromLog:" +
+                         DbLsn.getNoFormatString(logLsn) + " " + trace,
+                         e);
+            throw new DatabaseException
+                ("lsnFromLog=" + DbLsn.getNoFormatString(logLsn), e);
+        } finally {
+            if (!inFromLogLatchReleased) {
+                inFromLog.releaseLatch();
+            }
+
+            assert (LatchSupport.countLatchesHeld() == 0):
+                LatchSupport.latchesHeldToString() +
+                "LSN = " + DbLsn.toString(logLsn) +
+                " inFromLog = " + inFromLog.getNodeId();
+        }
+    }
+
+    /**
+     * Dump a tracking list into a string.
+     */
+    private String printTrackList(List<TrackingInfo> trackingList) {
+        if (trackingList != null) {
+            StringBuffer sb = new StringBuffer();
+            Iterator<TrackingInfo> iter = trackingList.iterator();
+            sb.append("Trace list:");
+            sb.append('\n');
+            while (iter.hasNext()) {
+                sb.append(iter.next());
+                sb.append('\n');
+            }
+            return sb.toString();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Replay an IN delete. Remove an entry from an IN to reflect a reverse
+     * split.
+     */
+    private void replayINDelete(DatabaseImpl db,
+                                long nodeId,
+                                boolean containsDuplicates,
+                                byte[] mainKey,
+                                byte[] dupKey,
+                                long logLsn)
+        throws DatabaseException {
+
+        boolean found = false;
+        boolean deleted = false;
+        Tree tree = db.getTree();
+        SearchResult result = new SearchResult();
+
+        try {
+            /* Search for the parent of this target node. */
+            result = db.getTree().getParentINForChildIN
+                (nodeId,
+                 containsDuplicates,
+                 false, // do not stop at dup tree root
+                 mainKey,
+                 dupKey,
+                 false, // requireExactMatch
+                 CacheMode.UNCHANGED,
+                 -1,    // targetLevel
+                 null,  // trackingList
+                 true); // doFetch
+
+            if (result.parent == null) {
+                /* It's null -- we actually deleted the root. */
+                tree.withRootLatchedExclusive(new RootDeleter(tree));
+
+                /*
+                 * Dirty the database to call DbTree.modifyDbRoot later during
+                 * the checkpoint.  We should not log a DatabaseImpl until its
+                 * utilization info is correct.
+                 */
+                db.setDirtyUtilization();
+                traceRootDeletion(Level.FINE, db);
+                deleted = true;
+            } else if (result.exactParentFound) {
+                /* Exact match was found -- delete the parent entry. */
+                found = true;
+                deleted = result.parent.deleteEntry(result.index, false);
+            }
+        } finally {
+            if (result.parent != null) {
+                result.parent.releaseLatch();
+            }
+
+            traceINDeleteReplay
+                (nodeId, logLsn, found, deleted, result.index,
+                 containsDuplicates);
+        }
+    }
+
+    /*
+     * RootDeleter lets us clear the rootIN within the root latch.
+     */
+    private static class RootDeleter implements WithRootLatched {
+        Tree tree;
+        RootDeleter(Tree tree) {
+            this.tree = tree;
+        }
+
+        /**
+         * @return null; the root reference is cleared in place, so no new
+         * root IN needs to be installed.
+         */
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            tree.setRoot(null, false);
+            return null;
+        }
+    }
+
+    /**
+     * If the root of this tree is null, use this IN from the log as a root.
+     * Note that we should really also check the LSN of the mapLN, because
+     * perhaps the root is null because it's been deleted. However, the replay
+     * of all the LNs will end up adjusting the tree correctly.
+     *
+     * If there is a root, check if this IN is a different LSN and if so,
+     * replace it.
+     */
+    private void replaceOrInsertRoot(DatabaseImpl db, IN inFromLog, long lsn)
+        throws DatabaseException {
+
+        boolean success = true;
+        Tree tree = db.getTree();
+        RootUpdater rootUpdater = new RootUpdater(tree, inFromLog, lsn);
+        try {
+            /* Run the root updater while the root latch is held. */
+            tree.withRootLatchedExclusive(rootUpdater);
+
+            /* Update the mapLN if necessary */
+            if (rootUpdater.updateDone()) {
+
+                /*
+                 * Dirty the database to call DbTree.modifyDbRoot later during
+                 * the checkpoint.  We should not log a DatabaseImpl until its
+                 * utilization info is correct.
+                 */
+                db.setDirtyUtilization();
+            }
+        } catch (Exception e) {
+            success = false;
+            throw new DatabaseException("lsnFromLog=" +
+                                        DbLsn.getNoFormatString(lsn),
+                                        e);
+        } finally {
+            if (rootUpdater.getInFromLogIsLatched()) {
+                inFromLog.releaseLatch();
+            }
+
+            trace(detailedTraceLevel,
+                  db, TRACE_ROOT_REPLACE, success, inFromLog,
+                  lsn,
+                  null,
+                  true,
+                  rootUpdater.getReplaced(),
+                  rootUpdater.getInserted(),
+                  rootUpdater.getOriginalLsn(),
+                  DbLsn.NULL_LSN,
+                  -1);
+        }
+    }
+
+    /*
+     * RootUpdater lets us replace the tree root within the tree root latch.
+     */
+    private static class RootUpdater implements WithRootLatched {
+        private Tree tree;
+        private IN inFromLog;
+        private long lsn = DbLsn.NULL_LSN;
+        private boolean inserted = false;
+        private boolean replaced = false;
+        private long originalLsn = DbLsn.NULL_LSN;
+        private boolean inFromLogIsLatched = true;
+
+        RootUpdater(Tree tree, IN inFromLog, long lsn) {
+            this.tree = tree;
+            this.inFromLog = inFromLog;
+            this.lsn = lsn;
+        }
+
+        boolean getInFromLogIsLatched() {
+            return inFromLogIsLatched;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            ChildReference newRoot =
+                tree.makeRootChildReference(inFromLog, new byte[0], lsn);
+            inFromLog.releaseLatch();
+            inFromLogIsLatched = false;
+
+            if (root == null) {
+                tree.setRoot(newRoot, false);
+                inserted = true;
+            } else {
+                originalLsn = root.getLsn(); // for debugLog
+
+                /*
+                 * The current in-memory root IN is older than the root IN from
+                 * the log.
+                 */
+                if (DbLsn.compareTo(originalLsn, lsn) < 0) {
+                    tree.setRoot(newRoot, false);
+                    replaced = true;
+                }
+            }
+            return null;
+        }
+
+        boolean updateDone() {
+            return inserted || replaced;
+        }
+
+        boolean getInserted() {
+            return inserted;
+        }
+
+        boolean getReplaced() {
+            return replaced;
+        }
+
+        long getOriginalLsn() {
+            return originalLsn;
+        }
+    }
+
+    /**
+     * Recover the root of a duplicate tree.
+     */
+    private void replaceOrInsertDuplicateRoot(DatabaseImpl db,
+                                              DIN inFromLog,
+                                              long lsn)
+        throws DatabaseException {
+
+        boolean found = true;
+        boolean inserted = false;
+        boolean replaced = false;
+        long originalLsn = DbLsn.NULL_LSN;
+
+        byte[] mainTreeKey = inFromLog.getMainTreeKey();
+        IN parent = null;
+        int index = -1;
+        boolean success = false;
+        try {
+
+            /*
+             * Allow splits since the parent BIN of this DIN may be full.
+             * [#13435]
+             */
+            parent = db.getTree().searchSplitsAllowed
+                (mainTreeKey, -1, CacheMode.DEFAULT);
+            assert parent instanceof BIN;
+
+            ChildReference newRef =
+                new ChildReference(inFromLog, mainTreeKey, lsn);
+            index = parent.insertEntry1(newRef);
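+
+            /*
+             * insertEntry1 sets the IN.EXACT_MATCH bit in the returned index
+             * when an entry with this key already exists; the masking below
+             * relies on that.
+             */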
+            if (index >= 0 &&
+                (index & IN.EXACT_MATCH) != 0) {
+
+                index &= ~IN.EXACT_MATCH;
+
+                /*
+                 * Replace whatever's at this entry, whether it's an LN or an
+                 * earlier root DIN as long as one of the following is true:
+                 *
+                 * - the entry is known deleted
+                 * - or the LSN is earlier than the one we've just read from
+                 *     the log.
+                 */
+                if (parent.isEntryKnownDeleted(index)) {
+                    /* Be sure to clear the known deleted bit. */
+                    parent.setEntry(index, inFromLog, mainTreeKey,
+                                    lsn, (byte) 0);
+                    replaced = true;
+                } else {
+                    originalLsn = parent.getLsn(index);
+                    if (DbLsn.compareTo(originalLsn, lsn) < 0) {
+                        parent.setEntry(index, inFromLog, mainTreeKey, lsn,
+                                        parent.getState(index));
+                        replaced = true;
+                    }
+                }
+            } else {
+                found = false;
+            }
+            success = true;
+        } finally {
+            if (parent != null) {
+                parent.releaseLatch();
+            }
+            trace(detailedTraceLevel,
+                  db,
+                  TRACE_DUP_ROOT_REPLACE, success, inFromLog,
+                  lsn, parent, found,
+                  replaced, inserted, originalLsn, DbLsn.NULL_LSN, index);
+        }
+    }
+
+    private void replaceOrInsertChild(DatabaseImpl db,
+                                      IN inFromLog,
+                                      long logLsn,
+                                      long inLsn,
+                                      List<TrackingInfo> trackingList,
+                                      boolean requireExactMatch)
+        throws DatabaseException {
+
+        boolean inserted = false;
+        boolean replaced = false;
+        long originalLsn = DbLsn.NULL_LSN;
+        boolean success = false;
+        SearchResult result = new SearchResult();
+        try {
+            result = db.getTree().getParentINForChildIN
+                (inFromLog,
+                 requireExactMatch,
+                 CacheMode.UNCHANGED,
+                 -1,    // targetLevel
+                 trackingList);
+
+            /*
+             * Does inFromLog exist in this parent?
+             *
+             * 1. No possible parent -- skip this child. It's represented
+             *    by a parent that's later in the log.
+             * 2. No match, but a possible parent: don't insert, all nodes
+             *    are logged in such a way that they must have a possible
+             *    parent (#13501)
+             * 3. physical match: (LSNs same) this LSN is already in place,
+             *                    do nothing.
+             * 4. logical match: another version of this IN is in place.
+             *                   Replace child with inFromLog if inFromLog's
+             *                   LSN is greater.
+             */
+            if (result.parent == null) {
+                return;  // case 1, no possible parent.
+            }
+
+            /* Get the key that will locate inFromLog in this parent. */
+            if (result.index >= 0) {
+                if (result.parent.getLsn(result.index) == logLsn) {
+                    /* case 3: do nothing */
+
+                } else {
+
+                    /*
+                     * Not an exact physical match, now need to look at child.
+                     */
+                    if (result.exactParentFound) {
+                        originalLsn = result.parent.getLsn(result.index);
+
+                        /* case 4: It's a logical match, replace */
+                        if (DbLsn.compareTo(originalLsn, logLsn) < 0) {
+
+                            /*
+                             * It's a logical match, replace. Put the child
+                             * node reference into the parent, as well as the
+                             * true LSN of the IN. (If this entry is a
+                             * BINDelta, the node has been updated with all the
+                             * deltas, but the LSN we want to put in should be
+                             * the last full LSN, not the LSN of the BINDelta)
+                             */
+                            result.parent.updateNode(result.index,
+                                                     inFromLog,
+                                                     inLsn,
+                                                     null /*lnSlotKey*/);
+                            replaced = true;
+                        }
+                    }
+                    /* else case 2 */
+                }
+            }
+            /* else case 2 */
+
+            success = true;
+        } finally {
+            if (result.parent != null) {
+                result.parent.releaseLatch();
+            }
+
+            trace(detailedTraceLevel, db,
+                  TRACE_IN_REPLACE, success, inFromLog,
+                  logLsn, result.parent,
+                  result.exactParentFound, replaced, inserted,
+                  originalLsn, DbLsn.NULL_LSN, result.index);
+        }
+    }
+
+    /**
+     * Redo a committed LN for recovery.
+     *
+     * <pre>
+     * log LN found  | logLSN > LSN | LN is deleted | action
+     *   in tree     | in tree      |               |
+     * --------------+--------------+---------------+------------------------
+     *     Y         |    N         |    n/a        | no action
+     * --------------+--------------+---------------+------------------------
+     *     Y         |    Y         |     N         | replace w/log LSN
+     * --------------+--------------+---------------+------------------------
+     *     Y         |    Y         |     Y         | replace w/log LSN, put
+     *               |              |               | on compressor queue
+     * --------------+--------------+---------------+------------------------
+     *     N         |    n/a       |     N         | insert into tree
+     * --------------+--------------+---------------+------------------------
+     *     N         |    n/a       |     Y         | no action
+     * --------------+--------------+---------------+------------------------
+     *
+     * </pre>
+     *
+     * @param location holds state about the search in the tree. Passed
+     *  in from the recovery manager to reduce object creation overhead.
+     * @param lnFromLog - the new node to put in the tree.
+     * @param mainKey is the key that navigates us through the main tree
+     * @param dupKey is the key that navigates us through the duplicate
+     * tree
+     * @param logLsn is the LSN from the just-read log entry
+     * @param info is a recovery stats object.
+     * @return the LSN found in the tree, or NULL_LSN if not found.
+     */
+    private long redo(DatabaseImpl db,
+                      TreeLocation location,
+                      LN lnFromLog,
+                      byte[] mainKey,
+                      byte[] dupKey,
+                      long logLsn,
+                      RecoveryInfo info)
+        throws DatabaseException {
+
+        boolean found = false;
+        boolean replaced = false;
+        boolean inserted = false;
+        boolean success = false;
+        try {
+
+            /*
+             * Find the BIN which is the parent of this LN.
+             */
+            location.reset();
+            found = db.getTree().getParentBINForChildLN
+                (location, mainKey, dupKey, lnFromLog,
+                 true,  // splitsAllowed
+                 false, // findDeletedEntries
+                 true,  // searchDupTree
+                 CacheMode.DEFAULT);
+
+            if (!found && (location.bin == null)) {
+
+                /*
+                 * There is no possible parent for this LN. This tree was
+                 * probably compressed away.
+                 */
+                success = true;
+                return DbLsn.NULL_LSN;
+            }
+
+            /*
+             * Now we're at the parent for this LN, whether BIN, DBIN or DIN
+             */
+            if (lnFromLog.containsDuplicates()) {
+                if (found) {
+
+                    /*
+                     * This is a dupCountLN. It's ok if there's no DIN parent
+                     * for it. [#11307].
+                     */
+                    DIN duplicateRoot = (DIN)
+                        location.bin.fetchTarget(location.index);
+                    if (DbLsn.compareTo(logLsn, location.childLsn) >= 0) {
+                        /* DupCountLN needs replacing. */
+                        duplicateRoot.latch();
+                        duplicateRoot.updateDupCountLNRefAndNullTarget(logLsn);
+                        duplicateRoot.releaseLatch();
+                    }
+                }
+            } else {
+                if (found) {
+
+                    /*
+                     * This LN is in the tree. See if it needs replacing.
+                     */
+                    info.lnFound++;
+
+                    if (DbLsn.compareTo(logLsn, location.childLsn) > 0) {
+                        info.lnReplaced++;
+                        replaced = true;
+
+                        /*
+                         * Be sure to make the target null. We don't want
+                         * this new LN resident; it would make recovery start
+                         * dragging in the whole tree and consume too much
+                         * memory.
+                         *
+                         * Also, LN must be left null to ensure the key in the
+                         * BIN slot is transactionally correct (keys are
+                         * updated if necessary when the LN is fetched).
+                         * [#15704]
+                         */
+                        location.bin.updateNode(location.index,
+                                                null /*node*/,
+                                                logLsn,
+                                                null /*lnSlotKey*/);
+                    }
+
+                    /*
+                     * If the entry in the tree is deleted, put it on the
+                     * compressor queue.  Set KnownDeleted to prevent fetching
+                     * a cleaned LN.
+                     */
+                    if (DbLsn.compareTo(logLsn, location.childLsn) >= 0 &&
+                        lnFromLog.isDeleted()) {
+                        location.bin.setKnownDeletedLeaveTarget
+                            (location.index);
+                        byte[] deletedKey = location.bin.containsDuplicates() ?
+                            dupKey : mainKey;
+
+                        /*
+                         * In the case of SR 8984, the LN has no data and
+                         * therefore no valid delete key. Don't compress.
+                         */
+                        if (deletedKey != null) {
+                            db.getDbEnvironment().addToCompressorQueue
+                                (location.bin,
+                                 new Key(deletedKey),
+                                 false); // don't wakeup compressor
+                        }
+                    }
+                } else {
+
+                    /*
+                     * This LN is not in the tree. If it's not deleted, insert
+                     * it.
+                     */
+                    info.lnNotFound++;
+                    if (!lnFromLog.isDeleted()) {
+                        info.lnInserted++;
+                        inserted = true;
+                        boolean insertOk =
+                            insertRecovery(db, location, logLsn);
+                        assert insertOk;
+                    }
+                }
+            }
+
+            if (!inserted) {
+                /* 
+                 * We're about to cast away this instantiated LN. It may
+                 * have registered for some portion of the memory budget,
+                 * so free that now. Specifically, this would be true
+                 * for the DbFileSummaryMap in a MapLN.
+                 */
+                lnFromLog.releaseMemoryBudget();
+            }
+
+            success = true;
+            return found ? location.childLsn : DbLsn.NULL_LSN;
+        } finally {
+            if (location.bin != null) {
+                location.bin.releaseLatch();
+            }
+            trace(detailedTraceLevel, db,
+                  TRACE_LN_REDO, success, lnFromLog,
+                  logLsn, location.bin, found,
+                  replaced, inserted,
+                  location.childLsn, DbLsn.NULL_LSN, location.index);
+        }
+    }
+
+    /**
+     * Undo the changes to this node. Here are the rules that govern the action
+     * taken.
+     *
+     * <pre>
+     *
+     * found LN in  | abortLsn is | logLsn ==       | action taken
+     *    tree      | null        | LSN in tree     | by undo
+     * -------------+-------------+----------------------------------------
+     *      Y       |     N       |      Y          | replace w/abort LSN
+     * ------------ +-------------+-----------------+-----------------------
+     *      Y       |     Y       |      Y          | remove from tree
+     * ------------ +-------------+-----------------+-----------------------
+     *      Y       |     N/A     |      N          | no action
+     * ------------ +-------------+-----------------+-----------------------
+     *      N       |     N/A     |    N/A          | no action (*)
+     * (*) If this key is not present in the tree, this record doesn't
+     * reflect the IN state of the tree and this log entry is not applicable.
+     *
+     * </pre>
+     * @param location holds state about the search in the tree. Passed
+     *  in from the recovery manager to reduce object creation overhead.
+     * @param lnFromLog - the new node to put in the tree.
+     * @param mainKey is the key that navigates us through the main tree
+     * @param dupKey is the key that navigates us through the duplicate
+     *                   tree
+     * @param logLsn is the LSN from the just-read log entry
+     * @param abortLsn gives us the location of the original version of the
+     *                 node
+     * @param info is a recovery stats object.
+     */
+    public static void undo(Level traceLevel,
+                            DatabaseImpl db,
+                            TreeLocation location,
+                            LN lnFromLog,
+                            byte[] mainKey,
+                            byte[] dupKey,
+                            long logLsn,
+                            long abortLsn,
+                            boolean abortKnownDeleted,
+                            RecoveryInfo info,
+                            boolean splitsAllowed)
+        throws DatabaseException {
+
+        boolean found = false;
+        boolean replaced = false;
+        boolean success = false;
+
+        try {
+
+            /*
+             * Find the BIN which is the parent of this LN.
+             */
+            location.reset();
+            found = db.getTree().getParentBINForChildLN
+                (location, mainKey, dupKey, lnFromLog, splitsAllowed,
+                 true,  // findDeletedEntries
+                 false, // searchDupTree
+                 CacheMode.DEFAULT); // updateGeneration
+
+            /*
+             * Now we're at the rightful parent, whether BIN or DBIN.
+             */
+            if (lnFromLog.containsDuplicates()) {
+
+                /*
+                 * This is a DupCountLN. It's ok if there's no DIN parent
+                 * for it. [#11307].
+                 */
+                if (found) {
+                    DIN duplicateRoot = (DIN)
+                        location.bin.fetchTarget(location.index);
+                    duplicateRoot.latch();
+                    try {
+                        if (DbLsn.compareTo(logLsn, location.childLsn) == 0) {
+                            /* DupCountLN needs replacing. */
+                            duplicateRoot.
+                                updateDupCountLNRefAndNullTarget(abortLsn);
+                            replaced = true;
+                        }
+                    } finally {
+                        duplicateRoot.releaseLatch();
+                    }
+                }
+            } else {
+                if (found) {
+                    /* This LN is in the tree. See if it needs replacing. */
+                    if (info != null) {
+                        info.lnFound++;
+                    }
+                    boolean updateEntry =
+                        DbLsn.compareTo(logLsn, location.childLsn) == 0;
+                    if (updateEntry) {
+                        if (abortLsn == DbLsn.NULL_LSN) {
+
+                            /*
+                             * To undo a node that was created by this txn,
+                             * remove it.  If this entry is deleted, put it on
+                             * the compressor queue.  Set KnownDeleted to
+                             * prevent fetching a cleaned LN.
+                             */
+                            location.bin.
+                                setKnownDeletedLeaveTarget(location.index);
+                            byte[] deletedKey =
+                                location.bin.containsDuplicates() ?
+                                dupKey : mainKey;
+                            db.getDbEnvironment().addToCompressorQueue
+                                (location.bin,
+                                 new Key(deletedKey),
+                                 false); // don't wakeup compressor
+
+                        } else {
+
+                            /*
+                             * Apply the log record by updating the in-memory
+                             * tree slot to contain the abort LSN and abort
+                             * Known Deleted flag.  The LN is set to null so
+                             * that it will be fetched later by abort LSN.
+                             *
+                             * Also, the LN must be left null to ensure the
+                             * key in the BIN slot is transactionally correct
+                             * (the key is updated if necessary when the LN is
+                             * fetched).  [#15704]
+                             */
+                            if (info != null) {
+                                info.lnReplaced++;
+                            }
+                            replaced = true;
+                            location.bin.updateNode(location.index,
+                                                    null /*node*/,
+                                                    abortLsn,
+                                                    null /*lnSlotKey*/);
+                            if (abortKnownDeleted) {
+                                location.bin.setKnownDeleted(location.index);
+                            } else {
+                                location.bin.clearKnownDeleted(location.index);
+                            }
+                        }
+
+                        /*
+                         * We must clear the PendingDeleted flag for
+                         * non-deleted entries.  Clear it unconditionally,
+                         * since KnownDeleted will be set above for a deleted
+                         * entry. [#12885]
+                         */
+                        location.bin.clearPendingDeleted(location.index);
+                    }
+
+                } else {
+
+                    /*
+                     * This LN is not in the tree.  Just make a note of it.
+                     */
+                    if (info != null) {
+                        info.lnNotFound++;
+                    }
+                }
+            }
+
+            success = true;
+        } finally {
+
+            if (location.bin != null) {
+                location.bin.releaseLatch();
+            }
+
+            trace(traceLevel, db, TRACE_LN_UNDO, success, lnFromLog,
+                  logLsn, location.bin, found, replaced, false,
+                  location.childLsn, abortLsn, location.index);
+        }
+    }
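+
+    /*
+     * Worked example for the undo() rules above (all LSNs hypothetical): a
+     * txn inserted key K at logLsn 0x5/0x100 with abortLsn == NULL_LSN (the
+     * txn created the record).  If at recovery the slot for K still holds
+     * 0x5/0x100, row 2 of the table applies and the slot is marked deleted:
+     *
+     *   location.bin.setKnownDeletedLeaveTarget(location.index);
+     *   // the key is then queued for the INCompressor
+     *
+     * Had abortLsn been 0x4/0x80 instead, row 1 would apply and the slot
+     * would be rewritten to point at the abort version via updateNode().
+     */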
+
+    /**
+     * Inserts an LN into the tree for recovery redo processing.  In this
+     * case, we know we don't have to lock when checking child LNs for deleted
+     * status (there can be no other thread running on this tree) and we don't
+     * have to log the new entry (it's already in the log).
+     *
+     * @param db
+     * @param location this embodies the parent bin, the index, the key that
+     * represents this entry in the bin.
+     * @param logLsn LSN of the current LN
+     * @return true if the LN was inserted, false if the key already has a
+     * non-deleted entry that cannot be overwritten.
+     */
+    private static boolean insertRecovery(DatabaseImpl db,
+                                          TreeLocation location,
+                                          long logLsn)
+        throws DatabaseException {
+
+        /*
+         * Make a child reference as a candidate for insertion.  The LN is null
+         * to avoid pulling the entire tree into memory.
+         *
+         * Also, the LN must be left null to ensure the key in the BIN slot is
+         * transactionally correct (keys are updated if necessary when the LN
+         * is fetched).  [#15704]
+         */
+        ChildReference newLNRef =
+            new ChildReference(null, location.lnKey, logLsn);
+
+        BIN parentBIN = location.bin;
+        int entryIndex = parentBIN.insertEntry1(newLNRef);
+
+        if ((entryIndex & IN.INSERT_SUCCESS) == 0) {
+
+            /*
+             * Entry may have been a duplicate. Insertion was not successful.
+             */
+            entryIndex &= ~IN.EXACT_MATCH;
+
+            boolean canOverwrite = false;
+            if (parentBIN.isEntryKnownDeleted(entryIndex)) {
+                canOverwrite = true;
+            } else {
+
+                /*
+                 * Read the LN that's in this slot to check for deleted
+                 * status.  No need to lock, since this is recovery.  If
+                 * fetchTarget returns null, a deleted LN was cleaned.
+                 */
+                LN currentLN = (LN) parentBIN.fetchTarget(entryIndex);
+
+                if (currentLN == null || currentLN.isDeleted()) {
+                    canOverwrite = true;
+                }
+
+                /*
+                 * Evict the target again manually, to reduce memory
+                 * consumption while the evictor is not running.
+                 */
+                parentBIN.updateNode(entryIndex, null /*node*/,
+                                     null /*lnSlotKey*/);
+            }
+
+            if (canOverwrite) {
+
+                /*
+                 * Note that the LN must be left null to ensure the key in the
+                 * BIN slot is transactionally correct (keys are updated if
+                 * necessary when the LN is fetched).  [#15704]
+                 */
+                parentBIN.updateEntry(entryIndex, null, logLsn,
+                                      location.lnKey);
+                parentBIN.clearKnownDeleted(entryIndex);
+                location.index = entryIndex;
+                return true;
+            } else {
+                return false;
+            }
+        }
+        location.index = entryIndex & ~IN.INSERT_SUCCESS;
+        return true;
+    }
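+
+    /*
+     * Sketch of the insertEntry1() return convention relied on above (the
+     * bit layout shown is illustrative, not the actual constants): the
+     * result packs the slot index together with status flags, so callers
+     * unpack it along these lines:
+     *
+     *   int result = parentBIN.insertEntry1(newLNRef);
+     *   boolean inserted = (result & IN.INSERT_SUCCESS) != 0;
+     *   int index = result & ~(IN.INSERT_SUCCESS | IN.EXACT_MATCH);
+     */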
+
+    /**
+     * Update utilization info during redo.
+     *
+     * There are cases where we do not count the previous version of an LN as
+     * obsolete when that obsolete LN occurs prior to the recovery interval.
+     * This happens when a later version of the LN is current in the tree
+     * because its parent IN has been flushed non-provisionally after it.  The
+     * old version of the LN is not in the tree so we never encounter it during
+     * recovery and cannot count it as obsolete.  For example:
+     *
+     * 100 LN-A
+     * checkpoint occurred (ckpt begin, flush, ckpt end)
+     * 200 LN-A
+     * 300 BIN parent of 200
+     * 400 IN parent of 300, non-provisional via a sync
+     *
+     * no utilization info is flushed
+     * no checkpoint
+     * crash and recover
+     *
+     * 200 is the current LN-A in the tree.  When we redo 200 we do not count
+     * anything as obsolete because the log and tree LSNs are equal.  100 is
+     * never counted obsolete because it is not in the recovery interval.
+     *
+     * The same thing occurs when a deleted LN is replayed and the old version
+     * is not found in the tree because it was compressed and the IN was
+     * flushed non-provisionally.
+     *
+     * In these cases we may be able to count the abortLsn as obsolete but that
+     * would not work for non-transactional entries.
+     */
+    private void redoUtilizationInfo(long logLsn,
+                                     long treeLsn,
+                                     long commitLsn,
+                                     long abortLsn,
+                                     boolean abortKnownDeleted,
+                                     int logEntrySize,
+                                     byte[] key,
+                                     LN ln,
+                                     DatabaseImpl db,
+                                     TxnNodeId txnNodeId,
+                                     Set<TxnNodeId> countedAbortLsnNodes)
+        throws DatabaseException {
+
+        /*
+         * If the LN is marked deleted and its LSN follows the FileSummaryLN
+         * for its file, count it as obsolete.
+         *
+         * Inexact counting is used to save resources because the cleaner knows
+         * that all deleted LNs are obsolete.
+         */
+        if (ln.isDeleted()) {
+            tracker.countObsoleteIfUncounted
+                (logLsn, logLsn, null, logEntrySize, db.getId(),
+                 false /*countExact*/);
+        }
+
+        /* Was the LN found in the tree? */
+        if (treeLsn != DbLsn.NULL_LSN) {
+            int cmpLogLsnToTreeLsn = DbLsn.compareTo(logLsn, treeLsn);
+
+            /*
+             * If the oldLsn and newLsn differ and the newLsn follows the
+             * FileSummaryLN for the file of the oldLsn, count the oldLsn as
+             * obsolete.
+             *
+             * Use exact counting only if the transaction is committed.  A
+             * prepared or resurrected transaction may be committed or aborted
+             * later on.  We perform obsolete counting as if a commit will
+             * occur to ensure cleaning will occur, but we count inexact to
+             * prevent LogFileNotFound in case an abort occurs.  [#17022]
+             */
+            if (cmpLogLsnToTreeLsn != 0) {
+                long newLsn = (cmpLogLsnToTreeLsn < 0) ? treeLsn : logLsn;
+                long oldLsn = (cmpLogLsnToTreeLsn > 0) ? treeLsn : logLsn;
+                int oldSize = (oldLsn == logLsn) ? logEntrySize : 0;
+                tracker.countObsoleteIfUncounted
+                    (oldLsn, newLsn, null,
+                     tracker.fetchLNSize(oldSize, oldLsn), db.getId(),
+                     commitLsn != DbLsn.NULL_LSN /*countExact*/);
+            }
+
+            /*
+             * If the logLsn is equal to or precedes the treeLsn and the entry
+             * has an abortLsn that was not previously deleted, consider the
+             * set of entries for the given node.  If the logLsn is the first
+             * in the set that follows the FileSummaryLN of the abortLsn, count
+             * the abortLsn as obsolete.
+             */
+            if (cmpLogLsnToTreeLsn <= 0 &&
+                abortLsn != DbLsn.NULL_LSN &&
+                !abortKnownDeleted &&
+                !countedAbortLsnNodes.contains(txnNodeId)) {
+
+                /*
+                 * We have not counted this abortLsn yet.  Count abortLsn as
+                 * obsolete if commitLsn follows the FileSummaryLN of the
+                 * abortLsn, since the abortLsn is counted obsolete at commit.
+                 * The abortLsn is only an approximation of the prior LSN, so
+                 * use inexact counting.  Since this is relatively rare, a zero
+                 * entry size (use average size) is acceptable.
+                 *
+                 * Note that commitLsn may be null if this is a prepared or
+                 * resurrected HA Txn. [#16375]
+                 */
+                if (commitLsn != DbLsn.NULL_LSN) {
+                    tracker.countObsoleteIfUncounted
+                            (abortLsn, commitLsn, null, 0, db.getId(),
+                             false /*countExact*/);
+                    /* Don't count this abortLsn (this node) again. */
+                    countedAbortLsnNodes.add(txnNodeId);
+                }
+            }
+        }
+    }
+
+    /**
+     * Update utilization info during recovery undo (not abort undo).
+     */
+    private void undoUtilizationInfo(LN ln,
+                                     DatabaseImpl db,
+                                     long logLsn,
+                                     long abortLsn,
+                                     boolean abortKnownDeleted,
+                                     int logEntrySize,
+                                     TxnNodeId txnNodeId,
+                                     Map<TxnNodeId,Long> countedFileSummaries,
+                                     Set<TxnNodeId> countedAbortLsnNodes) {
+        /*
+         * Count the logLsn as obsolete if it follows the FileSummaryLN for the
+         * file of its Lsn.
+         */
+        boolean counted = tracker.countObsoleteIfUncounted
+            (logLsn, logLsn, null, logEntrySize, db.getId(),
+             true /*countExact*/);
+
+        /*
+         * Consider the latest LSN for the given node that precedes the
+         * FileSummaryLN for the file of its LSN.  Count this LSN as obsolete
+         * if it is not a deleted LN.
+         */
+        if (!counted) {
+            Long logFileNum = Long.valueOf(DbLsn.getFileNumber(logLsn));
+            Long countedFile = countedFileSummaries.get(txnNodeId);
+            if (countedFile == null ||
+                countedFile.longValue() > logFileNum.longValue()) {
+
+                /*
+                 * We encountered a new file number and the FsLsn follows the
+                 * logLsn.
+                 */
+                if (!ln.isDeleted()) {
+                    tracker.countObsoleteUnconditional
+                        (logLsn, null, logEntrySize, db.getId(),
+                         true /*countExact*/);
+                }
+                /* Don't count this file again. */
+                countedFileSummaries.put(txnNodeId, logFileNum);
+            }
+        }
+    }
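+
+    /*
+     * Sketch of the countedFileSummaries bookkeeping above (values
+     * hypothetical): if node T was last counted against file 12 and we now
+     * undo an entry of T whose logLsn is in file 9, then countedFile (12) >
+     * logFileNum (9), so the LSN is counted obsolete once (when the LN is
+     * not deleted) and the map is updated with
+     * countedFileSummaries.put(T, 9L), preventing a recount for that file.
+     */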
+
+    /**
+     * Remove all temporary databases that were encountered as MapLNs during
+     * recovery undo/redo.  A temp DB needs to be removed if it was not closed
+     * prior to a crash (closing a temp DB removes it).  We ensure that the
+     * MapLN for every open temp DB is logged each checkpoint interval.
+     */
+    private void removeTempDbs()
+        throws DatabaseException {
+
+        DbTree dbMapTree = env.getDbTree();
+        BasicLocker locker =
+            BasicLocker.createBasicLocker(env, false /*noWait*/,
+                                          true /*noAPIReadLock*/);
+        boolean operationOk = false;
+        try {
+            Iterator<DatabaseId> removeDbs = tempDbIds.iterator();
+            while (removeDbs.hasNext()) {
+                DatabaseId dbId = removeDbs.next();
+                DatabaseImpl db = dbMapTree.getDb(dbId);
+                dbMapTree.releaseDb(db); // Decrement use count.
+                if (db != null) {
+                    assert db.isTemporary();
+                    if (!db.isDeleted()) {
+                        env.getDbTree().dbRemove(locker,
+                                                 db.getName(),
+                                                 db.getId());
+                    }
+                }
+            }
+            operationOk = true;
+        } catch (Error e) {
+            env.invalidate(e);
+            throw e;
+        } finally {
+            locker.operationEnd(operationOk);
+        }
+    }
+
+    /**
+     * Concoct a header for the recovery pass trace info.
+     */
+    private String passStartHeader(int passNum) {
+        return "Recovery Pass " + passNum + " start: ";
+    }
+
+    /**
+     * Concoct a header for the recovery pass trace info.
+     */
+    private String passEndHeader(int passNum, long start, long end) {
+        return "Recovery Pass " + passNum + " end (" +
+            (end - start) + "): ";
+    }
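+
+    /*
+     * For example, passEndHeader(3, start, end) yields a prefix such as
+     * "Recovery Pass 3 end (250): ", where 250 is end - start in whatever
+     * unit the caller measured (typically milliseconds).
+     */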
+
+    /**
+     * Send trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to decide whether to send this message; we don't even want
+     * to construct the message if the level is not enabled. This is used to
+     * construct verbose trace messages for individual log entry processing.
+     */
+    private static void trace(Level level,
+                              DatabaseImpl database,
+                              String debugType,
+                              boolean success,
+                              Node node,
+                              long logLsn,
+                              IN parent,
+                              boolean found,
+                              boolean replaced,
+                              boolean inserted,
+                              long replacedLsn,
+                              long abortLsn,
+                              int index) {
+        Logger logger = database.getDbEnvironment().getLogger();
+        Level useLevel = level;
+        if (!success) {
+            useLevel = Level.SEVERE;
+        }
+        if (logger.isLoggable(useLevel)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(debugType);
+            sb.append(" success=").append(success);
+            sb.append(" node=");
+            sb.append(node.getNodeId());
+            sb.append(" lsn=");
+            sb.append(DbLsn.getNoFormatString(logLsn));
+            if (parent != null) {
+                sb.append(" parent=").append(parent.getNodeId());
+            }
+            sb.append(" found=");
+            sb.append(found);
+            sb.append(" replaced=");
+            sb.append(replaced);
+            sb.append(" inserted=");
+            sb.append(inserted);
+            if (replacedLsn != DbLsn.NULL_LSN) {
+                sb.append(" replacedLsn=");
+                sb.append(DbLsn.getNoFormatString(replacedLsn));
+            }
+            if (abortLsn != DbLsn.NULL_LSN) {
+                sb.append(" abortLsn=");
+                sb.append(DbLsn.getNoFormatString(abortLsn));
+            }
+            sb.append(" index=").append(index);
+            logger.log(useLevel, sb.toString());
+        }
+    }
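+
+    /*
+     * A resulting trace line looks roughly like this (values hypothetical,
+     * and the LSN rendering depends on DbLsn.getNoFormatString):
+     *
+     *   <debugType> success=true node=1234 lsn=0x5/0x1a0 parent=567
+     *   found=true replaced=false inserted=true index=3
+     */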
+
+    /**
+     * Send trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to decide whether to send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    private void traceINDeleteReplay(long nodeId,
+                                     long logLsn,
+                                     boolean found,
+                                     boolean deleted,
+                                     int index,
+                                     boolean isDuplicate) {
+        Logger logger = env.getLogger();
+        if (logger.isLoggable(detailedTraceLevel)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append((isDuplicate) ?
+                      TRACE_IN_DUPDEL_REPLAY :
+                      TRACE_IN_DEL_REPLAY);
+            sb.append(" node=").append(nodeId);
+            sb.append(" lsn=").append(DbLsn.getNoFormatString(logLsn));
+            sb.append(" found=").append(found);
+            sb.append(" deleted=").append(deleted);
+            sb.append(" index=").append(index);
+            logger.log(detailedTraceLevel, sb.toString());
+        }
+    }
+
+    private void traceAndThrowException(long badLsn,
+                                        String method,
+                                        Exception originalException)
+        throws DatabaseException {
+        String badLsnString = DbLsn.getNoFormatString(badLsn);
+        Tracer.trace(env,
+                     "RecoveryManager",
+                     method,
+                     "last LSN = " + badLsnString,
+                     originalException);
+        throw new DatabaseException("last LSN=" + badLsnString,
+                                    originalException);
+    }
+
+    /**
+     * Log trace information about root deletions, called by INCompressor and
+     * recovery.
+     */
+    public static void traceRootDeletion(Level level, DatabaseImpl database) {
+        Logger logger = database.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_ROOT_DELETE);
+            sb.append(" Dbid=").append(database.getId());
+            logger.log(level, sb.toString());
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/tree/BIN.java b/src/com/sleepycat/je/tree/BIN.java
new file mode 100644
index 0000000000000000000000000000000000000000..8cba4aeb61b9435d740f170baafb5a3495ff41f9
--- /dev/null
+++ b/src/com/sleepycat/je/tree/BIN.java
@@ -0,0 +1,1281 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BIN.java,v 1.215.2.5 2010/03/26 13:23:55 mark Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.Cleaner;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.Provisional;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TinyHashSet;
+
+/**
+ * A BIN represents a Bottom Internal Node in the JE tree.
+ */
+public class BIN extends IN implements Loggable {
+
+    private static final String BEGIN_TAG = "<bin>";
+    private static final String END_TAG = "</bin>";
+
+    /*
+     * The set of cursors that are currently referring to this BIN.
+     */
+    private TinyHashSet<CursorImpl> cursorSet;
+
+    /*
+     * Support for logging BIN deltas. (Partial BIN logging)
+     */
+
+    /* Location of last delta, for cleaning. */
+    private long lastDeltaVersion = DbLsn.NULL_LSN;
+    private int numDeltasSinceLastFull; // num deltas logged
+    private boolean prohibitNextDelta;  // disallow delta on next log
+
+    public BIN() {
+        cursorSet = new TinyHashSet<CursorImpl>();
+        numDeltasSinceLastFull = 0;
+        prohibitNextDelta = false;
+    }
+
+    public BIN(DatabaseImpl db,
+               byte[] identifierKey,
+               int maxEntriesPerNode,
+               int level) {
+        super(db, identifierKey, maxEntriesPerNode, level);
+
+        cursorSet = new TinyHashSet<CursorImpl>();
+        numDeltasSinceLastFull = 0;
+        prohibitNextDelta = false;
+    }
+
+    /**
+     * Create a holder object that encapsulates information about this BIN for
+     * the INCompressor.
+     */
+    public BINReference createReference() {
+        return new BINReference(getNodeId(), getDatabase().getId(),
+                                getIdentifierKey());
+    }
+
+    /**
+     * Create a new BIN.  Needed because we can't call newInstance()
+     * without getting a zero node ID.
+     */
+    @Override
+    protected IN createNewInstance(byte[] identifierKey,
+                                   int maxEntries,
+                                   int level) {
+        return new BIN(getDatabase(), identifierKey, maxEntries, level);
+    }
+
+    /*
+     * Return whether the shared latch for this kind of node should be of the
+     * "always exclusive" variety.  Presently, only IN's are actually latched
+     * shared.  BINs, DINs, and DBINs are all latched exclusive only.
+     */
+    @Override
+    boolean isAlwaysLatchedExclusively() {
+        return true;
+    }
+
+    @Override
+    boolean isBottomMostNode() {
+        return !getDatabase().getSortedDuplicates();
+    }
+
+    /**
+     * Get the key (dupe or identifier) in child that is used to locate it in
+     * 'this' node.  For BIN's, the child node has to be a DIN so we use the
+     * Dup Key to cross the main-tree/dupe-tree boundary.
+     */
+    @Override
+    public byte[] getChildKey(IN child)
+        throws DatabaseException {
+
+        return child.getDupKey();
+    }
+
+    /**
+     * @return the log entry type to use for bin delta log entries.
+     */
+    LogEntryType getBINDeltaType() {
+        return LogEntryType.LOG_BIN_DELTA;
+    }
+
+    /**
+     * @return location of the last logged delta version, or
+     * DbLsn.NULL_LSN if never set.
+     */
+    public long getLastDeltaVersion() {
+        return lastDeltaVersion;
+    }
+
+    /**
+     * If cleaned or compressed, must log full version.
+     */
+    @Override
+    public void setProhibitNextDelta() {
+        prohibitNextDelta = true;
+    }
+
+    /*
+     * If this search can go further, return the child. If it can't, and you
+     * are a possible new parent to this child, return this IN. If the search
+     * can't go further and this IN can't be a parent to this child, return
+     * null.
+     */
+    @Override
+    protected void descendOnParentSearch(SearchResult result,
+                                         boolean targetContainsDuplicates,
+                                         boolean targetIsRoot,
+                                         long targetNodeId,
+                                         Node child,
+                                         boolean requireExactMatch)
+        throws DatabaseException {
+
+        if (child.canBeAncestor(targetContainsDuplicates)) {
+            if (targetContainsDuplicates && targetIsRoot) {
+
+                /*
+                 * Don't go further -- the target is a root of a dup tree, so
+                 * this BIN will have to be the parent.
+                 */
+                long childNid = child.getNodeId();
+                ((IN) child).releaseLatch();
+
+                result.keepSearching = false;           // stop searching
+
+                /* Set exactParentFound iff this is an exact find. */
+                result.exactParentFound = (childNid == targetNodeId);
+
+                /*
+                 * Return a reference to this node unless we need an exact
+                 * match and this isn't exact.
+                 */
+                if (requireExactMatch && !result.exactParentFound) {
+                    result.parent = null;
+                    releaseLatch();
+                } else {
+                    result.parent = this;
+                }
+
+            } else {
+                /*
+                 * Go further down into the dup tree.
+                 */
+                releaseLatch();
+                result.parent = (IN) child;
+            }
+        } else {
+
+            /*
+             * Our search ends, we didn't find it. If we need an exact match,
+             * give up, if we only need a potential match, keep this node
+             * latched and return it.
+             */
+            result.exactParentFound = false;
+            result.keepSearching = false;
+            if (!requireExactMatch && targetContainsDuplicates) {
+                result.parent = this;
+            } else {
+                releaseLatch();
+                result.parent = null;
+            }
+        }
+    }
+
+    /*
+     * A BIN can be the ancestor of an internal node of the duplicate tree. It
+     * can't be the parent of an IN or another BIN.
+     */
+    @Override
+    protected boolean canBeAncestor(boolean targetContainsDuplicates) {
+        /* True if the target is a DIN or DBIN */
+        return targetContainsDuplicates;
+    }
+
+    /**
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    @Override
+    boolean isEvictionProhibited() {
+        return (nCursors() > 0);
+    }
+
+    /**
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    @Override
+    boolean hasPinnedChildren() {
+
+        DatabaseImpl db = getDatabase();
+
+        /*
+         * For the mapping DB, if any MapLN is resident we cannot evict this
+         * BIN.  If a MapLN was not previously stripped, then the DB may be
+         * open.  [#13415]
+         */
+        if (db.getId().equals(DbTree.ID_DB_ID)) {
+            return hasResidentChildren();
+
+        /*
+         * For other DBs, if there are no duplicates allowed we can always
+         * evict this BIN because its children are limited to LNs.  When
+         * logging the BIN, any dirty LNs will be logged and non-dirty LNs can
+         * be discarded.
+         */
+        } else if (!db.getSortedDuplicates()) {
+            return false;
+
+        /*
+         * If duplicates are allowed, we disallow eviction of this BIN if it
+         * has any non-LN (DIN) children.
+         */
+        } else {
+            for (int i = 0; i < getNEntries(); i++) {
+                Node node = getTarget(i);
+                if (node != null) {
+                    if (!(node instanceof LN)) {
+                        return true;
+                    }
+                }
+            }
+            return false;
+        }
+    }
+
+    /**
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    @Override
+    int getChildEvictionType() {
+
+        Cleaner cleaner = getDatabase().getDbEnvironment().getCleaner();
+
+        for (int i = 0; i < getNEntries(); i++) {
+            Node node = getTarget(i);
+            if (node != null) {
+                if (node instanceof LN) {
+                    LN ln = (LN) node;
+
+                    /*
+                     * If the LN is not evictable, we may neither strip the LN
+                     * nor evict the node.  isEvictableInexact is used here as
+                     * a fast check, to avoid the overhead of acquiring a
+                     * handle lock while selecting an IN for eviction.  See
+                     * evictInternal, which will call LN.isEvictable to acquire
+                     * a handle lock and guarantee that another thread cannot
+                     * open the MapLN.  [#13415]
+                     */
+                    if (!ln.isEvictableInexact()) {
+                        return MAY_NOT_EVICT;
+                    }
+
+                    /*
+                     * If the cleaner allows eviction, then this LN may be
+                     * stripped.
+                     */
+                    if (cleaner.isEvictable(this, i)) {
+                        return MAY_EVICT_LNS;
+                    }
+                } else {
+                    return MAY_NOT_EVICT;
+                }
+            }
+        }
+        return MAY_EVICT_NODE;
+    }
+
+    /**
+     * Indicates whether entry 0's key is "special" in that it always compares
+     * less than any other key.  BIN's don't have the special key, but IN's do.
+     */
+    @Override
+    boolean entryZeroKeyComparesLow() {
+        return false;
+    }
+
+    /**
+     * Mark this entry as deleted, using the delete flag. Only BINS may do
+     * this.
+     *
+     * @param index indicates target entry
+     */
+    @Override
+    public void setKnownDeleted(int index) {
+
+        /*
+         * The target is cleared to save memory, since a known deleted entry
+         * will never be fetched.  The migrate flag is also cleared since
+         * migration is never needed for known deleted entries either.
+         */
+        super.setKnownDeleted(index);
+
+        /*
+         * We know it's an LN because we never call setKnownDeleted for
+         * an IN.
+         */
+        LN oldLN = (LN) getTarget(index);
+        updateMemorySize(oldLN, null /* newNode */);
+        if (oldLN != null) {
+            oldLN.releaseMemoryBudget();
+        }
+        setMigrate(index, false);
+        super.setTarget(index, null);
+        setDirty(true);
+    }
+
+    /**
+     * Mark this entry as deleted, using the delete flag. Only BINS may do
+     * this.  Don't null the target field.
+     *
+     * This is used so that an LN can still be locked by the compressor even if
+     * the entry is knownDeleted.  See BIN.compress.
+     *
+     * @param index indicates target entry
+     */
+    public void setKnownDeletedLeaveTarget(int index) {
+
+        /*
+         * The migrate flag is cleared since migration is never needed for
+         * known deleted entries.
+         */
+        setMigrate(index, false);
+        super.setKnownDeleted(index);
+        setDirty(true);
+    }
+
+    /**
+     * Clear the known deleted flag. Only BINS may do this.
+     * @param index indicates target entry
+     */
+    @Override
+    public void clearKnownDeleted(int index) {
+        super.clearKnownDeleted(index);
+        setDirty(true);
+    }
+
+    /* Called once at environment startup by MemoryBudget */
+    public static long computeOverhead(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+         * Overhead consists of all the fields in this class plus the
+         * entry arrays in the IN class.
+         */
+        return MemoryBudget.BIN_FIXED_OVERHEAD +
+            IN.computeArraysOverhead(configManager);
+    }
+
+    @Override
+    protected long getMemoryOverhead(MemoryBudget mb) {
+        return mb.getBINOverhead();
+    }
+
+    /**
+     * Returns the treeAdmin memory used by objects referenced from this BIN.
+     * Specifically, this refers to the DbFileSummaryMap held by
+     * MapLNs.
+     */
+    @Override
+    public long getTreeAdminMemorySize() {
+
+        if (getDatabase().getId().equals(DbTree.ID_DB_ID)) {
+            long treeAdminMem = 0;
+            for (int i = 0; i < getMaxEntries(); i++) {
+                Node n = getTarget(i);
+                if (n != null) {
+                    MapLN mapLN = (MapLN) n;
+                    treeAdminMem += mapLN.getDatabase().getTreeAdminMemory();
+                }
+            }
+            return treeAdminMem;
+        } else {
+            return 0;
+        }
+    }
+
+    /*
+     * Cursors
+     */
+
+    /* public for the test suite. */
+    public Set<CursorImpl> getCursorSet() {
+        return cursorSet.copy();
+    }
+
+    /**
+     * Register a cursor with this BIN.  Caller has this BIN already latched.
+     * @param cursor Cursor to register.
+     */
+    public void addCursor(CursorImpl cursor) {
+        assert isLatchOwnerForWrite();
+        cursorSet.add(cursor);
+    }
+
+    /**
+     * Unregister a cursor from this BIN.  Caller has this BIN already
+     * latched.
+     *
+     * @param cursor Cursor to unregister.
+     */
+    public void removeCursor(CursorImpl cursor) {
+        assert isLatchOwnerForWrite();
+        cursorSet.remove(cursor);
+    }
+
+    /**
+     * @return the number of cursors currently referring to this BIN.
+     */
+    public int nCursors() {
+        return cursorSet.size();
+    }
+
+    /**
+     * The following five methods access the correct fields in a cursor
+     * depending on whether "this" is a BIN or DBIN.  For BIN's, the
+     * CursorImpl.index and CursorImpl.bin fields should be used.  For DBIN's,
+     * the CursorImpl.dupIndex and CursorImpl.dupBin fields should be used.
+     */
+    BIN getCursorBIN(CursorImpl cursor) {
+        return cursor.getBIN();
+    }
+
+    BIN getCursorBINToBeRemoved(CursorImpl cursor) {
+        return cursor.getBINToBeRemoved();
+    }
+
+    int getCursorIndex(CursorImpl cursor) {
+        return cursor.getIndex();
+    }
+
+    void setCursorBIN(CursorImpl cursor, BIN bin) {
+        cursor.setBIN(bin);
+    }
+
+    void setCursorIndex(CursorImpl cursor, int index) {
+        cursor.setIndex(index);
+    }
+
+    /**
+     * Called when we know we are about to split on behalf of a key that is the
+     * minimum (leftSide) or maximum (!leftSide) of this node.  This is
+     * achieved by just forcing the split to occur either one element in from
+     * the left or the right (i.e. splitIndex is 1 or nEntries - 1).
+     */
+    @Override
+    void splitSpecial(IN parent,
+                      int parentIndex,
+                      int maxEntriesPerNode,
+                      byte[] key,
+                      boolean leftSide,
+                      CacheMode cacheMode)
+        throws DatabaseException {
+
+        int index = findEntry(key, true, false);
+        int nEntries = getNEntries();
+        boolean exact = (index & IN.EXACT_MATCH) != 0;
+        index &= ~IN.EXACT_MATCH;
+        if (leftSide &&
+            index < 0) {
+            splitInternal(parent, parentIndex, maxEntriesPerNode,
+                          1, cacheMode);
+        } else if (!leftSide &&
+                   !exact &&
+                   index == (nEntries - 1)) {
+            splitInternal(parent, parentIndex, maxEntriesPerNode,
+                          nEntries - 1, cacheMode);
+        } else {
+            split(parent, parentIndex, maxEntriesPerNode, cacheMode);
+        }
+    }
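+
+    /*
+     * Example (hypothetical): appending a key larger than every key in a
+     * full BIN makes findEntry return nEntries - 1 with no exact match, so
+     * the node splits at nEntries - 1 and the new right sibling starts out
+     * nearly empty, instead of splitting down the middle as split() does.
+     */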
+
+    /**
+     * Adjust any cursors that are referring to this BIN.  This method is
+     * called during a split operation.  "this" is the BIN being split.
+     * newSibling is the new BIN into which the entries from "this" between
+     * newSiblingLow and newSiblingHigh have been copied.
+     *
+     * @param newSibling - the newSibling into which "this" has been split.
+     * @param newSiblingLow the low entry of "this" that was moved into
+     * newSibling.
+     * @param newSiblingHigh the high entry of "this" that was moved into
+     * newSibling.
+     */
+    @Override
+    void adjustCursors(IN newSibling,
+                       int newSiblingLow,
+                       int newSiblingHigh) {
+        assert newSibling.isLatchOwnerForWrite();
+        assert this.isLatchOwnerForWrite();
+        int adjustmentDelta = (newSiblingHigh - newSiblingLow);
+        Iterator<CursorImpl> iter = cursorSet.iterator();
+        while (iter.hasNext()) {
+            CursorImpl cursor = iter.next();
+            if (getCursorBINToBeRemoved(cursor) == this) {
+
+                /*
+                 * This BIN will be removed from the cursor by CursorImpl
+                 * following advance to next BIN; ignore it.
+                 */
+                continue;
+            }
+            int cIdx = getCursorIndex(cursor);
+            BIN cBin = getCursorBIN(cursor);
+            assert cBin == this :
+                "nodeId=" + getNodeId() +
+                " cursor=" + cursor.dumpToString(true);
+            assert newSibling instanceof BIN;
+
+            /*
+             * There are four cases to consider for cursor adjustments,
+             * depending on (1) how the existing node gets split, and (2) where
+             * the cursor points to currently.  In cases 1 and 2, the id key of
+             * the node being split is to the right of the split index so the
+             * new sibling gets the node entries to the left of that index.
+             * This is indicated by "new sibling" to the left of the vertical
+             * split line below.  The right side of the node contains entries
+             * that will remain in the existing node (although they've been
+             * shifted to the left).  The vertical bar (^) indicates where the
+             * cursor currently points.
+             *
+             * case 1:
+             *
+             *   We need to set the cursor's "bin" reference to point at the
+             *   new sibling, but we don't need to adjust its index since that
+             *   continues to be correct post-split.
+             *
+             *   +=======================================+
+             *   |  new sibling        |  existing node  |
+             *   +=======================================+
+             *         cursor ^
+             *
+             * case 2:
+             *
+             *   We only need to adjust the cursor's index since it continues
+             *   to point to the current BIN post-split.
+             *
+             *   +=======================================+
+             *   |  new sibling        |  existing node  |
+             *   +=======================================+
+             *                              cursor ^
+             *
+             * case 3:
+             *
+             *   Do nothing.  The cursor continues to point at the correct BIN
+             *   and index.
+             *
+             *   +=======================================+
+             *   |  existing Node        |  new sibling  |
+             *   +=======================================+
+             *         cursor ^
+             *
+             * case 4:
+             *
+             *   Adjust the "bin" pointer to point at the new sibling BIN and
+             *   also adjust the index.
+             *
+             *   +=======================================+
+             *   |  existing Node        |  new sibling  |
+             *   +=======================================+
+             *                                 cursor ^
+             */
+            BIN ns = (BIN) newSibling;
+            if (newSiblingLow == 0) {
+                if (cIdx < newSiblingHigh) {
+                    /* case 1 */
+                    setCursorBIN(cursor, ns);
+                    iter.remove();
+                    ns.addCursor(cursor);
+                } else {
+                    /* case 2 */
+                    setCursorIndex(cursor, cIdx - adjustmentDelta);
+                }
+            } else {
+                if (cIdx >= newSiblingLow) {
+                    /* case 4 */
+                    setCursorIndex(cursor, cIdx - newSiblingLow);
+                    setCursorBIN(cursor, ns);
+                    iter.remove();
+                    ns.addCursor(cursor);
+                }
+            }
+        }
+    }
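+
+    /*
+     * Numeric sketch of the case arithmetic above (hypothetical split):
+     * entries [0..9] are split with newSiblingLow == 0 and newSiblingHigh ==
+     * 4, so adjustmentDelta == 4.  A cursor at index 2 moves to the new
+     * sibling with its index unchanged (case 1); a cursor at index 6 stays
+     * on this BIN and becomes index 6 - 4 == 2 (case 2).
+     */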
+
+    /**
+     * For each cursor in this BIN's cursor set, ensure that the cursor is
+     * actually referring to this BIN.
+     */
+    public void verifyCursors() {
+        if (cursorSet != null) {
+            Iterator<CursorImpl> iter = cursorSet.iterator();
+            while (iter.hasNext()) {
+                CursorImpl cursor = iter.next();
+                if (getCursorBINToBeRemoved(cursor) != this) {
+                    BIN cBin = getCursorBIN(cursor);
+                    assert cBin == this;
+                }
+            }
+        }
+    }
+
+    /**
+     * Adjust cursors referring to this BIN following an insert.
+     *
+     * @param insertIndex - The index of the new entry.
+     */
+    @Override
+    void adjustCursorsForInsert(int insertIndex) {
+        assert this.isLatchOwnerForWrite();
+        /*
+         * cursorSet may be null if this is being created through
+         * createFromLog().
+         */
+        if (cursorSet != null) {
+            Iterator<CursorImpl> iter = cursorSet.iterator();
+            while (iter.hasNext()) {
+                CursorImpl cursor = iter.next();
+                if (getCursorBINToBeRemoved(cursor) != this) {
+                    int cIdx = getCursorIndex(cursor);
+                    if (insertIndex <= cIdx) {
+                        setCursorIndex(cursor, cIdx + 1);
+                    }
+                }
+            }
+        }
+    }
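+
+    /*
+     * For example, inserting a new entry at index 3 of this BIN shifts a
+     * cursor positioned at index 5 to index 6, while a cursor at index 2 is
+     * left untouched.
+     */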
+
+    /**
+     * Adjust cursors referring to the given binIndex in this BIN following a
+     * mutation of the entry from an LN to a DIN.  The entry was moved from a
+     * BIN to a newly created DBIN so each cursor must be added to the new
+     * DBIN.
+     *
+     * @param binIndex - The index of the DIN (previously LN) entry in the BIN.
+     *
+     * @param dupBin - The DBIN into which the LN entry was moved.
+     *
+     * @param dupBinIndex - The index of the moved LN entry in the DBIN.
+     *
+     * @param excludeCursor - The cursor being used for insertion and that
+     * should not be updated.
+     */
+    void adjustCursorsForMutation(int binIndex,
+                                  DBIN dupBin,
+                                  int dupBinIndex,
+                                  CursorImpl excludeCursor) {
+        assert this.isLatchOwnerForWrite();
+        /*
+         * cursorSet may be null if this is being created through
+         * createFromLog().
+         */
+        if (cursorSet != null) {
+            Iterator<CursorImpl> iter = cursorSet.iterator();
+            while (iter.hasNext()) {
+                CursorImpl cursor = iter.next();
+                if (getCursorBINToBeRemoved(cursor) != this &&
+                    cursor != excludeCursor &&
+                    cursor.getIndex() == binIndex) {
+                    assert cursor.getDupBIN() == null;
+                    cursor.addCursor(dupBin);
+                    cursor.updateDBin(dupBin, dupBinIndex);
+                }
+            }
+        }
+    }
+
+    /**
+     * Compress this BIN by removing any entries that are deleted.  Deleted
+     * entries are those whose LNs are marked deleted or whose knownDeleted
+     * flag is set.  Caller is responsible for latching and unlatching this
+     * node.
+     *
+     * @param binRef is used to determine the set of keys to be checked for
+     * deletedness, or is null to check all keys.
+     *
+     * @param canFetch if false, don't fetch any non-resident children. We
+     * don't want some callers of compress, such as the evictor, to fault in
+     * other nodes.
+     *
+     * @return true if we had to requeue the entry because we were unable to
+     * get locks, false if all entries were processed and therefore any
+     * remaining deleted keys in the BINReference must now be in some other BIN
+     * because of a split.
+     */
+    @Override
+    public boolean compress(BINReference binRef,
+                            boolean canFetch,
+                            LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        boolean ret = false;
+        boolean setNewIdKey = false;
+        boolean anyLocksDenied = false;
+        DatabaseImpl db = getDatabase();
+        EnvironmentImpl envImpl = db.getDbEnvironment();
+        BasicLocker lockingTxn = BasicLocker.createBasicLocker(envImpl);
+
+        try {
+            for (int i = 0; i < getNEntries(); i++) {
+
+                /*
+                 * We have to be able to lock the LN before we can compress the
+                 * entry.  If we can't, skip over it.
+                 *
+                 * We must lock the LN even if isKnownDeleted is true, because
+                 * locks protect the aborts. (Aborts may execute multiple
+                 * operations, where each operation latches and unlatches. It's
+                 * the LN lock that protects the integrity of the whole
+                 * multi-step process.)
+                 *
+                 * For example, during abort, there may be cases where we have
+                 * deleted and then added an LN during the same txn.  This
+                 * means that to undo/abort it, we first delete the LN (leaving
+                 * knownDeleted set), and then add it back into the tree.  We
+                 * want to make sure the entry is in the BIN when we do the
+                 * insert back in.
+                 */
+                boolean deleteEntry = false;
+                Node n = null;
+
+                if (binRef == null ||
+                    isEntryPendingDeleted(i) ||
+                    isEntryKnownDeleted(i) ||
+                    binRef.hasDeletedKey(new Key(getKey(i)))) {
+
+                    if (canFetch) {
+                        if (db.isDeferredWriteMode() &&
+                            getLsn(i) == DbLsn.NULL_LSN) {
+                            /* Null LSNs are ok in DW. [#15588] */
+                            n = getTarget(i);
+                        } else {
+                            n = fetchTarget(i);
+                        }
+                    } else {
+                        n = getTarget(i);
+                        if (n == null) {
+                            /* Punt, we don't know the state of this child. */
+                            continue;
+                        }
+                    }
+
+                    if (n == null) {
+                        /* Cleaner deleted the log file.  Compress this LN. */
+                        deleteEntry = true;
+                    } else if (isEntryKnownDeleted(i)) {
+                        LockResult lockRet = lockingTxn.nonBlockingLock
+                            (n.getNodeId(), LockType.READ, db);
+                        if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+                            anyLocksDenied = true;
+                            continue;
+                        }
+
+                        deleteEntry = true;
+                    } else {
+                        if (!n.containsDuplicates()) {
+                            LN ln = (LN) n;
+                            LockResult lockRet = lockingTxn.nonBlockingLock
+                                (ln.getNodeId(), LockType.READ, db);
+                            if (lockRet.getLockGrant() ==
+                                LockGrantType.DENIED) {
+                                anyLocksDenied = true;
+                                continue;
+                            }
+
+                            if (ln.isDeleted()) {
+                                deleteEntry = true;
+                            }
+                        }
+                    }
+
+                    /* Remove key from BINReference in case we requeue it. */
+                    if (binRef != null) {
+                        binRef.removeDeletedKey(new Key(getKey(i)));
+                    }
+                }
+
+                /* At this point we know whether this entry can be deleted. */
+                if (deleteEntry) {
+                    boolean entryIsIdentifierKey = Key.compareKeys
+                        (getKey(i), getIdentifierKey(),
+                         getKeyComparator()) == 0;
+                    if (entryIsIdentifierKey) {
+
+                        /*
+                         * We're about to remove the entry with the idKey so
+                         * the node will need a new idkey.
+                         */
+                        setNewIdKey = true;
+                    }
+
+                    if (db.isDeferredWriteMode() &&
+                        n instanceof LN) {
+                        LN ln = (LN) n;
+                        long lsn = getLsn(i);
+                        if (ln.isDirty() && lsn != DbLsn.NULL_LSN) {
+                            if (db.isTemporary()) {
+
+                                /*
+                                 * When a previously logged LN in a temporary
+                                 * DB is dirty, we can count the LSN of the
+                                 * last logged LN as obsolete without logging.
+                                 * There is no requirement for the dirty
+                                 * deleted LN to be durable past recovery.
+                                 * There is no danger of the last logged LN
+                                 * being accessed again (after log cleaning,
+                                 * for example), since temporary DBs do not
+                                 * survive recovery.
+                                 */
+                                if (localTracker != null) {
+                                    localTracker.countObsoleteNode
+                                        (lsn, ln.getLogType(),
+                                         ln.getLastLoggedSize(), db);
+                                }
+                            } else {
+
+                                /*
+                                 * When a previously logged deferred-write LN
+                                 * is dirty, we log the dirty deleted LN to
+                                 * make the deletion durable.  The act of
+                                 * logging will also count the last logged LSN
+                                 * as obsolete.
+                                 */
+                                logDirtyLN(i, ln, false /*force*/);
+                            }
+                        }
+                    }
+
+                    boolean deleteSuccess = deleteEntry(i, true);
+                    assert deleteSuccess;
+
+                    /*
+                     * Since we're deleting the current entry, bump the current
+                     * index back down one.
+                     */
+                    i--;
+                }
+            }
+        } finally {
+            if (lockingTxn != null) {
+                lockingTxn.operationEnd();
+            }
+        }
+
+        if (anyLocksDenied && binRef != null) {
+            db.getDbEnvironment().addToCompressorQueue(binRef, false);
+            ret = true;
+        }
+
+        if (getNEntries() != 0 && setNewIdKey) {
+            setIdentifierKey(getKey(0));
+        }
+
+        /* This BIN is empty and expendable. */
+        if (getNEntries() == 0) {
+            setGeneration(0);
+        }
+
+        return ret;
+    }
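+
+    /*
+     * Typical caller pattern (a sketch; the INCompressor is the main
+     * caller, and the arguments shown are illustrative):
+     *
+     *   bin.latch();
+     *   try {
+     *       // canFetch == true, no local tracker
+     *       boolean requeued = bin.compress(binRef, true, null);
+     *   } finally {
+     *       bin.releaseLatch();
+     *   }
+     */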
+
+    @Override
+    public boolean isCompressible() {
+        return true;
+    }
+
+    /**
+     * Reduce memory consumption by evicting all LN targets. Note that this may
+     * cause LNs to be logged, which would require marking this BIN dirty.
+     *
+     * The BIN should be latched by the caller.
+     *
+     * @return number of evicted bytes. Note that a 0 return does not
+     * necessarily mean that the BIN had no evictable LNs. It's possible that
+     * resident, dirty LNs were not lockable.
+     */
+    public long evictLNs()
+        throws DatabaseException {
+
+        assert isLatchOwnerForWrite() :
+            "BIN must be latched before evicting LNs";
+
+        Cleaner cleaner = getDatabase().getDbEnvironment().getCleaner();
+
+        /*
+         * We can't evict an LN that is pointed to by a cursor, in case that
+         * cursor has a reference to the LN object. We take the cheap route
+         * and avoid evicting any LNs if there are cursors on this BIN. We
+         * could do a more expensive, precise check to see which entries have
+         * cursors. (We'd have to be careful to use the right field, index vs
+         * dupIndex.) This is something we might do later.
+         */
+        long removed = 0;
+        if (nCursors() == 0) {
+            for (int i = 0; i < getNEntries(); i++) {
+                removed += evictInternal(i, cleaner);
+            }
+            updateMemorySize(removed, 0);
+        }
+        return removed;
+    }
+
+    /**
+     * Evict a single LN if allowed and adjust the memory budget.
+     */
+    public void evictLN(int index)
+        throws DatabaseException {
+
+        Cleaner cleaner = getDatabase().getDbEnvironment().getCleaner();
+        long removed = evictInternal(index, cleaner);
+        updateMemorySize(removed, 0);
+    }
+
+    /**
+     * Evict a single LN if allowed.  The amount of memory freed is returned
+     * and must be subtracted from the memory budget by the caller.
+     *
+     * @return number of evicted bytes. Note that a 0 return does not
+     * necessarily mean the target LN was not resident; it is possible that
+     * a resident, dirty LN was simply not lockable.
+     */
+    private long evictInternal(int index, Cleaner cleaner)
+        throws DatabaseException {
+
+        Node n = getTarget(index);
+
+        if (n instanceof LN) {
+            LN ln = (LN) n;
+
+            /*
+             * Don't evict MapLNs for open databases (LN.isEvictable) [#13415].
+             * And don't strip LNs that the cleaner will be migrating
+             * (Cleaner.isEvictable).
+             */
+            if (ln.isEvictable() &&
+                cleaner.isEvictable(this, index)) {
+
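+                /*
+                 * In a deferred-write database, an entry with a null LSN has
+                 * never been logged, so force the LN to be logged before its
+                 * target is discarded; otherwise the record would be lost.
+                 */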
+                boolean force = getDatabase().isDeferredWriteMode() &&
+                    getLsn(index) == DbLsn.NULL_LSN;
+                /* Log target if necessary. */
+                logDirtyLN(index, ln, force);
+
+                /* Clear target. */
+                setTarget(index, null);
+                ln.releaseMemoryBudget();
+
+                return n.getMemorySizeIncludedByParent();
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Logs the LN at the given index if it is dirty.
+     */
+    private void logDirtyLN(int index, LN ln, boolean force)
+        throws DatabaseException {
+
+        if (ln.isDirty() || force) {
+            DatabaseImpl dbImpl = getDatabase();
+
+            /* Only deferred write databases should have dirty LNs. */
+            assert dbImpl.isDeferredWriteMode();
+
+            /* Log the LN with the main tree key. */
+            byte[] key = containsDuplicates() ? getDupKey() : getKey(index);
+
+            /*
+             * No need to lock, since this is non-transactional.  This should
+             * never be part of the replication stream, because this is a
+             * deferred-write db.
+             */
+            long lsn = ln.log(dbImpl.getDbEnvironment(),
+                              dbImpl,
+                              key,
+                              getLsn(index), // obsoleteLsn
+                              null,          // locker
+                              true,          // backgroundIO
+                              ReplicationContext.NO_REPLICATE);
+            updateEntry(index, lsn);
+        }
+    }
+
+    /* For debugging.  Overrides method in IN. */
+    @Override
+    boolean validateSubtreeBeforeDelete(int index)
+        throws DatabaseException {
+
+        return true;
+    }
+
+    /**
+     * Check if this node fits the qualifications for being part of a deletable
+     * subtree. It can only have one IN child and no LN children.
+     *
+     * We assume that this is only called under an assert.
+     */
+    @Override
+    boolean isValidForDelete()
+        throws DatabaseException {
+
+        /*
+	 * Can only have one valid child, and that child should be deletable.
+	 */
+        int validIndex = 0;
+        int numValidEntries = 0;
+	boolean needToLatch = !isLatchOwnerForWrite();
+	try {
+	    if (needToLatch) {
+		latch();
+	    }
+	    for (int i = 0; i < getNEntries(); i++) {
+		if (!isEntryKnownDeleted(i)) {
+		    numValidEntries++;
+		    validIndex = i;
+		}
+	    }
+
+	    if (numValidEntries > 1) {      // more than 1 entry
+		return false;
+	    } else {
+		if (nCursors() > 0) {        // cursors on BIN, not eligible
+		    return false;
+		}
+		if (numValidEntries == 1) {  // need to check child (DIN or LN)
+		    Node child = fetchTarget(validIndex);
+ 		    if (child == null) {
+			return false;
+		    }
+		    child.latchShared();
+		    boolean ret = child.isValidForDelete();
+		    child.releaseLatch();
+		    return ret;
+		} else {
+		    return true;             // 0 entries.
+		}
+	    }
+	} finally {
+	    if (needToLatch &&
+		isLatchOwnerForWrite()) {
+		releaseLatch();
+	    }
+	}
+    }
+
+    /*
+     * DbStat support.
+     */
+    @Override
+    void accumulateStats(TreeWalkerStatsAccumulator acc) {
+	acc.processBIN(this, Long.valueOf(getNodeId()), getLevel());
+    }
+
+    /**
+     * Return the relevant user-defined comparison function for this type of
+     * node.  For INs and BINs, this is the Btree comparison function.
+     * Overridden by DBIN.
+     */
+    @Override
+    public Comparator<byte[]> getKeyComparator() {
+        return getDatabase().getBtreeComparator();
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * @see IN#logDirtyChildren
+     */
+    @Override
+    public void logDirtyChildren()
+        throws DatabaseException {
+
+        /* Look for targets that are dirty. */
+        EnvironmentImpl envImpl = getDatabase().getDbEnvironment();
+        for (int i = 0; i < getNEntries(); i++) {
+            Node node = getTarget(i);
+            if (node != null) {
+
+                if (node instanceof LN) {
+                    logDirtyLN(i, (LN) node, false);
+                } else {
+                    DIN din = (DIN) node;
+                    din.latch(CacheMode.UNCHANGED);
+                    try {
+                        if (din.getDirty()) {
+                            din.logDirtyChildren();
+                            /* Logging the DIN will log the DupCountLN. */
+                            long childLsn =
+                                din.log(envImpl.getLogManager(),
+                                        false, // allow deltas
+                                        true,  // is provisional
+                                        false, // proactive migration
+                                        true,  // backgroundIO
+                                        this); // provisional parent
+                            updateEntry(i, childLsn);
+                        }
+                    } finally {
+                        din.releaseLatch();
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_BIN;
+    }
+
+    @Override
+    public String shortClassName() {
+        return "BIN";
+    }
+
+    @Override
+    public void beforeLog(LogManager logManager,
+                          INLogItem item,
+                          INLogContext context)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = getDatabase().getDbEnvironment();
+
+        /* Allow the cleaner to migrate LNs before logging. */
+        envImpl.getCleaner().lazyMigrateLNs
+            (this, context.proactiveMigration, context.backgroundIO);
+
+        /* Check for dirty LNs in deferred-write databases. */
+        if (getDatabase().isDeferredWriteMode()) {
+            logDirtyLNs(logManager);
+        }
+
+        /*
+         * We can log a delta rather than full version of this BIN if
+         * - this has been called from the checkpointer with allowDeltas=true
+         * - there is a full version on disk
+         * - we meet the percentage heuristics defined by environment params.
+         * - this delta is not prohibited because of cleaning or compression
+         * - this is not a deferred write db
+         * All other logging should be of the full version.
+         */
+        boolean doDeltaLog = false;
+        BINDelta deltaInfo = null;
+        if (context.allowDeltas &&
+            getLastFullVersion() != DbLsn.NULL_LSN &&
+            !prohibitNextDelta &&
+            !getDatabase().isDeferredWriteMode()) {
+            deltaInfo = new BINDelta(this);
+            doDeltaLog = doDeltaLog(deltaInfo);
+        }
+
+        if (doDeltaLog) {
+            item.provisional = Provisional.NO;
+            item.oldLsn = DbLsn.NULL_LSN;
+            item.entry = new SingleItemEntry(getBINDeltaType(), deltaInfo);
+            item.isDelta = true;
+        } else {
+            /* Log a full version of the IN. */
+            super.beforeLog(logManager, item, context);
+        }
+    }
+
+    @Override
+    public void afterLog(LogManager logManager,
+                         INLogItem item,
+                         INLogContext context)
+        throws DatabaseException {
+
+        if (item.isDelta) {
+
+            /*
+             * Don't change the dirtiness of the node -- leave it dirty.
+             * Deltas are never provisional; they must be processed at
+             * recovery time.
+             */
+            lastDeltaVersion = item.newLsn;
+            item.newLsn = DbLsn.NULL_LSN;
+            numDeltasSinceLastFull++;
+        } else {
+            super.afterLog(logManager, item, context);
+            lastDeltaVersion = DbLsn.NULL_LSN;
+            numDeltasSinceLastFull = 0;
+        }
+        prohibitNextDelta = false;
+    }
+
+    private void logDirtyLNs(LogManager logManager)
+        throws DatabaseException {
+
+	boolean isDeferredWrite = getDatabase().isDeferredWriteMode();
+
+        for (int i = 0; i < getNEntries(); i++) {
+            Node node = getTarget(i);
+            if (node instanceof LN) {
+                logDirtyLN(i, (LN) node,
+			   (getLsn(i) == DbLsn.NULL_LSN && isDeferredWrite));
+            }
+        }
+    }
+
+    /**
+     * Decide whether to log a full or partial BIN, depending on the ratio of
+     * the delta size to full BIN size, and the number of deltas that have been
+     * logged since the last full.
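+     *
+     * For example (illustrative numbers only): with 128 entries and a delta
+     * percent of 25, maxDiffs is 32, so a delta is logged only while at most
+     * 32 entries are dirty and fewer than the configured maximum number of
+     * consecutive deltas have been logged since the last full version.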
+     *
+     * @return true if we should log the deltas of this BIN
+     */
+    private boolean doDeltaLog(BINDelta deltaInfo)
+        throws DatabaseException {
+
+        int maxDiffs = (getNEntries() *
+                        getDatabase().getBinDeltaPercent()) / 100;
+        return (deltaInfo.getNumDeltas() <= maxDiffs) &&
+            (numDeltasSinceLastFull < getDatabase().getBinMaxDeltas());
+    }
+}
diff --git a/src/com/sleepycat/je/tree/BINBoundary.java b/src/com/sleepycat/je/tree/BINBoundary.java
new file mode 100644
index 0000000000000000000000000000000000000000..5416c75de323dcc03a8c509807d9d446e4aa96aa
--- /dev/null
+++ b/src/com/sleepycat/je/tree/BINBoundary.java
@@ -0,0 +1,19 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BINBoundary.java,v 1.7.2.2 2010/01/04 15:30:36 cwl Exp $:
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Contains information about the BIN returned by a search.
+ */
+public class BINBoundary {
+    /** True if the last BIN was returned. */
+    public boolean isLastBin;
+    /** True if the first BIN was returned. */
+    public boolean isFirstBin;
+}
diff --git a/src/com/sleepycat/je/tree/BINDelta.java b/src/com/sleepycat/je/tree/BINDelta.java
new file mode 100644
index 0000000000000000000000000000000000000000..43224942d4d6ae59d286bb4466cfed10304dde64
--- /dev/null
+++ b/src/com/sleepycat/je/tree/BINDelta.java
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BINDelta.java,v 1.53.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * BINDelta contains the information needed to create a partial (delta) BIN log
+ * entry. It also knows how to combine a full BIN log entry and a delta to
+ * generate a new BIN.
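+ * Only entries dirtied since the last full version are recorded, which is
+ * what makes a delta smaller than a full BIN entry.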
+ */
+public class BINDelta implements Loggable {
+
+    private DatabaseId dbId;    // owning db for this bin.
+    private long lastFullLsn;   // location of last full version
+    private List<DeltaInfo> deltas;        // list of key/action changes
+
+    /**
+     * Read a BIN and create the deltas.
+     */
+    public BINDelta(BIN bin) {
+        lastFullLsn = bin.getLastFullVersion();
+        dbId = bin.getDatabaseId();
+        deltas = new ArrayList<DeltaInfo>();
+
+        /*
+         * Save every entry that has been modified since the last full version.
+         * Note that we must rely on the dirty bit, and we can't infer any
+         * dirtiness by comparing the last full version LSN and the child
+         * reference LSN. That's because the ChildReference LSN may be earlier
+         * than the full version LSN because of aborts.
+         */
+        for (int i = 0; i < bin.getNEntries(); i++) {
+            if (bin.isDirty(i)) {
+                deltas.add(new DeltaInfo(bin.getKey(i),
+                                         bin.getLsn(i),
+                                         bin.getState(i)));
+            }
+        }
+    }
+
+    /**
+     * For instantiating from the log.
+     */
+    public BINDelta() {
+        dbId = new DatabaseId();
+        lastFullLsn = DbLsn.NULL_LSN;
+        deltas = new ArrayList<DeltaInfo>();
+    }
+
+    /**
+     * @return a count of deltas for this BIN.
+     */
+    int getNumDeltas() {
+        return deltas.size();
+    }
+
+    /**
+     * @return the dbId for this BIN.
+     */
+    public DatabaseId getDbId() {
+        return dbId;
+    }
+
+    /**
+     * @return the last full version of this BIN
+     */
+    public long getLastFullLsn() {
+        return lastFullLsn;
+    }
+
+    /**
+     * Create a BIN by starting with the full version and applying the deltas.
+     */
+    public BIN reconstituteBIN(EnvironmentImpl env)
+        throws DatabaseException {
+
+        /* Get the last full version of this BIN. */
+        BIN fullBIN = (BIN) env.getLogManager().get(lastFullLsn);
+        DatabaseImpl db = env.getDbTree().getDb(dbId);
+        try {
+
+            /*
+             * In effect, call fullBIN.postFetchInit(db) here.  We don't call
+             * it directly because it would put fullBIN on the INList, which
+             * we don't want during recovery or a cleaner run.
+             */
+            fullBIN.setDatabase(db);
+            fullBIN.setLastFullLsn(lastFullLsn);
+
+            /* Process each delta. */
+            fullBIN.latch();
+            for (int i = 0; i < deltas.size(); i++) {
+                DeltaInfo info = deltas.get(i);
+
+                /*
+                 * The BINDelta holds the authoritative version of each
+                 * entry.  In all cases its entry should supersede the entry
+                 * in the full BIN, even if either version is knownDeleted.
+                 * Therefore we call findEntry with the exact-search arg
+                 * false, so that a knownDeleted entry is returned when its
+                 * key matches, and with indicateExact true, so that the
+                 * return value still tells us whether the match was exact.
+                 */
+                int foundIndex = fullBIN.findEntry(info.getKey(), true, false);
+                if (foundIndex >= 0 &&
+                    (foundIndex & IN.EXACT_MATCH) != 0) {
+                    foundIndex &= ~IN.EXACT_MATCH;
+
+                    /*
+                     * The entry exists in the full version, update it with the
+                     * delta info.
+                     */
+                    if (info.isKnownDeleted()) {
+                        fullBIN.setKnownDeleted(foundIndex);
+                    } else {
+                        fullBIN.updateEntry
+                            (foundIndex, info.getLsn(), info.getState());
+                    }
+                } else {
+
+                    /*
+                     * The entry doesn't exist, add a new entry from the delta.
+                     */
+                    if (!info.isKnownDeleted()) {
+                        ChildReference entry =
+                            new ChildReference(null,
+                                               info.getKey(),
+                                               info.getLsn(),
+                                               info.getState());
+                        boolean insertOk = fullBIN.insertEntry(entry);
+                        assert insertOk;
+                    }
+                }
+            }
+        } finally {
+            env.getDbTree().releaseDb(db);
+        }
+
+        /*
+         * Reset the generation to 0; all this manipulation might have
+         * driven it up.
+         */
+        fullBIN.setGeneration(0);
+        fullBIN.releaseLatch();
+        return fullBIN;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /*
+     * @see Loggable#getLogSize()
+     */
+    public int getLogSize() {
+        int numDeltas = deltas.size();
+        int size =
+            dbId.getLogSize() + // database id
+            LogUtils.getPackedLongLogSize(lastFullLsn) +
+            LogUtils.getPackedIntLogSize(numDeltas);
+
+        for (int i = 0; i < numDeltas; i++) {    // deltas
+            DeltaInfo info = deltas.get(i);
+            size += info.getLogSize();
+        }
+
+        return size;
+    }
+
+    /*
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        dbId.writeToLog(logBuffer);                     // database id
+	LogUtils.writePackedLong(logBuffer, lastFullLsn);     // last version
+        LogUtils.writePackedInt(logBuffer, deltas.size());  // num deltas
+
+        for (int i = 0; i < deltas.size(); i++) {              // deltas
+            DeltaInfo info = deltas.get(i);
+            info.writeToLog(logBuffer);
+        }
+    }
+
+    /*
+     * @see Loggable#readFromLog()
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+	throws LogException {
+
+        dbId.readFromLog(itemBuffer, entryVersion); // database id
+	lastFullLsn = LogUtils.readLong(itemBuffer, (entryVersion < 6));
+        int numDeltas = LogUtils.readInt(itemBuffer, (entryVersion < 6));
+
+        for (int i = 0; i < numDeltas; i++) {    // deltas
+            DeltaInfo info = new DeltaInfo();
+            info.readFromLog(itemBuffer, entryVersion);
+            deltas.add(info);
+        }
+    }
+
+    /*
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        dbId.dumpLog(sb, verbose);
+        sb.append("<lastFullLsn>");
+	sb.append(DbLsn.toString(lastFullLsn));
+        sb.append("</lastFullLsn>");
+        sb.append("<deltas size=\"").append(deltas.size()).append("\"/>");
+        for (int i = 0; i < deltas.size(); i++) {    // deltas
+            DeltaInfo info = deltas.get(i);
+            info.dumpLog(sb, verbose);
+        }
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+	return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/BINReference.java b/src/com/sleepycat/je/tree/BINReference.java
new file mode 100644
index 0000000000000000000000000000000000000000..ab4c06966e81c1642df1eebb40862759b468c025
--- /dev/null
+++ b/src/com/sleepycat/je/tree/BINReference.java
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BINReference.java,v 1.23.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.je.dbi.DatabaseId;
+
+/**
+ * A class that embodies a reference to a BIN that does not rely on a
+ * Java reference to the actual BIN.
+ */
+public class BINReference {
+    protected byte[] idKey;
+    private long nodeId;
+    private DatabaseId databaseId;
+    private Set<Key> deletedKeys;
+
+    BINReference(long nodeId, DatabaseId databaseId, byte[] idKey) {
+	this.nodeId = nodeId;
+	this.databaseId = databaseId;
+	this.idKey = idKey;
+    }
+
+    public long getNodeId() {
+	return nodeId;
+    }
+
+    public DatabaseId getDatabaseId() {
+	return databaseId;
+    }
+
+    public byte[] getKey() {
+	return idKey;
+    }
+
+    public byte[] getData() {
+	return null;
+    }
+
+    public void addDeletedKey(Key key) {
+
+        if (deletedKeys == null) {
+            deletedKeys = new HashSet<Key>();
+        }
+        deletedKeys.add(key);
+    }
+
+    public void addDeletedKeys(BINReference other) {
+
+        if (deletedKeys == null) {
+            deletedKeys = new HashSet<Key>();
+        }
+        if (other.deletedKeys != null) {
+            deletedKeys.addAll(other.deletedKeys);
+        }
+    }
+
+    public void removeDeletedKey(Key key) {
+
+        if (deletedKeys != null) {
+            deletedKeys.remove(key);
+            if (deletedKeys.size() == 0) {
+                deletedKeys = null;
+            }
+        }
+    }
+
+    public boolean hasDeletedKey(Key key) {
+
+        return (deletedKeys != null) && deletedKeys.contains(key);
+    }
+
+    public boolean deletedKeysExist() {
+
+        return ((deletedKeys != null) && (deletedKeys.size() > 0));
+    }
+
+    public Iterator<Key> getDeletedKeyIterator() {
+        if (deletedKeys != null) {
+            return deletedKeys.iterator();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Compare two BINReferences.
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (!(obj instanceof BINReference)) {
+            return false;
+        }
+
+	return ((BINReference) obj).nodeId == nodeId;
+    }
+
+    @Override
+    public int hashCode() {
+	return (int) nodeId;
+    }
+
+    @Override
+    public String toString() {
+        return "idKey=" + Key.getNoFormatString(idKey) +
+            " nodeId = " + nodeId +
+            " db=" + databaseId +
+            " deletedKeys=" + deletedKeys;
+    }
+}
+
diff --git a/src/com/sleepycat/je/tree/ChildReference.java b/src/com/sleepycat/je/tree/ChildReference.java
new file mode 100644
index 0000000000000000000000000000000000000000..019c83faad293911b41d7a61763ff8b64754d69f
--- /dev/null
+++ b/src/com/sleepycat/je/tree/ChildReference.java
@@ -0,0 +1,371 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ChildReference.java,v 1.110.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogFileNotFoundException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * A ChildReference is a reference in the tree from parent to child.  It
+ * contains a node reference, key, and LSN.
+ */
+public class ChildReference implements Loggable {
+
+    private Node target;
+    private long lsn;
+    private byte[] key;
+
+    /*
+     * The state byte holds knownDeleted state in bit 0 and dirty state in bit
+     * 1. Bit flags are used here because of the desire to keep the child
+     * reference compact. State is persistent because knownDeleted is
+     * persistent, but the dirty bit is cleared when read in from the log.
+     *
+     * -- KnownDeleted is a way of indicating that the reference is invalid
+     * without logging new data. This happens in aborts and recoveries. If
+     * knownDeleted is true, this entry is surely deleted. If knownDeleted is
+     * false, this entry may or may not be deleted. Future space optimizations:
+     * store as a separate bit array in the BIN, or subclass ChildReference and
+     * make a special reference only used by BINs and not by INs.
+     *
+     * -- Dirty is true if the LSN or key has been changed since the last time
+     * the owning node was logged. This supports the calculation of BIN deltas.
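+     *
+     * -- Migrate (bit 2) marks an entry whose LN the cleaner wants logged
+     * again at the next opportunity, and PendingDeleted (bit 3) marks an
+     * entry deleted by a transaction that may not yet have committed.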
+     */
+    private byte state;
+    private static final byte KNOWN_DELETED_BIT = 0x1;
+    private static final byte DIRTY_BIT = 0x2;
+    private static final byte CLEAR_DIRTY_BIT = ~0x2;
+    private static final byte MIGRATE_BIT = 0x4;
+    private static final byte CLEAR_MIGRATE_BIT = ~0x4;
+    private static final byte PENDING_DELETED_BIT = 0x8;
+
+    /**
+     * Construct an empty child reference, for reading from the log.
+     */
+    ChildReference() {
+        init(null, Key.EMPTY_KEY, DbLsn.NULL_LSN, 0);
+    }
+
+    /**
+     * Construct a ChildReference for inserting a new entry.
+     */
+    public ChildReference(Node target, byte[] key, long lsn) {
+        init(target, key, lsn, DIRTY_BIT);
+    }
+
+    /**
+     * Construct a ChildReference for inserting an existing entry.
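+     * The entry is marked dirty so that it appears in the next BIN delta.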
+     */
+    public ChildReference(Node target,
+			  byte[] key,
+			  long lsn,
+                          byte existingState) {
+        init(target, key, lsn, existingState | DIRTY_BIT);
+    }
+
+    private void init(Node target,
+		      byte[] key,
+		      long lsn,
+                      int state) {
+        this.target = target;
+        this.key = key;
+        this.lsn = lsn;
+        this.state = (byte) state;
+    }
+
+    /**
+     * Return the key for this ChildReference.
+     */
+    public byte[] getKey() {
+        return key;
+    }
+
+    /**
+     * Set the key for this ChildReference.
+     */
+    public void setKey(byte[] key) {
+        this.key = key;
+        setDirty();
+    }
+
+    /**
+     * Fetch the target object that this ChildReference refers to.  If the
+     * object is already in VM, then just return the reference to it.  If the
+     * object is not in VM, then read the object from the log.  If the object
+     * has been faulted in and the in arg is supplied, then the IN's cached
+     * total memory size is invalidated.
+     *
+     * @param database The database that this ChildReference resides in.
+     * @param in The IN that this ChildReference lives in.  If
+     * the target is fetched (i.e. it is null on entry), then the
+     * total in memory count is invalidated in the IN. May be null.
+     * For example, the root is a ChildReference and there is no parent IN
+     * when the rootIN is fetched in.
+     * @return the Node object representing the target node in the tree, or
+     * null if there is no target of this ChildReference, or null if a
+     * pendingDelete or knownDeleted entry has been cleaned.
+     */
+    public Node fetchTarget(DatabaseImpl database, IN in)
+        throws DatabaseException {
+
+        if (target == null) {
+            /* fault object in from log */
+            if (lsn == DbLsn.NULL_LSN) {
+                if (!isKnownDeleted()) {
+                    throw new DatabaseException(IN.makeFetchErrorMsg
+                        ("NULL_LSN without KnownDeleted", in, lsn, state));
+                }
+                /* Ignore a NULL_LSN (return null) if KnownDeleted is set. */
+            } else {
+                try {
+                    EnvironmentImpl env = database.getDbEnvironment();
+                    Node node = (Node) env.getLogManager().get(lsn);
+                    node.postFetchInit(database, lsn);
+                    target = node;
+                    if (in != null) {
+                        in.updateMemorySize(null, target);
+                    }
+                } catch (LogFileNotFoundException LNFE) {
+                    if (!isKnownDeleted() && !isPendingDeleted()) {
+                        throw new DatabaseException
+                            (IN.makeFetchErrorMsg
+                                (LNFE.toString(), in, lsn, state),
+                             LNFE);
+                    }
+                    /* Ignore. Cleaner got to it, so just return null. */
+                } catch (Exception e) {
+                    throw new DatabaseException
+                        (IN.makeFetchErrorMsg(e.toString(), in, lsn, state),
+                         e);
+                }
+            }
+        }
+
+        return target;
+    }
+
+    /*
+     * Return the state byte for this ChildReference.
+     */
+    byte getState() {
+	return state;
+    }
+
+    /**
+     * Return the target for this ChildReference.
+     */
+    public Node getTarget() {
+        return target;
+    }
+
+    /**
+     * Sets the target for this ChildReference. There is no need to set the
+     * dirty bit; that state applies only to the key and LSN.
+     */
+    public void setTarget(Node target) {
+        this.target = target;
+    }
+
+    /**
+     * Clear the target for this ChildReference. There is no need to set the
+     * dirty bit; that state applies only to the key and LSN. This method is
+     * public because it is safe (it cannot corrupt the tree) and is used by
+     * RecoveryManager.
+     */
+    public void clearTarget() {
+        this.target = null;
+    }
+
+    /**
+     * Return the LSN for this ChildReference.
+     *
+     * @return the LSN for this ChildReference.
+     */
+    public long getLsn() {
+        return lsn;
+    }
+
+    /**
+     * Sets the target LSN for this ChildReference.
+     *
+     * @param lsn the target LSN.
+     */
+    public void setLsn(long lsn) {
+        this.lsn = lsn;
+        setDirty();
+    }
+
+    /**
+     * Update the LSN after an optional log operation.  In deferred-write
+     * mode an optional log call may return NULL_LSN, meaning nothing was
+     * logged; in that case just mark the entry dirty rather than overwrite
+     * a valid LSN.
+     */
+    void updateLsnAfterOptionalLog(DatabaseImpl dbImpl, long lsn) {
+        if ((lsn == DbLsn.NULL_LSN) &&
+            dbImpl.isDeferredWriteMode()) {
+            /*
+             * Don't update the lsn -- we don't want to overwrite a
+             * non-null lsn.
+             */
+            setDirty();
+        } else {
+            setLsn(lsn);
+        }
+    }
+
+    private void setDirty() {
+        state |= DIRTY_BIT;
+    }
+
+    /**
+     * @return true if the entry has been deleted, although the transaction
+     * that performed the deletion may not have committed.
+     */
+    private boolean isPendingDeleted() {
+        return ((state & PENDING_DELETED_BIT) != 0);
+    }
+
+    /**
+     * @return true if entry is deleted for sure.
+     */
+    public boolean isKnownDeleted() {
+        return ((state & KNOWN_DELETED_BIT) != 0);
+    }
+
+    /**
+     * @return true if the object is dirty.
+     */
+    private boolean isDirty() {
+        return ((state & DIRTY_BIT) != 0);
+    }
+
+    /**
+     * Get the entry migrate status.
+     */
+    public boolean getMigrate() {
+        return (state & MIGRATE_BIT) != 0;
+    }
+
+    /**
+     * Set the entry migrate status.
+     */
+    public void setMigrate(boolean migrate) {
+        if (migrate) {
+            state |= MIGRATE_BIT;
+        } else {
+            state &= CLEAR_MIGRATE_BIT;
+        }
+    }
+
+    /*
+     * Support for logging.
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getByteArrayLogSize(key) +   // key
+	    LogUtils.getPackedLongLogSize(lsn) +  // LSN
+            1;                                    // state
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeByteArray(logBuffer, key);  // key
+	LogUtils.writePackedLong(logBuffer, lsn);
+        logBuffer.put(state);                     // state
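+        /* Once written, the entry is clean until its key or LSN changes. */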
+        state &= CLEAR_DIRTY_BIT;
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) {
+        boolean unpacked = (entryVersion < 6);
+        key = LogUtils.readByteArray(itemBuffer, unpacked);      // key
+	lsn = LogUtils.readLong(itemBuffer, unpacked);           // LSN
+        state = itemBuffer.get();                                // state
+        state &= CLEAR_DIRTY_BIT;
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<ref knownDeleted=\"").append(isKnownDeleted());
+        sb.append("\" pendingDeleted=\"").append(isPendingDeleted());
+        sb.append("\">");
+        sb.append(Key.dumpString(key, 0));
+	sb.append(DbLsn.toString(lsn));
+        sb.append("</ref>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+	return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /*
+     * Dumping
+     */
+    String dumpString(int nspaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        if (lsn == DbLsn.NULL_LSN) {
+            sb.append(TreeUtils.indent(nspaces));
+            sb.append("<lsn/>");
+        } else {
+            sb.append(DbLsn.dumpString(lsn, nspaces));
+        }
+        sb.append('\n');
+        if (key == null) {
+            sb.append(TreeUtils.indent(nspaces));
+            sb.append("<key/>");
+        } else {
+            sb.append(Key.dumpString(key, nspaces));
+        }
+        sb.append('\n');
+        if (target == null) {
+            sb.append(TreeUtils.indent(nspaces));
+            sb.append("<target/>");
+        } else {
+            sb.append(target.dumpString(nspaces, true));
+        }
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nspaces));
+        sb.append("<knownDeleted val=\"");
+        sb.append(isKnownDeleted()).append("\"/>");
+        sb.append("<pendingDeleted val=\"");
+        sb.append(isPendingDeleted()).append("\"/>");
+        sb.append("<dirty val=\"").append(isDirty()).append("\"/>");
+        return sb.toString();
+    }
+
+    @Override
+    public String toString() {
+        return dumpString(0, false);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/CursorsExistException.java b/src/com/sleepycat/je/tree/CursorsExistException.java
new file mode 100644
index 0000000000000000000000000000000000000000..1ebdd48966538f59ac9d84fdd7f8110751da0a1f
--- /dev/null
+++ b/src/com/sleepycat/je/tree/CursorsExistException.java
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorsExistException.java,v 1.9.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+
+/**
+ * Error to indicate that a bottom level BIN has cursors on it during a
+ * delete subtree operation.
+ */
+public class CursorsExistException extends Exception {
+
+    /*
+     * Throw this static instance, in order to reduce the cost of
+     * filling in the stack trace.
+     */
+    public static final CursorsExistException CURSORS_EXIST =
+        new CursorsExistException();
+
+    private CursorsExistException() {
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DBIN.java b/src/com/sleepycat/je/tree/DBIN.java
new file mode 100644
index 0000000000000000000000000000000000000000..577143075c144e7f9f1e30b75e533966938623f7
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DBIN.java
@@ -0,0 +1,370 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DBIN.java,v 1.82.2.3 2010/03/26 13:23:55 mark Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * A DBIN represents a Duplicate Bottom Internal Node in the JE tree.
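+ * It holds the LN children for the duplicate set of a single main-tree key.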
+ */
+public final class DBIN extends BIN implements Loggable {
+    private static final String BEGIN_TAG = "<dbin>";
+    private static final String END_TAG = "</dbin>";
+
+    /**
+     * Full key for this set of duplicates.
+     */
+    private byte[] dupKey;
+
+    public DBIN() {
+        super();
+    }
+
+    public DBIN(DatabaseImpl db,
+                byte[] identifierKey,
+                int maxEntriesPerNode,
+                byte[] dupKey,
+                int level) {
+        super(db, identifierKey, maxEntriesPerNode, level);
+        this.dupKey = dupKey;
+    }
+
+    /**
+     * Create a new DBIN.  Need this because we can't call newInstance()
+     * without getting a 0 node.
+     */
+    @Override
+    protected IN createNewInstance(byte[] identifierKey,
+                                   int maxEntries,
+                                   int level) {
+        return new DBIN(getDatabase(),
+                        identifierKey,
+                        maxEntries,
+                        dupKey,
+                        level);
+    }
+
+    /*
+     * Return whether the shared latch for this kind of node should be of the
+     * "always exclusive" variety.  Presently, only IN's are actually latched
+     * shared.  BINs, DINs, and DBINs are all latched exclusive only.
+     */
+    @Override
+    boolean isAlwaysLatchedExclusively() {
+	return true;
+    }
+
+    @Override
+    boolean isBottomMostNode() {
+        return true;
+    }
+
+    /* Duplicates have no mask on their levels. */
+    @Override
+    protected int generateLevel(DatabaseId dbId, int newLevel) {
+        return newLevel;
+    }
+
+    /**
+     * Return the comparator function to be used for DBINs.  This is
+     * the user defined duplicate comparison function, if defined.
+     */
+    @Override
+    public final Comparator<byte[]> getKeyComparator() {
+        return getDatabase().getDuplicateComparator();
+    }
+
+    /**
+     * Return the key for this duplicate set.
+     */
+    @Override
+    public byte[] getDupKey() {
+        return dupKey;
+    }
+
+    /**
+     * Get the key (dupe or identifier) in child that is used to locate
+     * it in 'this' node.
+     */
+    @Override
+    public byte[] getChildKey(IN child)
+        throws DatabaseException {
+
+        return child.getIdentifierKey();
+    }
+
+    /*
+     * A DBIN uses the dupTree key in its searches.
+     */
+    @Override
+    public byte[] selectKey(byte[] mainTreeKey, byte[] dupTreeKey) {
+        return dupTreeKey;
+    }
+
+    /**
+     * Return the key for navigating through the duplicate tree.
+     */
+    @Override
+    public byte[] getDupTreeKey() {
+        return getIdentifierKey();
+    }
+
+    /**
+     * Return the key for navigating through the main tree.
+     */
+    @Override
+    public byte[] getMainTreeKey() {
+        return dupKey;
+    }
+
+    /**
+     * @return true if this node is a duplicate-bearing node type, false
+     * otherwise.
+     */
+    @Override
+    public boolean containsDuplicates() {
+        return true;
+    }
+
+    /**
+     * @return the log entry type to use for bin delta log entries.
+     */
+    @Override
+    LogEntryType getBINDeltaType() {
+        return LogEntryType.LOG_DUP_BIN_DELTA;
+    }
+
+    @Override
+    public BINReference createReference() {
+        return new DBINReference(getNodeId(), getDatabase().getId(),
+                                 getIdentifierKey(), dupKey);
+    }
+
+    /**
+     * Count up the memory usage attributable to this node alone.
+     */
+    @Override
+    protected long computeMemorySize() {
+        return super.computeMemorySize();
+    }
+
+    /* Called once at environment startup by MemoryBudget. */
+    public static long computeOverhead(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+	 * Overhead consists of all the fields in this class plus the
+	 * entry arrays in the IN class.
+         */
+        return MemoryBudget.DBIN_FIXED_OVERHEAD +
+	    IN.computeArraysOverhead(configManager);
+    }
+
+    @Override
+    protected long getMemoryOverhead(MemoryBudget mb) {
+        return mb.getDBINOverhead();
+    }
+
+    /*
+     * A DBIN cannot be the ancestor of any IN.
+     */
+    @Override
+    protected boolean canBeAncestor(boolean targetContainsDuplicates) {
+        return false;
+    }
+
+    /**
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    @Override
+    boolean hasPinnedChildren() {
+        return false;
+    }
+
+    /**
+     * The following methods access the correct fields in a
+     * cursor depending on whether "this" is a BIN or DBIN.  For
+     * BIN's, the CursorImpl.index and CursorImpl.bin fields should be
+     * used.  For DBIN's, the CursorImpl.dupIndex and CursorImpl.dupBin
+     * fields should be used.
+     */
+    @Override
+    BIN getCursorBIN(CursorImpl cursor) {
+        return cursor.getDupBIN();
+    }
+
+    @Override
+    BIN getCursorBINToBeRemoved(CursorImpl cursor) {
+        return cursor.getDupBINToBeRemoved();
+    }
+
+    @Override
+    int getCursorIndex(CursorImpl cursor) {
+        return cursor.getDupIndex();
+    }
+
+    @Override
+    void setCursorBIN(CursorImpl cursor, BIN bin) {
+        cursor.setDupBIN((DBIN) bin);
+    }
+
+    @Override
+    void setCursorIndex(CursorImpl cursor, int index) {
+        cursor.setDupIndex(index);
+    }
+
+    /*
+     * Depth first search through a duplicate tree looking for an LN that
+     * has nodeId.  When we find it, set location.bin and index and return
+     * true.  If we don't find it, return false.
+     *
+     * This node is latched during the search and released before returning.
+     */
+    @Override
+    boolean matchLNByNodeId(TreeLocation location,
+                            long nodeId,
+                            CacheMode cacheMode)
+	throws DatabaseException {
+
+	latch();
+	try {
+	    for (int i = 0; i < getNEntries(); i++) {
+		LN ln = (LN) fetchTarget(i);
+		if (ln != null) {
+		    if (ln.getNodeId() == nodeId) {
+			location.bin = this;
+			location.index = i;
+			location.lnKey = getKey(i);
+			location.childLsn = getLsn(i);
+			return true;
+		    }
+		}
+	    }
+
+	    return false;
+	} finally {
+	    releaseLatch();
+	}
+    }
+
+    /*
+     * DbStat support.
+     */
+    @Override
+    void accumulateStats(TreeWalkerStatsAccumulator acc) {
+	acc.processDBIN(this, Long.valueOf(getNodeId()), getLevel());
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    /**
+     * For unit test support:
+     * @return a string that dumps information about this DBIN.
+     */
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append(beginTag());
+        sb.append('\n');
+
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<dupkey>");
+        sb.append(dupKey == null ? "" : Key.dumpString(dupKey, 0));
+        sb.append("</dupkey>");
+        sb.append('\n');
+
+        sb.append(super.dumpString(nSpaces, false));
+
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append(endTag());
+        return sb.toString();
+    }
+
+    /**
+     * @see Node#getLogType()
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_DBIN;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        int size = super.getLogSize(); // ancestors
+        size += LogUtils.getByteArrayLogSize(dupKey);  // dup key
+        return size;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        super.writeToLog(logBuffer);
+        LogUtils.writeByteArray(logBuffer, dupKey);
+    }
+
+    /**
+     * @see BIN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion);
+        dupKey = LogUtils.readByteArray(itemBuffer, (entryVersion < 6));
+    }
+
+    /**
+     * DBINs need to dump their dup key.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb) {
+        super.dumpLogAdditional(sb);
+        sb.append(Key.dumpString(dupKey, 0));
+    }
+
+    @Override
+    public String shortClassName() {
+        return "DBIN";
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DBINReference.java b/src/com/sleepycat/je/tree/DBINReference.java
new file mode 100644
index 0000000000000000000000000000000000000000..aca0553feafe3380516fad8e022c7681c46dc0d3
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DBINReference.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DBINReference.java,v 1.19.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.dbi.DatabaseId;
+
+/**
+ * A class that embodies a reference to a DBIN that does not rely on a Java
+ * reference to the actual DBIN.
+ */
+public class DBINReference extends BINReference {
+    private byte[] dupKey;
+
+    DBINReference(long nodeId,
+                  DatabaseId databaseId,
+                  byte[] idKey,
+                  byte[] dupKey) {
+	super(nodeId, databaseId, idKey);
+	this.dupKey = dupKey;
+    }
+
+    @Override
+    public byte[] getKey() {
+	return dupKey;
+    }
+
+    @Override
+    public byte[] getData() {
+	return idKey;
+    }
+
+    @Override
+    public String toString() {
+	return super.toString() + " dupKey=" + Key.dumpString(dupKey, 0);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DIN.java b/src/com/sleepycat/je/tree/DIN.java
new file mode 100644
index 0000000000000000000000000000000000000000..f15d8f1d6ab2f6f7b9c7ac9897f2d7bba3a99f6e
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DIN.java
@@ -0,0 +1,513 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DIN.java,v 1.100.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.Locker;
+
+/**
+ * A DIN represents a Duplicate Internal Node in the JE tree.
+ */
+public final class DIN extends IN {
+
+    private static final String BEGIN_TAG = "<din>";
+    private static final String END_TAG = "</din>";
+
+    /**
+     * Full key for this set of duplicates. For example, if the tree
+     * contains k1/d1, k1/d2, k1/d3, the dupKey = k1.
+     */
+    private byte[] dupKey;
+
+    /**
+     * Reference to DupCountLN which stores the count.
+     */
+    private ChildReference dupCountLNRef;
+
+    /**
+     * Create an empty DIN, with no node id, to be filled in from the log.
+     */
+    public DIN() {
+        super();
+
+        dupCountLNRef = new ChildReference();
+        init(null, Key.EMPTY_KEY, 0, 0);
+    }
+
+    /**
+     * Create a new DIN.
+     */
+    public DIN(DatabaseImpl db,
+	       byte[] identifierKey,
+	       int capacity,
+               byte[] dupKey,
+	       ChildReference dupCountLNRef,
+	       int level) {
+        super(db, identifierKey, capacity, level);
+
+        this.dupKey = dupKey;
+        this.dupCountLNRef = dupCountLNRef;
+        initMemorySize(); // init after adding the Dup Count LN
+    }
+
+    /* Duplicates have no mask on their levels. */
+    @Override
+    protected int generateLevel(DatabaseId dbId, int newLevel) {
+        return newLevel;
+    }
+
+    /**
+     * Create a new DIN.  Need this because we can't call newInstance()
+     * without getting a 0 node.
+     */
+    @Override
+    protected IN createNewInstance(byte[] identifierKey,
+                                   int maxEntries,
+                                   int level) {
+        return new DIN(getDatabase(),
+                       identifierKey,
+                       maxEntries,
+                       dupKey,
+                       dupCountLNRef,
+                       level);
+    }
+
+    /*
+     * Return whether the shared latch for this kind of node should be of the
+     * "always exclusive" variety.  Presently, only IN's are actually latched
+     * shared.  BINs, DINs, and DBINs are all latched exclusive only.
+     */
+    @Override
+    boolean isAlwaysLatchedExclusively() {
+	return true;
+    }
+
+    /**
+     * Return the key for this duplicate set.
+     */
+    @Override
+    public byte[] getDupKey() {
+        return dupKey;
+    }
+
+    /**
+     * Get the key (dupe or identifier) in child that is used to locate
+     * it in 'this' node.
+     */
+    @Override
+    public byte[] getChildKey(IN child)
+        throws DatabaseException {
+
+        return child.getIdentifierKey();
+    }
+
+    /*
+     * A DIN uses the dupTree key in its searches.
+     */
+    @Override
+    public byte[] selectKey(byte[] mainTreeKey, byte[] dupTreeKey) {
+        return dupTreeKey;
+    }
+
+    /**
+     * Return the key for navigating through the duplicate tree.
+     */
+    @Override
+    public byte[] getDupTreeKey() {
+        return getIdentifierKey();
+    }
+
+    /**
+     * Return the key for navigating through the main tree.
+     */
+    @Override
+    public byte[] getMainTreeKey() {
+        return dupKey;
+    }
+
+    public ChildReference getDupCountLNRef() {
+        return dupCountLNRef;
+    }
+
+    public DupCountLN getDupCountLN()
+        throws DatabaseException {
+
+        return (DupCountLN) dupCountLNRef.fetchTarget(getDatabase(), this);
+    }
+
+    /*
+     * All methods that modify the dup count LN must adjust memory sizing.
+     */
+
+    /**
+     * Assign the Dup Count LN.
+     */
+    void setDupCountLN(ChildReference dupCountLNRef) {
+        updateMemorySize(this.dupCountLNRef, dupCountLNRef);
+        this.dupCountLNRef = dupCountLNRef;
+    }
+
+    /**
+     * Assign the Dup Count LN node.  Does not dirty the DIN.
+     */
+    public void updateDupCountLN(Node target) {
+        long oldSize = getEntryInMemorySize(dupCountLNRef.getKey(),
+				            dupCountLNRef.getTarget());
+        dupCountLNRef.setTarget(target);
+        long newSize = getEntryInMemorySize(dupCountLNRef.getKey(),
+				            dupCountLNRef.getTarget());
+        updateMemorySize(oldSize, newSize);
+    }
+
+    /**
+     * Update the Dup Count LN reference with a new LSN and clear its target.
+     */
+    public void updateDupCountLNRefAndNullTarget(long newLsn) {
+        setDirty(true);
+        long oldSize = getEntryInMemorySize(dupCountLNRef.getKey(),
+				            dupCountLNRef.getTarget());
+        dupCountLNRef.setTarget(null);
+        if (notOverwritingDeferredWriteEntry(newLsn)) {
+            dupCountLNRef.setLsn(newLsn);
+        }
+        long newSize = getEntryInMemorySize(dupCountLNRef.getKey(),
+				            dupCountLNRef.getTarget());
+        updateMemorySize(oldSize, newSize);
+    }
+
+    /**
+     * Update dup count LSN.
+     */
+    public void updateDupCountLNRef(long newLsn) {
+        setDirty(true);
+        if (notOverwritingDeferredWriteEntry(newLsn)) {
+            dupCountLNRef.setLsn(newLsn);
+        }
+    }
+
+    /**
+     * @return true if this node is a duplicate-bearing node type, false
+     * otherwise.
+     */
+    @Override
+    public boolean containsDuplicates() {
+        return true;
+    }
+
+    /* Never true for a DIN. */
+    @Override
+    public boolean isDbRoot() {
+	return false;
+    }
+
+    /**
+     * Return the comparator function to be used for DINs.  This is
+     * the user defined duplicate comparison function, if defined.
+     */
+    @Override
+    public final Comparator<byte[]> getKeyComparator() {
+        return getDatabase().getDuplicateComparator();
+    }
+
+    /**
+     * Increment or decrement the DupCountLN, log the updated LN, and update
+     * the lock result.
+     *
+     * Preconditions: This DIN is latched and the DupCountLN is write locked.
+     * Postconditions: Same as preconditions.
+     */
+    public void incrementDuplicateCount(LockResult lockResult,
+                                        byte[] key,
+                                        Locker locker,
+                                        boolean increment)
+        throws DatabaseException {
+
+        /* Increment/decrement the dup count and update its owning DIN. */
+        long oldLsn = dupCountLNRef.getLsn();
+        lockResult.setAbortLsn(oldLsn, dupCountLNRef.isKnownDeleted());
+        DupCountLN dupCountLN = getDupCountLN();
+        if (increment) {
+            dupCountLN.incDupCount();
+        } else {
+            dupCountLN.decDupCount();
+	    assert dupCountLN.getDupCount() >= 0;
+        }
+        DatabaseImpl db = getDatabase();
+        long newCountLSN = dupCountLN.optionalLog
+            (db.getDbEnvironment(), db, key,
+             oldLsn, locker, ReplicationContext.NO_REPLICATE);
+        updateDupCountLNRef(newCountLSN);
+    }
+
+    /**
+     * Count up the memory usage attributable to this node alone. LNs children
+     * are counted by their BIN/DIN parents, but INs are not counted by
+     * their parents because they are resident on the IN list.
+     */
+    @Override
+    protected long computeMemorySize() {
+        long size = super.computeMemorySize();
+        if (dupCountLNRef != null) {
+            size += getEntryInMemorySize(dupCountLNRef.getKey(),
+				         dupCountLNRef.getTarget());
+        }
+        return size;
+    }
+
+    /* Called once at environment startup by MemoryBudget. */
+    public static long computeOverhead(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+	 * Overhead consists of all the fields in this class plus the
+	 * entry arrays in the IN class.
+         */
+        return MemoryBudget.DIN_FIXED_OVERHEAD +
+	    IN.computeArraysOverhead(configManager);
+    }
+
+    @Override
+    protected long getMemoryOverhead(MemoryBudget mb) {
+        return mb.getDINOverhead();
+    }
+
+    /*
+     * Depth first search through a duplicate tree looking for an LN that
+     * has nodeId.  When we find it, set location.bin and index and return
+     * true.  If we don't find it, return false.
+     *
+     * This node is latched during the search and released before returning.
+     */
+    @Override
+    boolean matchLNByNodeId(TreeLocation location,
+                            long nodeId,
+                            CacheMode cacheMode)
+        throws DatabaseException {
+
+        latch();
+        try {
+            for (int i = 0; i < getNEntries(); i++) {
+                Node n = fetchTarget(i);
+                if (n != null) {
+                    boolean ret =
+                        n.matchLNByNodeId(location, nodeId, cacheMode);
+                    if (ret) {
+                        return true;
+                    }
+                }
+            }
+
+            return false;
+        } finally {
+            releaseLatch();
+        }
+    }
+
+    /*
+     * DbStat support.
+     */
+    @Override
+    void accumulateStats(TreeWalkerStatsAccumulator acc) {
+        acc.processDIN(this, Long.valueOf(getNodeId()), getLevel());
+    }
+
+    /*
+     * Logging Support
+     */
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_DIN;
+    }
+
+    /**
+     * Handles lazy migration of DupCountLNs prior to logging a DIN.
+     */
+    @Override
+    public void beforeLog(LogManager logManager,
+                          INLogItem item,
+                          INLogContext context)
+        throws DatabaseException {
+
+        if (dupCountLNRef != null) {
+            EnvironmentImpl envImpl = getDatabase().getDbEnvironment();
+            DupCountLN dupCntLN = (DupCountLN) dupCountLNRef.getTarget();
+
+            if ((dupCntLN != null) && (dupCntLN.isDirty())) {
+
+                /* If deferred write, write any dirty LNs now. */
+                long newLsn = dupCntLN.log(envImpl,
+                                           getDatabase(),
+                                           dupKey,
+                                           dupCountLNRef.getLsn(),
+                                           null,          // locker
+                                           context.backgroundIO,
+                                           // DupCountLNs are never replicated
+                                           ReplicationContext.NO_REPLICATE);
+
+                updateDupCountLNRef(newLsn);
+            } else {
+
+                /*
+                 * Allow the cleaner to migrate the DupCountLN before logging.
+                 */
+                envImpl.getCleaner().lazyMigrateDupCountLN
+                    (this, dupCountLNRef, context.proactiveMigration);
+            }
+        }
+
+        super.beforeLog(logManager, item, context);
+    }
+
+    /**
+     * @see IN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        int size = super.getLogSize();               // ancestors
+        size += LogUtils.getByteArrayLogSize(dupKey);// identifier key
+        size += 1;                                   // dupCountLNRef null flag
+        if (dupCountLNRef != null) {
+            size += dupCountLNRef.getLogSize();
+        }
+        return size;
+    }
+
+    /**
+     * @see IN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        // ancestors
+        super.writeToLog(logBuffer);
+
+        // identifier key
+        LogUtils.writeByteArray(logBuffer, dupKey);
+
+        /* DupCountLN */
+        boolean dupCountLNRefExists = (dupCountLNRef != null);
+        byte booleans = (byte) (dupCountLNRefExists ? 1 : 0);
+        logBuffer.put(booleans);
+        if (dupCountLNRefExists) {
+            dupCountLNRef.writeToLog(logBuffer);
+        }
+    }
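+
+    /*
+     * For reference, the layout written above (a descriptive sketch derived
+     * from getLogSize/writeToLog, not a normative format spec):
+     *
+     *   [IN fields (super.writeToLog)]
+     *   [dupKey, as a length-prefixed byte array]
+     *   [1 byte: 1 if dupCountLNRef is present, else 0]
+     *   [dupCountLNRef (ChildReference), only when present]
+     */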
+
+    /**
+     * @see IN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean unpacked = (entryVersion < 6);
+        super.readFromLog(itemBuffer, entryVersion);
+        dupKey = LogUtils.readByteArray(itemBuffer, unpacked);
+
+        /* DupCountLN */
+        byte booleans = itemBuffer.get();
+        boolean dupCountLNRefExists = (booleans & 1) != 0;
+        if (dupCountLNRefExists) {
+            dupCountLNRef.readFromLog(itemBuffer, entryVersion);
+        } else {
+            dupCountLNRef = null;
+        }
+    }
+
+    /**
+     * DINs need to dump their dup key.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb) {
+        super.dumpLogAdditional(sb);
+        sb.append(Key.dumpString(dupKey, 0));
+        if (dupCountLNRef != null) {
+            dupCountLNRef.dumpLog(sb, true);
+        }
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    /**
+     * For unit test support:
+     * @return a string that dumps information about this DIN, optionally
+     * including the surrounding XML tags.
+     */
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(beginTag());
+            sb.append('\n');
+        }
+
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<dupkey>");
+        sb.append(dupKey == null ? "" :
+                  Key.dumpString(dupKey, 0));
+        sb.append("</dupkey>");
+        sb.append('\n');
+        if (dupCountLNRef == null) {
+            sb.append(TreeUtils.indent(nSpaces+2));
+            sb.append("<dupCountLN/>");
+        } else {
+            sb.append(dupCountLNRef.dumpString(nSpaces + 4, true));
+        }
+        sb.append('\n');
+        sb.append(super.dumpString(nSpaces, false));
+
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(endTag());
+        }
+        return sb.toString();
+    }
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String shortClassName() {
+        return "DIN";
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DeltaInfo.java b/src/com/sleepycat/je/tree/DeltaInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..8c4982848ad5e398d13b904c50aa28f0fad7d63f
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DeltaInfo.java
@@ -0,0 +1,122 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeltaInfo.java,v 1.27.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * DeltaInfo holds the delta for one BIN entry in a partial BIN log entry.
+ * The data here is all that we need to update a BIN to its proper state.
+ */
+public class DeltaInfo implements Loggable {
+    private byte[] key;
+    private long lsn;
+    private byte state;
+
+    DeltaInfo(byte[] key, long lsn, byte state) {
+        this.key = key;
+        this.lsn = lsn;
+        this.state = state;
+    }
+
+    /**
+     * For reading from the log only.
+     */
+    DeltaInfo() {
+        lsn = DbLsn.NULL_LSN;
+    }
+
+    /**
+     * @see Loggable#getLogSize()
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getByteArrayLogSize(key) +
+            LogUtils.getPackedLongLogSize(lsn) + // LSN
+            1; // state
+    }
+
+    /**
+     * @see Loggable#writeToLog(java.nio.ByteBuffer)
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeByteArray(logBuffer, key);
+        LogUtils.writePackedLong(logBuffer, lsn);
+        logBuffer.put(state);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean unpacked = (entryVersion < 6);
+        key = LogUtils.readByteArray(itemBuffer, unpacked);
+        lsn = LogUtils.readLong(itemBuffer, unpacked);
+        state = itemBuffer.get();
+    }
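+
+    /*
+     * A minimal serialization round-trip sketch (illustrative only; assumes
+     * package-private access, and assumes LogEntryType.LOG_VERSION names the
+     * current log version):
+     *
+     *   DeltaInfo info = new DeltaInfo(key, lsn, state);
+     *   ByteBuffer buf = ByteBuffer.allocate(info.getLogSize());
+     *   info.writeToLog(buf);
+     *   buf.flip();
+     *   DeltaInfo copy = new DeltaInfo();
+     *   copy.readFromLog(buf, LogEntryType.LOG_VERSION);
+     */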
+
+    /**
+     * @see Loggable#dumpLog(java.lang.StringBuffer)
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append(Key.dumpString(key, 0));
+        sb.append(DbLsn.toString(lsn));
+        IN.dumpDeletedState(sb, state);
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * @return the Key.
+     */
+    byte[] getKey() {
+        return key;
+    }
+
+    /**
+     * @return the state flags.
+     */
+    byte getState() {
+        return state;
+    }
+
+    /**
+     * @return true if this is known to be deleted.
+     */
+    boolean isKnownDeleted() {
+        return IN.isStateKnownDeleted(state);
+    }
+
+    /**
+     * @return the LSN.
+     */
+    long getLsn() {
+        return lsn;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DupCountLN.java b/src/com/sleepycat/je/tree/DupCountLN.java
new file mode 100644
index 0000000000000000000000000000000000000000..009fd535293159997f53ecf08eacb25a5bd8b726
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DupCountLN.java
@@ -0,0 +1,216 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DupCountLN.java,v 1.40.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * A DupCountLN represents the transactional part of the root of a
+ * duplicate tree, specifically the count of dupes in the tree.
+ */
+public final class DupCountLN extends LN {
+
+    private static final String BEGIN_TAG = "<dupCountLN>";
+    private static final String END_TAG = "</dupCountLN>";
+
+    private int dupCount;
+
+    /**
+     * Create a new DupCountLN for a new DIN.
+     */
+    public DupCountLN(EnvironmentImpl envImpl, int count) {
+        /*
+         * Never replicate DupCountLNs; they are generated on the
+         * client side.
+         */
+        super(new byte[0], envImpl, false /* replicate */);
+
+        /*
+         * This ctor is always called from Tree.createDuplicateEntry
+         * where there will be one existing LN and a new dup LN being
+         * inserted to create the new duplicate tree.  So the minimum
+         * starting point for a duplicate tree is 2 entries.
+         */
+        this.dupCount = count;
+    }
+
+    /**
+     * Create an empty DupCountLN, to be filled in from the log.
+     */
+    public DupCountLN() {
+        super();
+        dupCount = 0;
+    }
+
+    public int getDupCount() {
+        return dupCount;
+    }
+
+    public int incDupCount() {
+        dupCount++;
+        setDirty();
+        assert dupCount >= 0;
+        return dupCount;
+    }
+
+    public int decDupCount() {
+        dupCount--;
+        setDirty();
+        assert dupCount >= 0;
+        return dupCount;
+    }
+
+    void setDupCount(int dupCount) {
+        this.dupCount = dupCount;
+        setDirty();
+    }
+
+    /**
+     * @return true if this node is a duplicate-bearing node type, false
+     * otherwise.
+     */
+    @Override
+    public boolean containsDuplicates() {
+        return true;
+    }
+
+    @Override
+    public boolean isDeleted() {
+        return false;
+    }
+
+    /**
+     * Compute the approximate size of this node in memory for evictor
+     * invocation purposes.
+     */
+    @Override
+    public long getMemorySizeIncludedByParent() {
+        return MemoryBudget.DUPCOUNTLN_OVERHEAD;
+    }
+
+    /*
+     * DbStat support.
+     */
+    public void accumulateStats(TreeWalkerStatsAccumulator acc) {
+        acc.processDupCountLN(this, Long.valueOf(getNodeId()));
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(beginTag());
+            sb.append('\n');
+        }
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<count v=\"").append(dupCount).append("\"/>").append('\n');
+        sb.append(super.dumpString(nSpaces, false));
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(endTag());
+        }
+        return sb.toString();
+    }
+
+    /*
+     * Logging
+     */
+
+    /**
+     * Log type for transactional entries.
+     */
+    @Override
+    protected LogEntryType getTransactionalLogType() {
+        return LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL;
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_DUPCOUNTLN;
+    }
+
+    /**
+     * @see LN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        return super.getLogSize() +
+            LogUtils.getPackedIntLogSize(dupCount);
+    }
+
+    /**
+     * @see LN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        // Ask ancestors to write to log
+        super.writeToLog(logBuffer);
+        LogUtils.writePackedInt(logBuffer, dupCount);
+    }
+
+    /**
+     * @see LN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion);
+        dupCount = LogUtils.readInt(itemBuffer, (entryVersion < 6));
+    }
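+
+    /*
+     * For reference (a descriptive sketch derived from the methods above,
+     * not a normative spec): a DupCountLN log entry is the base LN entry
+     * followed by the duplicate count as a packed int; entries older than
+     * log version 6 store the count unpacked.
+     */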
+
+    /**
+     * @see Loggable#logicalEquals
+     * DupCountLNs are never replicated.
+     */
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        return false;
+    }
+
+    /**
+     * Dump additional fields
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb, boolean verbose) {
+        super.dumpLogAdditional(sb, verbose);
+        sb.append("<count v=\"").append(dupCount).append("\"/>");
+    }
+}
diff --git a/src/com/sleepycat/je/tree/DuplicateEntryException.java b/src/com/sleepycat/je/tree/DuplicateEntryException.java
new file mode 100644
index 0000000000000000000000000000000000000000..60954aa640b33160ee10fa70b3867e057d117bbd
--- /dev/null
+++ b/src/com/sleepycat/je/tree/DuplicateEntryException.java
@@ -0,0 +1,24 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DuplicateEntryException.java,v 1.15.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Exception to indicate that an entry is already present in a node.
+ */
+public class DuplicateEntryException extends DatabaseException {
+    public DuplicateEntryException() {
+        super();
+    }
+
+    public DuplicateEntryException(String message) {
+        super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/FileSummaryLN.java b/src/com/sleepycat/je/tree/FileSummaryLN.java
new file mode 100644
index 0000000000000000000000000000000000000000..3a0c08af863986031f95636605f08e3879098676
--- /dev/null
+++ b/src/com/sleepycat/je/tree/FileSummaryLN.java
@@ -0,0 +1,460 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileSummaryLN.java,v 1.34.2.3 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.FileSummary;
+import com.sleepycat.je.cleaner.PackedOffsets;
+import com.sleepycat.je.cleaner.TrackedFileSummary;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * A FileSummaryLN represents a Leaf Node in the UtilizationProfile database.
+ *
+ * <p>The contents of the FileSummaryLN are not fixed until the moment at which
+ * the LN is added to the log.  A base summary object contains the summary last
+ * added to the log.  A tracked summary object contains live summary info being
+ * updated in real time.  The tracked summary is added to the base summary just
+ * before logging it, and then the tracked summary is reset.  This ensures that
+ * the logged summary will accurately reflect the totals calculated at the
+ * point in the log where the LN is added.</p>
+ *
+ * <p>This is all done in the writeToLog method, which operates under the log
+ * write latch.  All utilization tracking must be done under the log write
+ * latch.</p>
+ *
+ * <p>In record version 1, obsolete offset tracking was added and multiple
+ * records are stored for a single file rather than a single record.  Each
+ * record contains the offsets that were tracked since the last record was
+ * written.</p>
+ *
+ * <p>The key is 8 bytes: 4 bytes for the file number followed by 4 bytes for
+ * the sequence number.  The lowest valued key for a given file contains the
+ * most recent summary information, while to get a complete list of obsolete
+ * offsets all records for the file must be read.  A range search using just
+ * the first 4 bytes can be used to find the most recent record -- this is
+ * possible because the sequence number values are decreasing over time for a
+ * given file.  Here are example keys for three summary records in file 1:</p>
+ *
+ * <pre>
+ * (file=1, sequence=Integer.MAX_VALUE - 300)
+ * (file=1, sequence=Integer.MAX_VALUE - 200)
+ * (file=1, sequence=Integer.MAX_VALUE - 100)
+ * </pre>
+ *
+ * <p>The sequence number is the number of obsolete entries counted so far,
+ * subtracted from Integer.MAX_VALUE to cause the latest written record to have
+ * the lowest key.</p>
+ *
+ * <h3>Log version information</h3>
+ * <p>Version 0: Keys are old format strings. No obsolete detail is
+ * present.</p>
+ * <p>Version 1: Keys are two 4 byte integers: {file, sequence}.  Obsolete
+ * detail is present.  Some offsets may be invalid if RMW was used.</p>
+ * <p>Version 2: The RMW problem with invalid offsets was corrected.  There is
+ * no data format change; all versions of JE 2.0.x can read version 1.</p>
+ *
+ * @see com.sleepycat.je.cleaner.UtilizationProfile
+ */
+public final class FileSummaryLN extends LN {
+
+    private static final String BEGIN_TAG = "<fileSummaryLN>";
+    private static final String END_TAG = "</fileSummaryLN>";
+
+    private int extraMarshaledMemorySize;
+    private FileSummary baseSummary;
+    private TrackedFileSummary trackedSummary;
+    private PackedOffsets obsoleteOffsets;
+    private boolean needOffsets;
+    private byte entryVersion;
+
+    /**
+     * Creates a new LN with a given base summary.
+     */
+    public FileSummaryLN(EnvironmentImpl envImpl,
+                         FileSummary baseSummary) {
+        super(new byte[0],
+              envImpl,   // envImpl
+              false); // replicate
+        assert baseSummary != null;
+        this.baseSummary = baseSummary;
+        obsoleteOffsets = new PackedOffsets();
+        entryVersion = -1;
+    }
+
+    /**
+     * Creates an empty LN to be filled in from the log.
+     */
+    public FileSummaryLN()
+        throws DatabaseException {
+
+        baseSummary = new FileSummary();
+        obsoleteOffsets = new PackedOffsets();
+    }
+
+    /**
+     * Sets the live summary object that will be added to the base summary at
+     * the time the LN is logged.
+     */
+    public void setTrackedSummary(TrackedFileSummary trackedSummary) {
+        this.trackedSummary = trackedSummary;
+        needOffsets = true;
+    }
+
+    /**
+     * Returns the tracked summary, or null if setTrackedSummary was not
+     * called.
+     */
+    public TrackedFileSummary getTrackedSummary() {
+        return trackedSummary;
+    }
+
+    /**
+     * Returns the base summary for the file that is stored in the LN.
+     */
+    public FileSummary getBaseSummary() {
+        return baseSummary;
+    }
+
+    /**
+     * Returns the obsolete offsets for the file.
+     */
+    public PackedOffsets getObsoleteOffsets() {
+        return obsoleteOffsets;
+    }
+
+    /**
+     * Returns true if the given key for this LN is a String file number key.
+     * For the old version of the LN there will be a single record per file.
+     *
+     * If this is a version 0 log entry, the key is a string.  However, such an
+     * LN may be migrated by the cleaner, in which case the version will be 1
+     * or greater [#13061].  In the latter case, we can distinguish a string
+     * key by:
+     *
+     * 1) If the key is not 8 bytes long, it has to be a string key.
+     *
+     * 2) If the key is 8 bytes long, but bytes[4] is ascii "0" to "9", then it
+     * must be a string key.  bytes[4] to bytes[7] are a sequence number that
+     * is the number of log entries counted.  For this number to be greater
+     * than 0x30000000 (the binary value of four bytes starting with ascii
+     * "0"), over 400 million log entries would have to occur in a single
+     * file; this should never happen.
+     *
+     * Note that having to rely on method (2) is unlikely.  A string key will
+     * only be 8 bytes if the file number reaches 8 decimal digits
+     * (10,000,000 to 99,999,999).  This is a very large file number and is
+     * unlikely to have occurred using JE 1.7.1 or earlier.
+     *
+     * In summary, the only time the algorithm here could fail is if there were
+     * more than 400 million log entries per file, and more than 10 million
+     * were written with JE 1.7.1 or earlier.
+     */
+    public boolean hasStringKey(byte[] bytes) {
+
+        if (entryVersion == 0 || bytes.length != 8) {
+            return true;
+        } else {
+            return (bytes[4] >= '0' && bytes[4] <= '9');
+        }
+    }
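+
+    /*
+     * A worked example of the heuristic above (illustrative values only):
+     *
+     *   "1234567"           -> 7 bytes, not 8   -> string key
+     *   "12345678"          -> bytes[4] is '5'  -> string key
+     *   makeFullKey(1, 100) -> bytes[4] is 0x7f, the high byte of
+     *                          Integer.MAX_VALUE - 100, which is not an
+     *                          ascii digit      -> binary key
+     */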
+
+    /**
+     * Convert a FileSummaryLN key from a byte array to a long.  The file
+     * number is the first 4 bytes of the key.
+     */
+    public long getFileNumber(byte[] bytes) {
+
+        if (hasStringKey(bytes)) {
+            try {
+                return Long.valueOf(new String(bytes, "UTF-8")).longValue();
+            } catch (UnsupportedEncodingException shouldNeverHappen) {
+                assert false: shouldNeverHappen;
+                return 0;
+            }
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(bytes);
+            return LogUtils.readIntMSB(buf) & 0xFFFFFFFFL;
+        }
+    }
+
+    /**
+     * Returns the first 4 bytes of the key for the given file number.  This
+     * can be used to do a range search to find the first LN for the file.
+     */
+    public static byte[] makePartialKey(long fileNum) {
+
+        byte[] bytes = new byte[4];
+        ByteBuffer buf = ByteBuffer.wrap(bytes);
+
+        LogUtils.writeIntMSB(buf, (int) fileNum);
+
+        return bytes;
+    }
+
+    /**
+     * Returns the full two-part key for a given file number and unique
+     * sequence.  This can be used to insert a new LN.
+     *
+     * @param sequence is a unique identifier for the LN for the given file,
+     * and must be greater than the last sequence.
+     */
+    public static byte[] makeFullKey(long fileNum, int sequence) {
+
+        assert sequence >= 0;
+
+        byte[] bytes = new byte[8];
+        ByteBuffer buf = ByteBuffer.wrap(bytes);
+
+        /*
+         * The sequence is subtracted from MAX_VALUE so that increasing values
+         * will be sorted first.  This allows a simple range search to find the
+         * most recent value.
+         */
+        LogUtils.writeIntMSB(buf, (int) fileNum);
+        LogUtils.writeIntMSB(buf, Integer.MAX_VALUE - sequence);
+
+        return bytes;
+    }
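+
+    /*
+     * Byte layout sketch for makeFullKey(1, 100), derived from the code
+     * above (big-endian ints; illustrative values only):
+     *
+     *   bytes[0..3] = 0x00 0x00 0x00 0x01   (file number 1)
+     *   bytes[4..7] = 0x7f 0xff 0xff 0x9b   (Integer.MAX_VALUE - 100)
+     *
+     * A later record with sequence 200 stores 0x7fffff37 in bytes[4..7] and
+     * therefore sorts before the sequence-100 record, so a forward range
+     * search from makePartialKey(1) finds the most recent record first.
+     */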
+
+    /**
+     * Initialize a node that has been faulted in from the log.  If this FSLN
+     * contains version 1 offsets that can be incorrect when RMW was used, and
+     * if je.cleaner.rmwFix is enabled, discard the offsets.  [#13158]
+     */
+    @Override
+    public void postFetchInit(DatabaseImpl db, long sourceLsn)
+        throws DatabaseException {
+
+        super.postFetchInit(db, sourceLsn);
+
+        if (entryVersion == 1 &&
+            db.getDbEnvironment().getUtilizationProfile().isRMWFixEnabled()) {
+            obsoleteOffsets = new PackedOffsets();
+        }
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(super.dumpString(nSpaces, dumpTags));
+        sb.append('\n');
+        if (!isDeleted()) {
+            sb.append(baseSummary.toString());
+            sb.append(obsoleteOffsets.toString());
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Dump additional fields. Done this way so the additional info can
+     * be within the XML tags defining the dumped log entry.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb, boolean verbose) {
+        if (!isDeleted()) {
+            baseSummary.dumpLog(sb, true);
+            if (verbose) {
+                obsoleteOffsets.dumpLog(sb, true);
+            }
+        }
+    }
+
+    /*
+     * Logging
+     */
+
+    /**
+     * Log type for transactional entries.
+     */
+    @Override
+    protected LogEntryType getTransactionalLogType() {
+        assert false : "Txnl access to UP db not allowed";
+        return LogEntryType.LOG_FILESUMMARYLN;
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_FILESUMMARYLN;
+    }
+
+    /**
+     * This log entry type is configured to perform marshaling (getLogSize and
+     * writeToLog) under the write log mutex.  Otherwise, the size could change
+     * in between calls to these two methods as the result of utilization
+     * tracking.
+     *
+     * @see LN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        int size = super.getLogSize();
+        if (!isDeleted()) {
+            size += baseSummary.getLogSize();
+            getOffsets();
+            size += obsoleteOffsets.getLogSize();
+        }
+        return size;
+    }
+
+    /**
+     * @see LN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        /*
+         * Add the tracked (live) summary to the base summary before writing it
+         * to the log, and reset the tracked summary.  Do this even when
+         * deleting the LN, so that the tracked summary is cleared.
+         */
+        if (trackedSummary != null) {
+
+            baseSummary.add(trackedSummary);
+
+            if (!isDeleted()) {
+                getOffsets();
+            }
+
+            /* Reset the totals to zero and clear the tracked offsets. */
+            trackedSummary.reset();
+        }
+
+        super.writeToLog(logBuffer);
+
+        if (!isDeleted()) {
+            baseSummary.writeToLog(logBuffer);
+            obsoleteOffsets.writeToLog(logBuffer);
+        }
+    }
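+
+    /*
+     * Typical logging sequence (a sketch; variable names are hypothetical):
+     * the caller installs the live summary and then logs the LN.  The merge
+     * of tracked totals into baseSummary happens inside writeToLog, under
+     * the log write latch, so the logged totals match this entry's position
+     * in the log.
+     *
+     *   FileSummaryLN ln = new FileSummaryLN(envImpl, priorBaseSummary);
+     *   ln.setTrackedSummary(trackedFileSummary);
+     *   // logging the LN then calls getLogSize/writeToLog under the latch
+     */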
+
+    /**
+     * @see LN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        this.entryVersion = entryVersion;
+
+        super.readFromLog(itemBuffer, entryVersion);
+
+        if (!isDeleted()) {
+            baseSummary.readFromLog(itemBuffer, entryVersion);
+            if (entryVersion > 0) {
+                obsoleteOffsets.readFromLog(itemBuffer, entryVersion);
+            }
+        }
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Should never be replicated.
+     */
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        return false;
+    }
+
+    /**
+     * If tracked offsets may be present, get them so they are ready to be
+     * written to the log.
+     */
+    private void getOffsets() {
+        assert !isDeleted();
+        if (needOffsets) {
+            long[] offsets = trackedSummary.getObsoleteOffsets();
+            if (offsets != null) {
+                int oldSize = obsoleteOffsets.getExtraMemorySize();
+                obsoleteOffsets.pack(offsets);
+                int newSize = obsoleteOffsets.getExtraMemorySize();
+                extraMarshaledMemorySize = newSize - oldSize;
+            }
+            needOffsets = false;
+        }
+    }
+
+    /**
+     * Overrides this method to add space occupied by this object's fields.
+     */
+    @Override
+    public long getMemorySizeIncludedByParent() {
+        return super.getMemorySizeIncludedByParent() +
+               (MemoryBudget.FILESUMMARYLN_OVERHEAD -
+                MemoryBudget.LN_OVERHEAD) +
+               obsoleteOffsets.getExtraMemorySize();
+    }
+
+    /**
+     * Clear out the obsoleteOffsets to save memory when the LN is deleted.
+     */
+    @Override
+    void makeDeleted() {
+        super.makeDeleted();
+        obsoleteOffsets = new PackedOffsets();
+    }
+
+    /**
+     * Adds the extra memory used by obsoleteOffsets to the parent BIN memory
+     * size. Must be called after LN is inserted into the BIN and logged,
+     * while the cursor is still positioned on the inserted LN.  The BIN must
+     * be latched.  [#17462]
+     *
+     * <p>The obsoleteOffsets memory size is not initially budgeted in the usual
+     * way because PackedOffsets.pack (which changes the memory size) is called
+     * during marshalling (see getOffsets).  This amount is not counted in the
+     * parent IN size in the usual way, because LN logging / marshalling occurs
+     * after the LN is inserted in the BIN and its memory size has been counted
+     * (see Tree.insert).</p>
+     * 
+     * <p>Note that the tree memory usage cannot be updated directly in
+     * getOffsets because the tree memory usage must always be the sum of all
+     * IN sizes, and it is reset to this sum each checkpoint.</p>
+     */
+    public void addExtraMarshaledMemorySize(BIN parentBIN) {
+        if (extraMarshaledMemorySize != 0) {
+            assert trackedSummary != null; /* Must be set during the insert. */
+            assert parentBIN.isLatchOwnerForWrite();
+            parentBIN.updateMemorySize(0, extraMarshaledMemorySize);
+            extraMarshaledMemorySize = 0;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/tree/Generation.java b/src/com/sleepycat/je/tree/Generation.java
new file mode 100644
index 0000000000000000000000000000000000000000..32d27e2a0445ef3c78e7a85f5e7324f22d39b1d6
--- /dev/null
+++ b/src/com/sleepycat/je/tree/Generation.java
@@ -0,0 +1,17 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Generation.java,v 1.14.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+public final class Generation {
+    private static long nextGeneration = 0;

+    static long getNextGeneration() {
+        return nextGeneration++;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/IN.java b/src/com/sleepycat/je/tree/IN.java
new file mode 100644
index 0000000000000000000000000000000000000000..c0229a7cb08219d477ce70878e35f20f2fcf793b
--- /dev/null
+++ b/src/com/sleepycat/je/tree/IN.java
@@ -0,0 +1,3749 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IN.java,v 1.346.2.7 2010/03/26 13:23:55 mark Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.latch.LatchNotHeldException;
+import com.sleepycat.je.latch.SharedLatch;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogFileNotFoundException;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.Provisional;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * An IN represents an Internal Node in the JE tree.
+ */
+public class IN extends Node implements Comparable<IN>, Loggable {
+
+    private static final String BEGIN_TAG = "<in>";
+    private static final String END_TAG = "</in>";
+    private static final String TRACE_SPLIT = "Split:";
+    private static final String TRACE_DELETE = "Delete:";
+
+    private static final byte KNOWN_DELETED_BIT = 0x1;
+    private static final byte CLEAR_KNOWN_DELETED_BIT = ~0x1;
+    private static final byte DIRTY_BIT = 0x2;
+    private static final byte CLEAR_DIRTY_BIT = ~0x2;
+    private static final byte MIGRATE_BIT = 0x4;
+    private static final byte CLEAR_MIGRATE_BIT = ~0x4;
+    private static final byte PENDING_DELETED_BIT = 0x8;
+    private static final byte CLEAR_PENDING_DELETED_BIT = ~0x8;
+
+    private static final int BYTES_PER_LSN_ENTRY = 4;
+    private static final int MAX_FILE_OFFSET = 0xfffffe;
+    private static final int THREE_BYTE_NEGATIVE_ONE = 0xffffff;
+    @SuppressWarnings("unused")
+    private static final int GROWTH_INCREMENT = 5; // for future
+
+    /*
+     * Levels:
+     * The mapping tree has levels in the 0x20000 -> 0x2ffff number space.
+     * The main tree has levels in the 0x10000 -> 0x1ffff number space.
+     * The duplicate tree levels are in the 0 -> 0xffff number space.
+     */
+    public static final int DBMAP_LEVEL = 0x20000;
+    public static final int MAIN_LEVEL = 0x10000;
+    public static final int LEVEL_MASK = 0x0ffff;
+    public static final int MIN_LEVEL = -1;
+    public static final int MAX_LEVEL = Integer.MAX_VALUE;
+    public static final int BIN_LEVEL = MAIN_LEVEL | 1;
+
+    /*
+     * IN eviction types returned by getEvictionType.
+     */
+    public static final int MAY_NOT_EVICT = 0;
+    public static final int MAY_EVICT_LNS = 1;
+    public static final int MAY_EVICT_NODE = 2;
+
+    private static final int IN_DIRTY_BIT = 0x1;
+    private static final int IN_RECALC_TOGGLE_BIT = 0x2;
+    private static final int IN_IS_ROOT_BIT = 0x4;
+    private int flags; // not persistent
+
+    protected SharedLatch latch;
+    private long generation;
+    private int nEntries;
+    private byte[] identifierKey;
+
+    /*
+     * The following four arrays could more easily be embodied in an array of
+     * ChildReferences.  However, for in-memory space savings, we save the
+     * overhead of ChildReference and DbLsn objects by in-lining the elements
+     * of the ChildReference directly in the IN.
+     *
+     * entryKeyVals contains the whole keys if key prefixing is not being used.
+     * If prefixing is enabled, then keyPrefix contains the prefix and
+     * entryKeyVals contains the suffixes.
+     */
+    private Node[] entryTargets;
+    private byte[][] entryKeyVals; // byte[][] instead of Key[] to save space
+    private byte[] keyPrefix;
+
+    /*
+     * The following entryLsnXXX fields are used for storing LSNs.  There are
+     * two possible representations: a byte array based rep, and a long array
+     * based one.  For compactness, the byte array rep is used initially.  A
+     * single byte[] that uses four bytes per LSN is used. The baseFileNumber
+     * field contains the lowest file number of any LSN in the array.  Then for
+     * each entry (four bytes each), the first byte contains the offset from
+     * the baseFileNumber of that LSN's file number.  The remaining three bytes
+     * contain the file offset portion of the LSN.  Three bytes will hold a
+     * maximum offset of 16,777,214 (0xfffffe), so with the default JE log file
+     * size of 10,000,000 bytes this works well.
+     *
+     * If either (1) the difference in file numbers exceeds 127
+     * (Byte.MAX_VALUE) or (2) the file offset is greater than 16,777,214, then
+     * the byte[] based rep mutates to a long[] based rep.
+     *
+     * In the byte[] rep, DbLsn.NULL_LSN is represented by setting the file
+     * offset bytes for a given entry to -1 (0xffffff).
+     */
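+
+    /*
+     * A worked encoding example for the compact byte[] rep described above
+     * (illustrative values; the most-significant-first layout of the 3-byte
+     * offset is an assumption for illustration):
+     *
+     *   baseFileNumber = 10
+     *   entry LSN      = file 12, offset 0x001234
+     *
+     *   entryLsnByteArray[idx*4 .. idx*4+3] = 0x02 0x00 0x12 0x34
+     *     0x02           = file number delta (12 - 10)
+     *     0x00 0x12 0x34 = 3-byte file offset
+     */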
+    private long baseFileNumber;
+    private byte[] entryLsnByteArray;
+    private long[] entryLsnLongArray;
+    private byte[] entryStates;
+    private DatabaseImpl databaseImpl;
+    private int level;
+    private long inMemorySize;
+
+    private boolean inListResident; // true if this IN is on the IN list
+
+    /* Location of last full version. */
+    private long lastFullVersion = DbLsn.NULL_LSN;
+
+    /*
+     * A list of Long LSNs that cannot be counted as obsolete until an ancestor
+     * IN is logged non-provisionally.
+     */
+    private List<Long> provisionalObsolete;
+
+    /* Used to indicate that an exact match was found in findEntry. */
+    public static final int EXACT_MATCH = (1 << 16);
+
+    /* Used to indicate that an insert was successful. */
+    public static final int INSERT_SUCCESS = (1 << 17);
+
+    /*
+     * Accumulated memory budget delta.  Once this exceeds
+     * MemoryBudget.ACCUMULATED_LIMIT we inform the MemoryBudget that a change
+     * has occurred.  See SR 12273.
+     */
+    private int accumulatedDelta = 0;
+
+    /*
+     * Max allowable accumulation of memory budget changes before MemoryBudget
+     * should be updated. This allows for consolidating multiple calls to
+     * updateXXXMemoryBudget() into one call.  Not declared final so that the
+     * unit tests can modify it.  See SR 12273.
+     */
+    public static int ACCUMULATED_LIMIT = 1000;
+
+    /**
+     * Create an empty IN, with no node id, to be filled in from the log.
+     */
+    public IN() {
+        init(null, Key.EMPTY_KEY, 0, 0);
+    }
+
+    /**
+     * Create a new IN.
+     */
+    public IN(DatabaseImpl dbImpl,
+              byte[] identifierKey,
+              int capacity,
+              int level) {
+
+        super(dbImpl.getDbEnvironment(),
+              false); // replicated
+        init(dbImpl, identifierKey, capacity,
+             generateLevel(dbImpl.getId(), level));
+        initMemorySize();
+    }
+
+    /**
+     * Initialize IN object.
+     */
+    protected void init(DatabaseImpl db,
+                        byte[] identifierKey,
+                        int initialCapacity,
+                        int level) {
+        setDatabase(db);
+        latch = new SharedLatch(shortClassName() + getNodeId());
+        latch.setExclusiveOnly(EnvironmentImpl.getSharedLatches() ?
+                               isAlwaysLatchedExclusively() :
+                               true);
+        assert latch.setNoteLatch(true);
+        generation = 0;
+        flags = 0;
+        nEntries = 0;
+        this.identifierKey = identifierKey;
+        entryTargets = new Node[initialCapacity];
+        entryKeyVals = new byte[initialCapacity][];
+        keyPrefix = null;
+        baseFileNumber = -1;
+        entryLsnByteArray = new byte[initialCapacity << 2];
+        entryLsnLongArray = null;
+        entryStates = new byte[initialCapacity];
+        this.level = level;
+        inListResident = false;
+    }
+
+    /**
+     * Initialize the per-node memory count by computing its memory usage.
+     */
+    protected void initMemorySize() {
+        inMemorySize = computeMemorySize();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof IN)) {
+            return false;
+        }
+        IN in = (IN) obj;
+        return (this.getNodeId() == in.getNodeId());
+    }
+
+    /**
+     * We would like as even a hash distribution as possible so that the
+     * Evictor's LRU is as accurate as possible.  ConcurrentHashMap takes the
+     * value returned by this method and runs its own hash algorithm on it.
+     * So a bit complement of the node ID is sufficient as the return value and
+     * is a little better than returning just the node ID.  If we use a
+     * different container in the future that does not re-hash the return
+     * value, we should probably implement the Wang-Jenkins hash function here.
+     */
+    @Override
+    public int hashCode() {
+        return (int) ~getNodeId();
+    }
+
+    /**
+     * Sort based on equality key.
+     */
+    public int compareTo(IN argIN) {
+        long argNodeId = argIN.getNodeId();
+        long myNodeId = getNodeId();
+
+        if (myNodeId < argNodeId) {
+            return -1;
+        } else if (myNodeId > argNodeId) {
+            return 1;
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Create a new IN.  Need this because we can't call newInstance() without
+     * getting a 0 for nodeid.
+     */
+    protected IN createNewInstance(byte[] identifierKey,
+                                   int maxEntries,
+                                   int level) {
+        return new IN(databaseImpl, identifierKey, maxEntries, level);
+    }
+
+    /*
+     * Return whether the shared latch for this kind of node should be of the
+     * "always exclusive" variety.  Presently, only IN's are actually latched
+     * shared.  BINs, DINs, and DBINs are all latched exclusive only.
+     */
+    boolean isAlwaysLatchedExclusively() {
+        return false;
+    }
+
+    /**
+     * Initialize a node that has been read in from the log.
+     */
+    @Override
+    public void postFetchInit(DatabaseImpl db, long sourceLsn)
+        throws DatabaseException {
+
+        setDatabase(db);
+        setLastFullLsn(sourceLsn);
+        EnvironmentImpl env = db.getDbEnvironment();
+        initMemorySize(); // compute before adding to inlist
+        env.getInMemoryINs().add(this);
+    }
+
+    /**
+     * Initialize a node read in during recovery.
+     */
+    public void postRecoveryInit(DatabaseImpl db, long sourceLsn) {
+        setDatabase(db);
+        setLastFullLsn(sourceLsn);
+        initMemorySize();
+    }
+
+    /**
+     * Sets the last logged LSN.
+     */
+    void setLastFullLsn(long lsn) {
+        lastFullVersion = lsn;
+    }
+
+    /**
+     * Returns the last logged LSN, or DbLsn.NULL_LSN if never logged.  Is
+     * public for unit testing.
+     */
+    public long getLastFullVersion() {
+        return lastFullVersion;
+    }
+
+    /**
+     * Latch this node exclusive, optionally setting the generation.
+     */
+    public void latch(CacheMode cacheMode)
+        throws DatabaseException {
+
+        setGeneration(cacheMode);
+        latch.acquireExclusive();
+    }
+
+    /**
+     * Latch this node shared, optionally setting the generation.
+     */
+    @Override
+    public void latchShared(CacheMode cacheMode)
+        throws DatabaseException {
+
+        setGeneration(cacheMode);
+        latch.acquireShared();
+    }
+
+    /**
+     * Latch this node if it is not latched by another thread, optionally
+     * setting the generation if the latch succeeds.
+     */
+    public boolean latchNoWait(CacheMode cacheMode)
+        throws DatabaseException {
+
+        if (latch.acquireExclusiveNoWait()) {
+            setGeneration(cacheMode);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Latch this node exclusive and set the generation.
+     */
+    public void latch()
+        throws DatabaseException {
+
+        latch(CacheMode.DEFAULT);
+    }
+
+    /**
+     * Latch this node shared and set the generation.
+     */
+    @Override
+    public void latchShared()
+        throws DatabaseException {
+
+        latchShared(CacheMode.DEFAULT);
+    }
+
+    /**
+     * Latch this node if it is not latched by another thread, and set the
+     * generation if the latch succeeds.
+     */
+    public boolean latchNoWait()
+        throws DatabaseException {
+
+        return latchNoWait(CacheMode.DEFAULT);
+    }
+
+    /**
+     * Release the latch on this node.
+     */
+    @Override
+    public void releaseLatch()
+        throws LatchNotHeldException {
+
+        latch.release();
+    }
+
+    /**
+     * Release the latch on this node if this thread is the owner.
+     */
+    public void releaseLatchIfOwner()
+        throws LatchNotHeldException {
+
+        latch.releaseIfOwner();
+    }
+
+    /**
+     * @return true if this thread holds the IN's latch
+     */
+    public boolean isLatchOwnerForRead() {
+        return latch.isOwner();
+    }
+
+    public boolean isLatchOwnerForWrite() {
+        return latch.isWriteLockedByCurrentThread();
+    }
+
+    public long getGeneration() {
+        return generation;
+    }
+
+    public void setGeneration(CacheMode cacheMode) {
+        switch (cacheMode) {
+        case DEFAULT:
+            generation = Generation.getNextGeneration();
+            break;
+
+        case UNCHANGED:
+            break;
+
+        case KEEP_HOT:
+            generation = Long.MAX_VALUE;
+            break;
+
+        case MAKE_COLD:
+        case EVICT_LN:
+            if (isBottomMostNode()) {
+                generation = 0L;
+            }
+            break;
+
+        default:
+            throw new RuntimeException("unknown cacheMode: " + cacheMode);
+        }
+    }
+
+    public void setGeneration(long newGeneration) {
+        generation = newGeneration;
+    }
+
+    @Override
+    public int getLevel() {
+        return level;
+    }
+
+    /**
+     * @return true if this node cannot contain child INs, i.e., if this is
+     * a DBIN, or a BIN in a non-duplicates DB.
+     */
+    boolean isBottomMostNode() {
+        return false;
+    }
+
+    protected int generateLevel(DatabaseId dbId, int newLevel) {
+        if (dbId.equals(DbTree.ID_DB_ID)) {
+            return newLevel | DBMAP_LEVEL;
+        } else {
+            return newLevel | MAIN_LEVEL;
+        }
+    }
+
+    /* This has default protection for access by the unit tests. */
+    void setKeyPrefix(byte[] keyPrefix) {
+        assert databaseImpl != null;
+        this.keyPrefix = keyPrefix;
+    }
+
+    byte[] getKeyPrefix() {
+        return keyPrefix;
+    }
+
+    public boolean getDirty() {
+        return (flags & IN_DIRTY_BIT) != 0;
+    }
+
+    /* public for unit tests */
+    public void setDirty(boolean dirty) {
+        if (dirty) {
+            flags |= IN_DIRTY_BIT;
+        } else {
+            flags &= ~IN_DIRTY_BIT;
+        }
+    }
+
+    public boolean getRecalcToggle() {
+        return (flags & IN_RECALC_TOGGLE_BIT) != 0;
+    }
+
+    public void setRecalcToggle(boolean toggle) {
+        if (toggle) {
+            flags |= IN_RECALC_TOGGLE_BIT;
+        } else {
+            flags &= ~IN_RECALC_TOGGLE_BIT;
+        }
+    }
+
+    public boolean isRoot() {
+        return (flags & IN_IS_ROOT_BIT) != 0;
+    }
+
+    public boolean isDbRoot() {
+        return (flags & IN_IS_ROOT_BIT) != 0;
+    }
+
+    void setIsRoot(boolean isRoot) {
+        setIsRootFlag(isRoot);
+        setDirty(true);
+    }
+
+    private void setIsRootFlag(boolean isRoot) {
+        if (isRoot) {
+            flags |= IN_IS_ROOT_BIT;
+        } else {
+            flags &= ~IN_IS_ROOT_BIT;
+        }
+    }
+
+    /**
+     * @return the identifier key for this node.
+     */
+    public byte[] getIdentifierKey() {
+        return identifierKey;
+    }
+
+    /**
+     * Set the identifier key for this node.
+     *
+     * @param key - the new identifier key for this node.
+     */
+    void setIdentifierKey(byte[] key) {
+
+        /*
+         * The identifierKey is "intentionally" not kept track of in the
+         * memory budget.  If we did, then it would look like this:
+
+        int oldIDKeySz = (identifierKey == null) ?
+            0 :
+            MemoryBudget.byteArraySize(identifierKey.length);
+
+        int newIDKeySz = (key == null) ?
+            0 :
+            MemoryBudget.byteArraySize(key.length);
+        changeMemorySize(newIDKeySz - oldIDKeySz);
+
+        */
+        identifierKey = key;
+        setDirty(true);
+    }
+
+    /**
+     * Get the key (dupe or identifier) in child that is used to locate it in
+     * 'this' node.
+     */
+    public byte[] getChildKey(IN child)
+        throws DatabaseException {
+
+        return child.getIdentifierKey();
+    }
+
+    /*
+     * An IN uses the main key in its searches.
+     */
+    public byte[] selectKey(byte[] mainTreeKey, byte[] dupTreeKey) {
+        return mainTreeKey;
+    }
+
+    /**
+     * Return the key for this duplicate set.
+     */
+    public byte[] getDupKey()
+        throws DatabaseException {
+
+        throw new DatabaseException(shortClassName() + ".getDupKey() called");
+    }
+
+    /**
+     * Return the key for navigating through the duplicate tree.
+     */
+    public byte[] getDupTreeKey() {
+        return null;
+    }
+
+    /**
+     * Return the key for navigating through the main tree.
+     */
+    public byte[] getMainTreeKey() {
+        return getIdentifierKey();
+    }
+
+    /**
+     * Get the database for this IN.
+     */
+    public DatabaseImpl getDatabase() {
+        return databaseImpl;
+    }
+
+    /**
+     * Set the database reference for this node.
+     */
+    public void setDatabase(DatabaseImpl db) {
+        databaseImpl = db;
+    }
+
+    /*
+     * Get the database id for this node.
+     */
+    public DatabaseId getDatabaseId() {
+        return databaseImpl.getId();
+    }
+
+    private void setEntryInternal(int from, int to) {
+        entryTargets[to] = entryTargets[from];
+        entryKeyVals[to] = entryKeyVals[from];
+        entryStates[to] = entryStates[from];
+        /* Will implement this in the future. Note, don't adjust if mutating.*/
+        //maybeAdjustCapacity(offset);
+        if (entryLsnLongArray == null) {
+            int fromOff = from << 2;
+            int toOff = to << 2;
+            entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
+            entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
+            entryLsnByteArray[toOff++] = entryLsnByteArray[fromOff++];
+            entryLsnByteArray[toOff] = entryLsnByteArray[fromOff];
+        } else {
+            entryLsnLongArray[to] = entryLsnLongArray[from];
+        }
+    }
+
+    private void clearEntry(int idx) {
+        entryTargets[idx] = null;
+        entryKeyVals[idx] = null;
+        setLsnElement(idx, DbLsn.NULL_LSN);
+        entryStates[idx] = 0;
+    }
+
+    /**
+     * Return the idx'th key.  If prefixing is enabled, construct a new byte[]
+     * containing the prefix and suffix.  If prefixing is not enabled, just
+     * return the current byte[] in entryKeyVals[].
+     */
+    public byte[] getKey(int idx) {
+        if (keyPrefix != null) {
+            int prefixLen = keyPrefix.length;
+            byte[] suffix = entryKeyVals[idx];
+            if (prefixLen == 0) {
+                return suffix;
+            }
+            int suffixLen = (suffix == null ? 0 : suffix.length);
+            byte[] ret = new byte[prefixLen + suffixLen];
+            /* keyPrefix is known to be non-null here. */
+            System.arraycopy(keyPrefix, 0, ret, 0, prefixLen);
+            if (suffix != null) {
+                System.arraycopy(suffix, 0, ret, prefixLen, suffixLen);
+            }
+            return ret;
+        } else {
+            return entryKeyVals[idx];
+        }
+    }
+
+    /**
+     * Set the idx'th key.
+     */
+    private boolean setKeyAndDirty(int idx, byte[] keyVal) {
+        entryStates[idx] |= DIRTY_BIT;
+        return setKeyAndPrefix(idx, keyVal);
+    }
+
+    /*
+     * Set the key at idx and adjust the key prefix if necessary.  Return true
+     * if the prefixes and suffixes were adjusted to indicate that memory
+     * recalculation can occur.
+     */
+    private boolean setKeyAndPrefix(int idx, byte[] keyVal) {
+
+        /*
+         * Only compute key prefix if prefixing is enabled and there's an
+         * existing prefix.
+         */
+        assert databaseImpl != null;
+        if (databaseImpl.getKeyPrefixing() &&
+            keyPrefix != null) {
+            if (!compareToKeyPrefix(keyVal)) {
+
+                /*
+                 * The new key doesn't share the current prefix, so recompute
+                 * the prefix and readjust all the existing suffixes.
+                 */
+                byte[] newPrefix = computeKeyPrefix(idx);
+                if (newPrefix != null) {
+                    /* Take the new key into consideration for new prefix. */
+                    newPrefix = Key.createKeyPrefix(newPrefix, keyVal);
+                }
+                recalcSuffixes(newPrefix, keyVal, idx);
+                return true;
+            } else {
+                entryKeyVals[idx] = computeKeySuffix(keyPrefix, keyVal);
+                return false;
+            }
+        } else {
+            if (keyPrefix != null) {
+
+                /*
+                 * Key prefixing has been turned off on this database, but
+                 * there are existing prefixes.  Remove prefixes for this IN.
+                 */
+                recalcSuffixes(new byte[0], keyVal, idx);
+            } else {
+                entryKeyVals[idx] = keyVal;
+            }
+            return false;
+        }
+    }
+
+    /*
+     * Iterate over all keys in this IN and recalculate their suffixes based on
+     * newPrefix.  If keyVal and idx are supplied, it means that entry[idx] is
+     * about to be changed to keyVal so use that instead of entryKeyVals[idx]
+     * when computing the new suffixes.  If idx is < 0, and keyVal is null,
+     * then recalculate suffixes for all entries in this.
+     */
+    private void recalcSuffixes(byte[] newPrefix, byte[] keyVal, int idx) {
+        for (int i = 0; i < nEntries; i++) {
+            byte[] curKey = (i == idx ? keyVal : getKey(i));
+            entryKeyVals[i] = computeKeySuffix(newPrefix, curKey);
+        }
+        setKeyPrefix(newPrefix);
+    }
+
+    /*
+     * Returns whether the current keyPrefix is a prefix of, or equal to, the
+     * given newKey.
+     *
+     * This has default protection for the unit tests.
+     */
+    boolean compareToKeyPrefix(byte[] newKey) {
+        if (keyPrefix == null ||
+            keyPrefix.length == 0) {
+            return false;
+        }
+
+        int newKeyLen = newKey.length;
+        for (int i = 0; i < keyPrefix.length; i++) {
+            if (i < newKeyLen &&
+                keyPrefix[i] == newKey[i]) {
+                continue;
+            } else {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /*
+     * Computes a key prefix based on all the keys in 'this'.  Return null if
+     * the IN is empty or prefixing is not enabled or there is no common
+     * prefix for the keys.
+     */
+    private byte[] computeKeyPrefix(int excludeIdx) {
+        if (!databaseImpl.getKeyPrefixing() ||
+            nEntries == 0) {
+            return null;
+        }
+
+        int startIdx = 1;
+        byte[] curPrefixKey = null;
+        if (excludeIdx == 0) {
+            startIdx = 2;
+            curPrefixKey = getKey(1);
+        } else {
+            curPrefixKey = getKey(0);
+        }
+
+        int prefixLen = curPrefixKey.length;
+        for (int i = startIdx; i < nEntries; i++) {
+            byte[] curKey = getKey(i);
+            if (curPrefixKey == null || curKey == null) {
+                return null;
+            }
+            int newPrefixLen =
+                Key.getKeyPrefixLength(curPrefixKey, prefixLen, curKey);
+            if (newPrefixLen < prefixLen) {
+                curPrefixKey = curKey;
+                prefixLen = newPrefixLen;
+            }
+        }
+
+        byte[] ret = new byte[prefixLen];
+        System.arraycopy(curPrefixKey, 0, ret, 0, prefixLen);
+
+        return ret;
+    }
+
+    /*
+     * Given a prefix and a key, return the suffix portion of keyVal.
+     */
+    private byte[] computeKeySuffix(byte[] newPrefix, byte[] keyVal) {
+        int prefixLen = (newPrefix == null ? 0 : newPrefix.length);
+
+        if (prefixLen == 0) {
+            return keyVal;
+        }
+
+        int suffixLen = keyVal.length - prefixLen;
+        byte[] ret = new byte[suffixLen];
+        System.arraycopy(keyVal, prefixLen, ret, 0, suffixLen);
+        return ret;
+    }
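+
+    /*
+     * Illustrative sketch of the prefix/suffix scheme above (hypothetical
+     * byte values, not taken from a real database): given the keys
+     * {0x61,0x62,0x63} ("abc") and {0x61,0x62,0x64} ("abd"),
+     * computeKeyPrefix yields {0x61,0x62}, and computeKeySuffix leaves only
+     * {0x63} and {0x64} to be stored in entryKeyVals.  A full key is then
+     * reconstructed by concatenation, e.g.:
+     *
+     *   byte[] full = new byte[keyPrefix.length + suffix.length];
+     *   System.arraycopy(keyPrefix, 0, full, 0, keyPrefix.length);
+     *   System.arraycopy(suffix, 0, full, keyPrefix.length, suffix.length);
+     */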
+
+    /*
+     * For debugging.
+     */
+    boolean verifyKeyPrefix() {
+        byte[] computedKeyPrefix = computeKeyPrefix(-1);
+        if (keyPrefix == null) {
+            return computedKeyPrefix == null;
+        }
+
+        if (computedKeyPrefix == null ||
+            computedKeyPrefix.length < keyPrefix.length) {
+            System.out.println("VerifyKeyPrefix failed");
+            System.out.println(dumpString(0, false));
+            return false;
+        }
+        for (int i = 0; i < keyPrefix.length; i++) {
+            if (keyPrefix[i] != computedKeyPrefix[i]) {
+                System.out.println("VerifyKeyPrefix failed");
+                System.out.println(dumpString(0, false));
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Get the idx'th migrate status.
+     */
+    public boolean getMigrate(int idx) {
+        return (entryStates[idx] & MIGRATE_BIT) != 0;
+    }
+
+    /**
+     * Set the idx'th migrate status.
+     */
+    public void setMigrate(int idx, boolean migrate) {
+        if (migrate) {
+            entryStates[idx] |= MIGRATE_BIT;
+        } else {
+            entryStates[idx] &= CLEAR_MIGRATE_BIT;
+        }
+    }
+
+    public byte getState(int idx) {
+        return entryStates[idx];
+    }
+
+    /**
+     * Return the idx'th target.
+     */
+    public Node getTarget(int idx) {
+        return entryTargets[idx];
+    }
+
+    /**
+     * Sets the idx'th target.  No need to make the node dirty; the dirty
+     * state only applies to the key and LSN.
+     *
+     * <p>WARNING: This method does not update the memory budget.  The caller
+     * must update the budget.</p>
+     */
+    void setTarget(int idx, Node target) {
+        assert isLatchOwnerForWrite() :
+            "Not latched for write " + getClass().getName() +
+             " id=" + getNodeId();
+        entryTargets[idx] = target;
+    }
+
+    /**
+     * Return the idx'th LSN for this entry.
+     *
+     * @return the idx'th LSN for this entry.
+     */
+    public long getLsn(int idx) {
+        if (entryLsnLongArray == null) {
+            int offset = idx << 2;
+            int fileOffset = getFileOffset(offset);
+            if (fileOffset == -1) {
+                return DbLsn.NULL_LSN;
+            } else {
+                return DbLsn.makeLsn((long) (baseFileNumber +
+                                             getFileNumberOffset(offset)),
+                                     fileOffset);
+            }
+        } else {
+            return entryLsnLongArray[idx];
+        }
+    }
+
+    /**
+     * Sets the idx'th target LSN. Make this a private helper method, so we're
+     * sure to set the IN dirty where appropriate.
+     */
+    private void setLsn(int idx, long lsn) {
+
+        int oldSize = computeLsnOverhead();
+        /* setLsnElement can mutate to an array of longs. */
+        setLsnElement(idx, lsn);
+        changeMemorySize(computeLsnOverhead() - oldSize);
+        entryStates[idx] |= DIRTY_BIT;
+    }
+
+    /* For unit tests. */
+    long[] getEntryLsnLongArray() {
+        return entryLsnLongArray;
+    }
+
+    /* For unit tests. */
+    byte[] getEntryLsnByteArray() {
+        return entryLsnByteArray;
+    }
+
+    /* For unit tests. */
+    void initEntryLsn(int capacity) {
+        entryLsnLongArray = null;
+        entryLsnByteArray = new byte[capacity << 2];
+        baseFileNumber = -1;
+    }
+
+    /* Use default protection for unit tests. */
+    void setLsnElement(int idx, long value) {
+
+        int offset = idx << 2;
+        /* Will implement this in the future. Note, don't adjust if mutating.*/
+        //maybeAdjustCapacity(offset);
+        if (entryLsnLongArray != null) {
+            entryLsnLongArray[idx] = value;
+            return;
+        }
+
+        if (value == DbLsn.NULL_LSN) {
+            setFileNumberOffset(offset, (byte) 0);
+            setFileOffset(offset, -1);
+            return;
+        }
+
+        long thisFileNumber = DbLsn.getFileNumber(value);
+
+        if (baseFileNumber == -1) {
+            /* First entry. */
+            baseFileNumber = thisFileNumber;
+            setFileNumberOffset(offset, (byte) 0);
+        } else {
+            if (thisFileNumber < baseFileNumber) {
+                if (!adjustFileNumbers(thisFileNumber)) {
+                    mutateToLongArray(idx, value);
+                    return;
+                }
+                baseFileNumber = thisFileNumber;
+            }
+            long fileNumberDifference = thisFileNumber - baseFileNumber;
+            if (fileNumberDifference > Byte.MAX_VALUE) {
+                mutateToLongArray(idx, value);
+                return;
+            }
+            setFileNumberOffset
+                (offset, (byte) (thisFileNumber - baseFileNumber));
+            //assert getFileNumberOffset(offset) >= 0;
+        }
+
+        int fileOffset = (int) DbLsn.getFileOffset(value);
+        if (fileOffset > MAX_FILE_OFFSET) {
+            mutateToLongArray(idx, value);
+            return;
+        }
+
+        setFileOffset(offset, fileOffset);
+        //assert getLsn(offset) == value;
+    }
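+
+    /*
+     * A sketch of the compact representation maintained above (illustrative
+     * numbers): each entry uses 4 bytes in entryLsnByteArray -- a 1-byte
+     * file-number offset from baseFileNumber plus a 3-byte file offset.
+     * With baseFileNumber = 100, the LSN (file=102, offset=0x1234) is
+     * stored as fileNumberOffset = 2 plus the 3 offset bytes of 0x1234.  If
+     * the file-number delta exceeds Byte.MAX_VALUE or the file offset
+     * exceeds MAX_FILE_OFFSET, mutateToLongArray converts the whole array
+     * to long[] form, and subsequent calls take the long[] path.
+     */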
+
+    private void mutateToLongArray(int idx, long value) {
+        int nElts = entryLsnByteArray.length >> 2;
+        long[] newArr = new long[nElts];
+        for (int i = 0; i < nElts; i++) {
+            newArr[i] = getLsn(i);
+        }
+        newArr[idx] = value;
+        entryLsnLongArray = newArr;
+        entryLsnByteArray = null;
+    }
+
+    /* Will implement this in the future. Note, don't adjust if mutating.*/
+    /***
+    private void maybeAdjustCapacity(int offset) {
+        if (entryLsnLongArray == null) {
+            int bytesNeeded = offset + BYTES_PER_LSN_ENTRY;
+            int currentBytes = entryLsnByteArray.length;
+            if (currentBytes < bytesNeeded) {
+                int newBytes = bytesNeeded +
+                    (GROWTH_INCREMENT * BYTES_PER_LSN_ENTRY);
+                byte[] newArr = new byte[newBytes];
+                System.arraycopy(entryLsnByteArray, 0, newArr, 0,
+                                 currentBytes);
+                entryLsnByteArray = newArr;
+                for (int i = currentBytes;
+                     i < newBytes;
+                     i += BYTES_PER_LSN_ENTRY) {
+                    setFileNumberOffset(i, (byte) 0);
+                    setFileOffset(i, -1);
+                }
+            }
+        } else {
+            int currentEntries = entryLsnLongArray.length;
+            int idx = offset >> 2;
+            if (currentEntries < idx + 1) {
+                int newEntries = idx + GROWTH_INCREMENT;
+                long[] newArr = new long[newEntries];
+                System.arraycopy(entryLsnLongArray, 0, newArr, 0,
+                                 currentEntries);
+                entryLsnLongArray = newArr;
+                for (int i = currentEntries; i < newEntries; i++) {
+                    entryLsnLongArray[i] = DbLsn.NULL_LSN;
+                }
+            }
+        }
+    }
+    ***/
+
+    private boolean adjustFileNumbers(long newBaseFileNumber) {
+        long oldBaseFileNumber = baseFileNumber;
+        for (int i = 0;
+             i < entryLsnByteArray.length;
+             i += BYTES_PER_LSN_ENTRY) {
+            if (getFileOffset(i) == -1) {
+                continue;
+            }
+
+            long curEntryFileNumber =
+                oldBaseFileNumber + getFileNumberOffset(i);
+            long newCurEntryFileNumberOffset =
+                (curEntryFileNumber - newBaseFileNumber);
+            if (newCurEntryFileNumberOffset > Byte.MAX_VALUE) {
+                long undoOffset = oldBaseFileNumber - newBaseFileNumber;
+                for (int j = i - BYTES_PER_LSN_ENTRY;
+                     j >= 0;
+                     j -= BYTES_PER_LSN_ENTRY) {
+                    if (getFileOffset(j) == -1) {
+                        continue;
+                    }
+                    setFileNumberOffset
+                        (j, (byte) (getFileNumberOffset(j) - undoOffset));
+                    //assert getFileNumberOffset(j) >= 0;
+                }
+                return false;
+            }
+            setFileNumberOffset(i, (byte) newCurEntryFileNumberOffset);
+
+            //assert getFileNumberOffset(i) >= 0;
+        }
+        return true;
+    }
+
+    private void setFileNumberOffset(int offset, byte fileNumberOffset) {
+        entryLsnByteArray[offset] = fileNumberOffset;
+    }
+
+    private byte getFileNumberOffset(int offset) {
+        return entryLsnByteArray[offset];
+    }
+
+    private void setFileOffset(int offset, int fileOffset) {
+        put3ByteInt(offset + 1, fileOffset);
+    }
+
+    private int getFileOffset(int offset) {
+        return get3ByteInt(offset + 1);
+    }
+
+    private void put3ByteInt(int offset, int value) {
+        entryLsnByteArray[offset++] = (byte) (value >>> 0);
+        entryLsnByteArray[offset++] = (byte) (value >>> 8);
+        entryLsnByteArray[offset]   = (byte) (value >>> 16);
+    }
+
+    private int get3ByteInt(int offset) {
+        int ret = (entryLsnByteArray[offset++] & 0xFF) << 0;
+        ret += (entryLsnByteArray[offset++] & 0xFF) << 8;
+        ret += (entryLsnByteArray[offset]   & 0xFF) << 16;
+        if (ret == THREE_BYTE_NEGATIVE_ONE) {
+            ret = -1;
+        }
+
+        return ret;
+    }
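+
+    /*
+     * Example of the 3-byte little-endian packing (illustrative): the value
+     * 0x123456 is stored as the byte sequence {0x56, 0x34, 0x12}.  A stored
+     * file offset of -1 round-trips through THREE_BYTE_NEGATIVE_ONE
+     * (presumably 0xFFFFFF; the constant is defined elsewhere in this
+     * class):
+     *
+     *   put3ByteInt(pos, 0x123456);
+     *   assert get3ByteInt(pos) == 0x123456;
+     */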
+
+    /**
+     * @return true if the idx'th entry has been deleted, although the
+     * transaction that performed the deletion may not be committed.
+     */
+    public boolean isEntryPendingDeleted(int idx) {
+        return ((entryStates[idx] & PENDING_DELETED_BIT) != 0);
+    }
+
+    /**
+     * Set pendingDeleted to true.
+     */
+    public void setPendingDeleted(int idx) {
+        entryStates[idx] |= PENDING_DELETED_BIT;
+        entryStates[idx] |= DIRTY_BIT;
+    }
+
+    /**
+     * Set pendingDeleted to false.
+     */
+    public void clearPendingDeleted(int idx) {
+        entryStates[idx] &= CLEAR_PENDING_DELETED_BIT;
+        entryStates[idx] |= DIRTY_BIT;
+    }
+
+    /**
+     * @return true if the idx'th entry is deleted for sure.  If a transaction
+     * performed the deletion, it has been committed.
+     */
+    public boolean isEntryKnownDeleted(int idx) {
+        return ((entryStates[idx] & KNOWN_DELETED_BIT) != 0);
+    }
+
+    /**
+     * Set knownDeleted to true.
+     */
+    void setKnownDeleted(int idx) {
+        entryStates[idx] |= KNOWN_DELETED_BIT;
+        entryStates[idx] |= DIRTY_BIT;
+    }
+
+    /**
+     * Set knownDeleted to false.
+     */
+    void clearKnownDeleted(int idx) {
+        entryStates[idx] &= CLEAR_KNOWN_DELETED_BIT;
+        entryStates[idx] |= DIRTY_BIT;
+    }
+
+    /**
+     * @return true if the object is dirty.
+     */
+    boolean isDirty(int idx) {
+        return ((entryStates[idx] & DIRTY_BIT) != 0);
+    }
+
+    /**
+     * @return the number of entries in this node.
+     */
+    public int getNEntries() {
+        return nEntries;
+    }
+
+    /*
+     * In the future we may want to move the following static methods to an
+     * EntryState utility class and share all state bit twiddling among IN,
+     * ChildReference, and DeltaInfo.
+     */
+
+    /**
+     * Returns true if the given state is known deleted.
+     */
+    static boolean isStateKnownDeleted(byte state) {
+        return ((state & KNOWN_DELETED_BIT) != 0);
+    }
+
+    /**
+     * Returns true if the given state is pending deleted.
+     */
+    static boolean isStatePendingDeleted(byte state) {
+        return ((state & PENDING_DELETED_BIT) != 0);
+    }
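+
+    /*
+     * Sketch of how the state byte is manipulated by the methods above (the
+     * bit positions themselves are defined by the *_BIT constants elsewhere
+     * in this class):
+     *
+     *   byte state = 0;
+     *   state |= KNOWN_DELETED_BIT;             // set the flag
+     *   assert isStateKnownDeleted(state);      // now true
+     *   state &= CLEAR_KNOWN_DELETED_BIT;       // clear the flag
+     *   assert !isStateKnownDeleted(state);     // false again
+     */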
+
+    /**
+     * @return the maximum number of entries in this node.
+     */
+    int getMaxEntries() {
+        return entryTargets.length;
+    }
+
+    /**
+     * Returns the target of the idx'th entry or null if a pendingDeleted or
+     * knownDeleted entry has been cleaned.  Note that null can only be
+     * returned for a slot that could contain a deleted LN, not other node
+     * types and not a DupCountLN since DupCountLNs are never deleted.  Null is
+     * also returned for a KnownDeleted slot with a NULL_LSN.
+     *
+     * @return the target node or null.
+     */
+    public Node fetchTarget(int idx)
+        throws DatabaseException {
+
+        if (entryTargets[idx] == null) {
+            /* Fault object in from log. */
+            long lsn = getLsn(idx);
+            if (lsn == DbLsn.NULL_LSN) {
+                if (!isEntryKnownDeleted(idx)) {
+                    throw new DatabaseException(makeFetchErrorMsg
+                        ("NULL_LSN without KnownDeleted", this, lsn,
+                         entryStates[idx]));
+                }
+
+                /*
+                 * Ignore a NULL_LSN (return null) if KnownDeleted is set.
+                 * This is the remnant of an incomplete insertion -- see
+                 * Tree.insert. [#13126]
+                 */
+            } else {
+                try {
+                    EnvironmentImpl env = databaseImpl.getDbEnvironment();
+                    LogEntry logEntry = env.getLogManager().getLogEntry(lsn);
+                    Node node = (Node) logEntry.getMainItem();
+                    node.postFetchInit(databaseImpl, lsn);
+                    assert isLatchOwnerForWrite();
+                    /* Ensure keys are transactionally correct. [#15704] */
+                    byte[] lnSlotKey = null;
+                    if (logEntry instanceof LNLogEntry) {
+                        LNLogEntry lnEntry = (LNLogEntry) logEntry;
+                        lnSlotKey = containsDuplicates() ?
+                            lnEntry.getDupKey() : lnEntry.getKey();
+                    }
+                    updateNode(idx, node, lnSlotKey);
+                } catch (LogFileNotFoundException LNFE) {
+                    if (!isEntryKnownDeleted(idx) &&
+                        !isEntryPendingDeleted(idx)) {
+                        throw new DatabaseException
+                            (makeFetchErrorMsg(LNFE.toString(),
+                                               this,
+                                               lsn,
+                                               entryStates[idx]));
+                    }
+
+                    /*
+                     * Ignore. Cleaner got to the log file, so just return
+                     * null.  It is safe to ignore a deleted file for a
+                     * pendingDeleted entry because the cleaner will not clean
+                     * files with active transactions.
+                     */
+                } catch (Exception e) {
+                    throw new DatabaseException
+                        (makeFetchErrorMsg(e.toString(), this, lsn,
+                                           entryStates[idx]),
+                         e);
+                }
+            }
+        }
+
+        return entryTargets[idx];
+    }
+
+    static String makeFetchErrorMsg(String msg, IN in, long lsn, byte state) {
+
+        /*
+         * Bolster the exception with the LSN, which is critical for
+         * debugging. Otherwise, the exception propagates upward and loses the
+         * problem LSN.
+         */
+        StringBuffer sb = new StringBuffer();
+        sb.append("fetchTarget of ");
+        if (lsn == DbLsn.NULL_LSN) {
+            sb.append("null lsn");
+        } else {
+            sb.append(DbLsn.getNoFormatString(lsn));
+        }
+        if (in != null) {
+            sb.append(" parent IN=").append(in.getNodeId());
+            sb.append(" IN class=").append(in.getClass().getName());
+            sb.append(" lastFullVersion=");
+            sb.append(DbLsn.getNoFormatString(in.getLastFullVersion()));
+            sb.append(" parent.getDirty()=").append(in.getDirty());
+        }
+        sb.append(" state=").append(state);
+        sb.append(" ").append(msg);
+        return sb.toString();
+    }
+
+    /*
+     * All methods that modify the entry array must adjust memory sizing.
+     */
+
+    /**
+     * Set the idx'th entry of this node.
+     */
+    public void setEntry(int idx,
+                         Node target,
+                         byte[] keyVal,
+                         long lsn,
+                         byte state) {
+
+        long oldSize = computeLsnOverhead();
+        int newNEntries = idx + 1;
+
+        if (newNEntries > nEntries) {
+
+            /*
+             * If the new entry is going to bump nEntries, then we don't need
+             * the entry size accounting included in oldSize.
+             */
+            nEntries = newNEntries;
+        } else {
+            oldSize += getEntryInMemorySize(idx);
+        }
+
+        entryTargets[idx] = target;
+        setKeyAndPrefix(idx, keyVal);
+
+        /* setLsnElement can mutate to an array of longs. */
+        setLsnElement(idx, lsn);
+        entryStates[idx] = state;
+        long newSize = getEntryInMemorySize(idx) + computeLsnOverhead();
+        updateMemorySize(oldSize, newSize);
+        setDirty(true);
+    }
+
+    /**
+     * Set the LSN to null for the idx'th entry of this node.  Only allowed for
+     * a temporary database.  Used to wipe an LSN for a file that is being
+     * cleaned and will be deleted.
+     */
+    public void clearLsn(int idx) {
+        assert getDatabase().isTemporary();
+        setLsn(idx, DbLsn.NULL_LSN);
+    }
+
+    /**
+     * Update the idx'th entry of this node. This flavor is used when the
+     * target LN is being modified, by an operation like a delete or update. We
+     * don't have to check whether the LSN has been nulled or not, because we
+     * know an LSN existed before. Also, the modification of the target is done
+     * in the caller, so instead of passing in the old and new nodes, we pass
+     * in the new node and old size.
+     */
+    public void updateNode(int idx,
+                           Node node,
+                           long oldSize,
+                           long lsn,
+                           byte[] lnSlotKey) {
+        long newSize = node.getMemorySizeIncludedByParent();
+
+        boolean suffixesChanged = setLNSlotKey(idx, node, lnSlotKey);
+        if (suffixesChanged) {
+
+            /*
+             * Changes were made to multiple entries and/or the prefix, so
+             * recomputing the inMemorySize based on the entry at index is
+             * not sufficient.  Recalculate the memory
+             * usage of the entire IN and adjust the cache accordingly.
+             */
+            long curInMemorySize = inMemorySize;
+            updateMemorySize(curInMemorySize, computeMemorySize());
+        }
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        if (!suffixesChanged) {
+            updateMemorySize(oldSize, newSize);
+        }
+        setDirty(true);
+    }
+
+    /**
+     * Update the idx'th entry, replacing the node and, if appropriate, the LN
+     * slot key.  See updateNode(int, Node, long, byte[]) for details.
+     *
+     * Note that the LSN is not passed to this method because the node has been
+     * either (a) fetched in from disk and is not dirty, or (b) will be written
+     * out later by something like a checkpoint.
+     *
+     * Note: does not dirty the node unless the LN slot key is changed.
+     */
+    public void updateNode(int idx, Node node, byte[] lnSlotKey) {
+        long oldSize = getEntryInMemorySize(idx);
+        setTarget(idx, node);
+        setLNSlotKey(idx, node, lnSlotKey);
+        long newSize = getEntryInMemorySize(idx);
+        updateMemorySize(oldSize, newSize);
+    }
+
+    /**
+     * Update the idx'th entry, replacing the node and, if appropriate, the LN
+     * slot key.
+     *
+     * The updateNode methods are special versions of updateEntry that are
+     * called to update the node in a slot.  When an LN node is changed, the
+     * slot key may also need to be updated when a partial key comparator is
+     * used.  Callers must be sure to pass the correct lnSlotKey parameter when
+     * passing an LN for the node parameter.  See setLNSlotKey for details.
+     * [#15704]
+     */
+    public void updateNode(int idx, Node node, long lsn, byte[] lnSlotKey) {
+        long oldSize = getEntryInMemorySize(idx);
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        setTarget(idx, node);
+        setLNSlotKey(idx, node, lnSlotKey);
+        long newSize = getEntryInMemorySize(idx);
+        updateMemorySize(oldSize, newSize);
+        setDirty(true);
+    }
+
+    /**
+     * Sets the idx'th key of this node if it is not identical to the given
+     * key, and the node is an LN. [#15704]
+     *
+     * This method is called when an LN is fetched in order to ensure the key
+     * slot is transactionally correct.  A key can change in three
+     * circumstances, when a key comparator is configured that may not compare
+     * all bytes of the key:
+     *
+     * 1) The user calls Cursor.putCurrent to update the data of a duplicate
+     * data item.  CursorImpl.putCurrent will call this method indirectly to
+     * update the key.
+     *
+     * 2) A transaction aborts or a BIN becomes out of date due to the
+     * non-transactional nature of INs.  The Node is set to null during abort
+     * and recovery.  IN.fetchCurrent will call this method indirectly to
+     * update the key.
+     *
+     * 3) A slot for a deleted LN is reused.  The key in the slot is updated
+     * by IN.updateEntry along with the node and LSN.
+     *
+     * Note that transaction abort and recovery of BIN (and DBIN) entries may
+     * cause incorrect keys to be present in the tree, since these entries are
+     * non-transactional.  However, an incorrect key in a BIN slot may only be
+     * present when the node in that slot is null.  Undo/redo sets the node to
+     * null.  When the LN node is fetched, the key in the slot is set to the
+     * LN's key (or data for DBINs), which is the source of truth and is
+     * transactionally correct.
+     *
+     * @param node is the node that is being set in the slot.  The newKey is
+     * set only if the node is an LN (and is non-null).
+     *
+     * @param newKey is the key to set in the slot and is either the LN key or
+     * the duplicate data depending on whether this is a BIN or DBIN.  It may
+     * be null if it is known that the key cannot be changed (as in putCurrent
+     * in a BIN).  It must be null if the node is not an LN.
+     *
+     * @return true if the key was changed and the memory size must be updated.
+     */
+    private boolean setLNSlotKey(int idx, Node node, byte[] newKey) {
+
+        assert newKey == null || node instanceof LN;
+
+        /*
+         * The new key may be null if a dup LN was deleted, in which case there
+         * is no need to update it.  There is no need to compare keys if there
+         * is no comparator configured, since a key cannot be changed when the
+         * default comparator is used.
+         */
+        if (newKey != null &&
+            getKeyComparator() != null &&
+            !Arrays.equals(newKey, getKey(idx))) {
+            setKeyAndDirty(idx, newKey);
+            setDirty(true);
+            return true;
+        } else {
+            return false;
+        }
+    }
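+
+    /*
+     * Hedged example of when the slot key update above matters: suppose the
+     * application configures a Btree comparator that compares only the
+     * first four bytes of each key.  Two keys differing in byte five are
+     * "equal" to that comparator, so a slot may legitimately hold the other
+     * variant; when the LN is fetched, the LN's own key is the
+     * transactional source of truth and replaces the slot key here.
+     */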
+
+    /**
+     * Update the idx'th entry of this node.
+     *
+     * Note that although this method allows updating the node, it always
+     * replaces the key and therefore does not need an lnSlotKey parameter.
+     * See the updateNode methods for more information.  [#15704]
+     */
+    public void updateEntry(int idx, Node node, long lsn, byte[] key) {
+        long oldSize = getEntryInMemorySize(idx);
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        setTarget(idx, node);
+        boolean suffixesChanged = setKeyAndDirty(idx, key);
+        if (suffixesChanged) {
+
+            /*
+             * Changes were made to multiple entries and/or the prefix, so
+             * recomputing the inMemorySize based on the entry at index is
+             * not sufficient.  Recalculate the memory
+             * usage of the entire IN and adjust the cache accordingly.
+             */
+            long curInMemorySize = inMemorySize;
+            updateMemorySize(curInMemorySize, computeMemorySize());
+        } else {
+            long newSize = getEntryInMemorySize(idx);
+            updateMemorySize(oldSize, newSize);
+        }
+        setDirty(true);
+    }
+
+    /**
+     * Update the idx'th entry of this node.
+     */
+    public void updateEntry(int idx, long lsn) {
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        setDirty(true);
+    }
+
+    /**
+     * Update the idx'th entry of this node.
+     */
+    public void updateEntry(int idx, long lsn, byte state) {
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        entryStates[idx] = state;
+        setDirty(true);
+    }
+
+    /**
+     * Update the idx'th entry of this node.  Only update the key if the new
+     * key is less than the existing key.
+     */
+    private void updateEntryCompareKey(int idx,
+                                       Node node,
+                                       long lsn,
+                                       byte[] key) {
+        long oldSize = getEntryInMemorySize(idx);
+        if (notOverwritingDeferredWriteEntry(lsn)) {
+            setLsn(idx, lsn);
+        }
+        setTarget(idx, node);
+        byte[] existingKey = getKey(idx);
+        int s = Key.compareKeys(key, existingKey, getKeyComparator());
+        boolean suffixesChanged = false;
+        if (s < 0) {
+            suffixesChanged = setKeyAndDirty(idx, key);
+        }
+        if (suffixesChanged) {
+
+            /*
+             * Changes were made to multiple entries and/or the prefix, so
+             * recomputing the inMemorySize based on the entry at index is
+             * not sufficient.  Recalculate the memory
+             * usage of the entire IN and adjust the cache accordingly.
+             */
+            long curInMemorySize = inMemorySize;
+            updateMemorySize(curInMemorySize, computeMemorySize());
+        } else {
+            long newSize = getEntryInMemorySize(idx);
+            updateMemorySize(oldSize, newSize);
+        }
+        setDirty(true);
+    }
+
+    /**
+     * When a deferred write database calls one of the optionalLog methods,
+     * it may receive a DbLsn.NULL_LSN as the return value, because the
+     * logging didn't really happen. A NULL_LSN should never overwrite a
+     * valid LSN (one that resulted from Database.sync() or eviction), lest
+     * we lose the handle to the last on-disk version.
+     */
+    boolean notOverwritingDeferredWriteEntry(long newLsn) {
+        if (databaseImpl.isDeferredWriteMode() &&
+            (newLsn == DbLsn.NULL_LSN)) {
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    /*
+     * Memory usage calculations.
+     */
+    public boolean verifyMemorySize() {
+
+        long calcMemorySize = computeMemorySize();
+        if (calcMemorySize != inMemorySize) {
+
+            String msg = "-Warning: Out of sync. " +
+                "Should be " + calcMemorySize +
+                " / actual: " +
+                inMemorySize + " node: " + getNodeId();
+            Tracer.trace(Level.INFO,
+                         databaseImpl.getDbEnvironment(),
+                         msg);
+
+            System.out.println(msg);
+
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Returns the amount of memory currently budgeted for this IN.
+     */
+    public long getBudgetedMemorySize() {
+        return inMemorySize - accumulatedDelta;
+    }
+
+    /**
+     * Returns the treeAdmin memory in objects referenced by this IN.
+     * Specifically, this refers to the DbFileSummaryMap held by
+     * MapLNs
+     */
+    public long getTreeAdminMemorySize() {
+        return 0;  // by default, no treeAdminMemory
+    }
+
+    /**
+     * For unit tests.
+     */
+    public long getInMemorySize() {
+        return inMemorySize;
+    }
+
+    private long getEntryInMemorySize(int idx) {
+        return getEntryInMemorySize(entryKeyVals[idx], entryTargets[idx]);
+    }
+
+    protected long getEntryInMemorySize(byte[] key, Node target) {
+
+        /*
+         * Do not count state size here, since it is counted as overhead
+         * during initialization.
+         */
+        long ret = 0;
+        if (key != null) {
+            ret += MemoryBudget.byteArraySize(key.length);
+        }
+        if (target != null) {
+            ret += target.getMemorySizeIncludedByParent();
+        }
+        return ret;
+    }
+
+    /**
+     * Count up the memory usage attributable to this node alone.  LN
+     * children are counted by their BIN/DIN parents, but INs are not
+     * counted by their parents because they are resident on the IN list.
+     * The identifierKey is intentionally not counted in the memory budget.
+     */
+    protected long computeMemorySize() {
+        MemoryBudget mb = databaseImpl.getDbEnvironment().getMemoryBudget();
+        long calcMemorySize = getMemoryOverhead(mb);
+
+        calcMemorySize += computeLsnOverhead();
+        for (int i = 0; i < nEntries; i++) {
+            calcMemorySize += getEntryInMemorySize(i);
+        }
+
+        if (keyPrefix != null) {
+            calcMemorySize += MemoryBudget.byteArraySize(keyPrefix.length);
+        }
+
+        if (provisionalObsolete != null) {
+            calcMemorySize += provisionalObsolete.size() *
+                MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
+        }
+
+        return calcMemorySize;
+    }
+
+    /* Called once at environment startup by MemoryBudget. */
+    public static long computeOverhead(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /*
+         * Overhead consists of all the fields in this class plus the
+         * entry arrays in the IN class.
+         */
+        return MemoryBudget.IN_FIXED_OVERHEAD +
+            IN.computeArraysOverhead(configManager);
+    }
+
+    private int computeLsnOverhead() {
+        if (entryLsnLongArray == null) {
+            return MemoryBudget.byteArraySize(entryLsnByteArray.length);
+        } else {
+            return MemoryBudget.ARRAY_OVERHEAD +
+                (entryLsnLongArray.length *
+                 MemoryBudget.PRIMITIVE_LONG_ARRAY_ITEM_OVERHEAD);
+        }
+    }
+
+    protected static long computeArraysOverhead(DbConfigManager configManager)
+        throws DatabaseException {
+
+        /* Count three arrays: states, keys, and nodes. */
+        int capacity = configManager.getInt(EnvironmentParams.NODE_MAX);
+        return
+            MemoryBudget.byteArraySize(capacity) + // state array
+            (capacity *
+             (2 * MemoryBudget.OBJECT_ARRAY_ITEM_OVERHEAD)); // keys + nodes
+    }
+
+    /* Overridden by subclasses. */
+    protected long getMemoryOverhead(MemoryBudget mb) {
+        return mb.getINOverhead();
+    }
+
+    protected void updateMemorySize(ChildReference oldRef,
+                                    ChildReference newRef) {
+        long delta = 0;
+        if (newRef != null) {
+            delta = getEntryInMemorySize(newRef.getKey(), newRef.getTarget());
+        }
+
+        if (oldRef != null) {
+            delta -= getEntryInMemorySize(oldRef.getKey(), oldRef.getTarget());
+        }
+        changeMemorySize(delta);
+    }
+
+    protected void updateMemorySize(long oldSize, long newSize) {
+        long delta = newSize - oldSize;
+        changeMemorySize(delta);
+    }
+
+    void updateMemorySize(Node oldNode, Node newNode) {
+        long delta = 0;
+        if (newNode != null) {
+            delta = newNode.getMemorySizeIncludedByParent();
+        }
+
+        if (oldNode != null) {
+            delta -= oldNode.getMemorySizeIncludedByParent();
+        }
+        changeMemorySize(delta);
+    }
+
+    private void changeMemorySize(long delta) {
+        inMemorySize += delta;
+
+        /*
+         * Only update the environment cache usage stats if this IN is actually
+         * on the IN list. For example, when we create new INs, they are
+         * manipulated off the IN list before being added; if we updated the
+         * environment wide cache then, we'd end up double counting.
+         */
+        if (inListResident) {
+            EnvironmentImpl env = databaseImpl.getDbEnvironment();
+
+            accumulatedDelta += delta;
+            if (accumulatedDelta > ACCUMULATED_LIMIT ||
+                accumulatedDelta < -ACCUMULATED_LIMIT) {
+                env.getInMemoryINs().memRecalcUpdate(this, accumulatedDelta);
+                env.getMemoryBudget().updateTreeMemoryUsage(accumulatedDelta);
+                accumulatedDelta = 0;
+            }
+        }
+    }
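+
+    /*
+     * Sketch of the batching above with hypothetical numbers: if
+     * ACCUMULATED_LIMIT were 1000 bytes, three successive deltas of +400
+     * would leave accumulatedDelta at 1200 after the third call, trigger a
+     * single updateTreeMemoryUsage(1200), and reset the accumulator -- one
+     * budget update instead of three.
+     */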
+
+    /**
+     * Called when adding/removing this IN to/from the INList.
+     */
+    public void setInListResident(boolean resident) {
+        inListResident = resident;
+    }
+
+    /**
+     * Returns whether this IN is on the INList.
+     */
+    public boolean getInListResident() {
+        return inListResident;
+    }
+
+    /**
+     * Returns whether the given key is greater than or equal to the first key
+     * in the IN and less than or equal to the last key in the IN.  This method
+     * is used to determine whether a key to be inserted belongs in this IN,
+     * without doing a tree search.  If false is returned it is still possible
+     * that the key belongs in this IN, but a tree search must be performed to
+     * find out.
+     */
+    public boolean isKeyInBounds(byte[] keyVal) {
+
+        if (nEntries < 2) {
+            return false;
+        }
+
+        Comparator<byte[]> userCompareToFcn = getKeyComparator();
+        int cmp;
+        byte[] myKey;
+
+        /* Compare key given to my first key. */
+        myKey = getKey(0);
+        cmp = Key.compareKeys(keyVal, myKey, userCompareToFcn);
+        if (cmp < 0) {
+            return false;
+        }
+
+        /* Compare key given to my last key. */
+        myKey = getKey(nEntries - 1);
+        cmp = Key.compareKeys(keyVal, myKey, userCompareToFcn);
+        if (cmp > 0) {
+            return false;
+        }
+
+        return true;
+    }
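+
+    /*
+     * Illustrative example (hypothetical keys): for an IN whose first key
+     * is "c" and last key is "m", isKeyInBounds("f") returns true and
+     * isKeyInBounds("a") returns false -- though "a" might still belong in
+     * this IN if it is the leftmost node; only a tree search can tell.
+     */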
+
+    /**
+     * Find the entry in this IN for which the key argument is >= the
+     * entry's key.
+     *
+     * Currently uses a binary search, but eventually, this may use binary or
+     * linear search depending on key size, number of entries, etc.
+     *
+     * Note that the 0'th entry's key is treated specially in an IN.  It always
+     * compares lower than any other key.
+     *
+     * This is public so that DbCursorTest can access it.
+     *
+     * @param key - the key to search for.
+     * @param indicateIfDuplicate - true if EXACT_MATCH should
+     * be or'd onto the return value if key is already present in this node.
+     * @param exact - true if an exact match must be found.
+     * @return offset for the entry that has a key >= the arg.  0 if key
+     * is less than the 1st entry.  -1 if exact is true and no exact match
+     * is found.  If indicateIfDuplicate is true and an exact match was found
+     * then EXACT_MATCH is or'd onto the return value.
+     */
+    public int findEntry(byte[] key,
+                         boolean indicateIfDuplicate,
+                         boolean exact) {
+        int high = nEntries - 1;
+        int low = 0;
+        int middle = 0;
+
+        Comparator<byte[]> userCompareToFcn = getKeyComparator();
+
+        /*
+         * INs are special in that entry[0] has a virtual key that always
+         * compares lower than any other key.  BINs don't treat key[0]
+         * specially.  But if the caller asked for an exact match or to
+         * indicate duplicates, then use key[0] and forget about the special
+         * entry zero comparison.
+         */
+        boolean entryZeroSpecialCompare =
+            entryZeroKeyComparesLow() && !exact && !indicateIfDuplicate;
+
+        assert nEntries >= 0;
+
+        while (low <= high) {
+            middle = (high + low) / 2;
+            int s;
+            byte[] middleKey = null;
+            if (middle == 0 && entryZeroSpecialCompare) {
+                s = 1;
+            } else {
+                middleKey = getKey(middle);
+                s = Key.compareKeys(key, middleKey, userCompareToFcn);
+            }
+            if (s < 0) {
+                high = middle - 1;
+            } else if (s > 0) {
+                low = middle + 1;
+            } else {
+                int ret;
+                if (indicateIfDuplicate) {
+                    ret = middle | EXACT_MATCH;
+                } else {
+                    ret = middle;
+                }
+
+                if ((ret >= 0) && exact && isEntryKnownDeleted(ret & 0xffff)) {
+                    return -1;
+                } else {
+                    return ret;
+                }
+            }
+        }
+
+        /*
+         * No match found.  Either return -1 if caller wanted exact matches
+         * only, or return entry for which arg key is > entry key.
+         */
+        if (exact) {
+            return -1;
+        } else {
+            return high;
+        }
+    }
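+
+    /*
+     * Worked example of the return values (hypothetical single-byte keys in
+     * a BIN, where entry 0 is not treated specially): for entries {b, d, f},
+     *
+     *   findEntry(d, true,  false) == 1 | EXACT_MATCH
+     *   findEntry(e, false, false) == 1   (greatest entry with key <= e)
+     *   findEntry(a, false, true)  == -1  (no exact match)
+     */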
+
+    /**
+     * Inserts the argument ChildReference into this IN.  Assumes this node is
+     * already latched by the caller.
+     *
+     * @param entry The ChildReference to insert into the IN.
+     *
+     * @return true if the entry was successfully inserted, false
+     * if it was a duplicate.
+     *
+     * @throws InconsistentNodeException if the node is full
+     * (it should have been split earlier).
+     */
+    public boolean insertEntry(ChildReference entry)
+        throws DatabaseException {
+
+        return (insertEntry1(entry) & INSERT_SUCCESS) != 0;
+    }
+
+    /**
+     * Same as insertEntry except that it returns the index where the dup was
+     * found instead of false.  The return value is |'d with either
+     * INSERT_SUCCESS or EXACT_MATCH depending on whether the entry was
+     * inserted or found to be a duplicate, respectively.
+     *
+     * This returns a failure if there's a duplicate match. The caller must do
+     * the processing to check if the entry is actually deleted and can be
+     * overwritten. This is foisted upon the caller rather than handled in this
+     * object because there may be some latch releasing/retaking in order to
+     * check a child LN.
+     *
+     * Inserts the argument ChildReference into this IN.  Assumes this node is
+     * already latched by the caller.
+     *
+     * @param entry The ChildReference to insert into the IN.
+     *
+     * @return either (1) the index of location in the IN where the entry was
+     * inserted |'d with INSERT_SUCCESS, or (2) the index of the duplicate in
+     * the IN |'d with EXACT_MATCH if the entry was found to be a duplicate.
+     *
+     * @throws InconsistentNodeException if the node is full (it should have
+     * been split earlier).
+     */
+    public int insertEntry1(ChildReference entry)
+        throws DatabaseException {
+
+        if (nEntries >= entryTargets.length) {
+            compress(null, true, null);
+        }
+
+        if (nEntries < entryTargets.length) {
+            byte[] key = entry.getKey();
+
+            /*
+             * Search without requiring an exact match, but do let us know
+             * the index of the match if there is one.
+             */
+            int index = findEntry(key, true, false);
+
+            if (index >= 0 && (index & EXACT_MATCH) != 0) {
+
+                /*
+                 * There is an exact match.  Don't insert; let the caller
+                 * decide what to do with this duplicate.
+                 */
+                return index;
+            } else {
+
+                /*
+                 * There was no key match, so insert to the right of this
+                 * entry.
+                 */
+                index++;
+            }
+
+            /* We found a spot for insert, shift entries as needed. */
+            if (index < nEntries) {
+                int oldSize = computeLsnOverhead();
+
+                /*
+                 * Adding elements to the LSN array can change the space
+                 * used.
+                 */
+                shiftEntriesRight(index);
+                changeMemorySize(computeLsnOverhead() - oldSize);
+            }
+            int oldSize = computeLsnOverhead();
+            entryTargets[index] = entry.getTarget();
+            /* setLsnElement can mutate to an array of longs. */
+            setLsnElement(index, entry.getLsn());
+            entryStates[index] = entry.getState();
+            nEntries++;
+            boolean suffixesChanged = setKeyAndPrefix(index, key);
+            adjustCursorsForInsert(index);
+            updateMemorySize(oldSize, getEntryInMemorySize(index) +
+                                      computeLsnOverhead());
+            setDirty(true);
+            if (suffixesChanged) {
+
+                /*
+                 * Changes were made to multiple entries and/or the prefix,
+                 * so recomputing the inMemorySize based on the entry at
+                 * index is not sufficient.  Recalculate the memory usage of
+                 * the entire IN and adjust the cache accordingly.
+                 */
+                long curInMemorySize = inMemorySize;
+                updateMemorySize(curInMemorySize, computeMemorySize());
+            }
+            return (index | INSERT_SUCCESS);
+        } else {
+            throw new InconsistentNodeException
+                ("Node " + getNodeId() +
+                 " should have been split before calling insertEntry");
+        }
+    }
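+
+    /*
+     * Sketch of how a caller might consume the return value (illustrative
+     * only; assumes the flag bits sit above any valid index, as the masking
+     * with 0xffff elsewhere in this class suggests):
+     *
+     *   int result = in.insertEntry1(childRef);
+     *   if ((result & INSERT_SUCCESS) != 0) {
+     *       int insertedIdx = result & ~INSERT_SUCCESS;
+     *   } else {
+     *       int dupIdx = result & ~EXACT_MATCH; // caller checks deleted state
+     *   }
+     */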
+
+    /**
+     * Deletes the ChildReference with the key arg from this IN.  Assumes this
+     * node is already latched by the caller.
+     *
+     * This seems to only be used by INTest.
+     *
+     * @param key The key of the reference to delete from the IN.
+     *
+     * @param maybeValidate true if assert validation should occur prior to
+     * delete.  Set this to false during recovery.
+     *
+     * @return true if the entry was successfully deleted, false if it was not
+     * found.
+     */
+    boolean deleteEntry(byte[] key, boolean maybeValidate)
+        throws DatabaseException {
+
+        if (nEntries == 0) {
+            return false; // caller should put this node on the IN cleaner list
+        }
+
+        int index = findEntry(key, false, true);
+        if (index < 0) {
+            return false;
+        }
+
+        return deleteEntry(index, maybeValidate);
+    }
+
+    /**
+     * Deletes the ChildReference at index from this IN.  Assumes this node is
+     * already latched by the caller.
+     *
+     * @param index The index of the entry to delete from the IN.
+     *
+     * @param maybeValidate true if asserts are enabled.
+     *
+     * @return true if the entry was successfully deleted, false if it was not
+     * found.
+     */
+    public boolean deleteEntry(int index, boolean maybeValidate)
+        throws DatabaseException {
+
+        if (nEntries == 0) {
+            return false;
+        }
+
+        /* Check the subtree validation only if maybeValidate is true. */
+        assert maybeValidate ?
+            validateSubtreeBeforeDelete(index) :
+            true;
+
+        if (index < nEntries) {
+            updateMemorySize(getEntryInMemorySize(index), 0);
+            int oldLSNArraySize = computeLsnOverhead();
+            /* LSNArray.setElement can mutate to an array of longs. */
+            for (int i = index; i < nEntries - 1; i++) {
+                setEntryInternal(i + 1, i);
+            }
+            clearEntry(nEntries - 1);
+            updateMemorySize(oldLSNArraySize, computeLsnOverhead());
+            nEntries--;
+            setDirty(true);
+            setProhibitNextDelta();
+
+            /*
+             * Note that we don't have to adjust cursors for delete, since
+             * there should be nothing pointing at this record.
+             */
+            traceDelete(Level.FINEST, index);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Do nothing since INs don't support deltas.
+     */
+    public void setProhibitNextDelta() {
+    }
+
+    /* Called by the incompressor. */
+    public boolean compress(BINReference binRef,
+                            boolean canFetch,
+                            LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        return false;
+    }
+
+    public boolean isCompressible() {
+        return false;
+    }
+
+    /*
+     * Validate the subtree that we're about to delete.  Make sure there is
+     * no more than one valid entry on each IN and that the last level of
+     * the tree is empty.  Also check that there are no cursors on any BINs
+     * in this subtree.  Assumes the caller is holding the latch on this
+     * parent node.
+     *
+     * While we could latch couple down the tree, rather than hold latches as
+     * we descend, we are presumably about to delete this subtree so
+     * concurrency shouldn't be an issue.
+     *
+     * @return true if the subtree rooted at the entry specified by "index" is
+     * ok to delete.
+     */
+    boolean validateSubtreeBeforeDelete(int index)
+        throws DatabaseException {
+
+        if (index >= nEntries) {
+
+            /*
+             * There's no entry here, so of course this entry is deletable.
+             */
+            return true;
+        } else {
+            Node child = fetchTarget(index);
+            return child != null && child.isValidForDelete();
+        }
+    }
+
+    /**
+     * Return true if this node needs splitting.  For the moment, needing to be
+     * split is defined by there being no free entries available.
+     */
+    public boolean needsSplitting() {
+        return (entryTargets.length - nEntries) < 1;
+    }
+
+    /**
+     * Indicates whether entry 0's key is "special" in that it always
+     * compares less than any other key.  BINs don't have the special key,
+     * but INs do.
+     */
+    boolean entryZeroKeyComparesLow() {
+        return true;
+    }
+
+    /**
+     * Split this node into two nodes.  The parent IN is passed in as
+     * 'parent' and should be latched by the caller.
+     *
+     * childIndex is the index in parent of where "this" can be found.
+     */
+    void split(IN parent, int childIndex, int maxEntries, CacheMode cacheMode)
+        throws DatabaseException {
+
+        splitInternal(parent, childIndex, maxEntries, -1, cacheMode);
+    }
+
+    protected void splitInternal(IN parent,
+                                 int childIndex,
+                                 int maxEntries,
+                                 int splitIndex,
+                                 CacheMode cacheMode)
+        throws DatabaseException {
+
+        /*
+         * Find the index of the existing identifierKey so we know which IN
+         * (new or old) to put it in.
+         */
+        if (identifierKey == null) {
+            throw new InconsistentNodeException("idkey is null");
+        }
+        int idKeyIndex = findEntry(identifierKey, false, false);
+
+        if (splitIndex < 0) {
+            splitIndex = nEntries / 2;
+        }
+
+        int low, high;
+        IN newSibling = null;
+
+        if (idKeyIndex < splitIndex) {
+
+            /*
+             * Current node (this) keeps left half entries.  Right half entries
+             * will go in the new node.
+             */
+            low = splitIndex;
+            high = nEntries;
+        } else {
+
+            /*
+             * Current node (this) keeps right half entries.  Left half entries
+             * and entry[0] will go in the new node.
+             */
+            low = 0;
+            high = splitIndex;
+        }
+
+        byte[] newIdKey = getKey(low);
+        long parentLsn = DbLsn.NULL_LSN;
+
+        newSibling = createNewInstance(newIdKey, maxEntries, level);
+        newSibling.latch(cacheMode);
+        long oldMemorySize = inMemorySize;
+        try {
+            int toIdx = 0;
+            boolean deletedEntrySeen = false;
+            BINReference binRef = null;
+            for (int i = low; i < high; i++) {
+                byte[] thisKey = getKey(i);
+                if (isEntryPendingDeleted(i)) {
+                    if (!deletedEntrySeen) {
+                        deletedEntrySeen = true;
+                        assert (newSibling instanceof BIN);
+                        binRef = ((BIN) newSibling).createReference();
+                    }
+                    binRef.addDeletedKey(new Key(thisKey));
+                }
+                newSibling.setEntry(toIdx++,
+                                    entryTargets[i],
+                                    thisKey,
+                                    getLsn(i),
+                                    entryStates[i]);
+                clearEntry(i);
+            }
+
+            if (deletedEntrySeen) {
+                databaseImpl.getDbEnvironment().
+                    addToCompressorQueue(binRef, false);
+            }
+
+            int newSiblingNEntries = (high - low);
+
+            /*
+             * Remove the entries that we just copied into newSibling from this
+             * node.
+             */
+            if (low == 0) {
+                shiftEntriesLeft(newSiblingNEntries);
+            }
+
+            newSibling.nEntries = toIdx;
+            nEntries -= newSiblingNEntries;
+            setDirty(true);
+
+            adjustCursors(newSibling, low, high);
+
+            /*
+             * Parent refers to child through an element of the entries array.
+             * Depending on which half of the BIN we copied keys from, we
+             * either have to adjust one pointer and add a new one, or we have
+             * to just add a new pointer to the new sibling.
+             *
+             * Note that we must use the provisional form of logging because
+             * all three log entries must be read atomically. The parent must
+             * get logged last, as all referred-to children must precede
+             * it. Provisional entries guarantee that all three are processed
+             * as a unit. Recovery skips provisional entries, so the changed
+             * children are only used if the parent makes it out to the log.
+             */
+            EnvironmentImpl env = databaseImpl.getDbEnvironment();
+            LogManager logManager = env.getLogManager();
+            INList inMemoryINs = env.getInMemoryINs();
+
+            long newSiblingLsn =
+                newSibling.optionalLogProvisional(logManager, parent);
+
+            long myNewLsn = optionalLogProvisional(logManager, parent);
+
+            /*
+             * When we update the parent entry, we use updateEntryCompareKey so
+             * that we don't replace the parent's key that points at 'this'
+             * with a key that is > than the existing one.  Replacing the
+             * parent's key with something > would effectively render a piece
+             * of the subtree inaccessible.  So only replace the parent key
+             * with something <= the existing one.  See tree/SplitTest.java for
+             * more details on the scenario.
+             */
+            if (low == 0) {
+
+                /*
+                 * Change the original entry to point to the new child and add
+                 * an entry to point to the newly logged version of this
+                 * existing child.
+                 */
+                if (childIndex == 0) {
+                    parent.updateEntryCompareKey(childIndex, newSibling,
+                                                 newSiblingLsn, newIdKey);
+                } else {
+                    parent.updateNode(childIndex, newSibling, newSiblingLsn,
+                                      null /*lnSlotKey*/);
+                }
+
+                byte[] ourKey = getKey(0);
+                boolean insertOk = parent.insertEntry
+                    (new ChildReference(this, ourKey, myNewLsn));
+                assert insertOk;
+            } else {
+
+                /*
+                 * Update the existing child's LSN to reflect the newly
+                 * logged version and insert the new child into the parent.
+                 */
+                if (childIndex == 0) {
+
+                    /*
+                     * this's idkey may be < the parent's entry 0 so we need
+                     * to update parent's entry 0 with the key for 'this'.
+                     */
+                    parent.updateEntryCompareKey
+                        (childIndex, this, myNewLsn, getKey(0));
+                } else {
+                    parent.updateNode(childIndex, this, myNewLsn,
+                                      null /*lnSlotKey*/);
+                }
+                boolean insertOk = parent.insertEntry
+                    (new ChildReference(newSibling, newIdKey, newSiblingLsn));
+                assert insertOk;
+            }
+
+            /*
+             * Compute the key prefix for this node now that it has been
+             * split, and recalculate the suffixes accordingly.
+             */
+            byte[] newKeyPrefix = computeKeyPrefix(-1);
+            recalcSuffixes(newKeyPrefix, null, -1);
+
+            /* Only recalc if there are multiple entries in newSibling. */
+            if (newSibling.getNEntries() > 1) {
+                byte[] newSiblingPrefix = newSibling.computeKeyPrefix(-1);
+                newSibling.recalcSuffixes(newSiblingPrefix, null, -1);
+                newSibling.initMemorySize();
+            }
+
+            parentLsn = parent.optionalLog(logManager);
+
+            /*
+             * Maintain dirtiness if this is the root, so this parent will be
+             * checkpointed. Other parents who are not roots are logged as part
+             * of the propagation of splits upwards.
+             */
+            if (parent.isRoot()) {
+                parent.setDirty(true);
+            }
+
+            /*
+             * Update size. newSibling and parent are correct, but this IN has
+             * had its entries shifted and is not correct.
+             */
+            long newSize = computeMemorySize();
+            updateMemorySize(oldMemorySize, newSize);
+            inMemoryINs.add(newSibling);
+
+            /* Debug log this information. */
+            traceSplit(Level.FINE, parent,
+                       newSibling, parentLsn, myNewLsn,
+                       newSiblingLsn, splitIndex, idKeyIndex, childIndex);
+        } finally {
+            newSibling.releaseLatch();
+        }
+    }
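+
+    /*
+     * Rough picture of a split with splitIndex = 2 on five entries
+     * (illustrative): if idKeyIndex >= splitIndex, the new sibling receives
+     * the left half and "this" keeps the right half:
+     *
+     *   before:  this = [k0 k1 | k2 k3 k4]
+     *   after:   newSibling = [k0 k1],  this = [k2 k3 k4]
+     *
+     * The sibling and this node are logged provisionally and the parent is
+     * logged last, so recovery treats the three log entries as a unit.
+     */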
+
+    /**
+     * Called when we know we are about to split on behalf of a key that is the
+     * minimum (leftSide) or maximum (!leftSide) of this node.  This is
+     * achieved by just forcing the split to occur either one element in from
+     * the left or the right (i.e. splitIndex is 1 or nEntries - 1).
+     */
+    void splitSpecial(IN parent,
+                      int parentIndex,
+                      int maxEntriesPerNode,
+                      byte[] key,
+                      boolean leftSide,
+                      CacheMode cacheMode)
+        throws DatabaseException {
+
+        int index = findEntry(key, false, false);
+        if (leftSide &&
+            index == 0) {
+            splitInternal(parent, parentIndex, maxEntriesPerNode,
+                          1, cacheMode);
+        } else if (!leftSide &&
+                   index == (nEntries - 1)) {
+            splitInternal(parent, parentIndex,
+                          maxEntriesPerNode, nEntries - 1, cacheMode);
+        } else {
+            split(parent, parentIndex, maxEntriesPerNode, cacheMode);
+        }
+    }
+
+    void adjustCursors(IN newSibling,
+                       int newSiblingLow,
+                       int newSiblingHigh) {
+        /* Cursors never refer to IN's. */
+    }
+
+    void adjustCursorsForInsert(int insertIndex) {
+        /* Cursors never refer to IN's. */
+    }
+
+    /**
+     * Return the relevant user defined comparison function for this type of
+     * node.  For IN's and BIN's, this is the BTree Comparison function.
+     */
+    public Comparator<byte[]> getKeyComparator() {
+        return databaseImpl.getBtreeComparator();
+    }
+
+    /**
+     * Shift entries to the right starting with (and including) the entry at
+     * index. Caller is responsible for incrementing nEntries.
+     *
+     * @param index - The position to start shifting from.
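+     *
+     * Illustrative example (not in the original comment): with nEntries == 3
+     * and entries [A, B, C, -], shiftEntriesRight(1) yields [A, -, B, C];
+     * slot 1 is cleared for the caller to fill before it increments
+     * nEntries.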
+     */
+    private void shiftEntriesRight(int index) {
+        for (int i = nEntries; i > index; i--) {
+            setEntryInternal(i - 1, i);
+        }
+        clearEntry(index);
+        setDirty(true);
+    }
+
+    /**
+     * Shift entries starting at the byHowMuch'th element to the left, thus
+     * removing the first byHowMuch elements of the entries array.  This
+     * always starts at the 0th entry.  Caller is responsible for decrementing
+     * nEntries.
+     *
+     * @param byHowMuch - The number of entries to remove from the left side
+     * of the entries array.
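+     *
+     * Illustrative example (not in the original comment): with nEntries == 4
+     * and entries [A, B, C, D], shiftEntriesLeft(2) yields [C, D, -, -];
+     * the caller then decrements nEntries to 2.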
+     */
+    private void shiftEntriesLeft(int byHowMuch) {
+        for (int i = 0; i < nEntries - byHowMuch; i++) {
+            setEntryInternal(i + byHowMuch, i);
+        }
+        for (int i = nEntries - byHowMuch; i < nEntries; i++) {
+            clearEntry(i);
+        }
+        setDirty(true);
+    }
+
+    /**
+     * Check that the IN is in a valid state.  For now, validity means that the
+     * keys are in sorted order and that there are more than 0 entries.
+     * maxKey, if non-null, specifies that all keys in this node must be less
+     * than maxKey.
+     */
+    @Override
+    public void verify(byte[] maxKey)
+        throws DatabaseException {
+
+        /********* never used, but may serve as the basis of a verify()
+                   method in the future.
+        try {
+            Comparator<byte[]> userCompareToFcn =
+                (databaseImpl == null ? null : getKeyComparator());
+
+            byte[] key1 = null;
+            for (int i = 1; i < nEntries; i++) {
+                key1 = entryKeyVals[i];
+                byte[] key2 = entryKeyVals[i - 1];
+
+                int s = Key.compareKeys(key1, key2, userCompareToFcn);
+                if (s <= 0) {
+                    throw new InconsistentNodeException
+                        ("IN " + getNodeId() + " key " + (i-1) +
+                         " (" + Key.dumpString(key2, 0) +
+                         ") and " +
+                         i + " (" + Key.dumpString(key1, 0) +
+                         ") are out of order");
+                }
+            }
+
+            boolean inconsistent = false;
+            if (maxKey != null && key1 != null) {
+                if (Key.compareKeys(key1, maxKey, userCompareToFcn) >= 0) {
+                    inconsistent = true;
+                }
+            }
+
+            if (inconsistent) {
+                throw new InconsistentNodeException
+                    ("IN " + getNodeId() +
+                     " has entry larger than next entry in parent.");
+            }
+        } catch (DatabaseException DE) {
+            DE.printStackTrace(System.out);
+        }
+        *****************/
+    }
+
+    /**
+     * Add self and children to the given in-memory IN list. Called by
+     * recovery; can run with no latching.
+     */
+    @Override
+    void rebuildINList(INList inList)
+        throws DatabaseException {
+
+        /*
+         * Recompute your in memory size first and then add yourself to the
+         * list.
+         */
+        initMemorySize();
+        inList.add(this);
+
+        /*
+         * Add your children if they're resident. (LNs know how to stop the
+         * flow).
+         */
+        for (int i = 0; i < nEntries; i++) {
+            Node n = getTarget(i);
+            if (n != null) {
+                n.rebuildINList(inList);
+            }
+        }
+    }
+
+    /**
+     * Remove self and children from the in-memory IN list. The INList latch is
+     * already held before this is called.  Also count removed nodes as
+     * obsolete.
+     */
+    void accountForSubtreeRemoval(INList inList,
+                                  LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        if (nEntries > 1) {
+            throw new DatabaseException
+                ("Found non-deletable IN " + getNodeId() +
+                 " while flushing INList. nEntries = " + nEntries);
+        }
+
+        /* Remove self. */
+        inList.remove(this);
+
+        /* Count as obsolete. */
+        if (lastFullVersion != DbLsn.NULL_LSN) {
+            localTracker.countObsoleteNode
+                (lastFullVersion, getLogType(), 0, databaseImpl);
+        }
+
+        /*
+         * Remove your children.  They should already be resident.  (LNs know
+         * how to stop.)
+         */
+        for (int i = 0; i < nEntries; i++) {
+            Node n = fetchTarget(i);
+            if (n != null) {
+                n.accountForSubtreeRemoval(inList, localTracker);
+            }
+        }
+    }
+
+    /**
+     * Check if this node fits the qualifications for being part of a deletable
+     * subtree. It can only have one IN child and no LN children.
+     *
+     * We assume that this is only called under an assert.
+     */
+    boolean isValidForDelete()
+        throws DatabaseException {
+
+        /*
+         * Can only have one valid child, and that child should be
+         * deletable.
+         */
+        if (nEntries > 1) {            // more than 1 entry.
+            return false;
+        } else if (nEntries == 1) {    // 1 entry, check child
+            Node child = fetchTarget(0);
+            if (child == null) {
+                return false;
+            }
+            child.latchShared(CacheMode.UNCHANGED);
+            boolean ret = child.isValidForDelete();
+            child.releaseLatch();
+            return ret;
+        } else {                       // 0 entries.
+            return true;
+        }
+    }
+
+    /**
+     * Determine if 'this' is the parent of a child (targetNodeId).  If not,
+     * find a child of 'this' that may be the parent and return it.  If there
+     * are no possibilities, then return null.  Note that the keys of the
+     * target are passed in as args so we don't have to latch the target to
+     * look at them.  Also, 'this' is latched upon entry.
+     *
+     * @param doFetch If true, fetch the child in the pursuit of this search.
+     * If false, give up if the child is not resident. In that case, we have
+     * a potential ancestor, but are not sure if this is the parent.
+     *
+     * On return, if result.parent is non-null, then the IN that it refers to
+     * will be latched.  If an exception is thrown, then "this" is latched.
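+     *
+     * Summary of outcomes (see the code below): result.exactParentFound is
+     * set when the matching child is found, either by node id or by level
+     * (this node is at targetLevel + 1); result.parent is set to this IN
+     * when it is only a prospective parent, and to null when
+     * requireExactMatch rules that out; result.childNotResident is set when
+     * doFetch is false and the selected child is not in cache.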
+     */
+    void findParent(Tree.SearchType searchType,
+                    long targetNodeId,
+                    boolean targetContainsDuplicates,
+                    boolean targetIsRoot,
+                    byte[] targetMainTreeKey,
+                    byte[] targetDupTreeKey,
+                    SearchResult result,
+                    boolean requireExactMatch,
+                    CacheMode cacheMode,
+                    int targetLevel,
+                    List<TrackingInfo> trackingList,
+                    boolean doFetch)
+        throws DatabaseException {
+
+        assert doFetch ? isLatchOwnerForWrite() : isLatchOwnerForRead();
+
+        /* We are this node -- there's no parent in this subtree. */
+        if (getNodeId() == targetNodeId) {
+            releaseLatch();
+            result.exactParentFound = false;  // no parent exists
+            result.keepSearching = false;
+            result.parent = null;
+            return;
+        }
+
+        /* Find an entry */
+        if (getNEntries() == 0) {
+
+            /*
+             * No more children, can't descend anymore. Return this node; it
+             * could be the parent.
+             */
+            result.keepSearching = false;
+            result.exactParentFound = false;
+            if (requireExactMatch) {
+                releaseLatch();
+                result.parent = null;
+            } else {
+                result.parent = this;
+                result.index = -1;
+            }
+            return;
+        } else {
+            if (searchType == Tree.SearchType.NORMAL) {
+                /* Look for the entry matching key in the current node. */
+                result.index = findEntry(selectKey(targetMainTreeKey,
+                                                   targetDupTreeKey),
+                                         false, false);
+            } else if (searchType == Tree.SearchType.LEFT) {
+                /* Left search, always take the 0th entry. */
+                result.index = 0;
+            } else if (searchType == Tree.SearchType.RIGHT) {
+                /* Right search, always take the highest entry. */
+                result.index = nEntries - 1;
+            } else {
+                throw new IllegalArgumentException
+                    ("Invalid value of searchType: " + searchType);
+            }
+
+            if (result.index < 0) {
+                result.keepSearching = false;
+                result.exactParentFound = false;
+                if (requireExactMatch) {
+                    releaseLatch();
+                    result.parent = null;
+                } else {
+                    /* This node is going to be the prospective parent. */
+                    result.parent = this;
+                }
+                return;
+            }
+
+            /*
+             * Get the child node that matches.  If fetchTarget returns null, a
+             * deleted LN was cleaned.
+             */
+            Node child = null;
+            boolean isDeleted = false;
+            if (isEntryKnownDeleted(result.index)) {
+                isDeleted = true;
+            } else if (doFetch) {
+                child = fetchTarget(result.index);
+                if (child == null) {
+                    isDeleted = true;
+                }
+            } else {
+                child = getTarget(result.index);
+            }
+
+            /* The child is a deleted cleaned entry or is knownDeleted. */
+            if (isDeleted) {
+                result.exactParentFound = false;
+                result.keepSearching = false;
+                if (requireExactMatch) {
+                    result.parent = null;
+                    releaseLatch();
+                } else {
+                    result.parent = this;
+                }
+                return;
+            }
+
+            /* Try matching by level. */
+            if (targetLevel >= 0 && level == targetLevel + 1) {
+                result.exactParentFound = true;
+                result.parent = this;
+                result.keepSearching = false;
+                return;
+            }
+
+            if (child == null) {
+                assert !doFetch;
+
+                /*
+                 * This node will be the possible parent.
+                 */
+                result.keepSearching = false;
+                result.exactParentFound = false;
+                result.parent = this;
+                result.childNotResident = true;
+                return;
+            }
+
+            long childLsn = getLsn(result.index);
+
+            /*
+             * Note that if the child node needs latching, it's done in
+             * isSoughtNode.
+             */
+            if (child.isSoughtNode(targetNodeId, cacheMode)) {
+                /* We found the child, so this is the parent. */
+                result.exactParentFound = true;
+                result.parent = this;
+                result.keepSearching = false;
+                return;
+            } else {
+
+                /*
+                 * Decide whether we can descend, or the search is going to be
+                 * unsuccessful or whether this node is going to be the future
+                 * parent. It depends on what this node is, the target, and the
+                 * child.
+                 */
+                descendOnParentSearch(result,
+                                      targetContainsDuplicates,
+                                      targetIsRoot,
+                                      targetNodeId,
+                                      child,
+                                      requireExactMatch);
+
+                /* If we're tracking, save the LSN and node id */
+                if (trackingList != null) {
+                    if ((result.parent != this) && (result.parent != null)) {
+                        trackingList.add(new TrackingInfo(childLsn,
+                                                          child.getNodeId()));
+                    }
+                }
+                return;
+            }
+        }
+    }
+
+    /*
+     * If this search can go further, set result.parent to the child. If it
+     * can't, and this IN is a possible new parent to this child, set
+     * result.parent to this IN. If the search can't go further and this IN
+     * can't be a parent to this child, set result.parent to null.
+     */
+    protected void descendOnParentSearch(SearchResult result,
+                                         boolean targetContainsDuplicates,
+                                         boolean targetIsRoot,
+                                         long targetNodeId,
+                                         Node child,
+                                         boolean requireExactMatch)
+        throws DatabaseException {
+
+        if (child.canBeAncestor(targetContainsDuplicates)) {
+            /* We can search further. */
+            releaseLatch();
+            result.parent = (IN) child;
+        } else {
+
+            /*
+             * Our search ends, we didn't find it. If we need an exact match,
+             * give up, if we only need a potential match, keep this node
+             * latched and return it.
+             */
+            ((IN) child).releaseLatch();
+            result.exactParentFound = false;
+            result.keepSearching = false;
+
+            if (requireExactMatch) {
+                releaseLatch();
+                result.parent = null;
+            } else {
+                result.parent = this;
+            }
+        }
+    }
+
+    /*
+     * @return true if this IN is the node being sought (nid).  Note that
+     * if this returns false, this node remains latched.
+     */
+    protected boolean isSoughtNode(long nid, CacheMode cacheMode)
+        throws DatabaseException {
+
+        latch(cacheMode);
+        if (getNodeId() == nid) {
+            releaseLatch();
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /*
+     * An IN can be an ancestor of any internal node.
+     */
+    protected boolean canBeAncestor(boolean targetContainsDuplicates) {
+        return true;
+    }
+
+    /**
+     * Returns whether this node can be evicted.  This is slower than
+     * (getEvictionType() == MAY_EVICT_NODE) because it does a more
+     * stringent check, and is used by the evictor after a node has been
+     * selected, to check that it is still evictable. The more specific
+     * evaluation done by getEvictionType() is used when initially selecting a
+     * node for inclusion in the eviction set.
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    public boolean isEvictable() {
+
+        if (isEvictionProhibited()) {
+            return false;
+        }
+
+        /*
+         * An IN can be evicted only if its resident children are all evictable
+         * LNs, because those children can be logged (if dirty) and stripped
+         * before this node is evicted.  Non-LN children or pinned LNs (MapLNs
+         * for open DBs) will prevent eviction.
+         */
+        if (hasPinnedChildren()) {
+            return false;
+        }
+
+        for (int i = 0; i < getNEntries(); i++) {
+            /* Target and LSN can be null in DW. Not evictable in that case. */
+            if (getLsn(i) == DbLsn.NULL_LSN &&
+                getTarget(i) == null) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Returns the eviction type for this IN, for use by the evictor.  Uses the
+     * internal isEvictionProhibited and getChildEvictionType methods that may
+     * be overridden by subclasses.
+     *
+     * This differs from isEvictable() because it does more detailed evaluation
+     * about the degree of evictability. It's used generally when selecting
+     * candidates for eviction.
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     *
+     * @return MAY_EVICT_LNS if evictable LNs may be stripped; otherwise,
+     * MAY_EVICT_NODE if the node itself may be evicted; otherwise,
+     * MAY_NOT_EVICT.
+     */
+    public int getEvictionType() {
+
+        if (isEvictionProhibited()) {
+            return MAY_NOT_EVICT;
+        } else {
+            return getChildEvictionType();
+        }
+    }
+
+    /**
+     * Returns whether the node is not evictable, irrespective of the status
+     * of the children nodes.
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    boolean isEvictionProhibited() {
+
+        if (isDbRoot()) {
+
+            /*
+             * Disallow eviction of a dirty DW DB root, since logging the MapLN
+             * (via DbTree.modifyDbRoot) will make all the other changes to the
+             * DW DB effectively non-provisional (durable).  This implies that
+             * a DW DB root cannot be evicted until it is synced (or removed).
+             * [#13415]
+             */
+            if (databaseImpl.isDeferredWriteMode() && getDirty()) {
+                return true;
+            }
+
+            /*
+             * Disallow eviction of the mapping and naming DB roots, because
+             * the use count is not incremented for these DBs.  In addition,
+             * their eviction and re-fetching is a special case that is not
+             * worth supporting.  [#13415]
+             */
+            DatabaseId dbId = databaseImpl.getId();
+            if (dbId.equals(DbTree.ID_DB_ID) ||
+                dbId.equals(DbTree.NAME_DB_ID)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Returns whether any resident children are not LNs (are INs).
+     * For an IN, that equates to whether there are any resident children
+     * at all.
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    boolean hasPinnedChildren() {
+
+        return hasResidentChildren();
+    }
+
+    /**
+     * Returns the eviction type based on the status of child nodes,
+     * irrespective of isEvictionProhibited.
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    int getChildEvictionType() {
+
+        return hasResidentChildren() ? MAY_NOT_EVICT : MAY_EVICT_NODE;
+    }
+
+    /**
+     * Returns whether any child is non-null.  Is final to indicate it is not
+     * overridden (unlike hasPinnedChildren, isEvictionProhibited, etc).
+     *
+     * Note that the IN may or may not be latched when this method is called.
+     * Returning the wrong answer is OK in that case (it will be called again
+     * later when latched), but an exception should not occur.
+     */
+    final boolean hasResidentChildren() {
+
+        for (int i = 0; i < getNEntries(); i++) {
+            if (getTarget(i) != null) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    /*
+     * DbStat support.
+     */
+    void accumulateStats(TreeWalkerStatsAccumulator acc) {
+        acc.processIN(this, Long.valueOf(getNodeId()), getLevel());
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * When splits and checkpoints intermingle in a deferred write database,
+     * a checkpoint target may appear which has a valid target but a null LSN.
+     * Deferred write dbs are written out in checkpoint style by either
+     * Database.sync() or a checkpoint which has cleaned a file containing
+     * deferred write entries. For example,
+     *   INa
+     *    |
+     *   BINb
+     *
+     *  A checkpoint or Database.sync starts
+     *  The INList is traversed, dirty nodes are selected
+     *  BINb is bypassed on the INList, since it's not dirty
+     *  BINb is split, creating a new sibling, BINc, and dirtying INa
+     *  INa is selected as a dirty node for the ckpt
+     *
+     * If this happens, INa is in the selected dirty set, but not its dirty
+     * child BINb and new child BINc.
+     *
+     * In a durable db, the existence of BINb and BINc is logged
+     * anyway. But in a deferred write db, there is an entry that points to
+     * BINc, but no logged version.
+     *
+     * This will not cause problems with eviction, because INa can't be
+     * evicted until BINb and BINc are logged, are non-dirty, and are detached.
+     * But it can cause problems at recovery, because INa will have a null LSN
+     * for a valid entry, and the LN children of BINc will not find a home.
+     * To prevent this, search for all dirty children that might have been
+     * missed during the selection phase, and write them out. It's not
+     * sufficient to write only null-LSN children, because the existing sibling
+     * must be logged lest LN children recover twice (once in the new sibling,
+     * once in the old existing sibling).
+     */
+    public void logDirtyChildren()
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = getDatabase().getDbEnvironment();
+
+        /* Look for targets that are dirty. */
+        for (int i = 0; i < getNEntries(); i++) {
+
+            IN child = (IN) getTarget(i);
+            if (child != null) {
+                child.latch(CacheMode.UNCHANGED);
+                try {
+                    if (child.getDirty()) {
+                        /* Ask descendants to log their children. */
+                        child.logDirtyChildren();
+                        long childLsn =
+                            child.log(envImpl.getLogManager(),
+                                      false, // allow deltas
+                                      true,  // is provisional
+                                      false, // proactive migration
+                                      true,  // backgroundIO
+                                      this); // provisional parent
+                        updateEntry(i, childLsn);
+                    }
+                } finally {
+                    child.releaseLatch();
+                }
+            }
+        }
+    }
+
+    /**
+     * Log this IN and clear the dirty flag.
+     */
+    public long log(LogManager logManager)
+        throws DatabaseException {
+
+        return logInternal(logManager,
+                           false,  // allowDeltas
+                           Provisional.NO,
+                           false,  // proactiveMigration
+                           false,  // backgroundIO
+                           null);  // parent
+    }
+
+    /**
+     * Log this node with all available options.
+     */
+    public long log(LogManager logManager,
+                    boolean allowDeltas,
+                    boolean isProvisional,
+                    boolean proactiveMigration,
+                    boolean backgroundIO,
+                    IN parent) // for provisional
+        throws DatabaseException {
+
+        return logInternal(logManager,
+                           allowDeltas,
+                           isProvisional ? Provisional.YES : Provisional.NO,
+                           proactiveMigration,
+                           backgroundIO,
+                           parent);
+    }
+
+    public long log(LogManager logManager,
+                    boolean allowDeltas,
+                    Provisional provisional,
+                    boolean proactiveMigration,
+                    boolean backgroundIO,
+                    IN parent) // for provisional
+        throws DatabaseException {
+
+        return logInternal(logManager,
+                           allowDeltas,
+                           provisional,
+                           proactiveMigration,
+                           backgroundIO,
+                           parent);
+    }
+
+    /**
+     * Log this IN and clear the dirty flag, or return DbLsn.NULL_LSN
+     * without logging if the database is in deferred write mode.
+     */
+    public long optionalLog(LogManager logManager)
+        throws DatabaseException {
+
+        if (databaseImpl.isDeferredWriteMode()) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return logInternal(logManager,
+                               false,  // allowDeltas
+                               Provisional.NO,
+                               false,  // proactiveMigration
+                               false,  // backgroundIO
+                               null);  // parent
+        }
+    }
+
+    /**
+     * Log this node provisionally and clear the dirty flag.
+     * @param parent the parent IN, used to accumulate provisionally obsolete
+     * LSNs when this node is logged provisionally
+     * @return LSN of the new log entry, or DbLsn.NULL_LSN if the database is
+     * in deferred write mode
+     */
+    public long optionalLogProvisional(LogManager logManager, IN parent)
+        throws DatabaseException {
+
+        if (databaseImpl.isDeferredWriteMode()) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return logInternal(logManager,
+                               false,  // allowDeltas
+                               Provisional.YES,
+                               false,  // proactiveMigration
+                               false,  // backgroundIO
+                               parent);
+        }
+    }
+
+    /**
+     * Bottleneck method for all single-IN logging.  Multi-IN logging uses
+     * beforeLog and afterLog instead.
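+     *
+     * Sketch of the multi-IN pattern (based on the beforeLog/afterLog
+     * javadocs; the batch submission call itself lives in the log manager
+     * and is not shown in this file): callers build an INLogItem and
+     * INLogContext per node, invoke beforeLog on each, submit the batch to
+     * the LogManager, then invoke afterLog on each item to count obsolete
+     * LSNs and clear dirty flags.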
+     */
+    private long logInternal(LogManager logManager,
+                             boolean allowDeltas,
+                             Provisional provisional,
+                             boolean proactiveMigration,
+                             boolean backgroundIO,
+                             IN parent)
+        throws DatabaseException {
+
+        INLogItem item = new INLogItem();
+        item.provisional = provisional;
+        item.parent = parent;
+        item.repContext = ReplicationContext.NO_REPLICATE;
+
+        INLogContext context = new INLogContext();
+        context.nodeDb = getDatabase();
+        context.backgroundIO = backgroundIO;
+        context.allowDeltas = allowDeltas;
+        context.proactiveMigration = proactiveMigration;
+
+        beforeLog(logManager, item, context);
+        logManager.log(item, context);
+        afterLog(logManager, item, context);
+
+        return item.newLsn;
+    }
+
+    /**
+     * Pre-log processing.  Used implicitly for single-item logging and
+     * explicitly for multi-item logging.  Overridden by subclasses as needed.
+     *
+     * Decide how to log this node.  INs are always logged in full.  Cleaner LN
+     * migration is never performed since it only applies to BINs.
+     */
+    public void beforeLog(LogManager logManager,
+                          INLogItem item,
+                          INLogContext context)
+        throws DatabaseException {
+
+        item.oldLsn = countObsoleteDuringLogging(item.provisional) ?
+            lastFullVersion :
+            DbLsn.NULL_LSN;
+        item.entry = new INLogEntry(this);
+    }
+
+    /**
+     * Post-log processing.  Used implicitly for single-item logging and
+     * explicitly for multi-item logging.  Overridden by subclasses as needed.
+     *
+     * The last version of this node must be counted obsolete at the correct
+     * time. If logging non-provisionally, the last version of this node and
+     * any provisionally logged descendants are immediately obsolete and can be
+     * flushed. If logging provisionally, the last version isn't obsolete until
+     * an ancestor is logged non-provisionally, so propagate obsolete LSNs
+     * upwards.
+     */
+    public void afterLog(LogManager logManager,
+                         INLogItem item,
+                         INLogContext context)
+        throws DatabaseException {
+
+        if (countObsoleteDuringLogging(item.provisional)) {
+            flushProvisionalObsolete(logManager);
+        } else {
+            if (item.parent != null) {
+                item.parent.trackProvisionalObsolete
+                    (this, lastFullVersion, DbLsn.NULL_LSN);
+            }
+        }
+
+        setLastFullLsn(item.newLsn);
+        setDirty(false);
+    }
+
+    /**
+     * Returns whether to count the prior version of an IN (as well as
+     * accumulated provisionally obsolete LSNs for child nodes) obsolete when
+     * logging the new version.
+     *
+     * True is returned if we are logging the IN non-provisionally, since the
+     * non-provisional version durably replaces the prior version and causes
+     * all provisional children to also become durable.
+     *
+     * True is also returned if the database is temporary. Since we never use a
+     * temporary DB past recovery, prior versions of an IN are never used.
+     * [#16928]
+     */
+    private boolean countObsoleteDuringLogging(Provisional provisional) {
+        return provisional != Provisional.YES ||
+               databaseImpl.isTemporary();
+    }
+
+    /**
+     * Adds the given obsolete LSNs and any tracked obsolete LSNs for the given
+     * child IN to this IN's tracking list.  This method is called to track
+     * obsolete LSNs when a child IN is logged provisionally.  Such LSNs cannot
+     * be considered obsolete until an ancestor IN is logged non-provisionally.
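+     *
+     * For example (illustrative): when a child IN is logged provisionally
+     * during a checkpoint, afterLog routes the child's prior LSN here rather
+     * than counting it obsolete immediately; the accumulated LSNs are handed
+     * to the live tracker by flushProvisionalObsolete once an ancestor is
+     * logged non-provisionally.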
+     */
+    void trackProvisionalObsolete(IN child,
+                                  long obsoleteLsn1,
+                                  long obsoleteLsn2) {
+
+        int memDelta = 0;
+
+        if (child.provisionalObsolete != null) {
+
+            int childMemDelta = child.provisionalObsolete.size() *
+                                MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
+
+            if (provisionalObsolete != null) {
+                provisionalObsolete.addAll(child.provisionalObsolete);
+            } else {
+                provisionalObsolete = child.provisionalObsolete;
+            }
+            child.provisionalObsolete = null;
+
+            child.changeMemorySize(0 - childMemDelta);
+            memDelta += childMemDelta;
+        }
+
+        if (obsoleteLsn1 != DbLsn.NULL_LSN || obsoleteLsn2 != DbLsn.NULL_LSN) {
+
+            if (provisionalObsolete == null) {
+                provisionalObsolete = new ArrayList<Long>();
+            }
+
+            if (obsoleteLsn1 != DbLsn.NULL_LSN) {
+                provisionalObsolete.add(Long.valueOf(obsoleteLsn1));
+                memDelta += MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
+            }
+
+            if (obsoleteLsn2 != DbLsn.NULL_LSN) {
+                provisionalObsolete.add(Long.valueOf(obsoleteLsn2));
+                memDelta += MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
+            }
+        }
+
+        if (memDelta != 0) {
+            changeMemorySize(memDelta);
+        }
+    }
+
+    /**
+     * Adds the provisional obsolete tracking information in this node to the
+     * live tracker.  This method is called when this node is logged
+     * non-provisionally.
+     */
+    void flushProvisionalObsolete(LogManager logManager)
+        throws DatabaseException {
+
+        if (provisionalObsolete != null) {
+
+            int memDelta = provisionalObsolete.size() *
+                MemoryBudget.LONG_LIST_PER_ITEM_OVERHEAD;
+
+            logManager.countObsoleteINs(provisionalObsolete, getDatabase());
+            provisionalObsolete = null;
+
+            changeMemorySize(0 - memDelta);
+        }
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_IN;
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        int size = super.getLogSize();          // ancestors
+        size += LogUtils.getByteArrayLogSize(identifierKey); // identifier key
+        if (keyPrefix != null) {
+            size += LogUtils.getByteArrayLogSize(keyPrefix);
+        }
+        size += 1;                              // isRoot
+        size += LogUtils.getPackedIntLogSize(nEntries);
+        size += LogUtils.getPackedIntLogSize(level);
+        size += LogUtils.getPackedIntLogSize(entryTargets.length);
+        size += LogUtils.getBooleanLogSize();   // compactLsnsRep
+        boolean compactLsnsRep = (entryLsnLongArray == null);
+        if (compactLsnsRep) {
+            size += LogUtils.INT_BYTES;         // baseFileNumber
+        }
+
+        for (int i = 0; i < nEntries; i++) {    // entries
+            size += LogUtils.getByteArrayLogSize(entryKeyVals[i]) + // key
+                (compactLsnsRep ? LogUtils.INT_BYTES :
+                 LogUtils.getLongLogSize()) +                       // LSN
+                1;                                                  // state
+        }
+        return size;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        super.writeToLog(logBuffer);
+
+        boolean hasKeyPrefix = (keyPrefix != null);
+        LogUtils.writeByteArray(logBuffer, identifierKey);
+        byte booleans = (byte) (isRoot() ? 1 : 0);
+        booleans |= (hasKeyPrefix ? 2 : 0);
+        logBuffer.put((byte) booleans);
+        if (hasKeyPrefix) {
+            LogUtils.writeByteArray(logBuffer, keyPrefix);
+        }
+        LogUtils.writePackedInt(logBuffer, nEntries);
+        LogUtils.writePackedInt(logBuffer, level);
+        LogUtils.writePackedInt(logBuffer, entryTargets.length);
+
+        /* true if compact representation. */
+        boolean compactLsnsRep = (entryLsnLongArray == null);
+        LogUtils.writeBoolean(logBuffer, compactLsnsRep);
+        if (compactLsnsRep) {
+            LogUtils.writeInt(logBuffer, (int) baseFileNumber);
+        }
+
+        for (int i = 0; i < nEntries; i++) {
+            LogUtils.writeByteArray(logBuffer, entryKeyVals[i]); // key
+
+            /*
+             * A NULL_LSN may be stored when an incomplete insertion occurs,
+             * but in that case the KnownDeleted flag must be set. See
+             * Tree.insert.  [#13126]
+             */
+            assert checkForNullLSN(i) :
+                "logging IN " + getNodeId() + " with null lsn child " +
+                " db=" + databaseImpl.getDebugName() +
+                " isDeferredWriteMode=" + databaseImpl.isDeferredWriteMode() +
+                " isTemporary=" + databaseImpl.isTemporary();
+
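+            /*
+             * The compact representation stores each LSN in 4 bytes: a
+             * 1-byte file number offset relative to baseFileNumber, followed
+             * by the low 3 bytes of the file offset, least significant byte
+             * first.
+             */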
+            if (compactLsnsRep) {                                // LSN
+                int offset = i << 2;
+                int fileOffset = getFileOffset(offset);
+                logBuffer.put(getFileNumberOffset(offset));
+                logBuffer.put((byte) ((fileOffset >>> 0) & 0xff));
+                logBuffer.put((byte) ((fileOffset >>> 8) & 0xff));
+                logBuffer.put((byte) ((fileOffset >>> 16) & 0xff));
+            } else {
+                LogUtils.writeLong(logBuffer, entryLsnLongArray[i]);
+            }
+            logBuffer.put(entryStates[i]);                       // state
+            entryStates[i] &= CLEAR_DIRTY_BIT;
+        }
+    }
+
+    /*
+     * Used for assertion to prevent writing a null lsn to the log.
+     */
+    private boolean checkForNullLSN(int index) {
+        boolean ok;
+        if (this instanceof BIN) {
+            ok = !(getLsn(index) == DbLsn.NULL_LSN &&
+                   (entryStates[index] & KNOWN_DELETED_BIT) == 0);
+        } else {
+            ok = (getLsn(index) != DbLsn.NULL_LSN);
+        }
+        return ok;
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion);
+
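+        /*
+         * Version notes (derived from the reads below): entries written
+         * before log version 6 store ints and byte arrays unpacked, and the
+         * compact-LSN flag is only present in entries of log version 2 or
+         * later.
+         */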
+        boolean unpacked = (entryVersion < 6);
+        identifierKey = LogUtils.readByteArray(itemBuffer, unpacked);
+        byte booleans = itemBuffer.get();
+        setIsRootFlag((booleans & 1) != 0);
+        if ((booleans & 2) != 0) {
+            keyPrefix = LogUtils.readByteArray(itemBuffer, unpacked);
+        }
+
+        nEntries = LogUtils.readInt(itemBuffer, unpacked);
+        level = LogUtils.readInt(itemBuffer, unpacked);
+        int length = LogUtils.readInt(itemBuffer, unpacked);
+
+        entryTargets = new Node[length];
+        entryKeyVals = new byte[length][];
+        baseFileNumber = -1;
+        long storedBaseFileNumber = -1;
+        entryLsnByteArray = new byte[length << 2];
+        entryLsnLongArray = null;
+        entryStates = new byte[length];
+        boolean compactLsnsRep = false;
+        if (entryVersion > 1) {
+            compactLsnsRep = LogUtils.readBoolean(itemBuffer);
+            if (compactLsnsRep) {
+                baseFileNumber = LogUtils.readInt(itemBuffer) & 0xffffffffL;
+                storedBaseFileNumber = baseFileNumber;
+            }
+        }
+        for (int i = 0; i < nEntries; i++) {
+            entryKeyVals[i] = LogUtils.readByteArray(itemBuffer, unpacked);
+            long lsn;
+            if (compactLsnsRep) {
+                /* LSNs in compact form. */
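+                /*
+                 * Mirror of the writeToLog packing: a 1-byte file number
+                 * offset relative to the stored base file number, then a
+                 * 3-byte little-endian file offset.  An offset equal to
+                 * THREE_BYTE_NEGATIVE_ONE marks a null LSN.
+                 */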
+                byte fileNumberOffset = itemBuffer.get();
+                int fileOffset = (itemBuffer.get() & 0xff);
+                fileOffset |= ((itemBuffer.get() & 0xff) << 8);
+                fileOffset |= ((itemBuffer.get() & 0xff) << 16);
+                if (fileOffset == THREE_BYTE_NEGATIVE_ONE) {
+                    lsn = DbLsn.NULL_LSN;
+                } else {
+                    lsn = DbLsn.makeLsn
+                        (storedBaseFileNumber + fileNumberOffset, fileOffset);
+                }
+            } else {
+                /* LSNs in long form. */
+                lsn = LogUtils.readLong(itemBuffer);              // LSN
+            }
+            setLsnElement(i, lsn);
+
+            byte entryState = itemBuffer.get();                   // state
+            entryState &= CLEAR_DIRTY_BIT;
+            entryState &= CLEAR_MIGRATE_BIT;
+
+            /*
+             * A NULL_LSN is the remnant of an incomplete insertion and the
+             * KnownDeleted flag should be set.  But because of bugs in prior
+             * releases, the KnownDeleted flag may not be set.  So set it here.
+             * See Tree.insert.  [#13126]
+             */
+            if (lsn == DbLsn.NULL_LSN) {
+                entryState |= KNOWN_DELETED_BIT;
+            }
+
+            entryStates[i] = entryState;
+        }
+
+        latch.setName(shortClassName() + getNodeId());
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    @Override
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append(beginTag());
+
+        super.dumpLog(sb, verbose);
+        sb.append(Key.dumpString(identifierKey, 0));
+
+        // isRoot
+        sb.append("<isRoot val=\"");
+        sb.append(isRoot());
+        sb.append("\"/>");
+
+        // level
+        sb.append("<level val=\"");
+        sb.append(Integer.toHexString(level));
+        sb.append("\"/>");
+
+        if (keyPrefix != null) {
+            sb.append("<keyPrefix>");
+            sb.append(Key.dumpString(keyPrefix, 0));
+            sb.append("</keyPrefix>");
+        }
+
+        // nEntries, length of entries array
+        sb.append("<entries numEntries=\"");
+        sb.append(nEntries);
+        sb.append("\" length=\"");
+        sb.append(entryTargets.length);
+        boolean compactLsnsRep = (entryLsnLongArray == null);
+        if (compactLsnsRep) {
+            sb.append("\" baseFileNumber=\"");
+            sb.append(baseFileNumber);
+        }
+        sb.append("\">");
+
+        if (verbose) {
+            for (int i = 0; i < nEntries; i++) {
+                sb.append("<ref knownDeleted=\"").
+                    append(isEntryKnownDeleted(i));
+                sb.append("\" pendingDeleted=\"").
+                    append(isEntryPendingDeleted(i));
+                sb.append("\">");
+                sb.append(Key.dumpString(entryKeyVals[i], 0));
+                sb.append(DbLsn.toString(getLsn(i)));
+                sb.append("</ref>");
+            }
+        }
+
+        sb.append("</entries>");
+
+        /* Add on any additional items from subclasses before the end tag. */
+        dumpLogAdditional(sb);
+
+        sb.append(endTag());
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * Allows subclasses to add additional fields before the end tag. If they
+     * just overload dumpLog, the xml isn't nested.
+     */
+    protected void dumpLogAdditional(StringBuffer sb) {
+    }
+
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    public String endTag() {
+        return END_TAG;
+    }
+
+    void dumpKeys()
+        throws DatabaseException {
+
+        for (int i = 0; i < nEntries; i++) {
+            System.out.println(Key.dumpString(entryKeyVals[i], 0));
+        }
+    }
+
+    /**
+     * For unit test support:
+     * @return a string that dumps information about this IN.
+     */
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(beginTag());
+            sb.append('\n');
+        }
+
+        sb.append(super.dumpString(nSpaces+2, true));
+        sb.append('\n');
+
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<idkey>");
+        sb.append(identifierKey == null ?
+                  "" :
+                  Key.dumpString(identifierKey, 0));
+        sb.append("</idkey>");
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<prefix>");
+        sb.append(keyPrefix == null ? "" : Key.dumpString(keyPrefix, 0));
+        sb.append("</prefix>\n");
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<dirty val=\"").append(getDirty()).append("\"/>");
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<generation val=\"").append(generation).append("\"/>");
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<level val=\"");
+        sb.append(Integer.toHexString(level)).append("\"/>");
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<isRoot val=\"").append(isRoot()).append("\"/>");
+        sb.append('\n');
+
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("<entries nEntries=\"");
+        sb.append(nEntries);
+        sb.append("\">");
+        sb.append('\n');
+
+        for (int i = 0; i < nEntries; i++) {
+            sb.append(TreeUtils.indent(nSpaces+4));
+            sb.append("<entry id=\"" + i + "\">");
+            sb.append('\n');
+            if (getLsn(i) == DbLsn.NULL_LSN) {
+                sb.append(TreeUtils.indent(nSpaces + 6));
+                sb.append("<lsn/>");
+            } else {
+                sb.append(DbLsn.dumpString(getLsn(i), nSpaces + 6));
+            }
+            sb.append('\n');
+            if (entryKeyVals[i] == null) {
+                sb.append(TreeUtils.indent(nSpaces + 6));
+                sb.append("<key/>");
+            } else {
+                sb.append(Key.dumpString(entryKeyVals[i], (nSpaces + 6)));
+            }
+            sb.append('\n');
+            if (entryTargets[i] == null) {
+                sb.append(TreeUtils.indent(nSpaces + 6));
+                sb.append("<target/>");
+            } else {
+                sb.append(entryTargets[i].dumpString(nSpaces + 6, true));
+            }
+            sb.append('\n');
+            sb.append(TreeUtils.indent(nSpaces + 6));
+            dumpDeletedState(sb, getState(i));
+            sb.append("<dirty val=\"").append(isDirty(i)).append("\"/>");
+            sb.append('\n');
+            sb.append(TreeUtils.indent(nSpaces+4));
+            sb.append("</entry>");
+            sb.append('\n');
+        }
+
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("</entries>");
+        sb.append('\n');
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(endTag());
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Utility method for output of knownDeleted and pendingDelete.
+     */
+    static void dumpDeletedState(StringBuffer sb, byte state) {
+        sb.append("<knownDeleted val=\"");
+        sb.append(isStateKnownDeleted(state)).append("\"/>");
+        sb.append("<pendingDeleted val=\"");
+        sb.append(isStatePendingDeleted(state)).append("\"/>");
+    }
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    public String shortClassName() {
+        return "IN";
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    private void traceSplit(Level level,
+                            IN parent,
+                            IN newSibling,
+                            long parentLsn,
+                            long myNewLsn,
+                            long newSiblingLsn,
+                            int splitIndex,
+                            int idKeyIndex,
+                            int childIndex) {
+        Logger logger = databaseImpl.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_SPLIT);
+            sb.append(" parent=");
+            sb.append(parent.getNodeId());
+            sb.append(" child=");
+            sb.append(getNodeId());
+            sb.append(" newSibling=");
+            sb.append(newSibling.getNodeId());
+            sb.append(" parentLsn = ");
+            sb.append(DbLsn.getNoFormatString(parentLsn));
+            sb.append(" childLsn = ");
+            sb.append(DbLsn.getNoFormatString(myNewLsn));
+            sb.append(" newSiblingLsn = ");
+            sb.append(DbLsn.getNoFormatString(newSiblingLsn));
+            sb.append(" splitIdx=");
+            sb.append(splitIndex);
+            sb.append(" idKeyIdx=");
+            sb.append(idKeyIndex);
+            sb.append(" childIdx=");
+            sb.append(childIndex);
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even want
+     * to construct the message if the level is not enabled.
+     */
+    private void traceDelete(Level level, int index) {
+        Logger logger = databaseImpl.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_DELETE);
+            sb.append(" in=").append(getNodeId());
+            sb.append(" index=");
+            sb.append(index);
+            logger.log(level, sb.toString());
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/tree/INDeleteInfo.java b/src/com/sleepycat/je/tree/INDeleteInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc740d4a17d0b70d83433cd4bb45b462c4d8f710
--- /dev/null
+++ b/src/com/sleepycat/je/tree/INDeleteInfo.java
@@ -0,0 +1,142 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INDeleteInfo.java,v 1.40.2.3 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+
+/**
+ * INDeleteInfo encapsulates the information logged about the removal of a
+ * child from an IN during IN compression.
+ *
+ * As of JE 3.3.87, INDelete is no longer logged because the root compression
+ * feature has been disabled.  However, INDelete must still be processed in log
+ * files created with 3.3.87 and earlier. [#17546]
+ */
+public class INDeleteInfo implements Loggable {
+
+    private long deletedNodeId;
+    private byte[] deletedIdKey;
+    private DatabaseId dbId;
+
+    /**
+     * Create a new delete info entry.
+     */
+    public INDeleteInfo(long deletedNodeId,
+                        byte[] deletedIdKey,
+                        DatabaseId dbId) {
+        this.deletedNodeId = deletedNodeId;
+        this.deletedIdKey = deletedIdKey;
+        this.dbId = dbId;
+    }
+
+    /**
+     * Used by logging system only.
+     */
+    public INDeleteInfo() {
+        dbId = new DatabaseId();
+    }
+
+    /*
+     * Accessors.
+     */
+    public long getDeletedNodeId() {
+        return deletedNodeId;
+    }
+
+    public byte[] getDeletedIdKey() {
+        return deletedIdKey;
+    }
+
+    public DatabaseId getDatabaseId() {
+        return dbId;
+    }
+
+    /*
+     * Logging support for writing.
+     */
+    public void optionalLog(LogManager logManager,
+                            DatabaseImpl dbImpl)
+        throws DatabaseException {
+
+        if (!dbImpl.isDeferredWriteMode()) {
+            logManager.log
+                (new SingleItemEntry(LogEntryType.LOG_IN_DELETE_INFO,
+                                     this),
+                 ReplicationContext.NO_REPLICATE);
+        }
+    }
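+
+    /*
+     * Illustrative use (hypothetical caller, shown only for orientation;
+     * the accessors on the deleted child are assumed from the IN class):
+     *
+     *   INDeleteInfo info = new INDeleteInfo(deletedChild.getNodeId(),
+     *                                        deletedChild.getIdentifierKey(),
+     *                                        dbImpl.getId());
+     *   info.optionalLog(logManager, dbImpl);
+     */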
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(deletedNodeId) +
+            LogUtils.getByteArrayLogSize(deletedIdKey) +
+            dbId.getLogSize();
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        LogUtils.writePackedLong(logBuffer, deletedNodeId);
+        LogUtils.writeByteArray(logBuffer, deletedIdKey);
+        dbId.writeToLog(logBuffer);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean unpacked = (entryVersion < 6);
+        deletedNodeId = LogUtils.readLong(itemBuffer, unpacked);
+        deletedIdKey = LogUtils.readByteArray(itemBuffer, unpacked);
+        dbId.readFromLog(itemBuffer, entryVersion);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<INDeleteEntry node=\"").append(deletedNodeId);
+        sb.append("\">");
+        sb.append(Key.dumpString(deletedIdKey, 0));
+        dbId.dumpLog(sb, verbose);
+        sb.append("</INDeleteEntry>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/INDupDeleteInfo.java b/src/com/sleepycat/je/tree/INDupDeleteInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..7b5f91f37aac632cb0b2eae0b2e263620b27f433
--- /dev/null
+++ b/src/com/sleepycat/je/tree/INDupDeleteInfo.java
@@ -0,0 +1,152 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INDupDeleteInfo.java,v 1.21.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+
+/**
+ * INDupDeleteInfo encapsulates the information logged about the removal of a
+ * child from a duplicate IN during IN compression.
+ */
+public class INDupDeleteInfo implements Loggable {
+
+    private long deletedNodeId;
+    private byte[] deletedMainKey;
+    private byte[] deletedDupKey;
+    private DatabaseId dbId;
+
+    /**
+     * Create a new delete info entry.
+     */
+    public INDupDeleteInfo(long deletedNodeId,
+                           byte[] deletedMainKey,
+                           byte[] deletedDupKey,
+                           DatabaseId dbId) {
+        this.deletedNodeId = deletedNodeId;
+        this.deletedMainKey = deletedMainKey;
+        this.deletedDupKey = deletedDupKey;
+        this.dbId = dbId;
+    }
+
+    /**
+     * Used by logging system only.
+     */
+    public INDupDeleteInfo() {
+        dbId = new DatabaseId();
+    }
+
+    /*
+     * Accessors.
+     */
+    public long getDeletedNodeId() {
+        return deletedNodeId;
+    }
+
+    public byte[] getDeletedMainKey() {
+        return deletedMainKey;
+    }
+
+    public byte[] getDeletedDupKey() {
+        return deletedDupKey;
+    }
+
+    public DatabaseId getDatabaseId() {
+        return dbId;
+    }
+
+    /*
+     * Logging support for writing.
+     */
+    public void optionalLog(LogManager logManager,
+                            DatabaseImpl dbImpl)
+        throws DatabaseException {
+
+        if (!dbImpl.isDeferredWriteMode()) {
+            logManager.log(
+               new SingleItemEntry(LogEntryType.LOG_IN_DUPDELETE_INFO, this),
+               ReplicationContext.NO_REPLICATE);
+        }
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(deletedNodeId) +
+            LogUtils.getByteArrayLogSize(deletedMainKey) +
+            LogUtils.getByteArrayLogSize(deletedDupKey) +
+            dbId.getLogSize();
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        LogUtils.writePackedLong(logBuffer, deletedNodeId);
+        LogUtils.writeByteArray(logBuffer, deletedMainKey);
+        LogUtils.writeByteArray(logBuffer, deletedDupKey);
+        dbId.writeToLog(logBuffer);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        boolean unpacked = (entryVersion < 6);
+        deletedNodeId = LogUtils.readLong(itemBuffer, unpacked);
+        deletedMainKey = LogUtils.readByteArray(itemBuffer, unpacked);
+        deletedDupKey = LogUtils.readByteArray(itemBuffer, unpacked);
+        dbId.readFromLog(itemBuffer, entryVersion);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<INDupDeleteEntry node=\"").append(deletedNodeId);
+        sb.append("\">");
+        sb.append(Key.dumpString(deletedMainKey, 0));
+        sb.append(Key.dumpString(deletedDupKey, 0));
+        dbId.dumpLog(sb, verbose);
+        sb.append("</INDupDeleteEntry>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+	return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/INLogContext.java b/src/com/sleepycat/je/tree/INLogContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..3cb86df40f4c9ccab5caf3e3941d5a33098400e8
--- /dev/null
+++ b/src/com/sleepycat/je/tree/INLogContext.java
@@ -0,0 +1,34 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INLogContext.java,v 1.1.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.log.LogContext;
+
+/**
+ * Extends LogContext to add fields used by IN.beforeLog and afterLog methods.
+ */
+public class INLogContext extends LogContext {
+
+    /**
+     * Whether a BINDelta may be logged.  A BINDelta is logged rather than a BIN
+     * if this field is true and other qualifications are met for a delta.
+     * Used by BIN.beforeLog.
+     *
+     * Set by caller.
+     */
+    public boolean allowDeltas;
+
+    /**
+     * Whether LNs are migrated proactively by the log cleaner as part of
+     * logging a BIN.  Used by BIN.beforeLog.
+     *
+     * Set by caller.
+     */
+    public boolean proactiveMigration;
+}
diff --git a/src/com/sleepycat/je/tree/INLogItem.java b/src/com/sleepycat/je/tree/INLogItem.java
new file mode 100644
index 0000000000000000000000000000000000000000..9aeec3c5b787628b2bee9cea1012462b2887bc84
--- /dev/null
+++ b/src/com/sleepycat/je/tree/INLogItem.java
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INLogItem.java,v 1.1.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.log.LogItem;
+
+/**
+ * Extends LogItem to add fields used by IN.beforeLog and afterLog methods.
+ */
+public class INLogItem extends LogItem {
+    
+    /**
+     * Parent IN of IN to be logged, or null for the root IN.  Used to count
+     * utilization for provisional logging.
+     *
+     * Set by caller.
+     */
+    public IN parent = null;
+
+    /**
+     * Index of parent slot for IN to be logged.  Used to identify and update
+     * the slot.
+     *
+     * Set by caller.
+     */
+    public int parentIndex = -1;
+
+    /* Fields used internally by beforeLog and afterLog methods. */
+    boolean isDelta = false;
+}
diff --git a/src/com/sleepycat/je/tree/InconsistentNodeException.java b/src/com/sleepycat/je/tree/InconsistentNodeException.java
new file mode 100644
index 0000000000000000000000000000000000000000..d2d0f6c34c2a119fd33c7e7eb25748015b61bb12
--- /dev/null
+++ b/src/com/sleepycat/je/tree/InconsistentNodeException.java
@@ -0,0 +1,24 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: InconsistentNodeException.java,v 1.16.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Error to indicate that something is out of whack in the tree.
+ */
+public class InconsistentNodeException extends DatabaseException {
+    public InconsistentNodeException() {
+	super();
+    }
+
+    public InconsistentNodeException(String message) {
+	super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/Key.java b/src/com/sleepycat/je/tree/Key.java
new file mode 100644
index 0000000000000000000000000000000000000000..7f37a365b4f1fb3a2ce258ba4b506eae51ca5fe4
--- /dev/null
+++ b/src/com/sleepycat/je/tree/Key.java
@@ -0,0 +1,272 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Key.java,v 1.71.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.util.Comparator;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Key represents a JE B-Tree Key.  Keys are immutable.  Within JE, keys are
+ * usually represented as byte arrays rather than as Key instances in order to
+ * reduce the in-memory footprint. The static methods of this class are used to
+ * operate on the byte arrays.
+ *
+ * One exception is when keys are held within a collection. In that case, Key
+ * objects are instantiated so that keys are hashed and compared by value.
+ */
+public final class Key implements Comparable<Key> {
+    public abstract static class DumpType {
+
+	private String name;
+
+	private DumpType(String name) {
+	    this.name = name;
+	}
+
+	public static final DumpType BINARY = new DumpType("BINARY") {
+		@Override
+		void dumpByteArrayInternal(StringBuffer sb, byte[] b) {
+		    for (int i = 0; i < b.length; i++) {
+			sb.append(b[i] & 0xFF).append(" ");
+		    }
+		}
+	    };
+
+	public static final DumpType HEX = new DumpType("HEX") {
+		@Override
+		void dumpByteArrayInternal(StringBuffer sb, byte[] b) {
+		    for (int i = 0; i < b.length; i++) {
+			sb.append(Integer.toHexString(b[i] & 0xFF)).
+			    append(" ");
+		    }
+		}
+	    };
+
+	public static final DumpType TEXT = new DumpType("TEXT") {
+		@Override
+		void dumpByteArrayInternal(StringBuffer sb, byte[] b) {
+		    sb.append(new String(b));
+		}
+	    };
+
+	public static final DumpType OBFUSCATE = new DumpType("OBFUSCATE") {
+		@Override
+		void dumpByteArrayInternal(StringBuffer sb, byte[] b) {
+		    int len = b.length;
+		    sb.append("[").append(len).
+			append(len == 1 ? " byte]" : " bytes]");
+		}
+	    };
+
+	public String dumpByteArray(byte[] b) {
+	    StringBuffer sb = new StringBuffer();
+	    if (b != null) {
+		dumpByteArrayInternal(sb, b);
+	    } else {
+		sb.append("null");
+	    }
+	    return sb.toString();
+	}
+
+	@Override
+	public String toString() {
+	    return name;
+	}
+
+	abstract void dumpByteArrayInternal(StringBuffer sb, byte[] b);
+    }
+
+    public static DumpType DUMP_TYPE = DumpType.BINARY;
+
+    /* Not declared final since unit tests use it. */
+    public static boolean DUMP_INT_BINDING = false;
+    public static final byte[] EMPTY_KEY = new byte[0];
+    private byte[] key;
+
+    /**
+     * Construct a new key from a byte array.
+     */
+    public Key(byte[] key) {
+	if (key == null) {
+	    this.key = null;
+	} else {
+            this.key = new byte[key.length];
+            System.arraycopy(key, 0, this.key, 0, key.length);
+	}
+    }
+
+    public static byte[] makeKey(DatabaseEntry dbt) {
+        byte[] entryKey = dbt.getData();
+	if (entryKey == null) {
+            return EMPTY_KEY;
+	} else {
+            byte[] newKey = new byte[dbt.getSize()];
+            System.arraycopy(entryKey, dbt.getOffset(), newKey,
+                             0, dbt.getSize());
+            return newKey;
+	}
+    }
+
+    /**
+     * Get the byte array for the key.
+     */
+    public byte[] getKey() {
+	return key;
+    }
+
+    /**
+     * Compare two keys, with standard compareTo return semantics.
+     *
+     * Note that any configured user comparison function is not used, and
+     * therefore this method should not be used for comparison of keys during
+     * Btree operations.
+     */
+    public int compareTo(Key argKey) {
+        return compareUnsignedBytes(this.key, argKey.key);
+    }
+
+    /**
+     * Support Set of Key in BINReference.
+     */
+    @Override
+    public boolean equals(Object o) {
+        return (o instanceof Key) && (compareTo((Key)o) == 0);
+    }
+
+    /**
+     * Support HashSet of Key in BINReference.
+     */
+    @Override
+    public int hashCode() {
+        int code = 0;
+        for (int i = 0; i < key.length; i += 1) {
+            code += key[i];
+        }
+        return code;
+    }
+
+    /**
+     * Compare keys with an optional comparator.
+     */
+    public static int compareKeys(byte[] key1,
+                                  byte[] key2,
+                                  Comparator<byte[]> comparator) {
+        if (comparator != null) {
+            return comparator.compare(key1, key2);
+        } else {
+            return compareUnsignedBytes(key1, key2);
+        }
+    }
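+
+    /*
+     * Illustrative only (hypothetical comparator): a caller-supplied
+     * comparator takes precedence over the default unsigned ordering.
+     *
+     *   Comparator<byte[]> byLength = new Comparator<byte[]>() {
+     *       public int compare(byte[] a, byte[] b) {
+     *           return a.length - b.length;
+     *       }
+     *   };
+     *   Key.compareKeys(k1, k2, byLength); // compares lengths only
+     *   Key.compareKeys(k1, k2, null);     // default unsigned byte order
+     */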
+
+    /**
+     * Compare using a default unsigned byte comparison.
+     */
+    private static int compareUnsignedBytes(byte[] key1, byte[] key2) {
+	int a1Len = key1.length;
+	int a2Len = key2.length;
+
+	int limit = Math.min(a1Len, a2Len);
+
+	for (int i = 0; i < limit; i++) {
+	    byte b1 = key1[i];
+	    byte b2 = key2[i];
+	    if (b1 == b2) {
+		continue;
+	    } else {
+
+		/* 
+                 * Remember, bytes are signed, so mask them to ints so that
+                 * we effectively do an unsigned byte comparison.
+                 */
+		return (b1 & 0xff) - (b2 & 0xff);
+	    }
+	}
+
+	return (a1Len - a2Len);
+    }
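+
+    /*
+     * Illustrative only (assumed values): because bytes are masked to
+     * their unsigned values, { (byte) 0x80 } sorts after { 0x01 }:
+     *
+     *   compareUnsignedBytes(new byte[] { (byte) 0x80 },
+     *                        new byte[] { 0x01 })      // 127, i.e. > 0
+     *   compareUnsignedBytes(new byte[] { 1, 2 },
+     *                        new byte[] { 1, 2, 3 })   // -1, shorter first
+     */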
+
+    /*
+     * Return the number of leading bytes that key1 and key2 have in common
+     * (i.e. the length of their common prefix).
+     */
+    public static int getKeyPrefixLength(byte[] key1, int a1Len, byte[] key2) {
+        assert key1 != null && key2 != null;
+
+	int a2Len = key2.length;
+
+	int limit = Math.min(a1Len, a2Len);
+
+	for (int i = 0; i < limit; i++) {
+	    byte b1 = key1[i];
+	    byte b2 = key2[i];
+	    if (b1 != b2) {
+                return i;
+	    }
+	}
+
+        return limit;
+    }
+
+    /*
+     * Return a new byte[] containing the common prefix of key1 and key2.
+     * Return null if there is no common prefix.
+     */
+    public static byte[] createKeyPrefix(byte[] key1, byte[] key2) {
+        int len = getKeyPrefixLength(key1, key1.length, key2);
+        if (len == 0) {
+            return null;
+        }
+
+        byte[] ret = new byte[len];
+        System.arraycopy(key1, 0, ret, 0, len);
+
+	return ret;
+    }
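+
+    /*
+     * Illustrative only (assumed values): for key1 = { 1, 2, 3 } and
+     * key2 = { 1, 2, 9 }, getKeyPrefixLength returns 2 and createKeyPrefix
+     * returns { 1, 2 }; when the first bytes already differ there is no
+     * common prefix and createKeyPrefix returns null.
+     */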
+
+    public static String dumpString(byte[] key, int nspaces) {
+	StringBuffer sb = new StringBuffer();
+        sb.append(TreeUtils.indent(nspaces));
+	sb.append("<key v=\"");
+
+	if (DUMP_TYPE == DumpType.BINARY ||
+	    DUMP_TYPE == DumpType.HEX) {
+	    if (key == null) {
+		sb.append("<null>");
+	    } else {
+		sb.append(DUMP_TYPE.dumpByteArray(key));
+	    }
+	} else if (DUMP_TYPE == DumpType.TEXT) {
+	    if (DUMP_INT_BINDING) {
+		if (key == null) {
+		    sb.append("<null>");
+		} else {
+		    DatabaseEntry e = new DatabaseEntry(key);
+		    sb.append(IntegerBinding.entryToInt(e));
+		}
+	    } else {
+		sb.append(key == null ? "" : new String(key));
+	    }
+	} else if (DUMP_TYPE == DumpType.OBFUSCATE) {
+	    int len = key.length;
+	    sb.append("[").append(len).append(len == 1 ? " byte]" : " bytes]");
+	}
+	sb.append("\"/>");
+
+	return sb.toString();
+    }
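+
+    /*
+     * Illustrative only (assumed values): with DUMP_TYPE == BINARY, a key
+     * of { 1, (byte) 0xFF } dumps as <key v="1 255 "/>; with DUMP_TYPE ==
+     * HEX the same key dumps as <key v="1 ff "/>.
+     */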
+
+    /**
+     * Print the string w/out XML format.
+     */
+    public static String getNoFormatString(byte[] key) {
+        return "key=" + dumpString(key, 0);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/LN.java b/src/com/sleepycat/je/tree/LN.java
new file mode 100644
index 0000000000000000000000000000000000000000..36eb6d85c1f4ea07b3ab6430cac3d65bcae4ff07
--- /dev/null
+++ b/src/com/sleepycat/je/tree/LN.java
@@ -0,0 +1,724 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LN.java,v 1.152.2.5 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.DeletedDupLNLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.txn.WriteLockInfo;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * An LN represents a Leaf Node in the JE tree.
+ */
+public class LN extends Node implements Loggable {
+    private static final String BEGIN_TAG = "<ln>";
+    private static final String END_TAG = "</ln>";
+
+    private byte[] data;
+
+    /*
+     * Flags: bit fields
+     *
+     * -Dirty means that the in-memory version is not present on disk.
+     * -The last logged bits store the total size of the last logged entry.
+     */
+    private static final int DIRTY_BIT = 0x80000000;
+    private static final int CLEAR_DIRTY_BIT = ~DIRTY_BIT;
+    private static final int LAST_LOGGED_SIZE_MASK = 0x7FFFFFFF;
+    private static final int CLEAR_LAST_LOGGED_SIZE = ~LAST_LOGGED_SIZE_MASK;
+    private int flags; // not persistent
+
+    /**
+     * Create an empty LN, to be filled in from the log.
+     */
+    public LN() {
+        this.data = null;
+    }
+
+    /**
+     * Create a new LN from a byte array.
+     */
+    public LN(byte[] data, EnvironmentImpl envImpl, boolean replicated) {
+        super(envImpl, replicated);
+        if (data == null) {
+            this.data = null;
+        } else {
+            init(data, 0, data.length);
+        }
+        setDirty();
+    }
+
+    /**
+     * Create a new LN from a DatabaseEntry.
+     */
+    public LN(DatabaseEntry dbt, EnvironmentImpl envImpl, boolean replicated) {
+        super(envImpl, replicated);
+
+        byte[] dat = dbt.getData();
+        if (dat == null) {
+            data = null;
+        } else if (dbt.getPartial()) {
+            init(dat,
+                 dbt.getOffset(),
+                 dbt.getPartialOffset() + dbt.getSize(),
+                 dbt.getPartialOffset(),
+                 dbt.getSize());
+        } else {
+            init(dat, dbt.getOffset(), dbt.getSize());
+        }
+        setDirty();
+    }
+
+    private void init(byte[] data, int off, int len, int doff, int dlen) {
+	if (len == 0) {
+	    this.data = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+	} else {
+	    this.data = new byte[len];
+	    System.arraycopy(data, off, this.data, doff, dlen);
+	}
+    }
+
+    private void init(byte[] data, int off, int len) {
+        init(data, off, len, 0, len);
+    }
+
+    public byte[] getData() {
+        return data;
+    }
+
+    public byte[] copyData() {
+        int len = data.length;
+        byte[] ret = new byte[len];
+        System.arraycopy(data, 0, ret, 0, len);
+        return ret;
+    }
+
+    public boolean isDeleted() {
+        return (data == null);
+    }
+
+    void makeDeleted() {
+        data = null;
+    }
+
+    public boolean isDirty() {
+        return ((flags & DIRTY_BIT) != 0);
+    }
+
+    public void setDirty() {
+        flags |= DIRTY_BIT;
+    }
+
+    private void clearDirty() {
+        flags &= CLEAR_DIRTY_BIT;
+    }
+
+    /*
+     * If you get to an LN, this subtree isn't valid for delete. True, the LN
+     * may have been deleted, but you can't be sure without taking a lock, and
+     * the validate-subtree-for-delete process assumes that BIN compression
+     * has happened and there are no committed, deleted LNs hanging off the
+     * BIN.
+     */
+    boolean isValidForDelete() {
+        return false;
+    }
+
+    /**
+     * Returns true by default, but is overridden by MapLN to prevent eviction
+     * of open databases.  This method is meant to be a fast but not guaranteed
+     * check and is used during selection of BINs for LN stripping.  [#13415]
+     */
+    boolean isEvictableInexact() {
+        return true;
+    }
+
+    /**
+     * Returns true by default, but is overridden by MapLN to prevent eviction
+     * of open databases.  This method is meant to be a guaranteed check and is
+     * used after a BIN has been selected for LN stripping but before actually
+     * stripping an LN. [#13415]
+     */
+    boolean isEvictable()
+        throws DatabaseException {
+
+        return true;
+    }
+
+    /**
+     * An LN can never be a child in the search chain.
+     */
+    protected boolean isSoughtNode(long nid, CacheMode cacheMode) {
+        return false;
+    }
+
+    /**
+     * An LN can never be the ancestor of another node.
+     */
+    protected boolean canBeAncestor(boolean targetContainsDuplicates) {
+        return false;
+    }
+
+    /**
+     * Delete this LN's data and log the new version.
+     */
+    public long delete(DatabaseImpl database,
+		       byte[] lnKey,
+		       byte[] dupKey,
+		       long oldLsn,
+                       Locker locker,
+                       ReplicationContext repContext)
+        throws DatabaseException {
+
+        makeDeleted();
+        setDirty();
+
+        /* Log if necessary */
+        EnvironmentImpl env = database.getDbEnvironment();
+        long newLsn = DbLsn.NULL_LSN;
+        if (dupKey != null) {
+
+            /*
+             * If this is a deferred write database, and the LN has
+             * never been logged, we don't need to log the delete either,
+             * since we are currently running in non-txnal mode. This
+             * will have to be adapted when we support txnal mode.
+             */
+            if (database.isDeferredWriteMode() &&
+                oldLsn == DbLsn.NULL_LSN) {
+                clearDirty();
+            } else {
+
+                /*
+                 * Log as a deleted duplicate LN by passing dupKey.  Note that
+                 * we log a deleted duplicate LN even in Deferred Write mode,
+                 * because the data (dupKey) is set to null when it is deleted,
+                 * so logging it later is not possible.
+                 */
+                newLsn = log(env, database, lnKey, dupKey, oldLsn, locker,
+                             false,  // isProvisional
+                             false, // backgroundIO
+                             repContext);
+            }
+        } else {
+
+            /*
+             * Non duplicate LN, just log the normal way.
+             */
+            newLsn = optionalLog(env, database, lnKey, oldLsn,
+                                 locker, repContext);
+        }
+        return newLsn;
+    }
+
+    /**
+     * Modify the LN's data and log the new version.
+     * @param repContext indicates whether this LN is part of the replication
+     * stream. If this environment is a client node, repContext has the VLSN to
+     * be used when logging the LN. If this environment is a master, it
+     * indicates that the LN should be broadcast.
+     */
+    public long modify(byte[] newData,
+		       DatabaseImpl database,
+		       byte[] lnKey,
+		       long oldLsn,
+                       Locker locker,
+                       ReplicationContext repContext)
+        throws DatabaseException {
+
+        data = newData;
+        setDirty();
+
+        /* Log the new LN. */
+        EnvironmentImpl env = database.getDbEnvironment();
+        long newLsn = optionalLog(env, database,
+                                  lnKey, oldLsn, locker,
+                                  repContext);
+        return newLsn;
+    }
+
+    /**
+     * Add yourself to the in memory list if you're a type of node that should
+     * belong.
+     */
+    @Override
+    void rebuildINList(INList inList) {
+        /* Don't add; LNs don't belong on the list. */
+    }
+
+    /**
+     * No need to do anything, stop the search.
+     */
+    void accountForSubtreeRemoval(INList inList,
+                                  LocalUtilizationTracker localTracker) {
+        /* Don't remove, LNs not on this list. */
+    }
+
+    /**
+     * Compute the approximate size of this node in memory for evictor
+     * invocation purposes.
+     */
+    @Override
+    public long getMemorySizeIncludedByParent() {
+        int size = MemoryBudget.LN_OVERHEAD;
+        if (data != null) {
+            size += MemoryBudget.byteArraySize(data.length);
+        }
+        return size;
+    }
+
+    /**
+     * Release the memory budget for any objects referenced by this 
+     * LN. For now, only release treeAdmin memory, because treeMemory
+     * is handled in aggregate at the IN level. Over time, transition
+     * all of the LN's memory budget to this, so we update the memory
+     * budget counters more locally. Called when we are releasing a LN
+     * for garbage collection.
+     */
+    public void releaseMemoryBudget() {
+        // Nothing to do for now; no treeAdmin memory.
+    }
+
+    /*
+     * Dumping
+     */
+
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer self = new StringBuffer();
+        if (dumpTags) {
+	    self.append(TreeUtils.indent(nSpaces));
+            self.append(beginTag());
+            self.append('\n');
+        }
+
+        self.append(super.dumpString(nSpaces + 2, true));
+        self.append('\n');
+        if (data != null) {
+            self.append(TreeUtils.indent(nSpaces+2));
+            self.append("<data>");
+            self.append(Key.DUMP_TYPE.dumpByteArray(data));
+            self.append("</data>");
+            self.append('\n');
+        }
+        if (dumpTags) {
+            self.append(TreeUtils.indent(nSpaces));
+            self.append(endTag());
+        }
+        return self.toString();
+    }
+
+    /*
+     * Logging Support
+     */
+
+    /**
+     * Log this LN and clear the dirty flag. Whether it's logged as a
+     * transactional entry or not depends on the type of locker.
+     * @param env the environment.
+     * @param dbId database id of this node. (Not stored in LN)
+     * @param key key of this node. (Not stored in LN)
+     * @param oldLsn is the LSN of the previous version or null.
+     * @param locker owning locker.
+     * @param repContext indicates whether this LN is part of the replication
+     * stream. If this environment is a client node, repContext has the VLSN to
+     * be used when logging the LN. If this environment is a master, it
+     * indicates that the LN should be broadcast.
+     */
+    public long log(EnvironmentImpl env,
+		    DatabaseImpl databaseImpl,
+		    byte[] key,
+		    long oldLsn,
+		    Locker locker,
+                    boolean backgroundIO,
+                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        return log(env, databaseImpl, key, null, /* delDupKey */
+                   oldLsn, locker, backgroundIO, false, /* provisional */
+                   repContext);
+    }
+
+    /**
+     * Log this LN if it's not part of a deferred-write db.  Whether it's
+     * logged as a transactional entry or not depends on the type of locker.
+     * @param env the environment.
+     * @param dbId database id of this node. (Not stored in LN)
+     * @param key key of this node. (Not stored in LN)
+     * @param oldLsn is the LSN of the previous version or NULL_LSN.
+     * @param locker owning locker.
+     * @param repContext indicates whether this LN is part of the replication
+     * stream. If this environment is a client node, repContext has the VLSN to
+     * be used when logging the LN. If this environment is a master, it
+     * indicates that the LN should be broadcast.
+     */
+    public long optionalLog(EnvironmentImpl env,
+                            DatabaseImpl databaseImpl,
+                            byte[] key,
+                            long oldLsn,
+                            Locker locker,
+                            ReplicationContext repContext)
+        throws DatabaseException {
+
+        if (databaseImpl.isDeferredWriteMode()) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return log(env,
+                       databaseImpl,
+                       key,
+                       null,   // delDupKey
+                       oldLsn,
+                       locker,
+                       false,  // backgroundIO
+                       false,  // provisional
+                       repContext);
+        }
+    }
+
+    /**
+     * Log a provisional, non-txnal version of an LN.
+     * @param env the environment.
+     * @param dbId database id of this node. (Not stored in LN)
+     * @param key key of this node. (Not stored in LN)
+     * @param oldLsn is the LSN of the previous version or NULL_LSN.
+     */
+    public long optionalLogProvisional(EnvironmentImpl env,
+                                       DatabaseImpl databaseImpl,
+                                       byte[] key,
+                                       long oldLsn,
+                                       ReplicationContext repContext)
+        throws DatabaseException {
+
+        if (databaseImpl.isDeferredWriteMode()) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return log(env, databaseImpl, key,
+                       null,   // delDupKey
+                       oldLsn,
+                       null,   // locker
+                       false,  // backgroundIO
+                       true,   // provisional
+                       repContext);
+        }
+    }
+
+    /**
+     * Log this LN. Clear dirty bit. Whether it's logged as a transactional
+     * entry or not depends on the type of locker.
+     * @param env the environment.
+     * @param dbId database id of this node. (Not stored in LN)
+     * @param key key of this node. (Not stored in LN)
+     * @param delDupKey if non-null, the dupKey for deleting the LN.
+     * @param oldLsn is the LSN of the previous version or NULL_LSN.
+     * @param locker owning locker.
+     */
+    long log(EnvironmentImpl env,
+             DatabaseImpl dbImpl,
+             byte[] key,
+             byte[] delDupKey,
+             long oldLsn,
+             Locker locker,
+             boolean backgroundIO,
+             boolean isProvisional,
+             ReplicationContext repContext)
+        throws DatabaseException {
+
+        boolean isDelDup = (delDupKey != null);
+        LogEntryType entryType;
+        long logAbortLsn;
+	boolean logAbortKnownDeleted;
+        Txn logTxn;
+        if (locker != null && locker.isTransactional()) {
+            entryType = isDelDup ?
+                LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL :
+                getTransactionalLogType();
+	    WriteLockInfo info = locker.getWriteLockInfo(getNodeId());
+	    logAbortLsn = info.getAbortLsn();
+	    logAbortKnownDeleted = info.getAbortKnownDeleted();
+            logTxn = locker.getTxnLocker();
+            assert logTxn != null;
+            if (oldLsn == logAbortLsn) {
+                info.setAbortInfo(dbImpl, getLastLoggedSize());
+            }
+        } else {
+            entryType = isDelDup ? LogEntryType.LOG_DEL_DUPLN : getLogType();
+            logAbortLsn = DbLsn.NULL_LSN;
+	    logAbortKnownDeleted = false;
+            logTxn = null;
+        }
+
+        /* Don't count abortLsn as obsolete; this is done during commit. */
+        if (oldLsn == logAbortLsn) {
+            oldLsn = DbLsn.NULL_LSN;
+        }
+
+        LNLogEntry logEntry = createLogEntry(entryType,
+                                             dbImpl,
+                                             key,
+                                             delDupKey,
+                                             logAbortLsn,
+                                             logAbortKnownDeleted,
+                                             logTxn,
+                                             repContext);
+
+        /*
+         * Always log temporary DB LNs as provisional.  This prevents the
+         * possibility of a LogFileNotFoundException during recovery, since
+         * temporary DBs are not checkpointed.  And it speeds recovery --
+         * temporary DBs are removed during recovery anyway.
+         */
+        if (dbImpl.isTemporary()) {
+            isProvisional = true;
+        }
+
+        long lsn = DbLsn.NULL_LSN;
+	try {
+            if (logTxn != null) {
+
+                /*
+                 * Writing an LN_TX entry requires looking at the Txn's
+                 * lastLoggedLsn.  The Txn may be used by multiple threads so
+                 * ensure that the view we get is consistent. [#17204]
+                 */
+                synchronized (logTxn) {
+                    lsn = env.getLogManager().log(logEntry, isProvisional,
+                                                  backgroundIO, oldLsn, dbImpl,
+                                                  repContext);
+                }
+            } else {
+                lsn = env.getLogManager().log(logEntry, isProvisional,
+                                              backgroundIO, oldLsn, dbImpl,
+                                              repContext);
+            }
+	} catch (DatabaseException DE) {
+
+	    /*
+	     * If any exception happens during logging, then force this txn
+	     * to onlyAbortable. [#15768]
+	     */
+            if (locker != null) {
+                locker.setOnlyAbortable();
+            }
+	    throw DE;
+	}
+        clearDirty();
+        return lsn;
+    }
+
+    /*
+     * Each LN knows what kind of log entry it uses to log itself. Overridden
+     * by subclasses.
+     */
+    LNLogEntry createLogEntry(LogEntryType entryType,
+                              DatabaseImpl dbImpl,
+                              byte[] key,
+                              byte[] delDupKey,
+                              long logAbortLsn,
+                              boolean logAbortKnownDeleted,
+                              Txn logTxn,
+                              ReplicationContext repContext) {
+
+        DatabaseId dbId = dbImpl.getId();
+        boolean isDelDup = (delDupKey != null);
+        if (isDelDup) {
+
+            /*
+             * Deleted Duplicate LNs are logged with two keys -- the one
+             * that identifies the main tree (the dup key) and the one that
+             * places them in the duplicate tree (really the data) since we
+             * can't recreate the latter because the data field has been
+             * nulled. Note that the dupKey is passed to the log manager
+             * FIRST, because the dup key is the one that navigates us in
+             * the main tree. The "key" is the one that navigates us in the
+             * duplicate tree.
+             */
+            return new DeletedDupLNLogEntry(entryType,
+                                            this,
+                                            dbId,
+                                            delDupKey,
+                                            key,
+                                            logAbortLsn,
+                                            logAbortKnownDeleted,
+                                            logTxn);
+        } else {
+            /* Not a deleted duplicate LN -- use a regular LNLogEntry. */
+            return new LNLogEntry(entryType,
+                                  this,
+                                  dbId,
+                                  key,
+                                  logAbortLsn,
+                                  logAbortKnownDeleted,
+                                  logTxn);
+        }
+    }
+
+    /**
+     * Log type for transactional entries
+     */
+    protected LogEntryType getTransactionalLogType() {
+        return LogEntryType.LOG_LN_TRANSACTIONAL;
+    }
+
+    /**
+     * @see Node#getLogType()
+     */
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_LN;
+    }
+
+    /**
+     * Returns the total last logged log size, including the LNLogEntry
+     * overhead of this LN when it was last logged and the log entry
+     * header.  Used for computing obsolete size when an LNLogEntry is not in
+     * hand.
+     */
+    public int getLastLoggedSize() {
+        return flags & LAST_LOGGED_SIZE_MASK;
+    }
+
+    /**
+     * Saves the last logged size.
+     */
+    public void setLastLoggedSize(int size) {
+        /* Clear the old size and OR in the new size. */
+        flags = (flags & CLEAR_LAST_LOGGED_SIZE) | size;
+    }
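+
+    /*
+     * Sketch of the flags encoding above (assumed size value): after
+     * setLastLoggedSize(100) on a dirty LN, flags == DIRTY_BIT | 100, so
+     * getLastLoggedSize() masks with LAST_LOGGED_SIZE_MASK and returns 100
+     * while isDirty() still reports true.  The size must fit in 31 bits
+     * for this packing to be lossless.
+     */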
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        int size = super.getLogSize();
+
+        if (isDeleted()) {
+            size += LogUtils.getPackedIntLogSize(-1);
+        } else {
+            int len = data.length;
+            size += LogUtils.getPackedIntLogSize(len);
+            size += len;
+        }
+
+        return size;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        super.writeToLog(logBuffer);
+
+        if (isDeleted()) {
+            LogUtils.writePackedInt(logBuffer, -1);
+        } else {
+            LogUtils.writePackedInt(logBuffer, data.length);
+            LogUtils.writeBytesNoLength(logBuffer, data);
+        }
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion);
+
+        if (entryVersion < 6) {
+            boolean dataExists = LogUtils.readBoolean(itemBuffer);
+            if (dataExists) {
+                data = LogUtils.readByteArray(itemBuffer, true/*unpacked*/);
+            }
+        } else {
+            int size = LogUtils.readInt(itemBuffer, false/*unpacked*/);
+            if (size >= 0) {
+                data = LogUtils.readBytesNoLength(itemBuffer, size);
+            }
+        }
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof LN))
+            return false;
+
+        LN otherLN = (LN) other;
+
+        if (getNodeId() != otherLN.getNodeId())
+            return false;
+
+        if (!Arrays.equals(getData(), otherLN.getData()))
+            return false;
+
+        return true;
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    @Override
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append(beginTag());
+        super.dumpLog(sb, verbose);
+
+        if (data != null) {
+            sb.append("<data>");
+            if (verbose) {
+                sb.append(Key.DUMP_TYPE.dumpByteArray(data));
+            } else {
+                sb.append("hidden");
+            }
+            sb.append("</data>");
+        }
+
+        dumpLogAdditional(sb, verbose);
+
+        sb.append(endTag());
+    }
+
+    /*
+     * Allows subclasses to add additional fields before the end tag.
+     */
+    protected void dumpLogAdditional(StringBuffer sb, boolean verbose) {
+    }
+}
diff --git a/src/com/sleepycat/je/tree/MapLN.java b/src/com/sleepycat/je/tree/MapLN.java
new file mode 100644
index 0000000000000000000000000000000000000000..cc5a80507226dc76d4212da86e912110856cd777
--- /dev/null
+++ b/src/com/sleepycat/je/tree/MapLN.java
@@ -0,0 +1,341 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MapLN.java,v 1.87.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+
+/**
+ * A MapLN represents a Leaf Node in the JE Db Mapping Tree.
+ *
+ * MapLNs contain a DatabaseImpl, which in turn contains three categories of
+ * information: database configuration information, the per-database File
+ * Summary utilization information, and each database's btree root. While LNs
+ * are written to the log as the result of API operations which create new data
+ * records, MapLNs are written to the log as a result of configuration changes,
+ * utilization information changes, or updates to the btree which cascade up
+ * the tree and result in a new root. Because they serve as a bridge between 
+ * the application data btree and the db mapping tree, MapLNs must be written
+ * with special rules, and should only be written from DbTree.modifyDbRoot.
+ * The basic rule is that in order to ensure that the MapLN contains the
+ * proper btree root, the btree root latch is used to protect both any logging
+ * of the MapLN, and any updates to the root lsn.
+ *
+ * Updates to the internal btree nodes obey a strict bottom up approach, in
+ * accordance with the log semantics which require that later log entries are
+ * known to supersede earlier log entries. In other words, for a btree that
+ * looks like
+ *      MapLN
+ *        |
+ *       IN
+ *        |
+ *       BIN
+ *        |
+ *       LN
+ * we know that update operations require the btree nodes to be logged in this
+ * order: LN, BIN, IN, MapLN, so that the reference to each on-disk node is
+ * correct. (Note that logging order is special and different when the btree
+ * is initially created.)
+ *
+ * However, MapLNs may need to be written to disk at arbitrary points in time
+ * in order to save database config or utilization data. Those writes don't
+ * have the time and context to be done in a cascading-upwards fashion.  We
+ * ensure that MapLNs are not erroneously written with an out of sync root by
+ * requiring that DbTree.modifyDbRoot takes the root latch for the application
+ * data btree. RootINs are also written with the root latch, so it serves to
+ * ensure that the root doesn't change during the time when the MapLN is
+ * written. For example, suppose thread 1 is doing a cascading-up MapLN write,
+ * and thread 2 is doing an arbitrary-point MapLN write:
+ *
+ * Thread 1                   Thread 2
+ * --------                   --------
+ * latch root                 latch BIN parent of MapLN
+ * log root IN
+ * log MapLN (Tree root)       wants to log MapLN too -- but has to take
+ *  to refer to new root IN    root latch, so we'll get the right rootIN
+ *
+ * Without latching the root this could produce the following, incorrect log
+ *  30 LNa
+ *  40 BIN
+ *  50 IN (first version of root)
+ *  60 MapLN, refers to IN(50)
+ *  ...
+ *  90 LNb     
+ *  100 BIN
+ *  110 IN (second version of root)
+ *  120 CkptStart (the tree is not dirty, no IN will be logged during the
+ *   ckpt interval)
+ *   ..  something arbitrarily writes out the MapLN
+ *  130 MapLN refers to first root, IN(50)    <------ impossible
+ * 
+ * While a MapLN can't be written out with the wrong root, it's possible
+ * for a rootIN to be logged without the MapLN, and for that rootIN not
+ * to be processed at recovery. Suppose a checkpoint begins and ends
+ * in the window between when a rootIN is written, and DbTree.modifyDbRoot is
+ * called:
+ *   300 log new root IN,  
+ *   update root reference in tree
+ *   unlatch root
+ *
+ *   310 Checkpoint starts
+ *   320 Checkpoint ends
+ *   ...if we crash here, before the MapLN is logged, we won't see the new
+ *   root IN at lsn 300. However, the IN is non-txnal and will be recreated
+ *   during replay of txnal information (LNs) by normal recovery processing.
+ */
+public final class MapLN extends LN {
+
+    private static final String BEGIN_TAG = "<mapLN>";
+    private static final String END_TAG = "</mapLN>";
+
+    private DatabaseImpl databaseImpl;
+    private boolean deleted;
+
+    /**
+     * Create a new MapLN to hold a new databaseImpl. In the ideal world, we'd
+     * have a base LN class so that this MapLN doesn't have a superfluous data
+     * field, but we want to optimize the LN class for size and speed right
+     * now.
+     */
+    public MapLN(DatabaseImpl db) {
+        super(new byte[0], db.getDbEnvironment(), false /* replicate */ );
+        databaseImpl = db;
+        deleted = false;
+    }
+
+    /**
+     * Create an empty MapLN, to be filled in from the log.
+     */
+    public MapLN()
+        throws DatabaseException {
+
+        super();
+        databaseImpl = new DatabaseImpl();
+    }
+
+    @Override
+    public boolean isDeleted() {
+        return deleted;
+    }
+
+    @Override
+    void makeDeleted() {
+        deleted = true;
+
+        /* Release all references to nodes held by this database. */
+        databaseImpl.getTree().setRoot(null, true);
+    }
+
+    public DatabaseImpl getDatabase() {
+        return databaseImpl;
+    }
+
+    /**
+     * Does a fast check without acquiring the MapLN write-lock.  This is
+     * important because the overhead of requesting the lock is significant and
+     * unnecessary if this DB is open or the root IN is resident.  When there
+     * are lots of databases open, this method will be called often during
+     * selection of BINs for eviction.  [#13415]
+     */
+    @Override
+    boolean isEvictableInexact() {
+        /* Always prohibit eviction when je.env.dbEviction=false. */
+        return databaseImpl.getDbEnvironment().getDbEviction() &&
+               !databaseImpl.isInUse() &&
+               !databaseImpl.getTree().isRootResident();
+    }
+
+    /**
+     * Does a guaranteed check by acquiring the write-lock and then calling
+     * isEvictableInexact.  [#13415]
+     */
+    @Override
+    boolean isEvictable()
+        throws DatabaseException {
+
+        boolean evictable = false;
+
+        /* To prevent DB open, get a write-lock on the MapLN. */
+        BasicLocker locker =
+	    BasicLocker.createBasicLocker(databaseImpl.getDbEnvironment());
+        try {
+            LockResult lockResult = locker.nonBlockingLock
+                (getNodeId(), LockType.WRITE, databaseImpl);
+
+            /*
+             * The isEvictableInexact result is guaranteed to hold true during
+             * LN stripping if it is still true after acquiring the write-lock.
+             */
+            if (lockResult.getLockGrant() != LockGrantType.DENIED &&
+                isEvictableInexact()) {
+
+                /*
+                 * While holding both the BIN latch and a write-lock on the
+                 * MapLN, we are guaranteed that the DB is not currently
+                 * open.  It cannot be
+                 * subsequently opened until the BIN latch is released, since
+                 * the BIN latch will block DbTree.getDb (called during DB
+                 * open).  We will evict the LN before releasing the BIN latch.
+                 * After releasing the BIN latch, if a DB open is waiting on
+                 * getDb, then it will proceed, fetch the evicted LN and open
+                 * the DB normally.
+                 */
+                evictable = true;
+            }
+        } finally {
+            /* Release the write-lock.  The BIN latch is still held. */
+            locker.operationEnd();
+        }
+
+        return evictable;
+    }
+
+    /**
+     * Initialize a node that has been faulted in from the log.
+     */
+    @Override
+    public void postFetchInit(DatabaseImpl db, long sourceLsn)
+        throws DatabaseException {
+
+        databaseImpl.setEnvironmentImpl(db.getDbEnvironment());
+    }
+
+    /**
+     * Compute the approximate size of this node in memory for evictor
+     * invocation purposes. Don't count the treeAdmin memory, because
+     * that goes into a different bucket.
+     */
+    @Override
+    public long getMemorySizeIncludedByParent() {
+        return MemoryBudget.MAPLN_OVERHEAD;
+    }
+
+    /**
+     * @see LN#releaseMemoryBudget
+     */
+    @Override
+    public void releaseMemoryBudget() {
+        databaseImpl.releaseTreeAdminMemory();
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(super.dumpString(nSpaces, dumpTags));
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("<deleted val=\"").append(Boolean.toString(deleted));
+        sb.append("\">");
+        sb.append('\n');
+        sb.append(databaseImpl.dumpString(nSpaces));
+        return sb.toString();
+    }
+
+    /*
+     * Logging
+     */
+
+    /**
+     * Log type for transactional entries.
+     */
+    @Override
+    protected LogEntryType getTransactionalLogType() {
+        return LogEntryType.LOG_MAPLN_TRANSACTIONAL;
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_MAPLN;
+    }
+
+    /**
+     * @see LN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        return super.getLogSize() +
+            databaseImpl.getLogSize() +
+            1;                                 // deleted
+    }
+
+    /**
+     * @see LN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        /* Ask ancestors to write to log. */
+        super.writeToLog(logBuffer);
+        databaseImpl.writeToLog(logBuffer);
+        byte booleans = (byte) (deleted ? 1 : 0);
+        logBuffer.put(booleans);
+    }
+
+    /**
+     * @see LN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion);
+        databaseImpl.readFromLog(itemBuffer, entryVersion);
+        byte booleans = itemBuffer.get();
+        deleted = (booleans & 1) != 0;
+    }
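+
+    /*
+     * Illustrative only: the "booleans" byte currently carries just the
+     * deleted flag in bit 0, so a logged value of 0x01 reads back as
+     * deleted == true and 0x00 as deleted == false; the other bits are
+     * presumably reserved for future flags.
+     */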
+
+    /**
+     * @see Loggable#logicalEquals
+     * Should never be replicated.
+     */
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        return false;
+    }
+
+    /**
+     * Dump additional fields. Done this way so the additional info can be
+     * within the XML tags defining the dumped log entry.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb, boolean verbose) {
+        databaseImpl.dumpLog(sb, true);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/NameLN.java b/src/com/sleepycat/je/tree/NameLN.java
new file mode 100644
index 0000000000000000000000000000000000000000..b94379e6c543f602838cfcc95b8d5cfbae08be7e
--- /dev/null
+++ b/src/com/sleepycat/je/tree/NameLN.java
@@ -0,0 +1,218 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NameLN.java,v 1.29.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.NameLNLogEntry;
+import com.sleepycat.je.txn.Txn;
+
+/**
+ * A NameLN represents a Leaf Node in the name->database id mapping tree.
+ */
+public final class NameLN extends LN {
+
+    private static final String BEGIN_TAG = "<nameLN>";
+    private static final String END_TAG = "</nameLN>";
+
+    private DatabaseId id;
+    private boolean deleted;
+
+    /**
+     * In the ideal world, we'd have a base LN class so that this NameLN
+     * doesn't have a superfluous data field, but we want to optimize the LN
+     * class for size and speed right now.
+     */
+    public NameLN(DatabaseId id, EnvironmentImpl envImpl, boolean replicate) {
+        super(new byte[0], envImpl, replicate);
+        this.id = id;
+        deleted = false;
+    }
+
+    /**
+     * Create an empty NameLN, to be filled in from the log.
+     */
+    public NameLN() {
+        super();
+        id = new DatabaseId();
+    }
+
+    @Override
+    public boolean isDeleted() {
+        return deleted;
+    }
+
+    @Override
+    void makeDeleted() {
+        deleted = true;
+    }
+
+    public DatabaseId getId() {
+        return id;
+    }
+
+    public void setId(DatabaseId id) {
+        this.id = id;
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(super.dumpString(nSpaces, dumpTags));
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("<deleted val=\"").append(Boolean.toString(deleted));
+        sb.append("\">");
+        sb.append('\n');
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("<id val=\"").append(id);
+        sb.append("\">");
+        sb.append('\n');
+        return sb.toString();
+    }
+
+    /*
+     * Logging
+     */
+
+    /**
+     * Log type for transactional entries.
+     */
+    @Override
+    protected LogEntryType getTransactionalLogType() {
+        return LogEntryType.LOG_NAMELN_TRANSACTIONAL;
+    }
+
+    /**
+     * @see Node#getLogType
+     */
+    @Override
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_NAMELN;
+    }
+
+    /**
+     * @see LN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        return
+            super.getLogSize() +                     // superclass
+            id.getLogSize() +                        // id
+            1;                                       // deleted flag
+    }
+
+    /**
+     * @see LN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        /* Ask ancestors to write to log. */
+        super.writeToLog(logBuffer);         // super class
+        id.writeToLog(logBuffer);            // id
+        byte booleans = (byte) (deleted ? 1 : 0);
+        logBuffer.put(booleans);
+    }
+
+    /**
+     * @see LN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+        throws LogException {
+
+        super.readFromLog(itemBuffer, entryVersion); // super class
+        id.readFromLog(itemBuffer, entryVersion); // id
+        byte booleans = itemBuffer.get();
+        deleted = (booleans & 1) != 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof NameLN))
+            return false;
+
+        NameLN otherLN = (NameLN) other;
+
+        if (getNodeId() != otherLN.getNodeId())
+            return false;
+
+        if (!(id.equals(otherLN.id)))
+            return false;
+
+        if (deleted != otherLN.deleted)
+            return false;
+
+        return true;
+    }
+
+    /**
+     * Dump additional fields. Done this way so the additional info can be
+     * within the XML tags defining the dumped log entry.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuffer sb, boolean verbose) {
+        id.dumpLog(sb, true);
+    }
+
+    /*
+     * Each LN knows what kind of log entry it uses to log itself. Overridden
+     * by subclasses.
+     */
+    @Override
+    LNLogEntry createLogEntry(LogEntryType entryType,
+                              DatabaseImpl dbImpl,
+                              byte[] key,
+                              byte[] delDupKey,
+                              long logAbortLsn,
+                              boolean logAbortKnownDeleted,
+                              Txn logTxn,
+                              ReplicationContext repContext) {
+
+        return new NameLNLogEntry(entryType,
+                                  this,
+                                  dbImpl.getId(),
+                                  key,
+                                  logAbortLsn,
+                                  logAbortKnownDeleted,
+                                  logTxn,
+                                  repContext);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/Node.java b/src/com/sleepycat/je/tree/Node.java
new file mode 100644
index 0000000000000000000000000000000000000000..4c4447af4f1459ec26660af634d5caf7ac62aca0
--- /dev/null
+++ b/src/com/sleepycat/je/tree/Node.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Node.java,v 1.108.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.latch.LatchNotHeldException;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * A Node contains all the common base information for any JE B-Tree node.
+ */
+public abstract class Node implements Loggable {
+
+    private static final String BEGIN_TAG = "<node>";
+    private static final String END_TAG = "</node>";
+
+    /* The unique id of this node. */
+    private long nodeId;
+
+    /**
+     * Only for subclasses.
+     */
+    protected Node() {
+    }
+
+    /**
+     * Create a new node, assigning it the next available node id.
+     */
+    protected Node(EnvironmentImpl envImpl,
+                   boolean replicated) {
+        if (replicated) {
+            nodeId = envImpl.getNodeSequence().getNextReplicatedNodeId();
+        } else {
+            nodeId = envImpl.getNodeSequence().getNextLocalNodeId();
+        }
+    }
+
+    /**
+     * Initialize a node that has been faulted in from the log.
+     */
+    public void postFetchInit(DatabaseImpl db, long sourceLsn)
+        throws DatabaseException {
+
+        /* Nothing to do. */
+    }
+
+    public long getNodeId() {
+	return nodeId;
+    }
+
+    /* For unit tests only. */
+    void setNodeId(long nid) {
+	nodeId = nid;
+    }
+
+    public void latchShared()
+	throws DatabaseException {
+
+    }
+
+    public void latchShared(CacheMode ignore)
+	throws DatabaseException {
+
+    }
+
+    public void releaseLatch()
+	throws LatchNotHeldException {
+
+    }
+
+    public void verify(byte[] maxKey)
+	throws DatabaseException {
+    }
+
+    /**
+     * @return true if this node is a duplicate-bearing node type, false
+     * otherwise.
+     */
+    public boolean containsDuplicates() {
+	return false;
+    }
+
+    /**
+     * Cover for LNs; just return 0 since they'll always be at the bottom
+     * of the tree.
+     */
+    public int getLevel() {
+	return 0;
+    }
+
+    /*
+     * Depth first search through a duplicate tree looking for an LN that has
+     * nodeId.  When we find it, set location.bin and index and return true.
+     * If we don't find it, return false.
+     *
+     * No latching is performed.
+     */
+    boolean matchLNByNodeId(TreeLocation location,
+                            long nodeId,
+                            CacheMode cachemode)
+	throws DatabaseException {
+
+	throw new DatabaseException("matchLNByNodeId called on non DIN/DBIN");
+    }
+
+    /**
+     * Add yourself to the in memory list if you're a type of node that
+     * should belong.
+     */
+    abstract void rebuildINList(INList inList)
+        throws DatabaseException;
+
+    /**
+     * Remove yourself from the in memory list if you're a type of node that
+     * is put there.
+     */
+    abstract void accountForSubtreeRemoval(INList inList,
+                                           LocalUtilizationTracker
+                                           localTracker)
+        throws DatabaseException;
+
+    /**
+     * @return true if you're part of a deletable subtree.
+     */
+    abstract boolean isValidForDelete()
+        throws DatabaseException;
+
+    /**
+     * @return true if you're an IN in the search path.
+     */
+    protected abstract boolean isSoughtNode(long nid, CacheMode cacheMode)
+        throws DatabaseException;
+
+    /**
+     * @return true if you can be the ancestor of the target IN.
+     * Currently the determining factor is whether the target IN contains
+     * duplicates.
+     */
+    protected abstract boolean canBeAncestor(boolean targetContainsDuplicates);
+
+    /**
+     * Return the approximate size of this node in memory, if this size should
+     * be included in its parent's memory accounting.  For example, all INs
+     * return 0, because they are accounted for individually.  LNs must return
+     * a count, since they're not counted on the INList.
+     */
+    protected long getMemorySizeIncludedByParent() {
+        return 0;
+    }
+
+    /*
+     * Dumping
+     */
+
+    /**
+     * Default toString method at the root of the tree.
+     */
+    @Override
+    public String toString() {
+        return this.dumpString(0, true);
+    }
+
+    private String beginTag() {
+	return BEGIN_TAG;
+    }
+
+    private String endTag() {
+	return END_TAG;
+    }
+
+    public void dump(int nSpaces) {
+	System.out.print(dumpString(nSpaces, true));
+    }
+
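+    /**
+     * Render this node for debug output, indented by nSpaces.  When
+     * dumpTags is true the node id is wrapped in tags, producing e.g.
+     * {@literal <node>42</node>} (illustrative output).
+     */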
+    String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuffer self = new StringBuffer();
+        self.append(TreeUtils.indent(nSpaces));
+	if (dumpTags) {
+	    self.append(beginTag());
+	}
+        self.append(nodeId);
+	if (dumpTags) {
+	    self.append(endTag());
+	}
+        return self.toString();
+    }
+
+    public String shortDescription() {
+	return "<" + getType() + "/" + getNodeId() + ">";
+    }
+
+    public String getType() {
+	return getClass().getName();
+    }
+
+    /**
+     * @return the log entry type to use when this node is logged.
+     */
+    public abstract LogEntryType getLogType();
+
+    /*
+     * Logging support
+     */
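+
+    /*
+     * Illustrative round trip through the Loggable contract (a sketch, not
+     * how JE actually logs nodes; real entries go through the LogManager):
+     *
+     *     ByteBuffer buf = ByteBuffer.allocate(node.getLogSize());
+     *     node.writeToLog(buf);
+     *     buf.flip();
+     *     copy.readFromLog(buf, (byte) 6); // versions >= 6 use packed longs
+     */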
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(nodeId);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedLong(logBuffer, nodeId);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion)
+	throws LogException {
+
+        nodeId = LogUtils.readLong(itemBuffer, (entryVersion < 6));
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append(BEGIN_TAG);
+        sb.append(nodeId);
+        sb.append(END_TAG);
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+	return 0;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/NodeNotEmptyException.java b/src/com/sleepycat/je/tree/NodeNotEmptyException.java
new file mode 100644
index 0000000000000000000000000000000000000000..b46eb64d1fe305e962c99b81b72e2e4d93b8af7a
--- /dev/null
+++ b/src/com/sleepycat/je/tree/NodeNotEmptyException.java
@@ -0,0 +1,26 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NodeNotEmptyException.java,v 1.17.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Error to indicate that a bottom-level BIN is not empty during a
+ * delete-subtree operation.
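+ *
+ * <p>Because the preallocated NODE_NOT_EMPTY instance below is always the
+ * one thrown (e.g. "throw NodeNotEmptyException.NODE_NOT_EMPTY;"), no
+ * per-throw stack trace is built, and callers should not expect a
+ * meaningful trace.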
+ */
+public class NodeNotEmptyException extends Exception {
+
+    /*
+     * Throw this static instance rather than a new one, to avoid the cost
+     * of filling in the stack trace each time.
+     */
+    public static final NodeNotEmptyException NODE_NOT_EMPTY =
+        new NodeNotEmptyException();
+
+    private NodeNotEmptyException() {
+    }
+}
diff --git a/src/com/sleepycat/je/tree/SearchResult.java b/src/com/sleepycat/je/tree/SearchResult.java
new file mode 100644
index 0000000000000000000000000000000000000000..17d0c26ed23f6cd6466e11f1d542b2157cee1751
--- /dev/null
+++ b/src/com/sleepycat/je/tree/SearchResult.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SearchResult.java,v 1.17.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Contains the result of a tree search.
+ */
+public class SearchResult {
+    public boolean exactParentFound;
+    public boolean keepSearching;
+
+    /*
+     * Set to true if a search stopped because a child was not resident, and
+     * we are doing a do-not-fetch kind of search.
+     */
+    public boolean childNotResident;
+    public IN parent;
+    public int index;
+
+    public SearchResult() {
+        exactParentFound = false;
+        keepSearching = true;
+        parent = null;
+        index = -1;
+        childNotResident = false;
+    }
+
+    @Override
+    public String toString() {
+        return
+            "exactParentFound="+ exactParentFound +
+            " keepSearching=" + keepSearching +
+            " parent=" + ((parent == null)? "null":
+                          Long.toString(parent.getNodeId())) +
+            " index=" + index +
+            " childNotResident=" + childNotResident;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/SplitRequiredException.java b/src/com/sleepycat/je/tree/SplitRequiredException.java
new file mode 100644
index 0000000000000000000000000000000000000000..c23907c0bff1f7a35a3411985e0479a676c84d40
--- /dev/null
+++ b/src/com/sleepycat/je/tree/SplitRequiredException.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SplitRequiredException.java,v 1.8.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Indicates that we need to return to the top of the tree in order to
+ * do a forced splitting pass.
+ */
+@SuppressWarnings("serial")
+class SplitRequiredException extends Exception {
+    public SplitRequiredException() {
+    }
+}
diff --git a/src/com/sleepycat/je/tree/TrackingInfo.java b/src/com/sleepycat/je/tree/TrackingInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..9e5750ab2aa2f343d01fa08bcf66725bab90d87e
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TrackingInfo.java
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TrackingInfo.java,v 1.15.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Tracking info packages some tree tracing info.
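+ *
+ * <p>For illustration, toString renders something like
+ * "lsn=0x4/0x1a2b node=42", assuming DbLsn's usual "0xfile/0xoffset"
+ * formatting.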
+ */
+public class TrackingInfo {
+    private long lsn;
+    private long nodeId;
+
+    public TrackingInfo(long lsn, long nodeId) {
+        this.lsn = lsn;
+        this.nodeId = nodeId;
+    }
+
+    @Override
+    public String toString() {
+        return "lsn=" + DbLsn.getNoFormatString(lsn) +
+            " node=" + nodeId;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/Tree.java b/src/com/sleepycat/je/tree/Tree.java
new file mode 100644
index 0000000000000000000000000000000000000000..6ab9e590e842b616a91e330469b147128a287c32
--- /dev/null
+++ b/src/com/sleepycat/je/tree/Tree.java
@@ -0,0 +1,3780 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Tree.java,v 1.458.2.5 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.latch.SharedLatch;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.recovery.RecoveryManager;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockGrantType;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.WriteLockInfo;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Tree implements the JE B+Tree.
+ *
+ * A note on tree search patterns:
+ * There's a set of Tree.search* methods. Some clients of the tree use
+ * those search methods directly, whereas other clients of the tree
+ * tend to use methods built on top of search.
+ *
+ * The semantics of search* are:
+ *   they leave you pointing at a BIN or IN;
+ *   they don't tell you where the reference of interest is;
+ *   they traverse a single tree -- to jump into the duplicate tree, the
+ *   caller has to take explicit action.
+ * The semantics of the get* methods are:
+ *   they leave you pointing at a BIN or IN;
+ *   they return the index of the slot of interest;
+ *   they traverse down to whatever level is needed -- they'll take care of
+ *   jumping into the duplicate tree;
+ *   they are built on top of the search* methods.
+ * For the future:
+ * Over time, we need to clarify which methods are to be used by clients
+ * of the tree. Preferably clients of the tree should use get*, although
+ * there are cases where they need visibility into the tree structure. For
+ * example, tree cursors use search* because they want to add themselves to
+ * the BIN before jumping into the duplicate tree.
+ *
+ * Also, search* should return the location of the slot to save us a
+ * second binary search.
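+ *
+ * An illustrative use of the raw search* layer (a sketch only; real
+ * callers such as CursorImpl add locking and bookkeeping around these
+ * calls):
+ *
+ *   IN in = search(key, SearchType.NORMAL, -1, null, CacheMode.UNCHANGED);
+ *   if (in != null) {
+ *       try {
+ *           int index = in.findEntry(key, false, true); // < 0 if absent
+ *       } finally {
+ *           in.releaseLatch(); // search returns the node latched
+ *       }
+ *   }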
+ */
+public final class Tree implements Loggable {
+
+    /* For debug tracing */
+    private static final String TRACE_ROOT_SPLIT = "RootSplit:";
+    private static final String TRACE_DUP_ROOT_SPLIT = "DupRootSplit:";
+    private static final String TRACE_MUTATE = "Mut:";
+    private static final String TRACE_INSERT = "Ins:";
+    private static final String TRACE_INSERT_DUPLICATE = "InsD:";
+
+    private DatabaseImpl database;
+    private ChildReference root;
+    private int maxMainTreeEntriesPerNode;
+    private int maxDupTreeEntriesPerNode;
+    private boolean purgeRoot;
+
+    /*
+     * Latch that must be held when using/accessing the root node.  Protects
+     * against the root being changed out from underneath us by splitRoot.
+     */
+    private SharedLatch rootLatch;
+
+    private TreeStats treeStats;
+
+    private ThreadLocal<TreeWalkerStatsAccumulator> treeStatsAccumulatorTL =
+        new ThreadLocal<TreeWalkerStatsAccumulator>();
+
+    /*
+     * We don't need the stack trace on this, so always throw a static
+     * instance and avoid the cost of Throwable.fillInStackTrace() every
+     * time it's thrown.
+     * [#13354].
+     */
+    private static SplitRequiredException splitRequiredException =
+        new SplitRequiredException();
+
+    /**
+     * Embodies an enum for the type of search being performed.  NORMAL means
+     * do a regular search down the tree.  LEFT/RIGHT means search down the
+     * left/right side to find the first/last node in the tree.
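+     *
+     * <p>For example, search(null, SearchType.LEFT, -1, null, cacheMode)
+     * descends the leftmost path, as getFirstNode does below (an
+     * illustrative reading of the calls in this class).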
+     */
+    public static class SearchType {
+        /* Search types */
+        public static final SearchType NORMAL = new SearchType();
+        public static final SearchType LEFT   = new SearchType();
+        public static final SearchType RIGHT  = new SearchType();
+
+        /* No search types can be defined outside this class. */
+        private SearchType() {
+        }
+    }
+
+    /* For unit tests */
+    private TestHook waitHook; // used for generating race conditions
+    private TestHook searchHook; // [#12736]
+    private TestHook ckptHook; // [#13897]
+
+    /**
+     * Create a new tree.
+     */
+    public Tree(DatabaseImpl database)
+        throws DatabaseException {
+
+        init(database);
+        setDatabase(database);
+    }
+
+    /**
+     * Create a tree that's being read in from the log.
+     */
+    public Tree()
+        throws DatabaseException {
+
+        init(null);
+        maxMainTreeEntriesPerNode = 0;
+        maxDupTreeEntriesPerNode = 0;
+    }
+
+    /**
+     * Constructor helper.
+     */
+    private void init(DatabaseImpl database) {
+        rootLatch = new SharedLatch("RootLatch");
+        treeStats = new TreeStats();
+        this.root = null;
+        this.database = database;
+    }
+
+    /**
+     * Set the database for this tree. Used by recovery when recreating an
+     * existing tree.
+     */
+    public void setDatabase(DatabaseImpl database)
+        throws DatabaseException {
+
+        this.database = database;
+        maxMainTreeEntriesPerNode = database.getNodeMaxEntries();
+        maxDupTreeEntriesPerNode = database.getNodeMaxDupTreeEntries();
+        DbConfigManager configManager =
+            database.getDbEnvironment().getConfigManager();
+
+        /*
+         * Root compression is no longer supported.  Root compression has no
+         * impact on memory usage now that we evict the root IN.  It reduces
+         * log space taken by INs for empty (but not removed) databases, yet
+         * requires logging an INDelete and MapLN; this provides very little
+         * benefit, if any.  Because it requires extensive testing (which has
+         * not been done), this minor benefit is not worth the cost.  And by
+         * removing it we no longer log INDelete, which reduces complexity
+         * going forward.
+         *
+         * In JE 3.3 we have made the minimal change (setting purgeRoot to
+         * false below) to disable this feature, while in 4.0 and later the
+         * supporting code will be removed.
+         *
+         * [#17546]
+         */
+        purgeRoot = false;
+    }
+
+    /**
+     * @return the database for this Tree.
+     */
+    public DatabaseImpl getDatabase() {
+        return database;
+    }
+
+    /**
+     * Set the root for the tree. Should only be called within the root latch.
+     */
+    public void setRoot(ChildReference newRoot, boolean notLatched) {
+	assert (notLatched || rootLatch.isWriteLockedByCurrentThread());
+        root = newRoot;
+    }
+
+    public ChildReference makeRootChildReference(Node target,
+						 byte[] key,
+						 long lsn) {
+	return new RootChildReference(target, key, lsn);
+    }
+
+    private ChildReference makeRootChildReference() {
+	return new RootChildReference();
+    }
+
+    /*
+     * A tree doesn't have a root if (a) the root field is null, or (b) the
+     * root is non-null but has neither a valid target nor a valid LSN.
+     * Case (b) can happen if the database is or was previously opened in
+     * deferred write mode.
+     * @return false if there is no real root.
+     */
+    public boolean rootExists() {
+        if (root == null) {
+            return false;
+        }
+
+        if ((root.getTarget() == null) &&
+            (root.getLsn() == DbLsn.NULL_LSN)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Perform a fast check to see if the root IN is resident.  No latching is
+     * performed.  To ensure that the root IN is not loaded by another thread,
+     * this method should be called while holding a write lock on the MapLN.
+     * That will prevent opening the DB in another thread, and potentially
+     * loading the root IN. [#13415]
+     */
+    public boolean isRootResident() {
+        return root != null && root.getTarget() != null;
+    }
+
+    /*
+     * Class that overrides fetchTarget() so that if the rootLatch is not
+     * held exclusively when the root is fetched, we upgrade it to exclusive.
+     */
+    private class RootChildReference extends ChildReference {
+
+	private RootChildReference() {
+	    super();
+	}
+
+	private RootChildReference(Node target, byte[] key, long lsn) {
+	    super(target, key, lsn);
+	}
+
+	/* Caller is responsible for releasing rootLatch. */
+        @Override
+	public Node fetchTarget(DatabaseImpl database, IN in)
+	    throws DatabaseException {
+
+	    if (getTarget() == null &&
+		!rootLatch.isWriteLockedByCurrentThread()) {
+		rootLatch.release();
+		rootLatch.acquireExclusive();
+	    }
+
+	    return super.fetchTarget(database, in);
+	}
+
+        @Override
+	public void setTarget(Node target) {
+	    assert rootLatch.isWriteLockedByCurrentThread();
+	    super.setTarget(target);
+	}
+
+        @Override
+	public void clearTarget() {
+	    assert rootLatch.isWriteLockedByCurrentThread();
+	    super.clearTarget();
+	}
+
+        @Override
+	public void setLsn(long lsn) {
+	    assert rootLatch.isWriteLockedByCurrentThread();
+	    super.setLsn(lsn);
+	}
+
+        @Override
+	void updateLsnAfterOptionalLog(DatabaseImpl dbImpl, long lsn) {
+	    assert rootLatch.isWriteLockedByCurrentThread();
+	    super.updateLsnAfterOptionalLog(dbImpl, lsn);
+	}
+    }
+
+    /**
+     * Get the LSN of the root IN. Obtained without latching; should only be
+     * accessed while quiescent.
+     */
+    public long getRootLsn() {
+        if (root == null) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return root.getLsn();
+        }
+    }
+
+    /**
+     * @return the TreeStats for this tree.
+     */
+    TreeStats getTreeStats() {
+        return treeStats;
+    }
+
+    private TreeWalkerStatsAccumulator getTreeStatsAccumulator() {
+	if (EnvironmentImpl.getThreadLocalReferenceCount() > 0) {
+	    return treeStatsAccumulatorTL.get();
+	} else {
+	    return null;
+	}
+    }
+
+    public void setTreeStatsAccumulator(TreeWalkerStatsAccumulator tSA) {
+	treeStatsAccumulatorTL.set(tSA);
+    }
+
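+    /**
+     * Run a unit of work while holding the root latch.  An illustrative
+     * sketch (assuming WithRootLatched declares the single
+     * doWork(ChildReference) method invoked below):
+     * <pre>
+     *     IN rootIN = tree.withRootLatchedExclusive(new WithRootLatched() {
+     *         public IN doWork(ChildReference root)
+     *             throws DatabaseException {
+     *             return (root == null) ?
+     *                 null : (IN) root.fetchTarget(tree.getDatabase(), null);
+     *         }
+     *     });
+     * </pre>
+     */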
+    public IN withRootLatchedExclusive(WithRootLatched wrl)
+        throws DatabaseException {
+
+        try {
+            rootLatch.acquireExclusive();
+            return wrl.doWork(root);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    public IN withRootLatchedShared(WithRootLatched wrl)
+        throws DatabaseException {
+
+        try {
+            rootLatch.acquireShared();
+            return wrl.doWork(root);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    /**
+     * Deletes a BIN specified by key from the tree. If the BIN resides in a
+     * subtree that can be pruned away, prune as much as possible, so we
+     * don't leave a branch that has no BINs.
+     *
+     * It's possible that by the time we get there, the targeted BIN will
+     * have entries or resident cursors; either will prevent deletion.
+     *
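+     * An illustrative caller (a sketch; compressor-style code would look
+     * roughly like this):
+     * <pre>
+     *     try {
+     *         tree.delete(idKey, localTracker);
+     *     } catch (NodeNotEmptyException NNEE) {
+     *         // the BIN gained entries since it was queued; nothing to prune
+     *     } catch (CursorsExistException CEE) {
+     *         // cursors pin the BIN; retry later
+     *     }
+     * </pre>
+     *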
+     * @param idKey the identifier key of the node to delete.
+     * @param localTracker is used for tracking obsolete node info.
+     */
+    public void delete(byte[] idKey,
+                       LocalUtilizationTracker localTracker)
+        throws DatabaseException,
+               NodeNotEmptyException,
+               CursorsExistException {
+
+        IN subtreeRootIN = null;
+
+        /*
+         * A delete is a reverse split that must be propagated up to the root.
+         * [#13501] Keep all nodes from the rootIN to the parent of the
+         * deletable subtree latched as we descend so we can log the
+         * IN deletion and cascade the logging up the tree. The latched
+         * nodes are kept in order in the nodeLadder.
+         */
+        ArrayList<SplitInfo> nodeLadder = new ArrayList<SplitInfo>();
+
+        IN rootIN = null;
+        boolean rootNeedsUpdating = false;
+        rootLatch.acquireExclusive();
+        try {
+            if (!rootExists()) {
+                /* no action, tree is deleted or was never persisted. */
+                return;
+            }
+
+            rootIN = (IN) root.fetchTarget(database, null);
+            rootIN.latch(CacheMode.UNCHANGED);
+
+            searchDeletableSubTree(rootIN, idKey, nodeLadder);
+            if (nodeLadder.size() == 0) {
+
+                /*
+                 * The root is the top of the deletable subtree. Delete the
+                 * whole tree if the purge root je property is set.
+                 * In general, there's no reason to delete this last
+                 * IN->...IN->BIN subtree since we're likely to add more
+                 * nodes to this tree again.  Deleting the subtree also
+                 * adds to the space used by the log since a MapLN needs to
+                 * be written when the root is nulled, and a MapLN, IN
+                 * (root), BIN needs to be written when the root is
+                 * recreated.
+                 *
+                 * Consider a queue application which frequently inserts
+                 * and deletes entries and oftentimes leaves the tree
+                 * empty, but will insert new records again.
+                 *
+                 * An optimization might be to prune the multiple IN path
+                 * to the last BIN (if it even exists) to just a root IN
+                 * pointing to the single BIN, but this doesn't feel like
+                 * it's worth the trouble since the extra depth doesn't
+                 * matter all that much.
+                 */
+
+                if (purgeRoot) {
+                    subtreeRootIN = logTreeRemoval(rootIN);
+                    if (subtreeRootIN != null) {
+                        rootNeedsUpdating = true;
+                    }
+                }
+            } else {
+                /* Detach this subtree. */
+                SplitInfo detachPoint =
+                    nodeLadder.get(nodeLadder.size() - 1);
+                boolean deleteOk =
+                    detachPoint.parent.deleteEntry(detachPoint.index,
+                                                   true);
+                assert deleteOk;
+
+                /* Cascade updates upward, including writing the root IN. */
+                rootNeedsUpdating = cascadeUpdates(nodeLadder, null, -1);
+                subtreeRootIN = detachPoint.child;
+            }
+        } finally {
+            releaseNodeLadderLatches(nodeLadder);
+
+            if (rootIN != null) {
+                rootIN.releaseLatch();
+            }
+
+            rootLatch.release();
+        }
+
+        if (subtreeRootIN != null) {
+
+            EnvironmentImpl envImpl = database.getDbEnvironment();
+            if (rootNeedsUpdating) {
+
+                /*
+                 * modifyDbRoot will grab locks and we can't have the INList
+                 * latches or root latch held while it tries to acquire locks.
+                 */
+                DbTree dbTree = envImpl.getDbTree();
+                dbTree.optionalModifyDbRoot(database);
+                RecoveryManager.traceRootDeletion(Level.FINE, database);
+            }
+
+            /*
+             * Count obsolete nodes after logging the delete. We can do
+             * this without having the nodes of the subtree latched because the
+             * subtree has been detached from the tree.
+             */
+            INList inList = envImpl.getInMemoryINs();
+            accountForSubtreeRemoval(inList, subtreeRootIN, localTracker);
+        }
+    }
+
+    private void releaseNodeLadderLatches(ArrayList<SplitInfo> nodeLadder)
+        throws DatabaseException {
+
+        /*
+         * Clear any latches left in the node ladder. Release from the
+         * bottom up.
+         */
+        ListIterator<SplitInfo> iter =
+            nodeLadder.listIterator(nodeLadder.size());
+        while (iter.hasPrevious()) {
+            SplitInfo info = iter.previous();
+            info.child.releaseLatch();
+        }
+    }
+
+    /**
+     * The entire tree is empty: clear the root and log a new MapLN.
+     * @return the rootIN that was detached, or null if nothing was
+     * removed.
+     */
+    private IN logTreeRemoval(IN rootIN)
+        throws DatabaseException {
+
+	assert rootLatch.isWriteLockedByCurrentThread();
+        IN detachedRootIN = null;
+
+        /*
+         * TODO: Suspect that validateSubtree is no longer needed, now that
+         * we hold all latches.  Revisit.
+         */
+        if ((rootIN.getNEntries() <= 1) &&
+            (rootIN.validateSubtreeBeforeDelete(0))) {
+
+            root = null;
+
+            /*
+             * Record the root deletion for recovery. Do this within
+             * the root latch. We need to put this log entry into the
+             * log before another thread comes in and creates a new
+             * rootIN for this database.
+             *
+             * For example,
+             * LSN 1000 IN delete info entry
+             * LSN 1010 new IN, for next set of inserts
+             * LSN 1020 new BIN, for next set of inserts.
+             *
+             * The entry at 1000 is needed so that LSN 1010 will
+             * properly supersede all previous IN entries in the tree.
+             * Without the INDelete, we may not use the new root, because
+             * it has a different node id.
+             */
+            INDeleteInfo info = new INDeleteInfo(rootIN.getNodeId(),
+                                                 rootIN.getIdentifierKey(),
+                                                 database.getId());
+            info.optionalLog(database.getDbEnvironment().getLogManager(),
+                             database);
+
+            detachedRootIN = rootIN;
+        }
+        return detachedRootIN;
+    }
+
+    /**
+     * Update nodes for a delete, going upwards. For example, suppose a
+     * node ladder holds:
+     * INa, INb, index for INb in INa
+     * INb, INc, index for INc in INb
+     * INc, BINd, index for BINd in INc
+     *
+     * When we enter this method, BINd has already been removed from INc. We
+     * need to
+     *  - log INc
+     *  - update INb, log INb
+     *  - update INa, log INa
+     *
+     * @param nodeLadder List of SplitInfos describing each node pair on the
+     * downward path
+     * @param binRoot parent of the dup tree, or null if this is not for
+     * dups.
+     * @param index slot occupied by this DIN tree.
+     * @return whether the DB root needs updating.
+     */
+    private boolean cascadeUpdates(ArrayList<SplitInfo> nodeLadder,
+                                   BIN binRoot,
+                                   int index)
+        throws DatabaseException {
+
+        ListIterator<SplitInfo> iter =
+            nodeLadder.listIterator(nodeLadder.size());
+        EnvironmentImpl envImpl = database.getDbEnvironment();
+        LogManager logManager = envImpl.getLogManager();
+
+        long newLsn = DbLsn.NULL_LSN;
+        SplitInfo info = null;
+        while (iter.hasPrevious()) {
+            info = iter.previous();
+
+            if (newLsn != DbLsn.NULL_LSN) {
+                info.parent.updateEntry(info.index, newLsn);
+            }
+            newLsn = info.parent.optionalLog(logManager);
+        }
+
+        boolean rootNeedsUpdating = false;
+        if (info != null) {
+            /* We've logged the top of this subtree, record it properly. */
+            if (info.parent.isDbRoot()) {
+                /* We updated the rootIN of the database. */
+                assert rootLatch.isWriteLockedByCurrentThread();
+                root.updateLsnAfterOptionalLog(database, newLsn);
+                rootNeedsUpdating = true;
+            } else if ((binRoot != null) && info.parent.isRoot()) {
+                /* We updated the DIN root of the database. */
+                binRoot.updateEntry(index, newLsn);
+            } else {
+                assert false;
+            }
+        }
+        return rootNeedsUpdating;
+    }
+
+    /**
+     * Delete a subtree of a duplicate tree.  Find the duplicate tree using
+     * mainKey in the top part of the tree and idKey in the duplicate tree.
+     *
+     * @param idKey the identifier key to be used in the duplicate subtree to
+     * find the duplicate path.
+     * @param mainKey the key to be used in the main tree to find the
+     * duplicate subtree.
+     * @param localTracker is used for tracking obsolete node info.
+     *
+     * @throws CursorsExistException if cursors were still present on the
+     * leaf DBIN of the subtree that was located, preventing the delete.
+     */
+    public void deleteDup(byte[] idKey,
+                          byte[] mainKey,
+                          LocalUtilizationTracker localTracker)
+        throws DatabaseException,
+               NodeNotEmptyException,
+               CursorsExistException {
+
+        /* Find the BIN that is the parent of this duplicate tree. */
+        IN in =
+            search(mainKey, SearchType.NORMAL, -1, null, CacheMode.UNCHANGED);
+
+        IN deletedSubtreeRoot = null;
+        try {
+            assert in.isLatchOwnerForWrite();
+            assert in instanceof BIN;
+            assert in.getNEntries() > 0;
+
+            /* Find the appropriate entry in this BIN. */
+            int index = in.findEntry(mainKey, false, true);
+            if (index >= 0) {
+                deletedSubtreeRoot = deleteDupSubtree(idKey, (BIN) in, index);
+            }
+        } finally {
+            in.releaseLatch();
+        }
+
+        if (deletedSubtreeRoot != null) {
+            EnvironmentImpl envImpl = database.getDbEnvironment();
+            accountForSubtreeRemoval(envImpl.getInMemoryINs(),
+                                     deletedSubtreeRoot, localTracker);
+        }
+    }
+
+    /**
+     * We enter and leave this method with 'bin' latched.
+     * @return the root of the subtree we have deleted, so it can be
+     * properly accounted for. May be null if nothing was deleted.
+     */
+    private IN deleteDupSubtree(byte[] idKey,
+                                BIN bin,
+                                int index)
+        throws DatabaseException,
+               NodeNotEmptyException,
+               CursorsExistException {
+
+        EnvironmentImpl envImpl = database.getDbEnvironment();
+	DupCountLN dcl = null;
+        BasicLocker locker = BasicLocker.createBasicLocker(envImpl);
+
+        /*  Latch the DIN root. */
+        DIN duplicateRoot = (DIN) bin.fetchTarget(index);
+        duplicateRoot.latch(CacheMode.UNCHANGED);
+
+        ArrayList<SplitInfo> nodeLadder = new ArrayList<SplitInfo>();
+        IN subtreeRootIN = null;
+
+	try {
+
+            /*
+             * Read lock the dup count LN to ascertain whether there are any
+             * writers in the tree. TODO: This seems unnecessary now, revisit.
+             */
+            ChildReference dclRef = duplicateRoot.getDupCountLNRef();
+            dcl = (DupCountLN) dclRef.fetchTarget(database, duplicateRoot);
+
+            LockResult lockResult = locker.nonBlockingLock(dcl.getNodeId(),
+                                                           LockType.READ,
+                                                           database);
+            if (lockResult.getLockGrant() == LockGrantType.DENIED) {
+                throw CursorsExistException.CURSORS_EXIST;
+            }
+
+            /*
+             * We don't release the latch on bin before we search the
+             * duplicate tree below because we might be deleting the whole
+             * subtree from the IN and we want to keep it latched until we
+             * know.
+             */
+            searchDeletableSubTree(duplicateRoot, idKey, nodeLadder);
+
+            if (nodeLadder.size() == 0) {
+                /* We're deleting the duplicate root. */
+                if (bin.nCursors() == 0) {
+                    boolean deleteOk = bin.deleteEntry(index, true);
+                    assert deleteOk;
+
+                    /*
+                     * Use an INDupDeleteInfo to make it clear that this
+                     * duplicate tree has been eradicated. This is analogous to
+                     * deleting a root; we must be sure that we can overlay
+                     * another subtree onto this slot at recovery redo.
+                     */
+                    INDupDeleteInfo info =
+                        new INDupDeleteInfo(duplicateRoot.getNodeId(),
+                                            duplicateRoot.getMainTreeKey(),
+                                            duplicateRoot.getDupTreeKey(),
+                                            database.getId());
+                    info.optionalLog(envImpl.getLogManager(), database);
+
+                    subtreeRootIN = duplicateRoot;
+
+                    if (bin.getNEntries() == 0) {
+                        database.getDbEnvironment().
+                            addToCompressorQueue(bin, null, false);
+                    }
+                } else {
+
+                    /*
+                     * Cursors prevent us from deleting this dup tree; we'll
+                     * have to retry.
+                     */
+                    throw CursorsExistException.CURSORS_EXIST;
+                }
+            } else {
+
+                /* We're deleting a portion of the duplicate tree. */
+                SplitInfo detachPoint =
+                    nodeLadder.get(nodeLadder.size() - 1);
+                boolean deleteOk =
+                    detachPoint.parent.deleteEntry(detachPoint.index,
+                                                   true);
+                assert deleteOk;
+
+                /*
+                 * Cascade updates upward, including writing the root
+                 * DIN and parent BIN.
+                 */
+                cascadeUpdates(nodeLadder, bin, index);
+                subtreeRootIN = detachPoint.child;
+	    }
+	} finally {
+            releaseNodeLadderLatches(nodeLadder);
+
+	    locker.operationEnd(true);
+	    duplicateRoot.releaseLatch();
+	}
+
+        return subtreeRootIN;
+    }
+
+    /**
+     * Find the leftmost node (IN or BIN) in the tree.  Do not descend into a
+     * duplicate tree if the leftmost entry of the first BIN refers to one.
+     *
+     * @return the leftmost node in the tree, null if the tree is empty.  The
+     * returned node is latched and the caller must release it.
+     */
+    public IN getFirstNode(CacheMode cacheMode)
+        throws DatabaseException {
+
+        return search
+            (null, SearchType.LEFT, -1, null, cacheMode);
+    }
+
+    /**
+     * Find the rightmost node (IN or BIN) in the tree.  Do not descend into a
+     * duplicate tree if the rightmost entry of the last BIN refers to one.
+     *
+     * @return the rightmost node in the tree, null if the tree is empty.  The
+     * returned node is latched and the caller must release it.
+     */
+    public IN getLastNode(CacheMode cacheMode)
+        throws DatabaseException {
+
+        return search
+            (null, SearchType.RIGHT, -1, null, cacheMode);
+    }
+
+    /**
+     * Find the leftmost node (DBIN) in a duplicate tree.
+     *
+     * @return the leftmost node in the tree, null if the tree is empty.  The
+     * returned node is latched and the caller must release it.
+     */
+    public DBIN getFirstNode(DIN dupRoot, CacheMode cacheMode)
+        throws DatabaseException {
+
+        if (dupRoot == null) {
+            throw new IllegalArgumentException
+                ("getFirstNode passed null root");
+        }
+
+        assert dupRoot.isLatchOwnerForWrite();
+
+        IN ret = searchSubTree
+            (dupRoot, null, SearchType.LEFT, -1, null, cacheMode);
+        return (DBIN) ret;
+    }
+
+    /**
+     * Find the rightmost node (DBIN) in a duplicate tree.
+     *
+     * @return the rightmost node in the tree, null if the tree is empty.  The
+     * returned node is latched and the caller must release it.
+     */
+    public DBIN getLastNode(DIN dupRoot, CacheMode cacheMode)
+        throws DatabaseException {
+
+        if (dupRoot == null) {
+            throw new IllegalArgumentException
+                ("getLastNode passed null root");
+        }
+
+        assert dupRoot.isLatchOwnerForWrite();
+
+        IN ret = searchSubTree
+            (dupRoot, null, SearchType.RIGHT, -1, null, cacheMode);
+        return (DBIN) ret;
+    }
+
+    /**
+     * GetParentNode without optional tracking.
+     */
+    public SearchResult getParentINForChildIN(IN child,
+					      boolean requireExactMatch,
+					      CacheMode cacheMode)
+        throws DatabaseException {
+
+        return getParentINForChildIN
+            (child, requireExactMatch, cacheMode, -1, null);
+    }
+
+    /**
+     * Return a reference to the parent or possible parent of the child.  Used
+     * by objects that need to take a standalone node and find it in the tree,
+     * like the evictor, checkpointer, and recovery.
+     *
+     * @param child The child node for which to find the parent.  This node is
+     * latched by the caller and is released by this function before returning
+     * to the caller.
+     *
+     * @param requireExactMatch if true, we must find the exact parent, not a
+     * potential parent.
+     *
+     * @param cacheMode The CacheMode for affecting the hotness of the tree.
+     *
+     * @param trackingList if not null, add the LSNs of the parents visited
+     * along the way, as a debug tracing mechanism. This is meant to stay in
+     * production, to add information to the log.
+     *
+     * @return a SearchResult object. If the parent has been found,
+     * result.exactParentFound is true. If any parent, exact or potential,
+     * has been found, result.parent refers to that node.
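+     *
+     * <p>Illustrative use (a sketch; real callers such as the evictor do
+     * additional bookkeeping):
+     * <pre>
+     *     child.latch(cacheMode);
+     *     SearchResult result = tree.getParentINForChildIN(
+     *         child, true, cacheMode);       // true: requireExactMatch
+     *     if (result.exactParentFound) {
+     *         try {
+     *             // result.parent is latched; result.index is child's slot
+     *         } finally {
+     *             result.parent.releaseLatch();
+     *         }
+     *     }
+     * </pre>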
+     */
+    public SearchResult getParentINForChildIN(IN child,
+					      boolean requireExactMatch,
+                                              CacheMode cacheMode,
+                                              int targetLevel,
+					      List<TrackingInfo> trackingList)
+        throws DatabaseException {
+
+        /* Sanity checks */
+        if (child == null) {
+            throw new IllegalArgumentException("getParentNode passed null");
+        }
+
+        assert child.isLatchOwnerForWrite();
+
+        /*
+         * Get information from child before releasing latch.
+         */
+        byte[] mainTreeKey = child.getMainTreeKey();
+        byte[] dupTreeKey = child.getDupTreeKey();
+        boolean isRoot = child.isRoot();
+        child.releaseLatch();
+
+        return getParentINForChildIN(child.getNodeId(),
+                                     child.containsDuplicates(),
+                                     isRoot,
+                                     mainTreeKey,
+                                     dupTreeKey,
+                                     requireExactMatch,
+                                     cacheMode,
+                                     targetLevel,
+                                     trackingList,
+                                     true);
+    }
+
+    /**
+     * Return a reference to the parent or possible parent of the child.  Used
+     * by objects that need to take a node id and find it in the tree,
+     * like the evictor, checkpointer, and recovery.
+     *
+     * @param requireExactMatch if true, we must find the exact parent, not a
+     * potential parent.
+     *
+     * @param cacheMode The CacheMode for affecting the hotness of the tree.
+     *
+     * @param trackingList if not null, add the LSNs of the parents visited
+     * along the way, as a debug tracing mechanism. This is meant to stay in
+     * production, to add information to the log.
+     *
+     * @param doFetch if false, stop the search if we run into a non-resident
+     * child. Used by the checkpointer to avoid conflicting with work done
+     * by the evictor.
+     *
+     * @return a SearchResult object. If the parent has been found,
+     * result.exactParentFound is true. If any parent, exact or potential,
+     * has been found, result.parent refers to that node.
+     */
+    public SearchResult getParentINForChildIN(long targetNodeId,
+                                              boolean targetContainsDuplicates,
+                                              boolean targetIsRoot,
+                                              byte[] targetMainTreeKey,
+                                              byte[] targetDupTreeKey,
+					      boolean requireExactMatch,
+					      CacheMode cacheMode,
+                                              int targetLevel,
+					      List<TrackingInfo> trackingList,
+                                              boolean doFetch)
+        throws DatabaseException {
+
+        IN rootIN = doFetch ?
+            getRootINLatchedExclusive(cacheMode) :
+            getRootIN(cacheMode);
+
+        SearchResult result = new SearchResult();
+        if (rootIN != null) {
+            /* The tracking list is a permanent tracing aid. */
+            if (trackingList != null) {
+                trackingList.add(new TrackingInfo(root.getLsn(),
+                                                  rootIN.getNodeId()));
+            }
+
+            IN potentialParent = rootIN;
+
+            try {
+                while (result.keepSearching) {
+
+		    /*
+		     * [12736] Prune away oldBin.  Assert has intentional
+		     * side effect.
+		     */
+		    assert TestHookExecute.doHookIfSet(searchHook);
+
+                    potentialParent.findParent(SearchType.NORMAL,
+                                               targetNodeId,
+                                               targetContainsDuplicates,
+                                               targetIsRoot,
+                                               targetMainTreeKey,
+                                               targetDupTreeKey,
+                                               result,
+                                               requireExactMatch,
+					       cacheMode,
+                                               targetLevel,
+                                               trackingList,
+                                               doFetch);
+                    potentialParent = result.parent;
+                }
+            } catch (Exception e) {
+
+		/*
+		 * The only thing that can be latched at this point is
+		 * potentialParent.
+		 */
+		potentialParent.releaseLatch();
+		throw new DatabaseException(e);
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Return a reference to the parent of this LN. This searches through the
+     * main and duplicate tree and allows splits. Set the tree location to the
+     * proper BIN parent whether or not the LN child is found. That's because
+     * if the LN is not found, recovery or abort will need to place it within
+     * the tree, and so we must point at the appropriate position.
+     *
+     * <p>When this method returns with location.bin non-null, the BIN is
+     * latched and must be unlatched by the caller.  Note that location.bin may
+     * be non-null even if this method returns false.</p>
+     *
+     * @param location a holder class to hold state about the location
+     * of our search. Sort of an internal cursor.
+     *
+     * @param mainKey key to navigate through the main tree
+     *
+     * @param dupKey key to navigate through the duplicate tree. May be null,
+     * since deleted LNs have no data.
+     *
+     * @param ln the node instantiated from the log
+     *
+     * @param splitsAllowed true if this method is allowed to cause tree splits
+     * as a side effect. In practice, recovery can cause splits, but abort
+     * can't.
+     *
+     * @param searchDupTree true if a search through the dup tree looking for
+     * a match on the ln's node id should be made (only in the case where
+     * dupKey == null).  See SR 8984.
+     *
+     * @param cacheMode The CacheMode for affecting the hotness of the tree.
+     *
+     * @return true if node found in tree.
+     * If false is returned and there is the possibility that we can insert
+     * the record into a plausible parent, we must also set
+     * - location.bin (may be null if no possible parent is found)
+     * - location.lnKey (need not be set if there is no possible parent).
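+     *
+     * <p>An illustrative call (a sketch, assuming TreeLocation's no-arg
+     * constructor; abort/recovery-style callers add their own handling):
+     * <pre>
+     *     TreeLocation location = new TreeLocation();
+     *     boolean found = tree.getParentBINForChildLN(
+     *         location, mainKey, dupKey, ln, false, true, false,
+     *         CacheMode.UNCHANGED);
+     *     try {
+     *         // if found, location.bin/location.index give the slot and
+     *         // location.childLsn its LSN
+     *     } finally {
+     *         if (location.bin != null) {
+     *             location.bin.releaseLatch(); // latched even when !found
+     *         }
+     *     }
+     * </pre>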
+     */
+    public boolean getParentBINForChildLN(TreeLocation location,
+                                          byte[] mainKey,
+                                          byte[] dupKey,
+                                          LN ln,
+                                          boolean splitsAllowed,
+					  boolean findDeletedEntries,
+					  boolean searchDupTree,
+                                          CacheMode cacheMode)
+        throws DatabaseException {
+
+        /*
+         * Find the BIN that either points to this LN or could be its
+         * ancestor.
+         */
+        IN searchResult = null;
+	if (splitsAllowed) {
+	    searchResult = searchSplitsAllowed(mainKey, -1, cacheMode);
+	} else {
+	    searchResult = search
+		(mainKey, SearchType.NORMAL, -1, null, cacheMode);
+	}
+	location.bin = (BIN) searchResult;
+
+	if (location.bin == null) {
+	    return false;
+	}
+
+	/*
+	 * If caller wants us to consider knownDeleted entries then do an
+	 * inexact search in findEntry since that will find knownDeleted
+	 * entries.  If caller doesn't want us to consider knownDeleted entries
+	 * then do an exact search in findEntry since that will not return
+	 * knownDeleted entries.
+	 */
+	boolean exactSearch = false;
+	boolean indicateIfExact = true;
+	if (!findDeletedEntries) {
+	    exactSearch = true;
+	    indicateIfExact = false;
+	}
+        location.index =
+	    location.bin.findEntry(mainKey, indicateIfExact, exactSearch);
+
+	boolean match = false;
+	if (findDeletedEntries) {
+	    match = (location.index >= 0 &&
+		     (location.index & IN.EXACT_MATCH) != 0);
+	    location.index &= ~IN.EXACT_MATCH;
+	} else {
+	    match = (location.index >= 0);
+	}
+
+        if (match) {
+
+            /*
+             * A BIN parent was found and a slot matches the key. See if
+             * we have to search further into what may be a dup tree.
+             */
+	    if (!location.bin.isEntryKnownDeleted(location.index)) {
+
+                /*
+                 * If this database doesn't support duplicates, no point in
+                 * incurring the potentially large cost of fetching in
+                 * the child to check for dup trees. In the future, we could
+                 * optimize further by storing state per slot as to whether
+                 * a dup tree hangs below.
+                 */
+                if (database.getSortedDuplicates()) {
+
+                    Node childNode = location.bin.fetchTarget(location.index);
+                    try {
+
+                        /*
+                         * Is our target LN a regular record or a dup count?
+                         */
+                        if (childNode == null) {
+                            /* Child is a deleted cleaned LN. */
+                        } else if (ln.containsDuplicates()) {
+                            /* This is a duplicate count LN. */
+                            return searchDupTreeForDupCountLNParent
+                                (location, mainKey, childNode);
+                        } else {
+
+                            /*
+                             * This is a regular LN. If this is a dup tree,
+                             * descend and search. If not, we've found the
+                             * parent.
+                             */
+                            if (childNode.containsDuplicates()) {
+                                if (dupKey == null) {
+
+                                    /*
+                                     * We are at a dup tree but our target LN
+                                     * has no dup key because it's a deleted
+                                     * LN.  We've encountered the case of SR
+                                     * 8984 where we are searching for an LN
+                                     * that was deleted before the conversion
+                                     * to a duplicate tree.
+                                     */
+				    return searchDupTreeByNodeId
+                                        (location, childNode, ln,
+                                         searchDupTree, cacheMode);
+                                } else {
+                                    return searchDupTreeForDBIN
+                                        (location, dupKey, (DIN) childNode,
+                                         ln, findDeletedEntries,
+                                         indicateIfExact, exactSearch,
+                                         splitsAllowed, cacheMode);
+                                }
+                            }
+                        }
+                    } catch (DatabaseException e) {
+			location.bin.releaseLatchIfOwner();
+			throw e;
+                    }
+                }
+            }
+
+            /* We had a match, we didn't need to search the duplicate tree. */
+            location.childLsn = location.bin.getLsn(location.index);
+            return true;
+        } else {
+            location.lnKey = mainKey;
+            return false;
+        }
+    }
+
+    /**
+     * For SR [#8984]: our prospective child is a deleted LN, and we're facing
+     * a dup tree. Alas, the deleted LN has no data, and therefore nothing to
+     * guide the search in the dup tree. Instead, we search by node id.  This
+     * is very expensive, but this situation is very rare.
+     */
+    private boolean searchDupTreeByNodeId(TreeLocation location,
+                                          Node childNode,
+                                          LN ln,
+                                          boolean searchDupTree,
+                                          CacheMode cacheMode)
+        throws DatabaseException {
+
+        if (searchDupTree) {
+            BIN oldBIN = location.bin;
+            if (childNode.matchLNByNodeId
+                (location, ln.getNodeId(), cacheMode)) {
+                location.index &= ~IN.EXACT_MATCH;
+                if (oldBIN != null) {
+                    oldBIN.releaseLatch();
+                }
+                location.bin.latch(cacheMode);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+
+            /*
+             * This is called from undo() so this LN can
+             * just be ignored.
+             */
+            return false;
+        }
+    }
+
+    /**
+     * @return true if childNode is the DIN parent of this DupCountLN
+     */
+    private boolean searchDupTreeForDupCountLNParent(TreeLocation location,
+                                                     byte[] mainKey,
+                                                     Node childNode) {
+
+        location.lnKey = mainKey;
+        if (childNode instanceof DIN) {
+            DIN dupRoot = (DIN) childNode;
+            location.childLsn = dupRoot.getDupCountLNRef().getLsn();
+            return true;
+        } else {
+
+            /*
+             * If we're looking for a DupCountLN but don't find a duplicate
+             * tree, then the key now refers to a single datum.  This can
+             * happen when all dups for a key are deleted, the compressor runs,
+             * and then a single datum is inserted.  [#10597]
+             */
+            return false;
+        }
+    }
+
+    /**
+     * Search the dup tree for the DBIN parent of this LN.
+     */
+    private boolean searchDupTreeForDBIN(TreeLocation location,
+                                         byte[] dupKey,
+                                         DIN dupRoot,
+                                         LN ln,
+                                         boolean findDeletedEntries,
+                                         boolean indicateIfExact,
+                                         boolean exactSearch,
+                                         boolean splitsAllowed,
+                                         CacheMode cacheMode)
+        throws DatabaseException {
+
+        assert dupKey != null;
+
+        dupRoot.latch(cacheMode);
+
+	/* Make sure there's room for inserts. */
+	if (maybeSplitDuplicateRoot(location.bin, location.index, cacheMode)) {
+	    dupRoot = (DIN) location.bin.fetchTarget(location.index);
+	}
+
+	/*
+	 * Wait until after any duplicate root splitting to unlatch the BIN.
+	 */
+	location.bin.releaseLatch();
+
+	/*
+	 * The dupKey is going to be the key that represents the LN in this BIN
+	 * parent.
+	 */
+	location.lnKey = dupKey;
+
+	/* Search the dup tree */
+	if (splitsAllowed) {
+	    try {
+		location.bin = (BIN) searchSubTreeSplitsAllowed
+		    (dupRoot, location.lnKey, ln.getNodeId(), cacheMode);
+	    } catch (SplitRequiredException e) {
+
+		/*
+		 * Shouldn't happen; the only caller of this method which
+		 * allows splits is from recovery, which is single
+		 * threaded.
+		 */
+		throw new DatabaseException(e);
+	    }
+	} else {
+	    location.bin = (BIN) searchSubTree
+		(dupRoot, location.lnKey, SearchType.NORMAL,
+		 ln.getNodeId(), null, cacheMode);
+	}
+
+	/* Search for LN w/exact key. */
+	location.index = location.bin.findEntry
+	    (location.lnKey, indicateIfExact, exactSearch);
+	boolean match;
+	if (findDeletedEntries) {
+	    match = (location.index >= 0 &&
+		     (location.index & IN.EXACT_MATCH) != 0);
+	    location.index &= ~IN.EXACT_MATCH;
+	} else {
+	    match = (location.index >= 0);
+	}
+
+	if (match) {
+	    location.childLsn = location.bin.getLsn(location.index);
+	    return true;
+	} else {
+	    return false;
+	}
+    }
+
+    /**
+     * Return a reference to the adjacent BIN.
+     *
+     * @param bin The BIN to find the next BIN for.  This BIN is latched.
+     * @param traverseWithinDupTree if true, only search within the dup tree
+     * and return null when the traversal runs out of duplicates.
+     *
+     * @return The next BIN, or null if there are no more.  The returned node
+     * is latched and the caller must release it.  If null is returned, the
+     * argument BIN remains latched.
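+     *
+     * <p>Illustrative forward walk over the leaf level (a sketch; assumes,
+     * per the contract above, that getNextBin releases the argument BIN
+     * when it returns a non-null result):
+     * <pre>
+     *     BIN bin = (BIN) tree.getFirstNode(cacheMode); // latched, or null
+     *     while (bin != null) {
+     *         // ... examine bin ...
+     *         BIN next = tree.getNextBin(bin, false, cacheMode);
+     *         if (next == null) {
+     *             bin.releaseLatch(); // bin stays latched on null return
+     *             break;
+     *         }
+     *         bin = next;
+     *     }
+     * </pre>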
+     */
+    public BIN getNextBin(BIN bin,
+                          boolean traverseWithinDupTree,
+                          CacheMode cacheMode)
+        throws DatabaseException {
+
+        return getNextBinInternal(traverseWithinDupTree, bin, true, cacheMode);
+    }
+
+    /**
+     * Return a reference to the previous BIN.
+     *
+     * @param bin The BIN to find the previous BIN for.  This BIN is latched.
+     * @param traverseWithinDupTree if true, only search within the dup tree
+     * and return null when the traversal runs out of duplicates.
+     *
+     * @return The previous BIN, or null if there are no more.  The returned
+     * node is latched and the caller must release it.  If null is returned,
+     * the argument bin remains latched.
+     */
+    public BIN getPrevBin(BIN bin,
+                          boolean traverseWithinDupTree,
+                          CacheMode cacheMode)
+        throws DatabaseException {
+
+        return getNextBinInternal(traverseWithinDupTree, bin,
+                                  false, cacheMode);
+    }
+
+    /**
+     * Helper routine for the above two routines to iterate through BINs.
+     */
+    private BIN getNextBinInternal(boolean traverseWithinDupTree,
+				   BIN bin,
+				   boolean forward,
+                                   CacheMode cacheMode)
+        throws DatabaseException {
+
+        /*
+         * Use the rightmost key (for a forward-progressing cursor) or the
+         * leftmost key (for a backward-progressing cursor) as the idKey.  The
+         * reason is that the BIN may get split while finding the next BIN, so
+         * it's not safe to take the BIN's identifierKey entry.  If the BIN
+         * gets split, the right (left) most key will still be on the
+         * resultant node.  The exception to this is that if there are no
+         * entries, we just use the identifier key.
+         */
+        byte[] idKey = null;
+
+	if (bin.getNEntries() == 0) {
+	    idKey = bin.getIdentifierKey();
+	} else if (forward) {
+	    idKey = bin.getKey(bin.getNEntries() - 1);
+	} else {
+	    idKey = bin.getKey(0);
+	}
+
+        IN next = bin;
+	boolean nextIsLatched = false;
+
+        assert LatchSupport.countLatchesHeld() == 1:
+            LatchSupport.latchesHeldToString();
+
+        /*
+         * Ascend the tree until we find a level that still has nodes to the
+         * right (or left if !forward) of the path that we're on.  If we reach
+         * the root level, we're done. If we're searching within a duplicate
+         * tree, stay within the tree.
+         */
+        IN parent = null;
+        IN nextIN = null;
+	boolean nextINIsLatched = false;
+        try {
+            while (true) {
+
+                /*
+                 * Move up a level from where we are now and check to see if we
+                 * reached the top of the tree.
+                 */
+                SearchResult result = null;
+                if (!traverseWithinDupTree) {
+                    /* Operating on a regular tree -- get the parent. */
+		    nextIsLatched = false;
+		    result = getParentINForChildIN
+			(next, true /*requireExactMatch*/, cacheMode);
+                    if (result.exactParentFound) {
+                        parent = result.parent;
+                    } else {
+                        /* We've reached the root of the tree. */
+                        assert (LatchSupport.countLatchesHeld() == 0):
+                            LatchSupport.latchesHeldToString();
+                        return null;
+                    }
+                } else {
+                    /* This is a duplicate tree; stay within the tree. */
+                    if (next.isRoot()) {
+                        /* We've reached the top of the dup tree. */
+                        next.releaseLatch();
+			nextIsLatched = false;
+                        return null;
+                    } else {
+			nextIsLatched = false;
+			result = getParentINForChildIN
+			    (next, true /*requireExactMatch*/, cacheMode);
+                        if (result.exactParentFound) {
+                            parent = result.parent;
+                        } else {
+                            return null;
+                        }
+                    }
+                }
+
+                assert (LatchSupport.countLatchesHeld() == 1) :
+                    LatchSupport.latchesHeldToString();
+
+                /*
+                 * Figure out which entry of the parent we are in.  Add
+                 * (subtract) 1 to move to the next (previous) one and check
+                 * that we're still pointing to a valid child.  Don't just use
+                 * the result of the findEntry call in getParentINForChildIN,
+                 * because we want to use our explicitly chosen idKey.
+                 */
+                int index = parent.findEntry(idKey, false, false);
+                boolean moreEntriesThisBin = false;
+                if (forward) {
+                    index++;
+                    if (index < parent.getNEntries()) {
+                        moreEntriesThisBin = true;
+                    }
+                } else {
+                    if (index > 0) {
+                        moreEntriesThisBin = true;
+                    }
+                    index--;
+                }
+
+                if (moreEntriesThisBin) {
+
+                    /*
+                     * There are more entries to the right (or left if
+                     * !forward) of the current path in parent.  Get the
+                     * entry, and then descend down the left most (or right
+                     * most) path to a BIN.
+                     */
+                    nextIN = (IN) parent.fetchTarget(index);
+                    nextIN.latch(cacheMode);
+		    nextINIsLatched = true;
+
+                    assert (LatchSupport.countLatchesHeld() == 2):
+                        LatchSupport.latchesHeldToString();
+
+                    if (nextIN instanceof BIN) {
+                        /* We landed at a leaf (i.e. a BIN). */
+                        parent.releaseLatch();
+			parent = null; // to avoid falsely unlatching parent
+                        TreeWalkerStatsAccumulator treeStatsAccumulator =
+                            getTreeStatsAccumulator();
+                        if (treeStatsAccumulator != null) {
+                            nextIN.accumulateStats(treeStatsAccumulator);
+                        }
+
+                        return (BIN) nextIN;
+                    } else {
+
+                        /*
+                         * We landed at an IN.  Descend down to the appropriate
+                         * leaf (i.e. BIN) node.
+                         */
+			IN ret = searchSubTree(nextIN, null,
+					       (forward ?
+						SearchType.LEFT :
+						SearchType.RIGHT),
+					       -1,
+					       null,
+                                               cacheMode);
+			nextINIsLatched = false;
+                        parent.releaseLatch();
+			parent = null; // to avoid falsely unlatching parent
+
+                        assert LatchSupport.countLatchesHeld() == 1:
+                            LatchSupport.latchesHeldToString();
+
+                        if (ret instanceof BIN) {
+                            return (BIN) ret;
+                        } else {
+                            throw new InconsistentNodeException
+                                ("subtree did not have a BIN for leaf");
+                        }
+                    }
+                }
+
+		/* Nothing at this level.  Ascend to a higher level. */
+                next = parent;
+		nextIsLatched = true;
+		parent = null; // to avoid falsely unlatching parent below
+            }
+        } catch (DatabaseException e) {
+
+	    if (next != null &&
+		nextIsLatched) {
+		next.releaseLatch();
+	    }
+
+            if (parent != null) {
+                parent.releaseLatch();
+            }
+
+	    if (nextIN != null &&
+		nextINIsLatched) {
+		nextIN.releaseLatch();
+	    }
+
+            throw e;
+        }
+    }
+
+    /**
+     * Split the root of the tree.
+     */
+    private void splitRoot(CacheMode cacheMode)
+        throws DatabaseException {
+
+        /*
+         * Create a new root IN, insert the current root IN into it, and then
+         * call split.
+         */
+        EnvironmentImpl env = database.getDbEnvironment();
+        LogManager logManager = env.getLogManager();
+        INList inMemoryINs = env.getInMemoryINs();
+
+        IN curRoot = null;
+        curRoot = (IN) root.fetchTarget(database, null);
+        curRoot.latch(cacheMode);
+        long curRootLsn = 0;
+        long logLsn = 0;
+        IN newRoot = null;
+        try {
+
+            /*
+             * Make a new root IN, giving it an id key from the previous root.
+             */
+            byte[] rootIdKey = curRoot.getKey(0);
+            newRoot = new IN(database, rootIdKey, maxMainTreeEntriesPerNode,
+			     curRoot.getLevel() + 1);
+	    newRoot.latch(cacheMode);
+            newRoot.setIsRoot(true);
+            curRoot.setIsRoot(false);
+
+            /*
+             * Make the new root IN point to the old root IN. Log the old root
+             * provisionally, because we modified it so it's not the root
+             * anymore, then log the new root. We are guaranteed to be able to
+             * insert entries, since we just made this root.
+             */
+            try {
+                curRootLsn =
+                    curRoot.optionalLogProvisional(logManager, newRoot);
+                boolean insertOk = newRoot.insertEntry
+                    (new ChildReference(curRoot, rootIdKey, curRootLsn));
+                assert insertOk;
+
+                logLsn = newRoot.optionalLog(logManager);
+            } catch (DatabaseException e) {
+                /* Something went wrong when we tried to log. */
+                curRoot.setIsRoot(true);
+                throw e;
+            }
+            inMemoryINs.add(newRoot);
+
+            /*
+             * Make the tree's root reference point to this new node. Now the
+             * MapLN is logically dirty, but the change hasn't been logged.  Be
+             * sure to flush the MapLN if we ever evict the root.
+             */
+            root.setTarget(newRoot);
+            root.updateLsnAfterOptionalLog(database, logLsn);
+            curRoot.split(newRoot, 0, maxMainTreeEntriesPerNode, cacheMode);
+            root.setLsn(newRoot.getLastFullVersion());
+
+        } finally {
+	    /* FindBugs ignore possible null pointer dereference of newRoot. */
+	    newRoot.releaseLatch();
+            curRoot.releaseLatch();
+        }
+        treeStats.nRootSplits++;
+        traceSplitRoot(Level.FINE, TRACE_ROOT_SPLIT, newRoot, logLsn,
+                       curRoot, curRootLsn);
+    }
+
+    /**
+     * Search the tree, starting at the root.  Depending on search type either
+     * search using key, or search all the way down the right or left sides.
+     * Stop the search either when the bottom of the tree is reached, or a node
+     * matching nid is found (see below) in which case that node's parent is
+     * returned.
+     *
+     * Preemptive splitting is not done during the search.
+     *
+     * @param key - the key to search for, or null if searchType is LEFT or
+     * RIGHT.
+     *
+     * @param searchType - The type of tree search to perform.  NORMAL means
+     * we're searching for key in the tree.  LEFT/RIGHT means we're descending
+     * down the left or right side, resp.  DELETE means we're descending the
+     * tree and will return the lowest node in the path that has more than
+     * one entry.
+     *
+     * @param nid - The nodeid to search for in the tree.  If found, returns
+     * its parent.  If the nodeid of the root is passed, null is returned.
+     *
+     * @param binBoundary - If non-null, information is returned about whether
+     * the BIN found is the first or last BIN in the database.
+     *
+     * @return - the Node that matches the criteria, if any.  This is the node
+     * that is farthest down the tree with a match.  Returns null if the root
+     * is null.  The returned node is latched (unless it's null) and it is the
+     * caller's responsibility to unlatch it.  Only INs and BINs are returned,
+     * not LNs.  In a NORMAL search, it is the caller's responsibility to do
+     * the findEntry() call on the key and BIN to locate the entry that
+     * matches key.
+     */
+    public IN search(byte[] key,
+                     SearchType searchType,
+                     long nid,
+                     BINBoundary binBoundary,
+                     CacheMode cacheMode)
+        throws DatabaseException {
+
+        IN rootIN = getRootIN(cacheMode);
+
+        if (rootIN != null) {
+            return searchSubTree
+                (rootIN, key, searchType, nid, binBoundary, cacheMode);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Do a key-based search, permitting preemptive splits. Returns the
+     * target node's parent.
+     */
+    public IN searchSplitsAllowed(byte[] key, long nid, CacheMode cacheMode)
+        throws DatabaseException {
+
+        IN insertTarget = null;
+        while (insertTarget == null) {
+            rootLatch.acquireShared();
+            boolean rootLatched = true;
+	    boolean rootLatchedExclusive = false;
+	    boolean rootINLatched = false;
+	    boolean success = false;
+            IN rootIN = null;
+	    try {
+		while (true) {
+		    if (rootExists()) {
+			rootIN = (IN) root.fetchTarget(database, null);
+
+			/* Check if root needs splitting. */
+			if (rootIN.needsSplitting()) {
+			    if (!rootLatchedExclusive) {
+				rootIN = null;
+				rootLatch.release();
+				rootLatch.acquireExclusive();
+				rootLatchedExclusive = true;
+				continue;
+			    }
+			    splitRoot(cacheMode);
+
+			    /*
+			     * We can't hold any latches while we lock.  If the
+			     * root splits again between latch release and
+			     * DbTree.db lock, no problem.  The latest root
+			     * will still get written out.
+			     */
+			    rootLatch.release();
+			    rootLatched = false;
+			    EnvironmentImpl env = database.getDbEnvironment();
+			    env.getDbTree().optionalModifyDbRoot(database);
+			    rootLatched = true;
+			    rootLatch.acquireExclusive();
+			    rootIN = (IN) root.fetchTarget(database, null);
+			}
+			rootIN.latch(cacheMode);
+			rootINLatched = true;
+		    }
+		    break;
+		}
+		success = true;
+	    } finally {
+		if (!success && rootINLatched) {
+		    rootIN.releaseLatch();
+		}
+		if (rootLatched) {
+		    rootLatch.release();	
+		}
+	    }
+
+            /* Don't loop forever if the root is null. [#13897] */
+            if (rootIN == null) {
+                break;
+            }
+
+            try {
+		assert rootINLatched;
+                insertTarget =
+		    searchSubTreeSplitsAllowed(rootIN, key, nid, cacheMode);
+            } catch (SplitRequiredException e) {
+
+                /*
+                 * The last slot in the root was used at the point when this
+                 * thread released the rootIN latch in order to force splits.
+                 * Retry. SR [#11147].
+                 */
+                continue;
+	    }
+        }
+
+        return insertTarget;
+    }
+
+    /*
+     * Singleton class to indicate that root IN needs to be relatched for
+     * exclusive access due to a fetch occurring.
+     */
+    @SuppressWarnings("serial")
+    private static class RelatchRequiredException extends DatabaseException {
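+        /*
+         * This exception is preallocated as a singleton and used purely for
+         * control flow, so stack trace capture is skipped to make throwing
+         * it cheap (a trace for a shared instance would be meaningless).
+         */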
+        @Override
+	public synchronized Throwable fillInStackTrace() {
+	    return this;
+	}
+    }
+
+    private static RelatchRequiredException relatchRequiredException =
+	new RelatchRequiredException();
+
+    /**
+     * Wrapper for searchSubTreeInternal that does a restart if a
+     * RelatchRequiredException is thrown (i.e. a relatch of the root is
+     * needed).
+     */
+    public IN searchSubTree(IN parent,
+			    byte[] key,
+			    SearchType searchType,
+                            long nid,
+                            BINBoundary binBoundary,
+                            CacheMode cacheMode)
+        throws DatabaseException {
+
+	/*
+	 * At most two iterations are required: the first with the root
+	 * latched shared, the second with it latched exclusive.
+	 */
+	for (int i = 0; i < 2; i++) {
+	    try {
+		return searchSubTreeInternal(parent, key, searchType, nid,
+					     binBoundary, cacheMode);
+	    } catch (RelatchRequiredException RRE) {
+		parent = getRootINLatchedExclusive(cacheMode);
+	    }
+	}
+
+	throw new DatabaseException
+	    ("searchSubTreeInternal should have completed in two tries");
+    }
+
+    /**
+     * Searches a portion of the tree starting at parent using key.  If during
+     * the search a node matching a non-null nid argument is found, its parent
+     * is returned.  If searchType is NORMAL, then key must be supplied to
+     * guide the search.  If searchType is LEFT (or RIGHT), then the tree is
+     * searched down the left (or right) side to find the first (or last) leaf,
+     * respectively.
+     * <p>
+     * Enters with parent latched, assuming it's not null.  Exits with the
+     * return value latched, assuming it's not null.
+     * <p>
+     * @param parent - the root of the subtree to start the search at.  This
+     * node should be latched by the caller and will be unlatched prior to
+     * return.
+     *
+     * @param key - the key to search for, unless searchType is LEFT or RIGHT
+     *
+     * @param searchType - NORMAL means search using key and, optionally, nid.
+     *                     LEFT means find the first (leftmost) leaf
+     *                     RIGHT means find the last (rightmost) leaf
+     *
+     * @param nid - The nodeid to search for in the tree.  If found, returns
+     * its parent.  If the nodeid of the root is passed, null is returned.
+     * Pass -1 if no nodeid based search is desired.
+     *
+     * @return - the node matching the argument criteria, or null.  The node is
+     * latched and must be unlatched by the caller.  The parent argument and
+     * any other nodes that are latched during the search are unlatched prior
+     * to return.
+     *
+     * @throws RelatchRequiredException if the root node (parent) must be
+     * relatched exclusively because a null target was encountered (i.e. a
+     * fetch must be performed on parent's child and the parent is latched
+     * shared).
+     */
+    private IN searchSubTreeInternal(IN parent,
+				     byte[] key,
+				     SearchType searchType,
+				     long nid,
+				     BINBoundary binBoundary,
+				     CacheMode cacheMode)
+        throws DatabaseException {
+
+        /* Return null if we're passed a null arg. */
+        if (parent == null) {
+            return null;
+        }
+
+        if ((searchType == SearchType.LEFT ||
+             searchType == SearchType.RIGHT) &&
+            key != null) {
+
+            /*
+	     * If caller is asking for a right or left search, they shouldn't
+	     * be passing us a key.
+	     */
+            throw new IllegalArgumentException
+                ("searchSubTree passed key and left/right search");
+        }
+
+        assert parent.isLatchOwnerForRead();
+
+        if (parent.getNodeId() == nid) {
+            parent.releaseLatch();
+            return null;
+        }
+
+        if (binBoundary != null) {
+            binBoundary.isLastBin = true;
+            binBoundary.isFirstBin = true;
+        }
+
+        int index;
+        IN child = null;
+	IN grandParent = null;
+	boolean childIsLatched = false;
+	boolean grandParentIsLatched = false;
+	boolean maintainGrandParentLatches = !parent.isLatchOwnerForWrite();
+
+        TreeWalkerStatsAccumulator treeStatsAccumulator =
+            getTreeStatsAccumulator();
+
+        try {
+            do {
+                if (treeStatsAccumulator != null) {
+                    parent.accumulateStats(treeStatsAccumulator);
+                }
+
+                if (parent.getNEntries() == 0) {
+                    /* No more children, can't descend anymore. */
+                    return parent;
+                } else if (searchType == SearchType.NORMAL) {
+                    /* Look for the entry matching key in the current node. */
+                    index = parent.findEntry(key, false, false);
+                } else if (searchType == SearchType.LEFT) {
+                    /* Left search, always take the 0th entry. */
+                    index = 0;
+                } else if (searchType == SearchType.RIGHT) {
+                    /* Right search, always take the highest entry. */
+                    index = parent.getNEntries() - 1;
+                } else {
+                    throw new IllegalArgumentException
+                        ("Invalid value of searchType: " + searchType);
+                }
+
+                assert index >= 0;
+
+                if (binBoundary != null) {
+                    if (index != parent.getNEntries() - 1) {
+                        binBoundary.isLastBin = false;
+                    }
+                    if (index != 0) {
+                        binBoundary.isFirstBin = false;
+                    }
+                }
+
+		/*
+		 * Get the child node.  If target is null, and we don't have
+		 * parent latched exclusively, then we need to relatch this
+		 * parent so that we can fill in the target.  Fetching a target
+		 * is a write to a node so it must be exclusively latched.
+		 * Once we have the parent relatched exclusively, then we can
+		 * release the grand parent.
+		 */
+		if (maintainGrandParentLatches &&
+		    parent.getTarget(index) == null &&
+		    !parent.isAlwaysLatchedExclusively()) {
+
+		    if (grandParent == null) {
+
+			/*
+			 * grandParent is null which implies parent is the root
+			 * so throw RelatchRequiredException.
+			 */
+			throw relatchRequiredException;
+		    } else {
+			/* Release parent shared and relatch exclusive. */
+			parent.releaseLatch();
+			parent.latch(cacheMode);
+		    }
+
+		    /*
+		     * Once parent has been re-latched exclusive we can release
+		     * grandParent now (sooner), rather than after the
+		     * fetchTarget (later).
+		     */
+		    if (grandParent != null) {
+			grandParent.releaseLatch();
+			grandParentIsLatched = false;
+			grandParent = null;
+		    }
+		}
+                child = (IN) parent.fetchTarget(index);
+
+		/*
+		 * We know we're done with grandParent for sure, so release
+		 * now.
+		 */
+		if (grandParent != null) {
+		    grandParent.releaseLatch();
+		    grandParentIsLatched = false;
+		}
+
+		/* See if we're even using shared latches. */
+		if (maintainGrandParentLatches) {
+		    child.latchShared(cacheMode);
+		} else {
+		    child.latch(cacheMode);
+		}
+		childIsLatched = true;
+
+                if (treeStatsAccumulator != null) {
+                    child.accumulateStats(treeStatsAccumulator);
+                }
+
+                /*
+                 * If this child matches nid, then stop the search and return
+                 * the parent.
+                 */
+                if (child.getNodeId() == nid) {
+                    child.releaseLatch();
+		    childIsLatched = false;
+                    return parent;
+                }
+
+                /* Continue down a level */
+		if (maintainGrandParentLatches) {
+		    grandParent = parent;
+		    grandParentIsLatched = true;
+		} else {
+		    parent.releaseLatch();
+		}
+                parent = child;
+            } while (!(parent instanceof BIN));
+
+            return child;
+        } catch (Exception t) {
+
+            /*
+             * In [#14903] we encountered a latch exception below and the
+             * original exception t was lost.  Print the stack trace and
+             * rethrow the original exception if this happens again, to get
+             * more information about the problem.
+             */
+            try {
+                if (child != null &&
+                    childIsLatched) {
+                    child.releaseLatch();
+                }
+
+                if (parent != child) {
+                    parent.releaseLatch();
+                }
+            } catch (Exception t2) {
+                t2.printStackTrace();
+            }
+
+            if (t instanceof DatabaseException) {
+                /* don't re-wrap a DatabaseException; we may need its type. */
+                throw (DatabaseException) t;
+            } else {
+                throw new DatabaseException(t);
+            }
+        } finally {
+            if (grandParent != null &&
+		grandParentIsLatched) {
+                grandParent.releaseLatch();
+		grandParentIsLatched = false;
+            }
+	}
+    }
+
+    /**
+     * Search down the tree using a key, but instead of returning the BIN that
+     * houses that key, find the point where we can detach a deletable
+     * subtree. A deletable subtree is a branch where each IN has one child,
+     * and the bottom BIN has no entries and no resident cursors. That point
+     * can be found by saving a pointer to the lowest node in the path with
+     * more than one entry.
+     *
+     *              INa
+     *             /   \
+     *          INb    INc
+     *          |       |
+     *         INd     ..
+     *         / \
+     *      INe  ..
+     *       |
+     *     BINx (suspected of being empty)
+     *
+     * In this case, we'd like to prune off the subtree headed by INe. INd
+     * is the parent of this deletable subtree. As we descend, we must keep
+     * latches for all the nodes that will be logged. In this case, we
+     * will need to keep INa, INb and INd latched when we return from this
+     * method.
+     *
+     * The method returns a list of parent/child/index structures. In this
+     * example, the list will hold:
+     *  INa/INb/index
+     *  INb/INd/index
+     *  INd/INe/index
+     * Every node is latched, and every node except for the bottom most child
+     * (INe) must be logged.
+     */
+    public void searchDeletableSubTree(IN parent,
+                                       byte[] key,
+                                       ArrayList<SplitInfo> nodeLadder)
+        throws DatabaseException,
+               NodeNotEmptyException,
+               CursorsExistException {
+
+        assert parent != null;
+        assert key != null;
+        assert parent.isLatchOwnerForWrite();
+
+        int index;
+        IN child = null;
+
+        /* Save the lowest IN in the path that has multiple entries. */
+        IN lowestMultipleEntryIN = null;
+
+        do {
+            if (parent.getNEntries() == 0) {
+                break;
+            }
+
+            /* Remember if this is the lowest multiple point. */
+            if (parent.getNEntries() > 1) {
+                lowestMultipleEntryIN = parent;
+            }
+
+            index = parent.findEntry(key, false, false);
+            assert index >= 0;
+
+            /* Get the child node that matches. */
+            child = (IN) parent.fetchTarget(index);
+            child.latch(CacheMode.UNCHANGED);
+            nodeLadder.add(new SplitInfo(parent, child, index));
+
+            /* Continue down a level */
+            parent = child;
+        } while (!(parent instanceof BIN));
+
+        /*
+         * See if there is a reason we can't delete this BIN -- i.e.
+         * new items have been inserted, or a cursor exists on it.
+         */
+        if ((child != null) && (child instanceof BIN)) {
+            if (child.getNEntries() != 0) {
+                throw NodeNotEmptyException.NODE_NOT_EMPTY;
+            }
+
+            /*
+             * This case can happen if we are keeping a cursor on an empty
+             * BIN as we traverse.
+             */
+            if (((BIN) child).nCursors() > 0) {
+                throw CursorsExistException.CURSORS_EXIST;
+            }
+        }
+
+        if (lowestMultipleEntryIN != null) {
+
+            /*
+             * Release all nodes up to the pair that holds the detach
+             * point. We won't be needing those nodes, since they'll be
+             * pruned and won't need to be updated.
+             */
+            ListIterator<SplitInfo> iter = nodeLadder.listIterator(nodeLadder.size());
+            while (iter.hasPrevious()) {
+                SplitInfo info = iter.previous();
+                if (info.parent == lowestMultipleEntryIN) {
+                    break;
+                } else {
+                    info.child.releaseLatch();
+                    iter.remove();
+                }
+            }
+        } else {
+
+            /*
+             * We actually have to prune off the entire tree. Release
+             * all latches, and clear the node ladder.
+             */
+            releaseNodeLadderLatches(nodeLadder);
+            nodeLadder.clear();
+        }
+    }
+
+    /**
+     * Search the portion of the tree starting at the parent, permitting
+     * preemptive splits.
+     *
+     * When this returns, parent will be unlatched unless parent is the
+     * returned IN.
+     */
+    private IN searchSubTreeSplitsAllowed(IN parent,
+                                          byte[] key,
+                                          long nid,
+                                          CacheMode cacheMode)
+        throws DatabaseException, SplitRequiredException {
+
+        if (parent != null) {
+
+            /*
+             * Search downward until we hit a node that needs a split. In that
+             * case, retreat to the top of the tree and force splits downward.
+             */
+            while (true) {
+                try {
+                    return searchSubTreeUntilSplit
+                        (parent, key, nid, cacheMode);
+                } catch (SplitRequiredException e) {
+                    /* SR [#11144]*/
+                    assert TestHookExecute.doHookIfSet(waitHook);
+
+                    /*
+                     * forceSplit may itself throw SplitRequiredException if it
+                     * finds that the parent doesn't have room to hold an extra
+                     * entry.  Allow that exception to propagate up to a place
+                     * where it's safe to split the parent.
+                     */
+                    parent = forceSplit(parent, key, cacheMode);
+                }
+            }
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Search the subtree, but throw an exception when we see a node
+     * that has to be split.
+     *
+     * When this returns, parent will be unlatched unless parent is the
+     * returned IN.
+     */
+    private IN searchSubTreeUntilSplit(IN parent,
+                                       byte[] key,
+                                       long nid,
+                                       CacheMode cacheMode)
+        throws DatabaseException, SplitRequiredException {
+
+	assert parent.isLatchOwnerForWrite();
+
+        if (parent.getNodeId() == nid) {
+            parent.releaseLatch();
+            return null;
+        }
+
+        int index;
+        IN child = null;
+	boolean childIsLatched = false;
+	boolean success = false;
+
+        try {
+            do {
+                if (parent.getNEntries() == 0) {
+                    /* No more children, can't descend anymore. */
+		    success = true;
+                    return parent;
+                } else {
+                    /* Look for the entry matching key in the current node. */
+                    index = parent.findEntry(key, false, false);
+                }
+
+                assert index >= 0;
+
+                /* Get the child node that matches. */
+		child = (IN) parent.fetchTarget(index);
+                child.latch(cacheMode);
+		childIsLatched = true;
+
+                /* Throw if we need to split. */
+                if (child.needsSplitting()) {
+		    /* Let the finally clean up child and parent latches. */
+                    throw splitRequiredException;
+                }
+
+                /*
+                 * If this child matches nid, then stop the search and return
+                 * the parent.
+                 */
+                if (child.getNodeId() == nid) {
+                    child.releaseLatch();
+		    childIsLatched = false;
+		    success = true;
+                    return parent;
+                }
+
+                /* Continue down a level */
+                parent.releaseLatch();
+                parent = child;
+            } while (!(parent instanceof BIN));
+
+	    success = true;
+            return parent;
+        } finally {
+	    if (!success) {
+		if (child != null &&
+		    childIsLatched) {
+		    child.releaseLatch();
+		}
+		if (parent != child) {
+		    parent.releaseLatch();
+		}
+	    }
+        }
+    }
+
+    /**
+     * Do preemptive splitting in the subtree topped by the "parent" node.
+     * Search down the tree until we get to the BIN level, and split any nodes
+     * that fit the splittable requirement.
+     *
+     * Note that more than one node in the path may be splittable. For example,
+     * a tree might have a level 2 IN and a BIN that are both splittable, both
+     * of which would be encountered by the same insert operation.
+     *
+     * @return the parent to use for retrying the search, which may be
+     * different than the parent parameter passed if the root IN has been
+     * evicted.
+     */
+    private IN forceSplit(IN parent, byte[] key, CacheMode cacheMode)
+        throws DatabaseException, SplitRequiredException {
+
+        ArrayList<SplitInfo> nodeLadder = new ArrayList<SplitInfo>();
+
+	boolean allLeftSideDescent = true;
+	boolean allRightSideDescent = true;
+        int index;
+        IN child = null;
+        IN originalParent = parent;
+        ListIterator<SplitInfo> iter = null;
+
+        boolean isRootLatched = false;
+        boolean success = false;
+        try {
+
+            /*
+             * Latch the root in order to update the root LSN when we're done.
+             * Latch order must be: root, root IN.  We'll leave this method
+             * with the original parent latched.
+             */
+            if (originalParent.isDbRoot()) {
+                rootLatch.acquireExclusive();
+                isRootLatched = true;
+                /* The root IN may have been evicted. [#16173] */
+                parent = (IN) root.fetchTarget(database, null);
+                originalParent = parent;
+            }
+            originalParent.latch(cacheMode);
+
+            /*
+             * Another thread may have crept in and
+             *  - used the last free slot in the parent, making it impossible
+             *    to correctly propagate the split.
+             *  - actually split the root, in which case we may be looking at
+             *    the wrong subtree for this search.
+             * If so, throw and retry from above. SR [#11144]
+             */
+            if (originalParent.needsSplitting() || !originalParent.isRoot()) {
+                throw splitRequiredException;
+            }
+
+            /*
+             * Search downward to the BIN level, saving the information
+             * needed to do a split if necessary.
+             */
+            do {
+                if (parent.getNEntries() == 0) {
+                    /* No more children, can't descend anymore. */
+                    break;
+                } else {
+                    /* Look for the entry matching key in the current node. */
+                    index = parent.findEntry(key, false, false);
+                    if (index != 0) {
+                        allLeftSideDescent = false;
+                    }
+                    if (index != (parent.getNEntries() - 1)) {
+                        allRightSideDescent = false;
+                    }
+                }
+
+                assert index >= 0;
+
+                /*
+                 * Get the child node that matches. We only need to work on
+                 * nodes in residence.
+                 */
+                child = (IN) parent.getTarget(index);
+                if (child == null) {
+                    break;
+                } else {
+                    child.latch(cacheMode);
+                    nodeLadder.add(new SplitInfo(parent, child, index));
+                }
+
+                /* Continue down a level */
+                parent = child;
+            } while (!(parent instanceof BIN));
+
+            boolean startedSplits = false;
+            LogManager logManager =
+                database.getDbEnvironment().getLogManager();
+
+            /*
+             * Process the accumulated nodes from the bottom up. Split each
+             * node if required. If the node should not split, we check if
+             * there have been any splits on the ladder yet. If there are none,
+             * we merely release the node, since there is no update.  If splits
+             * have started, we need to propagate new LSNs upward, so we log
+             * the node and update its parent.
+             *
+             * Start this iterator at the end of the list.
+             */
+            iter = nodeLadder.listIterator(nodeLadder.size());
+            long lastParentForSplit = -1;
+            while (iter.hasPrevious()) {
+                SplitInfo info = iter.previous();
+
+		/*
+		 * Get rid of current entry on nodeLadder so it doesn't get
+		 * unlatched in the finally clause.
+		 */
+                iter.remove();
+                child = info.child;
+                parent = info.parent;
+                index = info.index;
+
+                /* Opportunistically split the node if it is full. */
+                if (child.needsSplitting()) {
+		    int maxEntriesPerNode = (child.containsDuplicates() ?
+					     maxDupTreeEntriesPerNode :
+					     maxMainTreeEntriesPerNode);
+                    if (allLeftSideDescent || allRightSideDescent) {
+                        child.splitSpecial(parent,
+                                           index,
+                                           maxEntriesPerNode,
+                                           key,
+                                           allLeftSideDescent,
+                                           cacheMode);
+                    } else {
+                        child.split(parent, index, maxEntriesPerNode,
+                                    cacheMode);
+                    }
+                    lastParentForSplit = parent.getNodeId();
+                    startedSplits = true;
+
+                    /*
+                     * If the DB root IN was logged, update the DB tree's child
+                     * reference.  Now the MapLN is logically dirty, but the
+                     * change hasn't been logged. Set the rootIN to be dirty
+                     * again, to force flushing the rootIN and mapLN in the
+                     * next checkpoint. Be sure to flush the MapLN
+                     * if we ever evict the root.
+                     */
+                    if (parent.isDbRoot()) {
+                        assert isRootLatched;
+                        root.setLsn(parent.getLastFullVersion());
+                        parent.setDirty(true);
+                    }
+                } else {
+                    if (startedSplits) {
+                        long newLsn = 0;
+
+                        /*
+                         * If this child was the parent of a split, it's
+                         * already logged by the split call. We just need to
+                         * propagate the logging upwards. If this child is just
+                         * a link in the chain upwards, log it.
+                         */
+                        if (lastParentForSplit == child.getNodeId()) {
+                            newLsn = child.getLastFullVersion();
+                        } else {
+                            newLsn = child.optionalLog(logManager);
+                        }
+                        parent.updateEntry(index, newLsn);
+                    }
+                }
+                child.releaseLatch();
+                child = null;
+            }
+            success = true;
+        } finally {
+            if (!success) {
+
+		/*
+		 * This will only happen if an exception is thrown and we leave
+		 * things in an intermediate state.
+		 */
+                if (child != null) {
+		    child.releaseLatch();
+		}
+
+		if (nodeLadder.size() > 0) {
+		    iter = nodeLadder.listIterator(nodeLadder.size());
+		    while (iter.hasPrevious()) {
+			SplitInfo info = iter.previous();
+			info.child.releaseLatch();
+		    }
+                }
+
+                originalParent.releaseLatch();
+            }
+
+            if (isRootLatched) {
+                rootLatch.release();
+            }
+        }
+        return originalParent;
+    }
+
+    /**
+     * Helper to obtain the root IN with shared root latching.  Optionally
+     * updates the generation of the root when latching it.
+     */
+    public IN getRootIN(CacheMode cacheMode)
+        throws DatabaseException {
+
+	return getRootINInternal(cacheMode, false/*exclusive*/);
+    }
+
+    /**
+     * Helper to obtain the root IN with exclusive root latching.  Optionally
+     * updates the generation of the root when latching it.
+     */
+    public IN getRootINLatchedExclusive(CacheMode cacheMode)
+        throws DatabaseException {
+
+	return getRootINInternal(cacheMode, true/*exclusive*/);
+    }
+
+    private IN getRootINInternal(CacheMode cacheMode, boolean exclusive)
+        throws DatabaseException {
+
+	rootLatch.acquireShared();
+        IN rootIN = null;
+        try {
+            if (rootExists()) {
+		rootIN = (IN) root.fetchTarget(database, null);
+		if (exclusive) {
+		    rootIN.latch(cacheMode);
+		} else {
+		    rootIN.latchShared(cacheMode);
+		}
+            }
+            return rootIN;
+        } finally {
+	    rootLatch.release();
+        }
+    }
+
+    public IN getResidentRootIN(boolean latched)
+	throws DatabaseException {
+
+        IN rootIN = null;
+	if (rootExists()) {
+	    rootIN = (IN) root.getTarget();
+	    if (rootIN != null && latched) {
+		rootIN.latchShared(CacheMode.UNCHANGED);
+	    }
+	}
+	return rootIN;
+    }
+
+    /**
+     * Inserts a new LN into the tree.
+     * @param ln The LN to insert into the tree.
+     * @param key Key value for the node
+     * @param allowDuplicates whether to allow duplicates to be inserted
+     * @param cursor the cursor to update to point to the newly inserted
+     * key/data pair, or null if no cursor should be updated.
+     * @return true if LN was inserted, false if it was a duplicate
+     * duplicate or if an attempt was made to insert a duplicate when
+     * allowDuplicates was false.
+     */
+    public boolean insert(LN ln,
+                          byte[] key,
+                          boolean allowDuplicates,
+                          CursorImpl cursor,
+                          LockResult lnLock,
+                          ReplicationContext repContext)
+        throws DatabaseException {
+
+        validateInsertArgs(allowDuplicates);
+
+        EnvironmentImpl env = database.getDbEnvironment();
+        LogManager logManager = env.getLogManager();
+        INList inMemoryINs = env.getInMemoryINs();
+
+        /* Find and latch the relevant BIN. */
+        BIN bin = null;
+        try {
+            bin = findBinForInsert(key, logManager, inMemoryINs, cursor);
+            assert bin.isLatchOwnerForWrite();
+
+            /* Make a child reference as a candidate for insertion. */
+            ChildReference newLNRef =
+		new ChildReference(ln, key, DbLsn.NULL_LSN);
+
+	    /*
+	     * If we're doing a put that is not a putCurrent, then the cursor
+	     * passed in may not be pointing to this BIN (it was set to the BIN
+	     * that the search landed on, which may be a different one).  Set
+	     * the BIN correctly here so that adjustCursorsForInsert doesn't
+	     * blow an assertion.  We'll finish the job by setting the index
+	     * below.
+	     */
+	    cursor.setBIN(bin);
+
+            int index = bin.insertEntry1(newLNRef);
+            if ((index & IN.INSERT_SUCCESS) != 0) {
+
+                /*
+                 * Update the cursor to point to the entry that has been
+                 * successfully inserted.
+                 */
+                index &= ~IN.INSERT_SUCCESS;
+		cursor.updateBin(bin, index);
+
+                /* Log the new LN. */
+                long newLsn = DbLsn.NULL_LSN;
+
+		try {
+		    newLsn = ln.optionalLog
+                        (env, database, key, DbLsn.NULL_LSN,
+                         cursor.getLocker(), repContext);
+		} finally {
+                    if ((newLsn == DbLsn.NULL_LSN) &&
+                	!database.isDeferredWriteMode()) {
+
+                        /*
+                         * Possible buffer overflow, out-of-memory, or I/O
+                         * exception during logging.  The BIN entry will
+                         * contain a NULL_LSN.  To prevent an exception during
+                         * a fetch, we set the KnownDeleted flag.  We do not
+                         * call BIN.deleteEntry because cursors will not be
+                         * adjusted.  We do not add this entry to the
+                         * compressor queue to avoid complexity (this is rare).
+                         * [13126, 12605, 11271]
+                         */
+                        bin.setKnownDeleted(index);
+                    }
+		}
+		lnLock.setAbortLsn(DbLsn.NULL_LSN, true, true);
+                bin.updateEntry(index, newLsn);
+
+                traceInsert(Level.FINER, env, bin, ln, newLsn, index);
+                return true;
+            } else {
+
+                /*
+		 * Entry may have been a duplicate. Insertion was not
+		 * successful.
+		 */
+                index &= ~IN.EXACT_MATCH;
+		cursor.updateBin(bin, index);
+
+                /*
+                 * The key in the BIN slot and the key of the new LN may be
+                 * non-identical but compare as equal by the btree comparator.
+                 * This is disallowed for databases with duplicates configured.
+                 * [#15704]
+                 */
+                if (database.getSortedDuplicates() &&
+                    database.getBtreeComparator() != null &&
+                    !Arrays.equals(key, bin.getKey(index))) {
+                    throw new IllegalArgumentException
+                        ("Custom Btree comparator matches two non-identical " +
+                         "keys in a Database with duplicates configured");
+                }
+
+                LN currentLN = null;
+		boolean isDup = false;
+		Node n = bin.fetchTarget(index);
+		if (n == null || n instanceof LN) {
+		    currentLN = (LN) n;
+		} else {
+                    isDup = true;
+		}
+
+                /* If an LN is present, lock it and check deleted-ness. */
+		boolean isDeleted = false;
+                LockResult currentLock = null;
+
+                if (!isDup) {
+                    if (currentLN == null) {
+                        /* The LN was cleaned. */
+                        isDeleted = true;
+                    } else {
+                        currentLock = cursor.lockLNDeletedAllowed
+                            (currentLN, LockType.WRITE);
+                        currentLN = currentLock.getLN();
+                        /* The BIN/index may have changed while locking. */
+                        bin = cursor.getBIN();
+                        index = cursor.getIndex();
+                        if (cursor.getDupBIN() != null) {
+
+                            /*
+                             * A dup tree appeared during locking.  We will
+                             * position to a different dup tree entry later in
+                             * insertDuplicate, so we must remove the cursor
+                             * from this dup tree entry.  This is a rare case
+                             * so performance is not an issue.
+                             */
+                            cursor.clearDupBIN(true /*alreadyLatched*/);
+                            isDup = true;
+                        } else if (bin.isEntryKnownDeleted(index) ||
+                                   currentLN == null ||
+                                   currentLN.isDeleted()) {
+                            /* The current LN is deleted/cleaned. */
+                            isDeleted = true;
+                        }
+                    }
+                }
+
+                if (isDeleted) {
+
+                    /*
+                     * Set the abort LSN to that of the lock held on the
+                     * current LN, if the current LN was previously locked by
+                     * this txn.  This is needed when we change the node ID of
+                     * this slot.
+                     *
+                     * If reusing a slot whose LN was deleted in a prior
+                     * transaction (the LockGrantType is NEW or UPGRADE),
+                     * always set abortKnownDeleted=true.  It may be that the
+                     * existing slot is PENDING_DELETED, but we restore to
+                     * KNOWN_DELETED in the event of an abort.
+                     */
+                    long abortLsn = bin.getLsn(index);
+                    boolean abortKnownDeleted = true;
+                    if (currentLN != null &&
+                        currentLock.getLockGrant() == LockGrantType.EXISTING) {
+                        long nodeId = currentLN.getNodeId();
+                        Locker locker = cursor.getLocker();
+			WriteLockInfo info = locker.getWriteLockInfo(nodeId);
+			abortLsn = info.getAbortLsn();
+			abortKnownDeleted = info.getAbortKnownDeleted();
+                        /* Copy the size/DatabaseImpl of the existing lock. */
+                        lnLock.copyAbortInfo(info);
+                    }
+		    lnLock.setAbortLsn(abortLsn, abortKnownDeleted);
+
+                    /*
+                     * Current entry is a deleted entry. Replace it with LN.
+                     * Pass NULL_LSN for the oldLsn parameter of the log()
+                     * method because the old LN was counted obsolete when it
+                     * was deleted.
+                     */
+                    long newLsn = ln.optionalLog(env,
+                                                 database,
+                                                 key,
+                                                 DbLsn.NULL_LSN,
+                                                 cursor.getLocker(),
+                                                 repContext);
+
+                    /*
+                     * When reusing a slot, the key is replaced in the BIN
+                     * slot.  This ensures that the correct key value is used
+                     * when the new key is non-identical to the key in the slot
+                     * but is considered equal by the btree comparator.
+                     * [#15704]
+                     */
+                    bin.updateEntry(index, ln, newLsn, key);
+                    bin.clearKnownDeleted(index);
+                    bin.clearPendingDeleted(index);
+
+                    traceInsert(Level.FINER, env, bin, ln, newLsn, index);
+                    return true;
+                } else {
+
+		    /*
+		     * Attempt to insert a duplicate in an existing dup tree
+                     * or create a dup tree if none exists.
+		     */		
+		    return insertDuplicate
+                        (key, bin, ln, logManager, inMemoryINs, cursor, lnLock,
+                         allowDuplicates, repContext);
+                }
+            }
+        } finally {
+            cursor.releaseBIN();
+        }
+    }
+
+    /**
+     * Attempts to insert a duplicate at the current cursor BIN position.  If
+     * an existing dup tree exists, insert into it; otherwise, create a new
+     * dup tree and place the new LN and the existing LN into it.  If the
+     * current BIN entry contains an LN, the caller guarantees that it is not
+     * deleted.
+     *
+     * @return true if the duplicate was inserted successfully, false if it
+     * was a duplicate duplicate, or false if there is an existing LN and
+     * allowDuplicates is false.
+     */
+    private boolean insertDuplicate(byte[] key,
+				    BIN bin,
+                                    LN newLN,
+                                    LogManager logManager,
+                                    INList inMemoryINs,
+                                    CursorImpl cursor,
+				    LockResult lnLock,
+                                    boolean allowDuplicates,
+                                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        EnvironmentImpl env = database.getDbEnvironment();
+	int index = cursor.getIndex();
+        boolean successfulInsert = false;
+
+        DIN dupRoot = null;
+        Node n = bin.fetchTarget(index);
+	long binNid = bin.getNodeId();
+
+        if (n instanceof DIN) {
+            DBIN dupBin = null;
+
+            /*
+             * A duplicate tree exists.  Find the relevant DBIN and insert the
+             * new entry into it.
+             */
+            try {
+                CacheMode cacheMode = cursor.getCacheMode();
+                dupRoot = (DIN) n;
+                dupRoot.latch(cacheMode);
+
+                /* Lock the DupCountLN before logging any LNs. */
+                LockResult dclLockResult =
+                    cursor.lockDupCountLN(dupRoot, LockType.WRITE);
+                /* The BIN/index may have changed during locking. */
+                bin = cursor.getBIN();
+                index = cursor.getIndex();
+
+                /*
+                 * Do not proceed if duplicates are not allowed and there are
+                 * one or more duplicates already present.  Note that if the
+                 * dup count is zero, we allow the insert.
+                 */
+                if (!allowDuplicates) {
+
+                    /*
+                     * dupRoot could have been changed during the dcl lock so
+                     * we need to grab it again here so that we release the
+                     * latch on the correct dupRoot in the finally below.
+                     */
+                    dupRoot = (DIN) bin.fetchTarget(index);
+                    DupCountLN dcl = (DupCountLN) dclLockResult.getLN();
+                    if (dcl.getDupCount() > 0) {
+                        return false;
+                    }
+                }
+
+                /*
+                 * Split the dup root if necessary.  The dup root may have
+                 * changed during locking above or by the split, so refetch it.
+                 * In either case it will be latched.
+                 */
+                maybeSplitDuplicateRoot(bin, index, cacheMode);
+                dupRoot = (DIN) bin.fetchTarget(index);
+
+                /*
+                 * Search the duplicate tree for the right place to insert this
+                 * new record. Releases the latch on dupRoot. If the
+                 * dupRoot got logged as a result of some splitting,
+                 * update the BIN's LSN information. The SortedLSNTreeWalker
+                 * relies on accurate LSNs in the in-memory tree.
+                 */
+                byte[] newLNKey = newLN.getData();
+                long previousLsn = dupRoot.getLastFullVersion();
+                try {
+                    dupBin = (DBIN) searchSubTreeSplitsAllowed
+                        (dupRoot, newLNKey, -1, cacheMode);
+                } catch (SplitRequiredException e) {
+
+                    /*
+                     * Shouldn't happen -- we have the DIN in the root of the
+                     * dup tree latched during this insert, so there should be
+                     * no possibility of being unable to insert a new entry
+                     * into the DIN root of the dup tree.
+                     */
+                    throw new DatabaseException(e);
+                }
+
+                long currentLsn = dupRoot.getLastFullVersion();
+                if (currentLsn != previousLsn) {
+                    bin.updateEntry(index, currentLsn);
+                }
+
+                /* Release the BIN latch to increase concurrency. */
+                cursor.releaseBIN();
+                bin = null;
+
+                /* The search above released the dup root latch. */
+                dupRoot = null;
+
+                /*
+                 * Try to insert a new reference object. If successful, we'll
+                 * log the LN and update the LSN in the reference.
+                 */
+                ChildReference newLNRef =
+                    new ChildReference(newLN, newLNKey, DbLsn.NULL_LSN);
+
+                int dupIndex = dupBin.insertEntry1(newLNRef);
+                if ((dupIndex & IN.INSERT_SUCCESS) != 0) {
+
+                    /*
+                     * Update the cursor to point to the entry that has been
+                     * successfully inserted.
+                     */
+		    dupIndex &= ~IN.INSERT_SUCCESS;
+		    cursor.updateDBin(dupBin, dupIndex);
+
+                    /* Log the new LN. */
+                    long newLsn = DbLsn.NULL_LSN;
+		    try {
+			newLsn = newLN.optionalLog
+                            (env, database, key, DbLsn.NULL_LSN,
+                             cursor.getLocker(), repContext);
+                    } finally {
+                        if ((newLsn == DbLsn.NULL_LSN) &&
+                            (!database.isDeferredWriteMode())) {
+
+                            /*
+                             * See Tree.insert for an explanation of handling
+                             * of IOException and OOME.
+                             */
+                            dupBin.setKnownDeleted(dupIndex);
+                        }
+		    }
+
+		    lnLock.setAbortLsn(DbLsn.NULL_LSN, true, true);
+
+		    /*
+                     * Use updateEntry to be sure to mark the dupBin as dirty.
+                     */
+		    dupBin.updateEntry(dupIndex, newLsn);
+
+                    traceInsertDuplicate(Level.FINER,
+                                         database.getDbEnvironment(),
+                                         dupBin, newLN, newLsn, binNid);
+                    successfulInsert = true;
+                } else {
+
+                    /*
+                     * The insert was not successful. Either this is a
+                     * duplicate duplicate or there is an existing entry but
+                     * that entry is deleted.
+                     */
+                    dupIndex &= ~IN.EXACT_MATCH;
+		    cursor.updateDBin(dupBin, dupIndex);
+                    LN currentLN = (LN) dupBin.fetchTarget(dupIndex);
+
+                    /* If an LN is present, lock it and check deleted-ness. */
+                    boolean isDeleted = false;
+                    LockResult currentLock = null;
+                    if (currentLN == null) {
+                        /* The LN was cleaned. */
+                        isDeleted = true;
+                    } else {
+                        currentLock = cursor.lockLNDeletedAllowed
+                            (currentLN, LockType.WRITE);
+                        currentLN = currentLock.getLN();
+
+                        /*
+                         * The BIN may have been latched while locking above.
+                         * Release the latch here because we released it above
+                         * to improve concurrency, and we will latch it again
+                         * below to increment the duplicate count. [#15574]
+                         */
+                        cursor.releaseBIN();
+
+                        /* The DBIN/index may have changed while locking. */
+                        dupBin = cursor.getDupBIN();
+			dupIndex = cursor.getDupIndex();
+                        if (dupBin.isEntryKnownDeleted(dupIndex) ||
+                            currentLN == null ||
+                            currentLN.isDeleted()) {
+                            /* The current LN is deleted/cleaned. */
+                            isDeleted = true;
+                        }
+                    }
+
+                    if (isDeleted) {
+                        /* See Tree.insert for an explanation. */
+                        long abortLsn = dupBin.getLsn(dupIndex);
+                        boolean abortKnownDeleted = true;
+                        if (currentLN != null &&
+                            currentLock.getLockGrant() ==
+                            LockGrantType.EXISTING) {
+                            long nodeId = currentLN.getNodeId();
+                            Locker locker = cursor.getLocker();
+			    WriteLockInfo info =
+				locker.getWriteLockInfo(nodeId);
+			    abortLsn = info.getAbortLsn();
+			    abortKnownDeleted = info.getAbortKnownDeleted();
+                            /* Copy size/DatabaseImpl of the existing lock. */
+                            lnLock.copyAbortInfo(info);
+			}
+			lnLock.setAbortLsn(abortLsn, abortKnownDeleted);
+
+                        /*
+                         * Current entry is a deleted entry. Replace it with
+                         * LN.  Pass NULL_LSN for the oldLsn parameter of the
+                         * log() method because the old LN was counted obsolete
+                         * when it was deleted.
+                         */
+                        long newLsn = newLN.optionalLog
+                            (env, database, key, DbLsn.NULL_LSN,
+                             cursor.getLocker(), repContext);
+
+                        /*
+                         * When reusing a slot, the key is replaced in the DBIN
+                         * slot.  This ensures that the correct key value is
+                         * used when the new key is non-identical to the key in
+                         * the slot but is considered equal by the duplicate
+                         * comparator.  [#15704]
+                         */
+                        dupBin.updateEntry(dupIndex, newLN, newLsn, newLNKey);
+                        dupBin.clearKnownDeleted(dupIndex);
+                        dupBin.clearPendingDeleted(dupIndex);
+
+                        traceInsertDuplicate(Level.FINER,
+                                             database.getDbEnvironment(),
+                                             dupBin, newLN, newLsn, binNid);
+                        successfulInsert = true;
+                    } else {
+                        /* Duplicate duplicate. */
+                        successfulInsert = false;
+                    }
+                }
+
+                /*
+                 * To avoid latching out of order (up the tree), release the
+                 * DBIN latch before latching the BIN and dup root.
+                 */
+                dupBin.releaseLatch();
+                dupBin = null;
+
+		if (successfulInsert) {
+                    cursor.latchBIN();
+                    dupRoot =
+                        cursor.getLatchedDupRoot(false /*isDBINLatched*/);
+                    cursor.releaseBIN();
+                    dupRoot.incrementDuplicateCount
+                        (dclLockResult, key, cursor.getLocker(),
+                         true /*increment*/);
+		}
+            } finally {
+                if (dupBin != null) {
+                    dupBin.releaseLatch();
+                }
+
+                if (dupRoot != null) {
+                    dupRoot.releaseLatch();
+                }
+            }
+        } else if (n instanceof LN) {
+
+            /*
+             * There is no duplicate tree yet.  The existing LN is guaranteed
+             * to be non-deleted, so to insert we must create a dup tree.
+             */
+            if (!allowDuplicates) {
+                return false;
+            }
+
+            /*
+             * Mutate the current BIN/LN pair into a BIN/DupCountLN/DIN/DBIN/LN
+             * duplicate tree.  Log the new entries.
+             */
+            try {
+		lnLock.setAbortLsn(DbLsn.NULL_LSN, true, true);
+                dupRoot = createDuplicateTree
+                    (key, logManager, inMemoryINs, newLN, cursor, repContext);
+            } finally {
+                if (dupRoot != null) {
+                    dupRoot.releaseLatch();
+                    successfulInsert = true;
+                } else {
+                    successfulInsert = false;
+                }
+            }
+        } else {
+            throw new InconsistentNodeException
+                ("neither LN or DIN found in BIN");
+        }
+
+        return successfulInsert;
+    }
+
+    /**
+     * Check if the duplicate root needs to be split.  On entry the current
+     * duplicate root is latched.  On exit the root (new or unchanged) is
+     * latched; if a new root was created, the old root is unlatched.
+     *
+     * @param bin the BIN containing the duplicate root.
+     * @param index the index of the duplicate root in bin.
+     * @param cacheMode the cache mode to use when latching the new root.
+     * @return true if the duplicate root was split.
+     */
+    private boolean maybeSplitDuplicateRoot(BIN bin,
+                                            int index,
+                                            CacheMode cacheMode)
+        throws DatabaseException {
+
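+        /*
+         * Root-split sketch (informal orientation only).  When the old root
+         * R overflows, a new root R' is created one level up, R becomes its
+         * single child, and R is then split beneath it:
+         *
+         *   BIN slot -> R (DIN, level n)   =>   BIN slot -> R' (level n+1)
+         *                                                    /         \
+         *                                              R (halved)   sibling
+         *
+         * The DupCountLN reference moves from R to R': it is passed to the
+         * new DIN's constructor and cleared on R via setDupCountLN(null).
+         */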
+        DIN curRoot = (DIN) bin.fetchTarget(index);
+
+        if (curRoot.needsSplitting()) {
+
+            EnvironmentImpl env = database.getDbEnvironment();
+            LogManager logManager = env.getLogManager();
+            INList inMemoryINs = env.getInMemoryINs();
+
+            /*
+             * Make a new root DIN, giving it an id key from the previous root.
+             */
+            byte[] rootIdKey = curRoot.getKey(0);
+            DIN newRoot = new DIN(database,
+                                  rootIdKey,
+                                  maxDupTreeEntriesPerNode,
+                                  curRoot.getDupKey(),
+                                  curRoot.getDupCountLNRef(),
+                                  curRoot.getLevel() + 1);
+
+            newRoot.latch(cacheMode);
+            long curRootLsn = 0;
+            long logLsn = 0;
+            try {
+                newRoot.setIsRoot(true);
+                curRoot.setDupCountLN(null);
+                curRoot.setIsRoot(false);
+
+                /*
+                 * Make the new root DIN point to the old root DIN, and then
+                 * log. We should be able to insert into the root because the
+                 * root is newly created.
+                 */
+                try {
+                    curRootLsn =
+                        curRoot.optionalLogProvisional(logManager, newRoot);
+                    boolean insertOk = newRoot.insertEntry
+                        (new ChildReference(curRoot, rootIdKey,
+                                            bin.getLsn(index)));
+                    assert insertOk;
+
+                    logLsn = newRoot.optionalLog(logManager);
+                } catch (DatabaseException e) {
+
+                    /* Something went wrong when we tried to log. */
+                    curRoot.setIsRoot(true);
+                    throw e;
+                }
+
+                inMemoryINs.add(newRoot);
+                bin.updateNode(index, newRoot, logLsn, null /*lnSlotKey*/);
+                curRoot.split(newRoot, 0, maxDupTreeEntriesPerNode, cacheMode);
+            } finally {
+                curRoot.releaseLatch();
+            }
+            traceSplitRoot(Level.FINE, TRACE_DUP_ROOT_SPLIT,
+			   newRoot, logLsn, curRoot, curRootLsn);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Convert an existing BIN entry from a single (non-duplicate) LN to a new
+     * DIN/DupCountLN->DBIN->LN subtree.
+     *
+     * @param key the key of the entry which will become the duplicate key
+     * for the duplicate subtree.
+     * @param logManager the logManager
+     * @param inMemoryINs the in memory IN list
+     * @param newLN the new record to be inserted
+     * @param cursor points to the target position for this new dup tree.
+     * @param repContext the replication context under which the new LN is
+     * logged.
+     * @return the new duplicate subtree root (a DIN).  It is latched when it
+     * is returned and the caller should unlatch it.  If the new entry to be
+     * inserted is a duplicate of the existing LN, null is returned.
+     */
+    private DIN createDuplicateTree(byte[] key,
+                                    LogManager logManager,
+                                    INList inMemoryINs,
+                                    LN newLN,
+                                    CursorImpl cursor,
+                                    ReplicationContext repContext)
+        throws DatabaseException {
+
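+        /*
+         * Shape change performed by this method (sketch):
+         *
+         *   before:  BIN slot -> LN
+         *   after:   BIN slot -> DIN (dup root)
+         *                         |-> DupCountLN
+         *                         `-> DBIN -> { existing LN, new LN }
+         */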
+        EnvironmentImpl env = database.getDbEnvironment();
+        DIN dupRoot = null;
+        DBIN dupBin = null;
+	boolean dupBinIsLatched = false;
+        BIN bin = cursor.getBIN();
+        int index = cursor.getIndex();
+
+        /*
+         * fetchTarget returned an LN before this method was called, and we're
+         * still latched, so the target should never be null here.
+         */
+        LN existingLN = (LN) bin.fetchTarget(index);
+        assert existingLN != null;
+        boolean existingLNIsDeleted = bin.isEntryKnownDeleted(index) ||
+            existingLN.isDeleted();
+
+        byte[] existingKey = existingLN.getData();
+        byte[] newLNKey = newLN.getData();
+
+        /* Check for duplicate duplicates. */
+        boolean keysEqual = Key.compareKeys
+            (newLNKey, existingKey, database.getDuplicateComparator()) == 0;
+        if (keysEqual) {
+            return null;
+        }
+
+        /*
+         * Replace the existing LN with a duplicate tree.
+         *
+         * Once we create a dup tree, we never revert to the LN.  Create
+         * a DupCountLN to hold the count for this dup tree. Since we don't
+         * roll back the internal nodes of a duplicate tree, we need to create
+         * a pre-transaction version of the DupCountLN. This version must hold
+         * a count of either 0 or 1, depending on whether the current
+         * transaction created the existing LN or not. If the former, the
+         * count must roll back to 0; if the latter, it must roll back to 1.
+         *
+         * Note that we are logging a sequence of nodes and must make sure the
+         * log can be correctly recovered even if the entire sequence doesn't
+         * make it to the log. We need to make all children provisional to the
+         * DIN. This works:
+         *
+         * Entry 1: (provisional) DupCountLN (first version)
+         * Entry 2: (provisional) DupBIN
+         * Entry 3: DIN
+         * Entry 4: DupCountLN (second version, incorporating the new count.
+         *           This can't be provisional because we need to possibly
+         *            roll it back.)
+         * Entry 5: new LN.
+         * See [SR #10203] for a description of the bug that existed before
+         * this change.
+         */
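+
+        /*
+         * (Recovery rationale, briefly: provisional entries are not redone
+         * independently at recovery -- they are only reached through their
+         * non-provisional parent -- which is why entries 1 and 2 may safely
+         * precede the DIN in the log.)
+         */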
+
+        /* Create the first version of DupCountLN and log it. (Entry 1). */
+        Locker locker = cursor.getLocker();
+        long nodeId = existingLN.getNodeId();
+
+        /*
+         * If the existing entry is known to be deleted or was created by this
+         * transaction, then the DCL should get rolled back to 0, not 1.
+         * [13726].
+         */
+        int startingCount =
+            (locker.createdNode(nodeId) ||
+             existingLNIsDeleted ||
+             (locker.getWriteLockInfo(nodeId).getAbortKnownDeleted()) ? 0 : 1);
+
+        DupCountLN dupCountLN = new DupCountLN(database.getDbEnvironment(),
+                                               startingCount);
+        long firstDupCountLNLsn = dupCountLN.optionalLogProvisional
+            (env, database, key, DbLsn.NULL_LSN,
+             ReplicationContext.NO_REPLICATE);
+
+        /* Make the duplicate root and DBIN. */
+        dupRoot = new DIN(database,
+                          existingKey,                   // idkey
+                          maxDupTreeEntriesPerNode,
+                          key,                           // dup key
+                          new ChildReference
+                          (dupCountLN, key, firstDupCountLNLsn),
+                          2);                            // level
+        CacheMode cacheMode = cursor.getCacheMode();
+        dupRoot.latch(cacheMode);
+        dupRoot.setIsRoot(true);
+
+        dupBin = new DBIN(database,
+                          existingKey,                   // idkey
+                          maxDupTreeEntriesPerNode,
+                          key,                           // dup key
+                          1);                            // level
+        dupBin.latch(cacheMode);
+	dupBinIsLatched = true;
+
+        /*
+         * Attach the existing LN child to the duplicate BIN. Since this is a
+         * newly created BIN, insertEntry will be successful.
+         */
+        ChildReference newExistingLNRef = new ChildReference
+            (existingLN, existingKey, bin.getLsn(index), bin.getState(index));
+
+        boolean insertOk = dupBin.insertEntry(newExistingLNRef);
+        assert insertOk;
+
+        try {
+
+            /* Entry 2: DBIN. */
+            long dbinLsn = dupBin.optionalLogProvisional(logManager, dupRoot);
+            inMemoryINs.add(dupBin);
+
+            /* Attach the duplicate BIN to the duplicate IN root. */
+            dupRoot.setEntry(0, dupBin, dupBin.getKey(0),
+                             dbinLsn, dupBin.getState(0));
+
+            /* Entry 3:  DIN */
+            long dinLsn = dupRoot.optionalLog(logManager);
+            inMemoryINs.add(dupRoot);
+
+            /*
+             * Now that the DIN is logged, we've created a duplicate tree that
+             * holds the single, preexisting LN. We can safely create the
+             * non-provisional LNs that pertain to this insert -- the new LN
+             * and the new DupCountLN.
+             *
+             * We request a lock while holding latches which is usually
+             * forbidden, but safe in this case since we know it will be
+             * immediately granted (we just created dupCountLN above).
+             */
+            LockResult lockResult = locker.lock
+                (dupCountLN.getNodeId(), LockType.WRITE, false /*noWait*/,
+                 database);
+            lockResult.setAbortLsn(firstDupCountLNLsn, false);
+
+            dupCountLN.setDupCount(2);
+            long dupCountLsn = dupCountLN.optionalLog
+                (env, database, key, firstDupCountLNLsn, locker,
+                 ReplicationContext.NO_REPLICATE);
+            dupRoot.updateDupCountLNRef(dupCountLsn);
+
+            /* Add the newly created LN. */
+            long newLsn = newLN.optionalLog
+                (env, database, key, DbLsn.NULL_LSN, locker, repContext);
+            int dupIndex = dupBin.insertEntry1
+                (new ChildReference(newLN, newLNKey, newLsn));
+            dupIndex &= ~IN.INSERT_SUCCESS;
+            cursor.updateDBin(dupBin, dupIndex);
+
+            /*
+             * Adjust any cursors positioned on the mutated BIN entry to point
+             * to the DBIN at the location of the entry we moved there.  The
+             * moved entry's index is the new entry's index XOR 1, i.e., 0 or
+             * 1, since the DBIN holds exactly these two entries.
+             */
+            bin.adjustCursorsForMutation(index, dupBin, dupIndex ^ 1, cursor);
+            dupBin.releaseLatch();
+	    dupBinIsLatched = false;
+
+            /*
+             * Update the "regular" BIN to point to the new duplicate tree
+             * instead of the existing LN.  Clear the MIGRATE flag since it
+             * applies only to the original LN.
+             */
+            bin.updateNode(index, dupRoot, dinLsn, null /*lnSlotKey*/);
+            bin.setMigrate(index, false);
+
+            traceMutate(Level.FINE, bin, existingLN, newLN, newLsn,
+                        dupCountLN, dupCountLsn, dupRoot, dinLsn,
+                        dupBin, dbinLsn);
+        } catch (DatabaseException e) {
+
+            /*
+             * Strictly speaking, it's not necessary to release latches,
+             * because if we fail to log the entries, we just throw them away,
+             * but our unit tests check for 0 latches held in the event of a
+             * logging error.
+             */
+	    if (dupBinIsLatched) {
+		dupBin.releaseLatch();
+	    }
+            dupRoot.releaseLatch();
+            throw e;
+        }
+        return dupRoot;
+    }
+
+    /**
+     * Validate args passed to insert.  Presently this just means verifying
+     * that, if the caller allows duplicates, the database is configured to
+     * support duplicates.
+     */
+    private void validateInsertArgs(boolean allowDuplicates)
+        throws DatabaseException {
+
+        if (allowDuplicates && !database.getSortedDuplicates()) {
+            throw new DatabaseException
+                ("allowDuplicates passed to insert but database doesn't " +
+                 "have allow duplicates set.");
+        }
+    }
+
+    /**
+     * Find the BIN that is relevant to the insert.  If the tree doesn't exist
+     * yet, then create the first IN and BIN.
+     * @return the BIN that was found or created and return it latched.
+     */
+    private BIN findBinForInsert(byte[] key,
+                                 LogManager logManager,
+                                 INList inMemoryINs,
+                                 CursorImpl cursor)
+        throws DatabaseException {
+
+	BIN bin;
+
+        /* First try using the BIN at the cursor position to avoid a search. */
+        bin = cursor.latchBIN();
+        if (bin != null) {
+            if (!bin.needsSplitting() && bin.isKeyInBounds(key)) {
+                return bin;
+            } else {
+                bin.releaseLatch();
+            }
+        }
+
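+        /*
+         * Note on the retry loop below: it uses a release-and-upgrade
+         * pattern.  The root latch is taken shared; if no root exists it is
+         * traded for an exclusive latch and the check repeated, since
+         * another thread may have created the root in the unlatched window.
+         */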
+	boolean rootLatchIsHeld = false;
+        try {
+	    long logLsn;
+
+	    /*
+	     * We may have to try several times because of a small
+	     * timing window, explained below.
+	     */
+	    while (true) {
+		rootLatchIsHeld = true;
+		rootLatch.acquireShared();
+		if (!rootExists()) {
+		    rootLatch.release();
+		    rootLatch.acquireExclusive();
+		    if (rootExists()) {
+			rootLatch.release();
+			rootLatchIsHeld = false;
+			continue;
+		    }
+
+                    CacheMode cacheMode = cursor.getCacheMode();
+
+		    /*
+		     * This is an empty tree, either because it's a brand new
+		     * tree or because everything in it was deleted. Create an
+		     * IN and a BIN.  We could latch the rootIN here, but
+		     * there's no reason to since we're just creating the
+		     * initial tree and we have the rootLatch held. Log the
+		     * nodes as soon as they're created, but remember that
+		     * referred-to children must come before any references to
+		     * their LSNs.
+		     */
+                    /* First BIN in the tree, log provisionally right away. */
+                    bin = new BIN(database, key, maxMainTreeEntriesPerNode, 1);
+                    bin.latch(cacheMode);
+                    logLsn = bin.optionalLogProvisional(logManager, null);
+
+		    /*
+                     * Log the root right away. Leave the root dirty, because
+                     * the MapLN is not being updated, and we want to avoid
+                     * this scenario from [#13897], where the LN has no
+                     * possible parent.
+                     *  provisional BIN
+                     *  root IN
+                     *  checkpoint start
+                     *  LN is logged
+                     *  checkpoint end
+                     *  BIN is dirtied, but is not part of checkpoint
+                     */
+
+		    IN rootIN =
+			new IN(database, key, maxMainTreeEntriesPerNode, 2);
+
+		    /*
+		     * OK to latch the root after a child BIN because it's
+		     * during creation.
+		     */
+		    rootIN.latch(cacheMode);
+		    rootIN.setIsRoot(true);
+
+		    boolean insertOk = rootIN.insertEntry
+			(new ChildReference(bin, key, logLsn));
+		    assert insertOk;
+
+                    logLsn = rootIN.optionalLog(logManager);
+                    rootIN.setDirty(true);  /*force re-logging, see [#13897]*/
+
+                    root = makeRootChildReference(rootIN,
+                                                  new byte[0],
+                                                  logLsn);
+
+		    rootIN.releaseLatch();
+
+		    /* Add the new nodes to the in memory list. */
+		    inMemoryINs.add(bin);
+		    inMemoryINs.add(rootIN);
+		    rootLatch.release();
+		    rootLatchIsHeld = false;
+
+		    break;
+		} else {
+		    rootLatch.release();
+		    rootLatchIsHeld = false;
+
+		    /*
+		     * There's a tree here, so search for where we should
+		     * insert. However, note that a window exists after we
+		     * release the root latch. We release the latch because the
+		     * search method expects to take the latch. After the
+		     * release and before search, the INCompressor may come in
+		     * and delete the entire tree, so search may return with a
+		     * null.
+		     */
+		    IN in =
+                        searchSplitsAllowed(key, -1, cursor.getCacheMode());
+		    if (in == null) {
+			/* The tree was deleted by the INCompressor. */
+			continue;
+		    } else {
+			/* search() found a BIN where this key belongs. */
+			bin = (BIN) in;
+			break;
+		    }
+		}
+	    }
+        } finally {
+	    if (rootLatchIsHeld) {
+		rootLatch.release();
+	    }
+        }
+
+        /* Testing hook to insert an item into the log. */
+        assert TestHookExecute.doHookIfSet(ckptHook);
+
+        return bin;
+    }
+
+    /*
+     * Given a subtree root (an IN), remove it and all of its children from the
+     * in memory IN list. Also count removed nodes as obsolete and gather the
+     * set of file summaries that should be logged. The localTracker will be
+     * flushed to the log later.
+     */
+    private void accountForSubtreeRemoval(INList inList,
+                                          IN subtreeRoot,
+                                          LocalUtilizationTracker localTracker)
+        throws DatabaseException {
+
+        subtreeRoot.accountForSubtreeRemoval(inList, localTracker);
+
+        Tracer.trace(Level.FINE, database.getDbEnvironment(),
+		     "SubtreeRemoval: subtreeRoot = " +
+		     subtreeRoot.getNodeId());
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        int size = 1;                          // rootExists
+        if (root != null) {
+            size += root.getLogSize();
+        }
+        return size;
+    }
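+
+    /*
+     * Log layout written by writeToLog and read back by readFromLog (sketch
+     * derived from the code below):
+     *
+     *   +---------------------+------------------------------------------+
+     *   | rootExists (1 byte) | root ChildReference (only if rootExists) |
+     *   +---------------------+------------------------------------------+
+     */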
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        byte booleans = (byte) ((root != null) ? 1 : 0);
+        logBuffer.put(booleans);
+        if (root != null) {
+            root.writeToLog(logBuffer);
+        }
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) {
+        boolean rootExists = false;
+        byte booleans = itemBuffer.get();
+        rootExists = (booleans & 1) != 0;
+        if (rootExists) {
+            root = makeRootChildReference();
+            root.readFromLog(itemBuffer, entryVersion);
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<root>");
+        if (root != null) {
+            root.dumpLog(sb, verbose);
+        }
+        sb.append("</root>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+	return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * rebuildINList is used by recovery to add all the resident nodes to the
+     * IN list.
+     */
+    public void rebuildINList()
+        throws DatabaseException {
+
+        INList inMemoryList = database.getDbEnvironment().getInMemoryINs();
+
+        if (root != null) {
+            rootLatch.acquireShared();
+            try {
+                Node rootIN = root.getTarget();
+                if (rootIN != null) {
+                    rootIN.rebuildINList(inMemoryList);
+                }
+            } finally {
+                rootLatch.release();
+            }
+        }
+    }
+
+    /*
+     * Debugging stuff.
+     */
+    public void dump()
+        throws DatabaseException {
+
+        System.out.println(dumpString(0));
+    }
+
+    public String dumpString(int nSpaces)
+        throws DatabaseException {
+
+        StringBuffer sb = new StringBuffer();
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("<tree>");
+        sb.append('\n');
+        if (root != null) {
+            sb.append(DbLsn.dumpString(root.getLsn(), nSpaces));
+            sb.append('\n');
+            IN rootIN = (IN) root.getTarget();
+            if (rootIN == null) {
+                sb.append("<in/>");
+            } else {
+                sb.append(rootIN.toString());
+            }
+            sb.append('\n');
+        }
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append("</tree>");
+        return sb.toString();
+    }
+
+    /**
+     * Unit test support to validate subtree pruning. Didn't want to make root
+     * access public.
+     */
+    boolean validateDelete(int index)
+        throws DatabaseException {
+
+        rootLatch.acquireShared();
+        try {
+            IN rootIN = (IN) root.fetchTarget(database, null);
+            return rootIN.validateSubtreeBeforeDelete(index);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    /**
+     * Debugging check that all resident nodes are on the INList and no stray
+     * nodes are present in the unused portion of the IN arrays.
+     */
+    public void validateINList(IN parent)
+        throws DatabaseException {
+
+        if (parent == null) {
+            parent = (IN) root.getTarget();
+        }
+        if (parent != null) {
+            INList inList = database.getDbEnvironment().getInMemoryINs();
+            if (!inList.contains(parent)) {
+                throw new DatabaseException
+                    ("IN " + parent.getNodeId() + " missing from INList");
+            }
+            for (int i = 0;; i += 1) {
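+                /*
+                 * The loop bound is intentionally omitted: getTarget/getKey
+                 * throw ArrayIndexOutOfBoundsException past the end of the
+                 * IN's arrays, and the catch below terminates the scan.
+                 */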
+                try {
+                    Node node = parent.getTarget(i);
+                    if (i >= parent.getNEntries()) {
+                        if (node != null) {
+                            throw new DatabaseException
+                                ("IN " + parent.getNodeId() +
+                                 " has stray node " + node.getNodeId() +
+                                 " at index " + i);
+                        }
+                        byte[] key = parent.getKey(i);
+                        if (key != null) {
+                            throw new DatabaseException
+                                ("IN " + parent.getNodeId() +
+                                 " has stray key " + key +
+                                 " at index " + i);
+                        }
+                    }
+                    if (node instanceof IN) {
+                        validateINList((IN) node);
+                    }
+                } catch (ArrayIndexOutOfBoundsException e) {
+                    break;
+                }
+            }
+        }
+    }
+
+    /* For unit testing only. */
+    public void setWaitHook(TestHook hook) {
+        waitHook = hook;
+    }
+
+    /* For unit testing only. */
+    public void setSearchHook(TestHook hook) {
+        searchHook = hook;
+    }
+
+    /* For unit testing only. */
+    public void setCkptHook(TestHook hook) {
+        ckptHook = hook;
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even
+     * want to construct the message if the level is not enabled.
+     */
+    private void traceSplitRoot(Level level,
+                                String splitType,
+                                IN newRoot,
+                                long newRootLsn,
+                                IN oldRoot,
+                                long oldRootLsn) {
+        Logger logger = database.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(splitType);
+            sb.append(" newRoot=").append(newRoot.getNodeId());
+            sb.append(" newRootLsn=").
+		append(DbLsn.getNoFormatString(newRootLsn));
+            sb.append(" oldRoot=").append(oldRoot.getNodeId());
+            sb.append(" oldRootLsn=").
+		append(DbLsn.getNoFormatString(oldRootLsn));
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even
+     * want to construct the message if the level is not enabled.
+     */
+    private void traceMutate(Level level,
+                             BIN theBin,
+                             LN existingLn,
+                             LN newLn,
+                             long newLsn,
+                             DupCountLN dupCountLN,
+                             long dupRootLsn,
+                             DIN dupRoot,
+                             long ddinLsn,
+                             DBIN dupBin,
+                             long dbinLsn) {
+        Logger logger = database.getDbEnvironment().getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_MUTATE);
+            sb.append(" existingLn=");
+            sb.append(existingLn.getNodeId());
+            sb.append(" newLn=");
+            sb.append(newLn.getNodeId());
+            sb.append(" newLnLsn=");
+            sb.append(DbLsn.getNoFormatString(newLsn));
+            sb.append(" dupCountLN=");
+            sb.append(dupCountLN.getNodeId());
+            sb.append(" dupRootLsn=");
+            sb.append(DbLsn.getNoFormatString(dupRootLsn));
+            sb.append(" rootdin=");
+            sb.append(dupRoot.getNodeId());
+            sb.append(" ddinLsn=");
+            sb.append(DbLsn.getNoFormatString(ddinLsn));
+            sb.append(" dbin=");
+            sb.append(dupBin.getNodeId());
+            sb.append(" dbinLsn=");
+            sb.append(DbLsn.getNoFormatString(dbinLsn));
+            sb.append(" bin=");
+            sb.append(theBin.getNodeId());
+
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even
+     * want to construct the message if the level is not enabled.
+     */
+    private void traceInsert(Level level,
+                             EnvironmentImpl env,
+                             BIN insertingBin,
+                             LN ln,
+                             long lnLsn,
+			     int index) {
+        Logger logger = env.getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_INSERT);
+            sb.append(" bin=");
+            sb.append(insertingBin.getNodeId());
+            sb.append(" ln=");
+            sb.append(ln.getNodeId());
+            sb.append(" lnLsn=");
+            sb.append(DbLsn.getNoFormatString(lnLsn));
+            sb.append(" index=");
+	    sb.append(index);
+
+            logger.log(level, sb.toString());
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logger. Don't rely on the logger
+     * alone to conditionalize whether we send this message; we don't even
+     * want to construct the message if the level is not enabled.
+     */
+    private void traceInsertDuplicate(Level level,
+                                      EnvironmentImpl env,
+                                      BIN insertingDBin,
+                                      LN ln,
+                                      long lnLsn,
+                                      long binNid) {
+        Logger logger = env.getLogger();
+        if (logger.isLoggable(level)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(TRACE_INSERT_DUPLICATE);
+            sb.append(" dbin=");
+            sb.append(insertingDBin.getNodeId());
+            sb.append(" bin=");
+            sb.append(binNid);
+            sb.append(" ln=");
+            sb.append(ln.getNodeId());
+            sb.append(" lnLsn=");
+            sb.append(DbLsn.getNoFormatString(lnLsn));
+
+            logger.log(level, sb.toString());
+        }
+    }
+
+    private static class SplitInfo {
+        IN parent;
+        IN child;
+        int index;
+
+        SplitInfo(IN parent, IN child, int index) {
+            this.parent = parent;
+            this.child = child;
+            this.index = index;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/tree/TreeLocation.java b/src/com/sleepycat/je/tree/TreeLocation.java
new file mode 100644
index 0000000000000000000000000000000000000000..767002777e1f274efe83bfe74d440772e52b8d01
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TreeLocation.java
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeLocation.java,v 1.17.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/*
+ * TreeLocation is a cursor-like object that keeps track of a location
+ * in a tree. It's used during recovery.
+ */
+public class TreeLocation {
+    public BIN bin;         // parent BIN for the target LN
+    public int index;       // index of where the LN is or should go
+    public byte[] lnKey;    // the key that represents this LN in this BIN
+    public long childLsn = DbLsn.NULL_LSN; // current LSN value in that slot.
+
+    public void reset() {
+        bin = null;
+        index = -1;
+        lnKey = null;
+        childLsn = DbLsn.NULL_LSN;
+    }
+
+    @Override
+    public String toString() {
+	StringBuffer sb = new StringBuffer("<TreeLocation bin=\"");
+	if (bin == null) {
+	    sb.append("null");
+	} else {
+	    sb.append(bin.getNodeId());
+	}
+	sb.append("\" index=\"");
+	sb.append(index);
+	sb.append("\" lnKey=\"");
+	sb.append(Key.dumpString(lnKey, 0));
+	sb.append("\" childLsn=\"");
+	sb.append(DbLsn.toString(childLsn));
+	sb.append("\">");
+	return sb.toString();
+    }
+}
+
diff --git a/src/com/sleepycat/je/tree/TreeStats.java b/src/com/sleepycat/je/tree/TreeStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..5f9d9cb3b9d368393bf22f5d63e3c968d7efe4a0
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TreeStats.java
@@ -0,0 +1,20 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeStats.java,v 1.14.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * A class that provides interesting stats about a particular tree.
+ */
+public final class TreeStats {
+
+    /**
+     * Number of times the root was split.
+     */
+    public int nRootSplits = 0;
+}
diff --git a/src/com/sleepycat/je/tree/TreeUtils.java b/src/com/sleepycat/je/tree/TreeUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..fe415f08db18a6ccee804dfcfaa70a44ffaaceb0
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TreeUtils.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeUtils.java,v 1.27.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Miscellaneous Tree utilities.
+ */
+public class TreeUtils {
+
+    private static final String SPACES =
+	"                                " +
+	"                                " +
+	"                                " +
+	"                                ";
+
+    /**
+     * For tree dumper.
+     */
+    public static String indent(int nSpaces) {
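+	// Note: assumes nSpaces <= SPACES.length() (128 characters); larger
+	// values would throw StringIndexOutOfBoundsException.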
+	return SPACES.substring(0, nSpaces);
+    }
+}
diff --git a/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java b/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1531cc8f006bc9ed6a6486f8f55b84c218c2679
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeWalkerStatsAccumulator.java,v 1.7.2.2 2010/01/04 15:30:36 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+/**
+ * Accumulates stats about a tree during tree walking.
+ */
+public interface TreeWalkerStatsAccumulator {
+    public void processIN(IN node, Long nid, int level);
+
+    public void processBIN(BIN node, Long nid, int level);
+
+    public void processDIN(DIN node, Long nid, int level);
+
+    public void processDBIN(DBIN node, Long nid, int level);
+
+    public void processDupCountLN(DupCountLN node, Long nid);
+
+    public void incrementLNCount();
+
+    public void incrementDeletedLNCount();
+}
diff --git a/src/com/sleepycat/je/tree/WithRootLatched.java b/src/com/sleepycat/je/tree/WithRootLatched.java
new file mode 100644
index 0000000000000000000000000000000000000000..08ad9590dcf0f5b0030ccaa322c5642348acd236
--- /dev/null
+++ b/src/com/sleepycat/je/tree/WithRootLatched.java
@@ -0,0 +1,20 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: WithRootLatched.java,v 1.15.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.DatabaseException;
+
+public interface WithRootLatched {
+
+    /**
+     * doWork is called while the tree's root latch is held.
+     */
+    public IN doWork(ChildReference root)
+	throws DatabaseException;
+}
diff --git a/src/com/sleepycat/je/txn/BasicLocker.java b/src/com/sleepycat/je/txn/BasicLocker.java
new file mode 100644
index 0000000000000000000000000000000000000000..c8f96c5b8ddb4373185ac1b5f2a631cc9e2d1644
--- /dev/null
+++ b/src/com/sleepycat/je/txn/BasicLocker.java
@@ -0,0 +1,444 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BasicLocker.java,v 1.102.2.3 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * A non-transactional Locker that simply tracks locks and releases them when
+ * releaseNonTxnLocks or operationEnd is called.
+ */
+public class BasicLocker extends Locker {
+
+    /*
+     * A BasicLocker can release all locks, so there is no need to distinguish
+     * between read and write locks.
+     *
+     * ownedLock is used for the first lock obtained, and ownedLockSet is
+     * instantiated and used only if more than one lock is obtained.  This is
+     * an optimization for the common case where only one lock is held by a
+     * non-transactional locker.
+     *
+     * There's no need to track memory utilization for these non-transactional
+     * lockers, because the lockers are short-lived.
+     */
+    private Long ownedLock;
+    private Set<Long> ownedLockSet;
+
+    /**
+     * Creates a BasicLocker.
+     */
+    protected BasicLocker(EnvironmentImpl env)
+        throws DatabaseException {
+
+        super(env,
+              false, // readUncommittedDefault
+              false, // noWait
+              0);    // mandatedId
+    }
+
+    public static BasicLocker createBasicLocker(EnvironmentImpl env)
+        throws DatabaseException {
+
+	BasicLocker ret = null;
+	try {
+	    ret = new BasicLocker(env);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    if (ret != null) {
+		ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
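+
+    /*
+     * Hypothetical usage sketch (illustrative only; the lock call shown
+     * assumes the inherited Locker API and is not part of this change):
+     *
+     *   BasicLocker locker = BasicLocker.createBasicLocker(envImpl);
+     *   try {
+     *       locker.lock(nodeId, LockType.READ, false, databaseImpl);
+     *       // ... read the record under the lock ...
+     *   } finally {
+     *       locker.operationEnd(true); // releases all locks, closes locker
+     *   }
+     */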
+
+    /**
+     * Creates a BasicLocker with a noWait argument.
+     */
+    protected BasicLocker(EnvironmentImpl env, boolean noWait)
+        throws DatabaseException {
+
+        super(env,
+              false, // readUncommittedDefault
+              noWait,
+              0);    // mandatedId
+    }
+
+    public static BasicLocker createBasicLocker(EnvironmentImpl env,
+						boolean noWait)
+        throws DatabaseException {
+
+	BasicLocker ret = null;
+	try {
+	    ret = new BasicLocker(env, noWait);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    if (ret != null) {
+		ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
+
+    /**
+     * Creates a BasicLocker with the noWait and noAPIReadLock arguments.
+     */
+    protected BasicLocker(EnvironmentImpl env,
+			  boolean noWait,
+			  boolean noAPIReadLock)
+        throws DatabaseException {
+
+        super(env,
+              false, // readUncommittedDefault
+              noWait,
+	      noAPIReadLock,
+              0);    // mandatedId
+    }
+
+    public static BasicLocker createBasicLocker(EnvironmentImpl env,
+						boolean noWait,
+						boolean noAPIReadLock)
+        throws DatabaseException {
+
+	BasicLocker ret = null;
+	try {
+	    ret = new BasicLocker(env, noWait, noAPIReadLock);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    if (ret != null) {
+		ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
+
+    /**
+     * BasicLockers always have a fixed id, because they are never used for
+     * recovery.
+     */
+    protected long generateId(TxnManager txnManager,
+                              long ignore /* mandatedId */) {
+        return TxnManager.NULL_TXN_ID;
+    }
+
+    protected void checkState(boolean ignoreCalledByAbort)
+        throws DatabaseException {
+        /* Do nothing. */
+    }
+
+    /**
+     * @see Locker#lockInternal
+     * @Override
+     */
+    LockResult lockInternal(long nodeId,
+                            LockType lockType,
+                            boolean noWait,
+                            DatabaseImpl database)
+        throws DatabaseException {
+
+	/* Does nothing in BasicLocker. synchronized is for posterity. */
+	synchronized (this) {
+	    checkState(false);
+	}
+
+	long timeout = 0;
+        boolean useNoWait = noWait || defaultNoWait;
+        if (!useNoWait) {
+            synchronized (this) {
+                timeout = getLockTimeout();
+            }
+        }
+
+        /* Ask for the lock. */
+        LockGrantType grant = lockManager.lock
+            (nodeId, this, lockType, timeout, useNoWait, database);
+
+        return new LockResult(grant, null);
+    }
+
+    /**
+     * Get the locker that write-owns the lock on this node. Return null if
+     * no write owner is found.
+     */
+    public Locker getWriteOwnerLocker(long nodeId)
+        throws DatabaseException {
+
+        return lockManager.getWriteOwnerLocker(Long.valueOf(nodeId));
+    }
+
+    /**
+     * Is never transactional.
+     */
+    public boolean isTransactional() {
+        return false;
+    }
+
+    /**
+     * Is never serializable isolation.
+     */
+    public boolean isSerializableIsolation() {
+        return false;
+    }
+
+    /**
+     * Is never read-committed isolation.
+     */
+    public boolean isReadCommittedIsolation() {
+        return false;
+    }
+
+    /**
+     * No transactional locker is available.
+     */
+    public Txn getTxnLocker() {
+        return null;
+    }
+
+    /**
+     * Throws UnsupportedOperationException unconditionally.
+     *
+     * If we were to create a new BasicLocker here, it would not share locks
+     * with this locker, which violates the definition of this method.  This
+     * method is not currently called in direct uses of BasicLocker and is
+     * overridden by subclasses where it is allowed (e.g., ThreadLocker and
+     * ReadCommittedLocker).
+     */
+    public Locker newNonTxnLocker()
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Releases all locks, since all locks held by this locker are
+     * non-transactional.
+     */
+    public void releaseNonTxnLocks()
+        throws DatabaseException {
+
+        /*
+         * Don't remove locks from txn's lock collection until iteration is
+         * done, lest we get a ConcurrentModificationException during deadlock
+	 * graph "display".  [#9544]
+         */
+        if (ownedLock != null) {
+            lockManager.release(ownedLock, this);
+            ownedLock = null;
+        }
+        if (ownedLockSet != null) {
+            Iterator<Long> iter = ownedLockSet.iterator();
+            while (iter.hasNext()) {
+                Long nid = iter.next();
+                lockManager.release(nid, this);
+            }
+
+            /* Now clear lock collection. */
+            ownedLockSet.clear();
+        }
+
+        /* Unload delete info, but don't wake up the compressor. */
+        synchronized (this) {
+            if ((deleteInfo != null) &&
+		(deleteInfo.size() > 0)) {
+                envImpl.addToCompressorQueue(deleteInfo.values(),
+                                             false); // no wakeup
+                deleteInfo.clear();
+            }
+        }
+    }
+
+    /**
+     * Release locks and close the cursor at the end of the operation.
+     */
+    public void nonTxnOperationEnd()
+        throws DatabaseException {
+
+        operationEnd(true);
+    }
+
+    /**
+     * Release locks and close the cursor at the end of the operation.
+     */
+    public void operationEnd(boolean operationOK)
+        throws DatabaseException {
+
+        releaseNonTxnLocks();
+
+        /* Close this Locker. */
+        close();
+    }
+
+    /**
+     * Transfer any MapLN locks to the db handle.
+     */
+    public void setHandleLockOwner(boolean operationOK,
+                                   Database dbHandle,
+                                   boolean dbIsClosing)
+	throws DatabaseException {
+
+        if (dbHandle != null) {
+            if (operationOK && !dbIsClosing) {
+                transferHandleLockToHandle(dbHandle);
+            } else {
+
+                /*
+                 * Release DB if there is a failure.  This is done by Txn abort
+                 * by calling Database.invalidate, but for a non-transactional
+                 * locker must be done here.  [#13415]
+                 */
+                envImpl.getDbTree().
+                    releaseDb(DbInternal.dbGetDatabaseImpl(dbHandle));
+            }
+            unregisterHandle(dbHandle);
+        }
+    }
+
+    /**
+     * This txn doesn't store cursors.
+     */
+    public void registerCursor(CursorImpl cursor)
+	throws DatabaseException {
+    }
+
+    /**
+     * This txn doesn't store cursors.
+     */
+    public void unRegisterCursor(CursorImpl cursor)
+	throws DatabaseException {
+    }
+
+    /*
+     * Transactional methods are all no-oped.
+     */
+
+    /**
+     * @return DbLsn.NULL_LSN always; this locker is non-transactional.
+     */
+    public long getAbortLsn(long nodeId)
+        throws DatabaseException {
+
+        return DbLsn.NULL_LSN;
+    }
+
+    /**
+     * @return a dummy WriteLockInfo for this node.
+     */
+    public WriteLockInfo getWriteLockInfo(long nodeId)
+	throws DatabaseException {
+
+	return WriteLockInfo.basicWriteLockInfo;
+    }
+
+    public void markDeleteAtTxnEnd(DatabaseImpl db, boolean deleteAtCommit)
+        throws DatabaseException {
+
+        if (deleteAtCommit) {
+            /* releaseDb will be called by startAndFinishDelete. */
+            db.startAndFinishDelete();
+        } else {
+            envImpl.getDbTree().releaseDb(db);
+        }
+    }
+
+    /**
+     * Add a lock to set owned by this transaction.
+     */
+    protected void addLock(Long nodeId,
+                           LockType type,
+                           LockGrantType grantStatus)
+        throws DatabaseException {
+
+        if ((ownedLock != null &&
+             ownedLock.equals(nodeId)) ||
+            (ownedLockSet != null &&
+             ownedLockSet.contains(nodeId))) {
+            return; // Already owned
+        }
+        if (ownedLock == null) {
+            ownedLock = nodeId;
+        } else {
+            if (ownedLockSet == null) {
+                ownedLockSet = new HashSet<Long>();
+            }
+            ownedLockSet.add(nodeId);
+        }
+    }
+
+    /**
+     * Remove a lock from the set owned by this txn.
+     */
+    void removeLock(long nodeId)
+        throws DatabaseException {
+
+        if (ownedLock != null &&
+	    ownedLock == nodeId) {
+            ownedLock = null;
+        } else if (ownedLockSet != null) {
+            ownedLockSet.remove(nodeId);
+        }
+    }
+
+    /**
+     * Always false for this txn.
+     */
+    public boolean createdNode(long nodeId)
+        throws DatabaseException {
+
+        return false;
+    }
+
+    /**
+     * A lock is being demoted. Move it from the write collection into the read
+     * collection.
+     */
+    void moveWriteToReadLock(long nodeId, Lock lock) {
+    }
+
+    /**
+     * Collects lock stats for this locker into the given LockStats.
+     */
+    public LockStats collectStats(LockStats stats)
+        throws DatabaseException {
+
+	if (ownedLock != null) {
+	    Lock l = lockManager.lookupLock(ownedLock);
+	    if (l != null) {
+		if (l.isOwnedWriteLock(this)) {
+		    stats.setNWriteLocks(stats.getNWriteLocks() + 1);
+		} else {
+		    stats.setNReadLocks(stats.getNReadLocks() + 1);
+		}
+	    }
+	}
+        if (ownedLockSet != null) {
+            Iterator<Long> iter = ownedLockSet.iterator();
+
+            while (iter.hasNext()) {
+                Long nid = iter.next();
+		Lock l = lockManager.lookupLock(nid);
+		if (l != null) {
+		    if (l.isOwnedWriteLock(this)) {
+			stats.setNWriteLocks(stats.getNWriteLocks() + 1);
+		    } else {
+			stats.setNReadLocks(stats.getNReadLocks() + 1);
+		    }
+		}
+            }
+        }
+        return stats;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/BuddyLocker.java b/src/com/sleepycat/je/txn/BuddyLocker.java
new file mode 100644
index 0000000000000000000000000000000000000000..a80c2afc64308dc74dd0e80e5d2fa1cf87a51832
--- /dev/null
+++ b/src/com/sleepycat/je/txn/BuddyLocker.java
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BuddyLocker.java,v 1.12.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Extends BasicLocker to share locks with another specific locker.
+ *
+ * <p>In general, a BuddyLocker can be used whenever the primary (API) locker
+ * is in use, and we need to lock a node and release that lock before the
+ * primary locker transaction ends.  In other words, for this particular lock
+ * we don't want to use two-phase locking.  To accomplish that we use a
+ * separate BuddyLocker instance to hold the lock, while sharing locks with the
+ * primary locker.  The BuddyLocker can be closed to release this particular
+ * lock, without releasing the other locks held by the primary locker.</p>
+ *
+ * <p>In particular, a BuddyLocker is used when acquiring a RANGE_INSERT lock.
+ * RANGE_INSERT only needs to be held until the point we have inserted the new
+ * node into the BIN.  A separate locker is therefore used so we can release
+ * that lock separately when the insertion into the BIN is complete.  But the
+ * RANGE_INSERT lock must not conflict with locks held by the primary locker.
+ * So a BuddyLocker is used that shares locks with the primary locker.</p>
+ */
+public class BuddyLocker extends BasicLocker {
+
+    private Locker buddy;
+
+    /**
+     * Creates a BuddyLocker.
+     */
+    protected BuddyLocker(EnvironmentImpl env, Locker buddy)
+        throws DatabaseException {
+
+        super(env);
+        this.buddy = buddy;
+    }
+
+    public static BuddyLocker createBuddyLocker(EnvironmentImpl env,
+						Locker buddy)
+        throws DatabaseException {
+
+	BuddyLocker ret = null;
+	try {
+	    ret = new BuddyLocker(env, buddy);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    if (ret != null) {
+		ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
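+
+    /*
+     * Hypothetical usage sketch (illustrative only):
+     *
+     *   BuddyLocker buddy =
+     *       BuddyLocker.createBuddyLocker(envImpl, primaryLocker);
+     *   try {
+     *       // take a short-lived lock (e.g. RANGE_INSERT) via buddy;
+     *       // it does not conflict with locks held by primaryLocker
+     *   } finally {
+     *       buddy.operationEnd(true); // releases only buddy's own locks
+     *   }
+     */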
+
+    /**
+     * Returns the buddy locker.
+     */
+    Locker getBuddy() {
+        return buddy;
+    }
+
+    /**
+     * Forwards this call to the buddy locker.  This object itself is never
+     * transactional but the buddy may be.
+     */
+    @Override
+    public Txn getTxnLocker() {
+        return buddy.getTxnLocker();
+    }
+
+    /**
+     * Forwards this call to the base class and to the buddy locker.
+     */
+    @Override
+    public void releaseNonTxnLocks()
+        throws DatabaseException {
+
+        super.releaseNonTxnLocks();
+        buddy.releaseNonTxnLocks();
+    }
+
+    /**
+     * Returns whether this locker can share locks with the given locker.
+     */
+    @Override
+    public boolean sharesLocksWith(Locker other) {
+
+        if (super.sharesLocksWith(other)) {
+            return true;
+        } else {
+            return buddy == other;
+        }
+    }
+
+    /**
+     * Returns the lock timeout of the buddy locker, since this locker has no
+     * independent timeout.
+     */
+    @Override
+    public long getLockTimeout() {
+        return buddy.getLockTimeout();
+    }
+
+    /**
+     * Returns the transaction timeout of the buddy locker, since this locker
+     * has no independent timeout.
+     */
+    @Override
+    public long getTxnTimeout() {
+        return buddy.getTxnTimeout();
+    }
+
+    /**
+     * Sets the lock timeout of the buddy locker, since this locker has no
+     * independent timeout.
+     */
+    @Override
+    public void setLockTimeout(long timeout) {
+        buddy.setLockTimeout(timeout);
+    }
+
+    /**
+     * Sets the transaction timeout of the buddy locker, since this locker has
+     * no independent timeout.
+     */
+    @Override
+    public void setTxnTimeout(long timeout) {
+        buddy.setTxnTimeout(timeout);
+    }
+
+    /**
+     * Returns whether the buddy locker is timed out, since this locker has no
+     * independent timeout.
+     */
+    @Override
+    public boolean isTimedOut()
+        throws DatabaseException {
+
+        return buddy.isTimedOut();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/DummyLockManager.java b/src/com/sleepycat/je/txn/DummyLockManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..d75ea44e3a3f875f9bb096d1e0b1059fa36e3ebd
--- /dev/null
+++ b/src/com/sleepycat/je/txn/DummyLockManager.java
@@ -0,0 +1,235 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DummyLockManager.java,v 1.11.2.3 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * DummyLockManager performs no locking for non-transactional lockers.  It is
+ * used when locking is disabled, i.e. in Data Store (DS) mode.
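+ *
+ * <p>A sketch of the delegation rule used by the locking methods below
+ * (read-only queries simply delegate unconditionally):</p>
+ * <pre>
+ *    if (locker.isTransactional()) {
+ *        // forward the call to the superior (real) lock manager
+ *    } else {
+ *        // no-op: grant immediately or return a benign default
+ *    }
+ * </pre>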
+ */
+public class DummyLockManager extends LockManager {
+
+    /*
+     * Even though a user may specify isNoLocking for performance reasons, JE
+     * will sometimes still use transactions internally (e.g. to create
+     * internal databases).  So we cannot completely eliminate the Lock
+     * Manager.  Instead, when isNoLocking is specified, we keep a
+     * transactional Lock Manager around for use by transactional Lockers, and
+     * delegate to it as needed.
+     * [#16453]
+     */
+    private LockManager superiorLockManager;
+
+    public DummyLockManager(EnvironmentImpl envImpl,
+                            LockManager superiorLockManager)
+    	throws DatabaseException {
+
+        super(envImpl);
+        this.superiorLockManager = superiorLockManager;
+    }
+
+    /**
+     * @see LockManager#lookupLock
+     */
+    protected Lock lookupLock(Long nodeId)
+        throws DatabaseException {
+
+        return superiorLockManager.lookupLock(nodeId);
+    }
+
+    /**
+     * @see LockManager#attemptLock
+     */
+    protected LockAttemptResult attemptLock(Long nodeId,
+                                            Locker locker,
+                                            LockType type,
+                                            boolean nonBlockingRequest)
+        throws DatabaseException {
+
+        if (locker.isTransactional()) {
+            return superiorLockManager.attemptLock
+                (nodeId, locker, type, nonBlockingRequest);
+        } else {
+            return new LockAttemptResult(null, LockGrantType.NEW, true);
+        }
+    }
+
+    /**
+     * @see LockManager#makeTimeoutMsg
+     */
+    protected DeadlockException makeTimeoutMsg(String lockOrTxn,
+					       Locker locker,
+					       long nodeId,
+					       LockType type,
+					       LockGrantType grantType,
+					       Lock useLock,
+					       long timeout,
+					       long start,
+					       long now,
+					       DatabaseImpl database)
+        throws DatabaseException {
+
+        if (locker.isTransactional()) {
+            return superiorLockManager.makeTimeoutMsg
+                (lockOrTxn, locker, nodeId, type, grantType, useLock,
+                 timeout, start, now, database);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * @see LockManager#releaseAndFindNotifyTargets
+     */
+    protected Set<Locker> releaseAndFindNotifyTargets(long nodeId,
+                                                      Locker locker)
+        throws DatabaseException {
+
+        if (locker.isTransactional()) {
+            return superiorLockManager.
+                releaseAndFindNotifyTargets(nodeId, locker);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * @see LockManager#transfer
+     */
+    void transfer(long nodeId,
+                  Locker owningLocker,
+                  Locker destLocker,
+                  boolean demoteToRead)
+        throws DatabaseException {
+
+        if (owningLocker.isTransactional()) {
+            superiorLockManager.transfer
+                (nodeId, owningLocker, destLocker, demoteToRead);
+        }
+    }
+
+    /**
+     * @see LockManager#transferMultiple
+     */
+    void transferMultiple(long nodeId,
+                          Locker owningLocker,
+                          Locker[] destLockers)
+        throws DatabaseException {
+
+        if (owningLocker.isTransactional()) {
+            superiorLockManager.transferMultiple
+                (nodeId, owningLocker, destLockers);
+        }
+    }
+
+    /**
+     * @see LockManager#demote
+     */
+    void demote(long nodeId, Locker locker)
+        throws DatabaseException {
+
+        if (locker.isTransactional()) {
+            superiorLockManager.demote(nodeId, locker);
+        }
+    }
+
+    /**
+     * @see LockManager#isLocked
+     */
+    boolean isLocked(Long nodeId)
+        throws DatabaseException {
+
+	return superiorLockManager.isLocked(nodeId);
+    }
+
+    /**
+     * @see LockManager#isOwner
+     */
+    boolean isOwner(Long nodeId, Locker locker, LockType type)
+        throws DatabaseException {
+
+	return superiorLockManager.isOwner(nodeId, locker, type);
+    }
+
+    /**
+     * @see LockManager#isWaiter
+     */
+    boolean isWaiter(Long nodeId, Locker locker)
+        throws DatabaseException {
+
+	return superiorLockManager.isWaiter(nodeId, locker);
+    }
+
+    /**
+     * @see LockManager#nWaiters
+     */
+    int nWaiters(Long nodeId)
+        throws DatabaseException {
+
+	return superiorLockManager.nWaiters(nodeId);
+    }
+
+    /**
+     * @see LockManager#nOwners
+     */
+    int nOwners(Long nodeId)
+        throws DatabaseException {
+
+	return superiorLockManager.nOwners(nodeId);
+    }
+
+    /**
+     * @see LockManager#getWriteOwnerLocker
+     */
+    Locker getWriteOwnerLocker(Long nodeId)
+        throws DatabaseException {
+
+	return superiorLockManager.getWriteOwnerLocker(nodeId);
+    }
+
+    /**
+     * @see LockManager#validateOwnership
+     */
+    protected boolean validateOwnership(Long nodeId,
+                                        Locker locker,
+                                        LockType type,
+                                        boolean flushFromWaiters,
+					MemoryBudget mb)
+        throws DatabaseException {
+
+        if (locker.isTransactional()) {
+            return superiorLockManager.validateOwnership
+                (nodeId, locker, type, flushFromWaiters, mb);
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * @see LockManager#dumpLockTable
+     */
+    protected void dumpLockTable(LockStats stats)
+        throws DatabaseException {
+
+        superiorLockManager.dumpLockTable(stats);
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LatchedLockManager.java b/src/com/sleepycat/je/txn/LatchedLockManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..dce79f4da246f3da46f06fec951d21353347a0fa
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LatchedLockManager.java
@@ -0,0 +1,302 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchedLockManager.java,v 1.20.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Set;
+
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.latch.Latch;
+
+/**
+ * LatchedLockManager uses latches to implement its critical sections.
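+ *
+ * <p>Every method follows the same pattern, sketched here (each body calls
+ * the corresponding ...Internal method on the base class):</p>
+ * <pre>
+ *    int lockTableIndex = getLockTableIndex(nodeId);
+ *    Latch latch = lockTableLatches[lockTableIndex];
+ *    latch.acquire();
+ *    try {
+ *        // operate on the lock table for this index
+ *    } finally {
+ *        latch.release();
+ *    }
+ * </pre>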
+ */
+public class LatchedLockManager extends LockManager {
+
+    public LatchedLockManager(EnvironmentImpl envImpl)
+    	throws DatabaseException {
+
+        super(envImpl);
+    }
+
+    /**
+     * @see LockManager#lookupLock
+     */
+    protected Lock lookupLock(Long nodeId)
+	throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+	latch.acquire();
+	try {
+	    return lookupLockInternal(nodeId, lockTableIndex);
+	} finally {
+	    latch.release();
+	}
+    }
+
+    /**
+     * @see LockManager#attemptLock
+     */
+    protected LockAttemptResult attemptLock(Long nodeId,
+                                            Locker locker,
+                                            LockType type,
+                                            boolean nonBlockingRequest)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return attemptLockInternal(nodeId, locker, type,
+                                       nonBlockingRequest, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#makeTimeoutMsg
+     */
+    protected DeadlockException makeTimeoutMsg(String lockOrTxn,
+					       Locker locker,
+					       long nodeId,
+					       LockType type,
+					       LockGrantType grantType,
+					       Lock useLock,
+					       long timeout,
+					       long start,
+					       long now,
+					       DatabaseImpl database)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return makeTimeoutMsgInternal(lockOrTxn, locker,
+                                          nodeId, type, grantType,
+                                          useLock, timeout, start, now,
+					  database);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#releaseAndFindNotifyTargets
+     */
+    protected Set<Locker> releaseAndFindNotifyTargets(long nodeId, 
+                                                      Locker locker)
+        throws DatabaseException {
+
+        int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return releaseAndFindNotifyTargetsInternal
+		(nodeId, locker, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#transfer
+     */
+    void transfer(long nodeId,
+                  Locker owningLocker,
+                  Locker destLocker,
+                  boolean demoteToRead)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            transferInternal(nodeId, owningLocker, destLocker,
+			     demoteToRead, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#transferMultiple
+     */
+    void transferMultiple(long nodeId,
+                          Locker owningLocker,
+                          Locker[] destLockers)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            transferMultipleInternal(nodeId, owningLocker,
+				     destLockers, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#demote
+     */
+    void demote(long nodeId, Locker locker)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            demoteInternal(nodeId, locker, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#isLocked
+     */
+    boolean isLocked(Long nodeId)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return isLockedInternal(nodeId, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#isOwner
+     */
+    boolean isOwner(Long nodeId, Locker locker, LockType type)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return isOwnerInternal(nodeId, locker, type, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#isWaiter
+     */
+    boolean isWaiter(Long nodeId, Locker locker)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return isWaiterInternal(nodeId, locker, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#nWaiters
+     */
+    int nWaiters(Long nodeId)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return nWaitersInternal(nodeId, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#nOwners
+     */
+    int nOwners(Long nodeId)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return nOwnersInternal(nodeId, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#getWriteOwnerLocker
+     */
+    Locker getWriteOwnerLocker(Long nodeId)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return getWriteOwnerLockerInternal(nodeId, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#validateOwnership
+     */
+    protected boolean validateOwnership(Long nodeId,
+                                        Locker locker,
+                                        LockType type,
+                                        boolean flushFromWaiters,
+					MemoryBudget mb)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	Latch latch = lockTableLatches[lockTableIndex];
+        latch.acquire();
+        try {
+            return validateOwnershipInternal
+		(nodeId, locker, type, flushFromWaiters, mb, lockTableIndex);
+        } finally {
+            latch.release();
+        }
+    }
+
+    /**
+     * @see LockManager#dumpLockTable
+     */
+    protected void dumpLockTable(LockStats stats)
+        throws DatabaseException {
+
+	for (int i = 0; i < nLockTables; i++) {
+	    lockTableLatches[i].acquire();
+	    try {
+		dumpLockTableInternal(stats, i);
+	    } finally {
+		lockTableLatches[i].release();
+	    }
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/txn/Lock.java b/src/com/sleepycat/je/txn/Lock.java
new file mode 100644
index 0000000000000000000000000000000000000000..7a23939e40451aeefa1129a63516f4db32fdb33e
--- /dev/null
+++ b/src/com/sleepycat/je/txn/Lock.java
@@ -0,0 +1,128 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Lock.java,v 1.73.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.List;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * A Lock embodies the lock state of a NodeId.  It includes a set of owners and
+ * a list of waiters.
+ */
+interface Lock {
+
+    /**
+     * Get a list of waiters for debugging and error messages.
+     */
+    public List<LockInfo> getWaitersListClone();
+
+    /**
+     * Remove this locker from the waiter list.
+     */
+    public void flushWaiter(Locker locker,
+                            MemoryBudget mb,
+                            int lockTableIndex);
+
+    /**
+     * Get a new Set of the owners.
+     */
+    public Set<LockInfo> getOwnersClone();
+
+    /**
+     * Return true if locker is an owner of this Lock for lockType,
+     * false otherwise.
+     *
+     * This method is only used by unit tests.
+     */
+    public boolean isOwner(Locker locker, LockType lockType);
+
+    /**
+     * Return true if locker is an owner of this Lock and this is a write
+     * lock.
+     */
+    public boolean isOwnedWriteLock(Locker locker);
+
+    /**
+     * Return true if locker is a waiter on this Lock.
+     *
+     * This method is only used by unit tests.
+     */
+    public boolean isWaiter(Locker locker);
+
+    public int nWaiters();
+
+    public int nOwners();
+
+    /**
+     * Attempts to acquire the lock and returns the LockGrantType.
+     *
+     * Assumes we hold the lockTableLatch when entering this method.
+     */
+    public LockAttemptResult lock(LockType requestType,
+				  Locker locker,
+				  boolean nonBlockingRequest,
+				  MemoryBudget mb,
+				  int lockTableIndex)
+	throws DatabaseException;
+
+    /**
+     * Releases a lock and moves the next waiter(s) to the owners.
+     * @return
+     *  - null if we were not the owner,
+     *  - a non-empty set if owners should be notified after releasing,
+     *  - an empty set if no notification is required.
+     */
+    public Set<Locker> release(Locker locker, 
+                               MemoryBudget mb, 
+                               int lockTableIndex);
+
+    /**
+     * Downgrade a write lock to a read lock.
+     */
+    public void demote(Locker locker);
+
+    /**
+     * Transfer a lock from one transaction to another. Make sure that this
+     * destination locker is only present as a single reader or writer.
+     */
+    public Lock transfer(Long nodeId,
+			 Locker currentLocker,
+			 Locker destLocker,
+			 MemoryBudget mb,
+			 int lockTableIndex)
+        throws DatabaseException;
+
+    /**
+     * Transfer a lock from one transaction to many others.  Only really
+     * needed for the case where a write handle lock is being transferred to
+     * multiple read handles.
+     */
+    public Lock transferMultiple(Long nodeId,
+				 Locker currentLocker,
+				 Locker[] destLockers,
+				 MemoryBudget mb,
+				 int lockTableIndex)
+        throws DatabaseException;
+
+    /**
+     * Return the locker that has a write ownership on this lock. If no
+     * write owner exists, return null.
+     */
+    public Locker getWriteOwnerLocker();
+
+    public boolean isThin();
+
+    /**
+     * Debug dumper.
+     */
+    public String toString();
+}
diff --git a/src/com/sleepycat/je/txn/LockAttemptResult.java b/src/com/sleepycat/je/txn/LockAttemptResult.java
new file mode 100644
index 0000000000000000000000000000000000000000..c416e81b4e2e5dc6559cabee0ff29f60f11e79b3
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockAttemptResult.java
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockAttemptResult.java,v 1.2.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * This is just a struct to hold a multi-value return.
+ */
+class LockAttemptResult {
+    boolean success;
+    Lock useLock;
+    LockGrantType lockGrant;
+
+    LockAttemptResult(Lock useLock,
+		      LockGrantType lockGrant,
+		      boolean success) {
+
+	this.useLock = useLock;
+	this.lockGrant = lockGrant;
+	this.success = success;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockConflict.java b/src/com/sleepycat/je/txn/LockConflict.java
new file mode 100644
index 0000000000000000000000000000000000000000..7ff2750881aa3f00ad9da5d3c91a46097fcaafad
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockConflict.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockConflict.java,v 1.7.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * LockConflict is a type-safe enumeration of lock conflict types.  Methods on
+ * LockConflict objects are used to determine whether a conflict exists and, if
+ * so, how it should be handled.
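+ *
+ * <p>The three conflict types map onto the two accessors as follows:</p>
+ * <pre>
+ *               getAllowed()   getRestart()
+ *    ALLOW      true           false
+ *    BLOCK      false          false
+ *    RESTART    false          true
+ * </pre>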
+ */
+class LockConflict {
+
+    static final LockConflict ALLOW   = new LockConflict(true, false);
+    static final LockConflict BLOCK   = new LockConflict(false, false);
+    static final LockConflict RESTART = new LockConflict(false, true);
+
+    private boolean allowed;
+    private boolean restart;
+
+    /**
+     * No conflict types can be defined outside this class.
+     */
+    private LockConflict(boolean allowed, boolean restart) {
+        this.allowed = allowed;
+        this.restart = restart;
+    }
+
+    /**
+     * This method is called first to determine whether the lock is allowed.
+     * If true, there is no conflict.  If false, there is a conflict and the
+     * requester must wait for or be denied the lock, or (if getRestart returns
+     * true) an exception should be thrown to cause the requester's operation
+     * to be restarted.
+     */
+    boolean getAllowed() {
+        return allowed;
+    }
+
+    /**
+     * This method is called when getAllowed returns false to determine whether
+     * an exception should be thrown to cause the requester's operation to be
+     * restarted.  If getAllowed returns false and this method returns false,
+     * the requester should wait for or be denied the lock, depending on the
+     * request mode.  If getAllowed returns true, this method will always
+     * return false.
+     */
+    boolean getRestart() {
+        return restart;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockGrantType.java b/src/com/sleepycat/je/txn/LockGrantType.java
new file mode 100644
index 0000000000000000000000000000000000000000..c6e376faa5609d298cae6f8e1eed1f6b5daab260
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockGrantType.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockGrantType.java,v 1.21.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * LockGrantType is an enumeration of the possible results of a lock attempt.
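+ *
+ * <p>A rough grouping of the grant types (inferred from their names and
+ * their use in LockImpl):</p>
+ * <pre>
+ *    Granted immediately:   NEW, PROMOTION, EXISTING, NONE_NEEDED
+ *    Requester must wait:   WAIT_NEW, WAIT_PROMOTION, WAIT_RESTART
+ *    Non-blocking refusal:  DENIED
+ * </pre>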
+ */
+public class LockGrantType {
+    private String name;
+
+    /* Grant types */
+    public static final LockGrantType NEW =
+	new LockGrantType("NEW");
+    public static final LockGrantType WAIT_NEW =
+	new LockGrantType("WAIT_NEW");
+    public static final LockGrantType PROMOTION =
+	new LockGrantType("PROMOTION");
+    public static final LockGrantType WAIT_PROMOTION =
+	new LockGrantType("WAIT_PROMOTION");
+    public static final LockGrantType EXISTING =
+	new LockGrantType("EXISTING");
+    public static final LockGrantType DENIED =
+	new LockGrantType("DENIED");
+    public static final LockGrantType WAIT_RESTART =
+	new LockGrantType("WAIT_RESTART");
+    public static final LockGrantType NONE_NEEDED =
+	new LockGrantType("NONE_NEEDED");
+
+    /* No grant types can be defined outside this class. */
+    private LockGrantType(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockImpl.java b/src/com/sleepycat/je/txn/LockImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..0aca9716728ad604e79dca68f8ff212d1f3f24fd
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockImpl.java
@@ -0,0 +1,1007 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockImpl.java,v 1.8.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * A Lock embodies the lock state of a NodeId.  It includes a set of owners and
+ * a list of waiters.
+ */
+public // for Sizeof
+class LockImpl implements Lock {
+    private static final int REMOVE_LOCKINFO_OVERHEAD =
+        0 - MemoryBudget.LOCKINFO_OVERHEAD;
+
+    /**
+     * A single locker always appears only once in the logical set of owners.
+     * The owners set is always in one of the following states.
+     *
+     * 1- Empty
+     * 2- A single writer
+     * 3- One or more readers
+     * 4- Multiple writers or a mix of readers and writers, all for
+     * txns which share locks (all ThreadLocker instances for the same
+     * thread)
+     *
+     * Both ownerSet and waiterList are collections of LockInfo.  Since the
+     * common case is that there is only one owner or waiter, we have added an
+     * optimization to avoid the cost of collections.  The firstOwner and
+     * firstWaiter fields are used for the first owner or waiter of the lock,
+     * and the corresponding collection is instantiated and used only if more
+     * owners or waiters arrive.
+     *
+     * In terms of memory accounting, we count up the cost of each added or
+     * removed LockInfo, but not the cost of the HashSet/List entry
+     * overhead. We could do the latter for more precise accounting.
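+     *
+     * An illustrative layout (a summary of addOwner/flushOwner below, not a
+     * separate invariant):
+     *
+     *   0 owners:  firstOwner == null, ownerSet null or empty
+     *   1 owner:   normally just firstOwner; ownerSet unused
+     *   N owners:  firstOwner plus ownerSet; firstOwner may become null
+     *              after a removal while ownerSet still has entries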
+     */
+    private LockInfo firstOwner;
+    private Set<LockInfo> ownerSet;
+    private LockInfo firstWaiter;
+    private List<LockInfo> waiterList;
+
+    /**
+     * Create a Lock.
+     */
+    public LockImpl() {
+    }
+
+    /* Used when mutating from a ThinLockImpl to a LockImpl. */
+    LockImpl(LockInfo firstOwner) {
+        this.firstOwner = firstOwner;
+    }
+
+    /**
+     * The first waiter goes into the firstWaiter member variable.  Once the
+     * waiterList is created, all appended waiters go into waiterList, even
+     * after firstWaiter goes away and leaves that field null, so that the
+     * overall waiter order is preserved.
+     */
+    private void addWaiterToEndOfList(LockInfo waiter,
+                                      MemoryBudget mb,
+                                      int lockTableIndex) {
+        /* Careful: order important! */
+        if (waiterList == null) {
+            if (firstWaiter == null) {
+                firstWaiter = waiter;
+            } else {
+                waiterList = new ArrayList<LockInfo>();
+                waiterList.add(waiter);
+            }
+        } else {
+            waiterList.add(waiter);
+        }
+        mb.updateLockMemoryUsage
+            (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex);
+    }
+
+    /**
+     * Add this waiter to the front of the list.
+     */
+    private void addWaiterToHeadOfList(LockInfo waiter,
+                                       MemoryBudget mb,
+                                       int lockTableIndex) {
+        /* Shuffle the current first waiter down a slot. */
+        if (firstWaiter != null) {
+            if (waiterList == null) {
+                waiterList = new ArrayList<LockInfo>();
+            }
+            waiterList.add(0, firstWaiter);
+        }
+
+        firstWaiter = waiter;
+        mb.updateLockMemoryUsage
+            (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex);
+    }
+
+    /**
+     * Get a list of waiters for debugging and error messages.
+     */
+    public List<LockInfo> getWaitersListClone() {
+        List<LockInfo> dumpWaiters = new ArrayList<LockInfo>();
+        if (firstWaiter != null) {
+            dumpWaiters.add(firstWaiter);
+        }
+
+        if (waiterList != null) {
+            dumpWaiters.addAll(waiterList);
+        }
+
+        return dumpWaiters;
+    }
+
+    /**
+     * Remove this locker from the waiter list.
+     */
+    public void flushWaiter(Locker locker,
+                            MemoryBudget mb,
+                            int lockTableIndex) {
+        if ((firstWaiter != null) && (firstWaiter.getLocker() == locker)) {
+            firstWaiter = null;
+            mb.updateLockMemoryUsage
+                (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex);
+        } else if (waiterList != null) {
+            Iterator<LockInfo> iter = waiterList.iterator();
+            while (iter.hasNext()) {
+                LockInfo info = iter.next();
+                if (info.getLocker() == locker) {
+                    iter.remove();
+                    mb.updateLockMemoryUsage
+                        (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex);
+                    return;
+                }
+            }
+        }
+    }
+
+    private void addOwner(LockInfo newLock,
+                          MemoryBudget mb,
+                          int lockTableIndex) {
+        if (firstOwner == null) {
+            firstOwner = newLock;
+        } else {
+            if (ownerSet == null) {
+                ownerSet = new HashSet<LockInfo>();
+            }
+            ownerSet.add(newLock);
+        }
+        mb.updateLockMemoryUsage
+            (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex);
+    }
+
+    /**
+     * Get a new Set of the owners.
+     */
+    public Set<LockInfo> getOwnersClone() {
+
+        /* No need to update memory usage, the returned Set is transient. */
+        Set<LockInfo> owners;
+        if (ownerSet != null) {
+            owners = new HashSet<LockInfo>(ownerSet);
+        } else {
+            owners = new HashSet<LockInfo>();
+        }
+        if (firstOwner != null) {
+            owners.add(firstOwner);
+        }
+        return owners;
+    }
+
+    /**
+     * Remove this LockInfo from the owner set and clear its memory budget.
+     */
+    private boolean flushOwner(LockInfo oldOwner,
+                               MemoryBudget mb,
+                               int lockTableIndex) {
+        boolean removed = false;
+        if (oldOwner != null) {
+            if (firstOwner == oldOwner) {
+                firstOwner = null;
+                removed = true;
+            } else if (ownerSet != null) {
+                removed = ownerSet.remove(oldOwner);
+            }
+        }
+
+        if (removed) {
+            mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, lockTableIndex);
+        }
+        return removed;
+    }
+
+    /**
+     * Remove this locker from the owner set.
+     */
+    private LockInfo flushOwner(Locker locker,
+                                MemoryBudget mb,
+                                int lockTableIndex) {
+        LockInfo flushedInfo = null;
+        if ((firstOwner != null) &&
+            (firstOwner.getLocker() == locker)) {
+            flushedInfo = firstOwner;
+            firstOwner = null;
+        } else if (ownerSet != null) {
+            Iterator<LockInfo> iter = ownerSet.iterator();
+            while (iter.hasNext()) {
+                LockInfo o = iter.next();
+                if (o.getLocker() == locker) {
+                    iter.remove();
+                    flushedInfo = o;
+                }
+            }
+        }
+        if (flushedInfo != null) {
+            mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, lockTableIndex);
+        }
+
+        return flushedInfo;
+    }
+
+    /**
+     * Returns the owner LockInfo for a locker, or null if locker is not an
+     * owner.
+     */
+    private LockInfo getOwnerLockInfo(Locker locker) {
+        if ((firstOwner != null) && (firstOwner.getLocker() == locker)) {
+            return firstOwner;
+        }
+
+        if (ownerSet != null) {
+            Iterator<LockInfo> iter = ownerSet.iterator();
+            while (iter.hasNext()) {
+                LockInfo o = iter.next();
+                if (o.getLocker() == locker) {
+                    return o;
+                }
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Return true if locker is an owner of this Lock for lockType,
+     * false otherwise.
+     *
+     * This method is only used by unit tests.
+     */
+    public boolean isOwner(Locker locker, LockType lockType) {
+        LockInfo o = getOwnerLockInfo(locker);
+        if (o != null) {
+            LockType ownedLockType = o.getLockType();
+            if (lockType == ownedLockType) {
+                return true;
+            }
+            LockUpgrade upgrade = ownedLockType.getUpgrade(lockType);
+            if (!upgrade.getPromotion()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Return true if locker is an owner of this Lock and this is a write
+     * lock.
+     */
+    public boolean isOwnedWriteLock(Locker locker) {
+        LockInfo o = getOwnerLockInfo(locker);
+        return o != null && o.getLockType().isWriteLock();
+    }
+
+    /**
+     * Return true if locker is a waiter on this Lock.
+     *
+     * This method is only used by unit tests.
+     */
+    public boolean isWaiter(Locker locker) {
+
+        if (firstWaiter != null) {
+            if (firstWaiter.getLocker() == locker) {
+                return true;
+            }
+        }
+
+        if (waiterList != null) {
+            Iterator<LockInfo> iter = waiterList.iterator();
+            while (iter.hasNext()) {
+                LockInfo info = iter.next();
+                if (info.getLocker() == locker) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    public int nWaiters() {
+        int count = 0;
+        if (firstWaiter != null) {
+            count++;
+        }
+        if (waiterList != null) {
+            count += waiterList.size();
+        }
+        return count;
+    }
+
+    public int nOwners() {
+        int count = 0;
+        if (firstOwner != null) {
+            count++;
+        }
+
+        if (ownerSet != null) {
+            count += ownerSet.size();
+        }
+        return count;
+    }
+
+    /**
+     * Attempts to acquire the lock and returns the LockGrantType.
+     *
+     * Assumes we hold the lockTableLatch when entering this method.
+     */
+    public LockAttemptResult lock(LockType requestType,
+                                  Locker locker,
+                                  boolean nonBlockingRequest,
+                                  MemoryBudget mb,
+                                  int lockTableIndex) {
+
+        assert validateRequest(locker); // intentional side effect
+
+        /* Request an ordinary lock by checking the owners list. */
+        LockInfo newLock = new LockInfo(locker, requestType);
+        LockGrantType grant =
+            tryLock(newLock, nWaiters() == 0, mb, lockTableIndex);
+
+        /* Do we have to wait for this lock? */
+        if (grant == LockGrantType.WAIT_NEW ||
+            grant == LockGrantType.WAIT_PROMOTION ||
+            grant == LockGrantType.WAIT_RESTART) {
+
+            /*
+             * If the request type can cause a restart and a restart conflict
+             * does not already exist, then we have to check the waiters list
+             * for restart conflicts.  A restart conflict must take precedence
+             * or it may be missed.
+             */
+            if (requestType.getCausesRestart() &&
+                grant != LockGrantType.WAIT_RESTART) {
+                LockInfo waiter = null;
+                Iterator<LockInfo> iter = null;
+
+                if (waiterList != null) {
+                    iter = waiterList.iterator();
+                }
+
+                if (firstWaiter != null) {
+                    waiter = firstWaiter;
+                } else if ((iter != null) && (iter.hasNext())) {
+                    waiter = iter.next();
+                }
+
+                while (waiter != null) {
+
+                    /*
+                     * Check for a restart conflict.  Ignore LockType.RESTART
+                     * in the waiter list when checking for conflicts.
+                     */
+                    Locker waiterLocker = waiter.getLocker();
+                    LockType waiterType = waiter.getLockType();
+                    if (waiterType != LockType.RESTART &&
+                        locker != waiterLocker &&
+                        !locker.sharesLocksWith(waiterLocker)) {
+                        LockConflict conflict =
+                            waiterType.getConflict(requestType);
+                        if (conflict.getRestart()) {
+                            grant = LockGrantType.WAIT_RESTART;
+                            break;
+                        }
+                    }
+
+                    /* Move to the next waiter, if it's in the list. */
+                    if ((iter != null) && (iter.hasNext())) {
+                        waiter = iter.next();
+                    } else {
+                        waiter = null;
+                    }
+                }
+            }
+
+            /* Add the waiter or deny the lock as appropriate. */
+            if (nonBlockingRequest) {
+                grant = LockGrantType.DENIED;
+            } else {
+                if (grant == LockGrantType.WAIT_PROMOTION) {
+                    addWaiterToHeadOfList(newLock, mb, lockTableIndex);
+                } else {
+                    assert grant == LockGrantType.WAIT_NEW ||
+                           grant == LockGrantType.WAIT_RESTART;
+
+                    /*
+                     * If waiting to restart, change the lock type to RESTART
+                     * to avoid granting the lock later.  We wait until the
+                     * RESTART waiter moves to the head of the waiter list to
+                     * prevent the requester from spinning and performing
+                     * repeated restarts, but we don't grant the lock.
+                     */
+                    if (grant == LockGrantType.WAIT_RESTART) {
+                        newLock.setLockType(LockType.RESTART);
+                    }
+
+                    addWaiterToEndOfList(newLock, mb, lockTableIndex);
+                }
+            }
+        }
+
+        /* Set 'success' later. */
+        return new LockAttemptResult(this, grant, false);
+    }
+
+    /**
+     * Releases a lock and moves the next waiter(s) to the owners.
+     * @return
+     * null if we were not the owner,
+     * a non-empty set if owners should be notified after releasing,
+     * an empty set if no notification is required.
+     */
+    public Set<Locker> release(Locker locker, 
+                               MemoryBudget mb, 
+                               int lockTableIndex) {
+
+        LockInfo removedLock = flushOwner(locker, mb, lockTableIndex);
+        if (removedLock == null) {
+            /* Not owner. */
+            return null;
+        }
+
+        Set<Locker> lockersToNotify = Collections.emptySet();
+
+        if (nWaiters() == 0) {
+            /* No more waiters, so no one to notify. */
+            return lockersToNotify;
+        }
+
+        /*
+         * Move the next set of waiters to the owners set. Iterate through the
+         * firstWaiter field, then the waiterList.
+         */
+        LockInfo waiter = null;
+        Iterator<LockInfo> iter = null;
+        boolean isFirstWaiter = false;
+
+        if (waiterList != null) {
+            iter = waiterList.iterator();
+        }
+
+        if (firstWaiter != null) {
+            waiter = firstWaiter;
+            isFirstWaiter = true;
+        } else if ((iter != null) && (iter.hasNext())) {
+            waiter = iter.next();
+        }
+
+        while (waiter != null) {
+            /* Make the waiter an owner if the lock can be acquired. */
+            LockType waiterType = waiter.getLockType();
+            Locker waiterLocker = waiter.getLocker();
+            LockGrantType grant;
+            if (waiterType == LockType.RESTART) {
+                /* Special case for restarts: see rangeInsertConflict. */
+                grant = rangeInsertConflict(waiterLocker) ?
+                    LockGrantType.WAIT_NEW : LockGrantType.NEW;
+            } else {
+                /* Try locking. */
+                grant = tryLock(waiter, true, mb, lockTableIndex);
+            }
+            /* Check if granted. */
+            if (grant == LockGrantType.NEW ||
+                grant == LockGrantType.EXISTING ||
+                grant == LockGrantType.PROMOTION) {
+                /* Remove it from the waiters list. */
+                if (isFirstWaiter) {
+                    firstWaiter = null;
+                } else {
+                    iter.remove();
+                }
+                /* Replace the shared immutable empty set on first addition. */
+                if (lockersToNotify.isEmpty()) {
+                    lockersToNotify = new HashSet<Locker>();
+                }
+                lockersToNotify.add(waiterLocker);
+                mb.updateLockMemoryUsage
+                    (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex);
+            } else {
+                assert grant == LockGrantType.WAIT_NEW ||
+                       grant == LockGrantType.WAIT_PROMOTION ||
+                       grant == LockGrantType.WAIT_RESTART;
+                /* Stop on first waiter that cannot be an owner. */
+                break;
+            }
+
+            /* Move to the next waiter, if it's in the list. */
+            if ((iter != null) && (iter.hasNext())) {
+                waiter = iter.next();
+                isFirstWaiter = false;
+            } else {
+                waiter = null;
+            }
+        }
+        return lockersToNotify;
+    }
+
+    /**
+     * Called from lock() to try locking a new request, and from release() to
+     * try locking a waiting request.
+     *
+     * @param newLock is the lock that is requested.
+     *
+     * @param firstWaiterInLine determines whether to grant the lock when a
+     * NEW lock can be granted, but other non-conflicting owners exist; for
+     * example, when a new READ lock is requested but READ locks are held by
+     * other owners.  This parameter should be true if the requestor is the
+     * first waiter in line (or if there are no waiters), and false otherwise.
+     *
+     * @param mb is the current memory budget.
+     *
+     * @return LockGrantType.EXISTING, NEW, PROMOTION, WAIT_RESTART, WAIT_NEW
+     * or WAIT_PROMOTION.
+     */
+    private LockGrantType tryLock(LockInfo newLock,
+                                  boolean firstWaiterInLine,
+                                  MemoryBudget mb,
+                                  int lockTableIndex) {
+
+        /* If no one owns this right now, just grab it. */
+        if (nOwners() == 0) {
+            addOwner(newLock, mb, lockTableIndex);
+            return LockGrantType.NEW;
+        }
+
+        Locker locker = newLock.getLocker();
+        LockType requestType = newLock.getLockType();
+        LockUpgrade upgrade = null;
+        LockInfo lockToUpgrade = null;
+        boolean ownerExists = false;
+        boolean ownerConflicts = false;
+
+        /*
+         * Iterate through the current owners. See if there is a current owner
+         * who has to be upgraded from read to write. Also track whether there
+         * is a conflict with another owner.
+         */
+        LockInfo owner = null;
+        Iterator<LockInfo> iter = null;
+
+        if (ownerSet != null) {
+            iter = ownerSet.iterator();
+        }
+
+        if (firstOwner != null) {
+            owner = firstOwner;
+        } else if ((iter != null) && (iter.hasNext())) {
+            owner = iter.next();
+        }
+
+        while (owner != null) {
+            Locker ownerLocker = owner.getLocker();
+            LockType ownerType = owner.getLockType();
+            if (locker == ownerLocker) {
+
+                /*
+                 * Requestor currently holds this lock: check for upgrades.
+                 * If no type change is needed, return EXISTING now to avoid
+                 * iterating further; otherwise, we need to check for conflicts
+                 * before granting the upgrade.
+                 */
+                assert (upgrade == null); // An owner should appear only once
+                upgrade = ownerType.getUpgrade(requestType);
+                if (upgrade.getUpgrade() == null) {
+                    return LockGrantType.EXISTING;
+                } else {
+                    lockToUpgrade = owner;
+                }
+            } else {
+
+                /*
+                 * Requestor does not hold this lock: check for conflicts.
+                 * If the owner shares locks with the requestor, ignore it;
+                 * otherwise, if a restart conflict exists, return it now;
+                 * otherwise, save the conflict information.
+                 */
+                if (!locker.sharesLocksWith(ownerLocker)) {
+                    LockConflict conflict = ownerType.getConflict(requestType);
+                    if (conflict.getRestart()) {
+                        return LockGrantType.WAIT_RESTART;
+                    } else {
+                        if (!conflict.getAllowed()) {
+                            ownerConflicts = true;
+                        }
+                        ownerExists = true;
+                    }
+                }
+            }
+
+            /* Move on to the next owner. */
+            if ((iter != null) && (iter.hasNext())) {
+                owner = iter.next();
+            } else {
+                owner = null;
+            }
+        }
+
+        /* Now handle the upgrade or conflict as appropriate. */
+        if (upgrade != null) {
+            /* The requestor holds this lock. */
+            LockType upgradeType = upgrade.getUpgrade();
+            assert upgradeType != null;
+            if (!ownerConflicts) {
+                /* No conflict: grant the upgrade.  */
+                lockToUpgrade.setLockType(upgradeType);
+                return upgrade.getPromotion() ?
+                    LockGrantType.PROMOTION : LockGrantType.EXISTING;
+            } else {
+                /* Upgrade cannot be granted at this time. */
+                return LockGrantType.WAIT_PROMOTION;
+            }
+        } else {
+            /* The requestor doesn't hold this lock. */
+            if (!ownerConflicts && (!ownerExists || firstWaiterInLine)) {
+                /* No conflict: grant the lock. */
+                addOwner(newLock, mb, lockTableIndex);
+                return LockGrantType.NEW;
+            } else {
+                /* Lock cannot be granted at this time. */
+                return LockGrantType.WAIT_NEW;
+            }
+        }
+    }
+
+    /**
+     * Called from release() when a RESTART request is waiting to determine if
+     * any RANGE_INSERT owners exist.  We can't call tryLock for a RESTART
+     * lock because it must never be granted.
+     */
+    private boolean rangeInsertConflict(Locker waiterLocker) {
+
+        LockInfo owner = null;
+        Iterator<LockInfo> iter = null;
+
+        if (ownerSet != null) {
+            iter = ownerSet.iterator();
+        }
+
+        if (firstOwner != null) {
+            owner = firstOwner;
+        } else if ((iter != null) && (iter.hasNext())) {
+            owner = iter.next();
+        }
+
+        while (owner != null) {
+            Locker ownerLocker = owner.getLocker();
+            if (ownerLocker != waiterLocker &&
+                !ownerLocker.sharesLocksWith(waiterLocker) &&
+                owner.getLockType() == LockType.RANGE_INSERT) {
+                return true;
+            }
+
+            /* Move on to the next owner. */
+            if ((iter != null) && (iter.hasNext())) {
+                owner = iter.next();
+            } else {
+                owner = null;
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Downgrade a write lock to a read lock.
+     */
+    public void demote(Locker locker) {
+        LockInfo owner = getOwnerLockInfo(locker);
+        if (owner != null) {
+            LockType type = owner.getLockType();
+            if (type.isWriteLock()) {
+                owner.setLockType((type == LockType.RANGE_WRITE) ?
+                                  LockType.RANGE_READ : LockType.READ);
+            }
+        }
+    }
+
+    /**
+     * Transfer a lock from one transaction to another. Make sure that this
+     * destination locker is only present as a single reader or writer.
+     */
+    public Lock transfer(Long nodeId,
+                         Locker currentLocker,
+                         Locker destLocker,
+                         MemoryBudget mb,
+                         int lockTableIndex)
+        throws DatabaseException {
+
+        /*
+         * Remove all the old owners held by the dest locker.  Take all the
+         * owners held by currentLocker and reassign them to the dest locker.
+         */
+        int numRemovedLockInfos = 0;
+
+        if (firstOwner != null) {
+            if (firstOwner.getLocker() == destLocker) {
+                firstOwner = null;
+                numRemovedLockInfos++;
+            } else if (firstOwner.getLocker() == currentLocker) {
+                setNewLocker(nodeId, firstOwner, destLocker);
+            }
+        }
+
+        if (ownerSet != null) {
+            Iterator<LockInfo> iter = ownerSet.iterator();
+            while (iter.hasNext()) {
+                LockInfo owner = iter.next();
+                if (owner.getLocker() == destLocker) {
+                    iter.remove();
+                    numRemovedLockInfos++;
+                } else if (owner.getLocker() == currentLocker) {
+                    setNewLocker(nodeId, owner, destLocker);
+                }
+            }
+        }
+
+        /* Check the waiters, remove any that belonged to the dest locker. */
+        if ((firstWaiter != null) && (firstWaiter.getLocker() == destLocker)) {
+            firstWaiter = null;
+            numRemovedLockInfos++;
+        }
+        if (waiterList != null) {
+            Iterator<LockInfo> iter = waiterList.iterator();
+            while (iter.hasNext()) {
+                LockInfo info = iter.next();
+                if (info.getLocker() == destLocker) {
+                    iter.remove();
+                    numRemovedLockInfos++;
+                }
+            }
+        }
+
+        mb.updateLockMemoryUsage(0 - (numRemovedLockInfos *
+                                      MemoryBudget.LOCKINFO_OVERHEAD),
+                                 lockTableIndex);
+        return this;
+    }
+
+    private void setNewLocker(Long nodeId,
+                              LockInfo owner,
+                              Locker destLocker)
+        throws DatabaseException {
+
+        owner.setLocker(destLocker);
+        destLocker.addLock(nodeId, owner.getLockType(), LockGrantType.NEW);
+    }
+
+    /**
+     * Transfer a lock from one transaction to many others. Only really needed
+     * for case where a write handle lock is being transferred to multiple read
+     * handles.
+     */
+    public Lock transferMultiple(Long nodeId,
+                                 Locker currentLocker,
+                                 Locker[] destLockers,
+                                 MemoryBudget mb,
+                                 int lockTableIndex)
+        throws DatabaseException {
+
+        LockInfo oldOwner = null;
+
+        if (destLockers.length == 1) {
+            return transfer(nodeId, currentLocker, destLockers[0],
+                            mb, lockTableIndex);
+        } else {
+
+            /*
+             * First remove any ownership of the dest txns.
+             */
+            if (firstOwner != null) {
+                for (int i = 0; i < destLockers.length; i++) {
+                    if (firstOwner.getLocker() == destLockers[i]) {
+                        firstOwner = null;
+                        break;
+                    }
+                }
+            }
+
+            if (ownerSet != null) {
+                Iterator<LockInfo> ownersIter = ownerSet.iterator();
+                while (ownersIter.hasNext()) {
+                    LockInfo o = ownersIter.next();
+                    for (int i = 0; i < destLockers.length; i++) {
+                        if (o.getLocker() == destLockers[i]) {
+                            ownersIter.remove();
+                            break;
+                        }
+                    }
+                }
+            }
+
+            /*
+             * Create the clones
+             */
+            if (firstOwner != null) {
+                oldOwner = cloneLockInfo(nodeId,
+                                         firstOwner,
+                                         currentLocker,
+                                         destLockers,
+                                         mb,
+                                         lockTableIndex);
+            }
+
+            if ((ownerSet != null) && (oldOwner == null))  {
+                Iterator<LockInfo> ownersIter = ownerSet.iterator();
+                while (ownersIter.hasNext()) {
+                    LockInfo o = ownersIter.next();
+                    oldOwner = cloneLockInfo(nodeId,
+                                             o,
+                                             currentLocker,
+                                             destLockers,
+                                             mb,
+                                             lockTableIndex);
+                    if (oldOwner != null) {
+                        break;
+                    }
+                }
+            }
+
+            /*
+             * Check the waiters, remove any that belonged to the dest locker.
+             */
+            if (firstWaiter != null) {
+                for (int i = 0; i < destLockers.length; i++) {
+                    if (firstWaiter.getLocker() == destLockers[i]) {
+                        firstWaiter = null;
+                        break;
+                    }
+                }
+            }
+
+            if (waiterList != null) {
+                Iterator<LockInfo> iter = waiterList.iterator();
+                while (iter.hasNext()) {
+                    LockInfo o = iter.next();
+                    for (int i = 0; i < destLockers.length; i++) {
+                        if (o.getLocker() == destLockers[i]) {
+                            iter.remove();
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        boolean removed = flushOwner(oldOwner, mb, lockTableIndex);
+        assert removed;
+        return this;
+    }
+
+    /**
+     * If oldOwner is the current owner, clone it and transform it into a dest
+     * locker.
+     */
+    private LockInfo cloneLockInfo(Long nodeId,
+                                   LockInfo oldOwner,
+                                   Locker currentLocker,
+                                   Locker[] destLockers,
+                                   MemoryBudget mb,
+                                   int lockTableIndex)
+           throws DatabaseException {
+
+        if (oldOwner.getLocker() == currentLocker) {
+            try {
+                LockType lockType = oldOwner.getLockType();
+                for (int i = 0; i < destLockers.length; i++) {
+                    LockInfo clonedLockInfo = (LockInfo) oldOwner.clone();
+                    clonedLockInfo.setLocker(destLockers[i]);
+                    destLockers[i].addLock(nodeId, lockType,
+                                           LockGrantType.NEW);
+                    addOwner(clonedLockInfo, mb, lockTableIndex);
+                }
+                return oldOwner;
+            } catch (CloneNotSupportedException e) {
+                throw new DatabaseException(e);
+            }
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Return the locker that has a write ownership on this lock. If no
+     * write owner exists, return null.
+     */
+    public Locker getWriteOwnerLocker() {
+
+        LockInfo owner = null;
+        Iterator<LockInfo> iter = null;
+
+        if (ownerSet != null) {
+            iter = ownerSet.iterator();
+        }
+
+        if (firstOwner != null) {
+            owner = firstOwner;
+        } else if ((iter != null) && (iter.hasNext())) {
+            owner = iter.next();
+        }
+
+        while (owner != null) {
+            /* Return locker if it owns a write lock. */
+            if (owner.getLockType().isWriteLock()) {
+                return owner.getLocker();
+            }
+
+            /* Move on to the next owner. */
+            if ((iter != null) && (iter.hasNext())) {
+                owner = iter.next();
+            } else {
+                owner = null;
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Debugging aid, validation before a lock request.
+     */
+    private boolean validateRequest(Locker locker) {
+        if (firstWaiter != null) {
+            if (firstWaiter.getLocker() == locker) {
+                assert false : "locker " + locker +
+                                " is already on waiters list.";
+            }
+        }
+
+        if (waiterList != null) {
+            Iterator<LockInfo> iter = waiterList.iterator();
+            while (iter.hasNext()) {
+                LockInfo o = iter.next();
+                if (o.getLocker() == locker) {
+                    assert false : "locker " + locker +
+                        " is already on waiters list.";
+                }
+            }
+        }
+        return true;
+    }
+
+    public boolean isThin() {
+        return false;
+    }
+
+    /**
+     * Debug dumper.
+     */
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append(" LockAddr:").append(System.identityHashCode(this));
+        sb.append(" Owners:");
+        if (nOwners() == 0) {
+            sb.append(" (none)");
+        } else {
+            if (firstOwner != null) {
+                sb.append(firstOwner);
+            }
+
+            if (ownerSet != null) {
+                Iterator<LockInfo> iter = ownerSet.iterator();
+                while (iter.hasNext()) {
+                    LockInfo info = iter.next();
+                    sb.append(info);
+                }
+            }
+        }
+
+        sb.append(" Waiters:");
+        if (nWaiters() == 0) {
+            sb.append(" (none)");
+        } else {
+            sb.append(getWaitersListClone());
+        }
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockInfo.java b/src/com/sleepycat/je/txn/LockInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..947ed7e22fb3da883569ffe3fbef44d50a1ec9d0
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockInfo.java
@@ -0,0 +1,122 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockInfo.java,v 1.36.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * LockInfo is a class that embodies information about a lock instance.  The
+ * holding locker and the lock type are both contained in the object.
+ *
+ * This class is public for unit tests.
+ */
+public class LockInfo implements Cloneable {
+    protected Locker locker;
+    protected LockType lockType;
+
+    private static boolean deadlockStackTrace = false;
+    private static Map<LockInfo, StackTraceAtLockTime> traceExceptionMap =
+        Collections.synchronizedMap(new WeakHashMap<LockInfo, 
+                                    StackTraceAtLockTime>());
+    @SuppressWarnings("serial")
+    private static class StackTraceAtLockTime extends Exception {}
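+
+    /*
+     * Note: traceExceptionMap is a WeakHashMap so that an entry is dropped
+     * automatically once its LockInfo key is no longer referenced by any
+     * lock table; otherwise enabling je.txn.deadlockStackTrace would pin
+     * every LockInfo ever created.
+     */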
+
+    /**
+     * Called when the je.txn.deadlockStackTrace property is changed.
+     */
+    static void setDeadlockStackTrace(boolean enable) {
+        deadlockStackTrace = enable;
+    }
+
+    /**
+     * For unit testing only.
+     */
+    public static boolean getDeadlockStackTrace() {
+        return deadlockStackTrace;
+    }
+
+    /**
+     * Construct a new LockInfo.  Public for the Sizeof program.
+     */
+    public LockInfo(Locker locker, LockType lockType) {
+	this.locker = locker;
+	this.lockType = lockType;
+
+        if (deadlockStackTrace) {
+            traceExceptionMap.put(this, new StackTraceAtLockTime());
+        }
+    }
+
+    /**
+     * Change this lockInfo over to the prescribed locker.
+     */
+    void setLocker(Locker locker) {
+	this.locker = locker;
+    }
+
+    /**
+     * @return The transaction associated with this Lock.
+     */
+    Locker getLocker() {
+	return locker;
+    }
+
+    /**
+     * Change the LockType associated with this Lock.
+     */
+    void setLockType(LockType lockType) {
+	this.lockType = lockType;
+    }
+
+    /**
+     * @return The LockType associated with this Lock.
+     */
+    LockType getLockType() {
+	return lockType;
+    }
+
+    @Override
+    public Object clone()
+        throws CloneNotSupportedException {
+
+        return super.clone();
+    }
+
+    /**
+     * Debugging
+     */
+    public void dump() {
+	System.out.println(this);
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer(500);
+
+        buf.append("<LockInfo locker=\"");
+        buf.append(locker);
+        buf.append("\" type=\"");
+        buf.append(lockType);
+        buf.append("\"/>");
+
+        if (deadlockStackTrace) {
+            Exception traceException = traceExceptionMap.get(this);
+            if (traceException != null) {
+                buf.append(" lock taken at: ");
+                buf.append(Tracer.getStackTrace(traceException));
+            }
+        }
+
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockManager.java b/src/com/sleepycat/je/txn/LockManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..1b89e56e168aef23e8794eb86f353e0b35d3ae95
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockManager.java
@@ -0,0 +1,1038 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockManager.java,v 1.142.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Collection;
+import java.util.ConcurrentModificationException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.dbi.RangeRestartException;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.latch.LatchStats;
+import com.sleepycat.je.latch.LatchSupport;
+
+/**
+ * LockManager manages locks.
+ *
+ * Note that locks are counted as taking up part of the JE cache.
+ */
+public abstract class LockManager implements EnvConfigObserver {
+
+    /*
+     * The total memory cost for a lock is the Lock object, plus its entry and
+     * key in the lock hash table.
+     *
+     * The addition and removal of Lock objects, and the corresponding cost of
+     * their hashmap entry and key are tracked through the LockManager.
+     */
+    static final long TOTAL_LOCKIMPL_OVERHEAD =
+        MemoryBudget.LOCKIMPL_OVERHEAD +
+        MemoryBudget.HASHMAP_ENTRY_OVERHEAD +
+        MemoryBudget.LONG_OVERHEAD;
+
+    static final long TOTAL_THINLOCKIMPL_OVERHEAD =
+        MemoryBudget.THINLOCKIMPL_OVERHEAD +
+        MemoryBudget.HASHMAP_ENTRY_OVERHEAD +
+        MemoryBudget.LONG_OVERHEAD;
+
+    private static final long REMOVE_TOTAL_LOCKIMPL_OVERHEAD =
+        0 - TOTAL_LOCKIMPL_OVERHEAD;
+
+    private static final long REMOVE_TOTAL_THINLOCKIMPL_OVERHEAD =
+        0 - TOTAL_THINLOCKIMPL_OVERHEAD;
+
+    private static final long THINLOCK_MUTATE_OVERHEAD =
+        MemoryBudget.LOCKIMPL_OVERHEAD -
+        MemoryBudget.THINLOCKIMPL_OVERHEAD +
+        MemoryBudget.LOCKINFO_OVERHEAD;
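+
+    /*
+     * Illustrative reading of THINLOCK_MUTATE_OVERHEAD (derived from the
+     * constants above): when a ThinLockImpl mutates into a LockImpl, the
+     * LockImpl replaces the ThinLockImpl (LOCKIMPL_OVERHEAD minus
+     * THINLOCKIMPL_OVERHEAD) and one LockInfo is allocated for the owner
+     * (LOCKINFO_OVERHEAD); the hash entry and key are reused, so they do
+     * not appear in the delta.
+     */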
+
+    protected int nLockTables = 1;
+    protected Latch[] lockTableLatches;
+    private Map<Long,Lock>[] lockTables;          // keyed by nodeId
+    private EnvironmentImpl envImpl;
+    private MemoryBudget memoryBudget;
+
+    private long nRequests; // stats: number of times a request was made
+    private long nWaits;    // stats: number of times a request blocked
+
+    private static RangeRestartException rangeRestartException =
+        new RangeRestartException();
+    private static boolean lockTableDump = false;
+
+    /* 
+     * @SuppressWarnings is used to stifle a type safety complaint about the 
+     * assignment of lockTables = new Map[nLockTables]. There's no way to 
+     * specify the type of the array.
+     */
+    @SuppressWarnings("unchecked")
+    public LockManager(EnvironmentImpl envImpl)
+        throws DatabaseException {
+                
+        DbConfigManager configMgr = envImpl.getConfigManager();
+        nLockTables = configMgr.getInt(EnvironmentParams.N_LOCK_TABLES);
+        lockTables = new Map[nLockTables];
+        lockTableLatches = new Latch[nLockTables];
+        for (int i = 0; i < nLockTables; i++) {
+            lockTables[i] = new HashMap<Long,Lock>();
+            lockTableLatches[i] = new Latch("Lock Table " + i);
+        }
+        this.envImpl = envImpl;
+        memoryBudget = envImpl.getMemoryBudget();
+        nRequests = 0;
+        nWaits = 0;
+
+        /* Initialize mutable properties and register for notifications. */
+        envConfigUpdate(configMgr, null);
+        envImpl.addConfigObserver(this);
+    }
+
+    /**
+     * Process notifications of mutable property changes.
+     */
+    public void envConfigUpdate(DbConfigManager configMgr,
+                                EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        LockInfo.setDeadlockStackTrace(configMgr.getBoolean
+            (EnvironmentParams.TXN_DEADLOCK_STACK_TRACE));
+        setLockTableDump(configMgr.getBoolean
+            (EnvironmentParams.TXN_DUMPLOCKS));
+    }
+
+    /**
+     * Called when the je.txn.dumpLocks property is changed.
+     */
+    static void setLockTableDump(boolean enable) {
+        lockTableDump = enable;
+    }
+
+    protected int getLockTableIndex(Long nodeId) {
+        return (((int) nodeId.longValue()) & 0x7fffffff) %
+            nLockTables;
+    }
+
+    protected int getLockTableIndex(long nodeId) {
+        return (((int) nodeId) & 0x7fffffff) % nLockTables;
+    }
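+
+    /*
+     * Illustrative arithmetic: with nLockTables == 4, nodeId 10 hashes to
+     * table 10 % 4 == 2.  The & 0x7fffffff clears the sign bit so that a
+     * nodeId whose low 32 bits form a negative int still yields a
+     * non-negative index.
+     */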
+
+    /**
+     * Attempt to acquire a lock of <i>type</i> on <i>nodeId</i>.  If the lock
+     * acquisition would result in a deadlock, throw an exception.<br> If the
+     * requested lock is not currently available, block until it is or until
+     * timeout milliseconds have elapsed.<br> If a lock of <i>type</i> is
+     * already held, return EXISTING.<br> If a WRITE lock is held and a READ
+     * lock is requested, return PROMOTION.<br>
+     *
+     * If a lock request is for a lock that is not currently held, return
+     * either NEW or DENIED depending on whether the lock is granted or
+     * not.<br>
+     *
+     * @param nodeId The NodeId to lock.
+     *
+     * @param locker The Locker to lock this on behalf of.
+     *
+     * @param type The lock type requested.
+     *
+     * @param timeout milliseconds to time out after if lock couldn't be
+     * obtained.  0 means block indefinitely.  Not used if nonBlockingRequest
+     * is true.
+     *
+     * @param nonBlockingRequest if true, means don't block if lock can't be
+     * acquired, and ignore the timeout parameter.
+     *
+     * @return a LockGrantType indicating whether the request was fulfilled
+     * or not.  LockGrantType.NEW means the lock grant was fulfilled and
+     * the caller did not previously hold the lock.  PROMOTION means the
+     * lock was granted and it was a promotion from READ to WRITE.  EXISTING
+     * means the lock was already granted (not a promotion).  DENIED means
+     * the lock was not granted because this was a non-blocking request and
+     * the lock was not immediately available; a blocking request that times
+     * out throws DeadlockException instead.
+     *
+     * @throws DeadlockException if acquiring the lock would result in
+     * a deadlock.
+     */
+    public LockGrantType lock(long nodeId,
+                              Locker locker,
+                              LockType type,
+                              long timeout,
+                              boolean nonBlockingRequest,
+                              DatabaseImpl database)
+        throws DeadlockException, DatabaseException {
+
+        assert timeout >= 0;
+
+        /*
+         * Lock on locker before latching the lockTable to avoid having another
+         * notifier perform the notify before the waiter is actually waiting.
+         */
+        synchronized (locker) {
+            Long nid = Long.valueOf(nodeId);
+            LockAttemptResult result =
+                attemptLock(nid, locker, type, nonBlockingRequest);
+            /* Got the lock, return. */
+            if (result.success ||
+                result.lockGrant == LockGrantType.DENIED) {
+                return result.lockGrant;
+            }
+
+            assert checkNoLatchesHeld(nonBlockingRequest):
+                LatchSupport.countLatchesHeld() +
+                " latches held while trying to lock, lock table =" +
+                LatchSupport.latchesHeldToString();
+
+            /*
+             * We must have gotten WAIT_* from the lock request. We know that
+             * this is a blocking request, because if it wasn't, Lock.lock
+             * would have returned DENIED. Go wait!
+             */
+            assert !nonBlockingRequest;
+            try {
+                boolean doWait = true;
+
+                /*
+                 * Before blocking, check locker timeout. We need to check here
+                 * or lock timeouts will always take precedence and we'll never
+                 * actually get any txn timeouts.
+                 */
+                if (locker.isTimedOut()) {
+                    if (validateOwnership(nid, locker, type, true,
+                                          memoryBudget)) {
+                        doWait = false;
+                    } else {
+                        DeadlockException DE =
+                            makeTimeoutMsg("Transaction", locker, nodeId, type,
+                                           result.lockGrant,
+                                           result.useLock,
+                                           locker.getTxnTimeout(),
+                                           locker.getTxnStartMillis(),
+                                           System.currentTimeMillis(),
+                                           database);
+                        throw DE;
+                    }
+                }
+
+                boolean keepTime = (timeout > 0);
+                long startTime = (keepTime ? System.currentTimeMillis() : 0);
+                while (doWait) {
+                    locker.setWaitingFor(result.useLock);
+                    try {
+                        locker.wait(timeout);
+                    } catch (InterruptedException IE) {
+                        throw new RunRecoveryException(envImpl, IE);
+                    }
+
+                    boolean lockerTimedOut = locker.isTimedOut();
+                    long now = System.currentTimeMillis();
+                    boolean thisLockTimedOut =
+                        (keepTime && (now - startTime >= timeout));
+                    boolean isRestart =
+                        (result.lockGrant == LockGrantType.WAIT_RESTART);
+
+                    /*
+                     * Re-check for ownership of the lock following wait.  If
+                     * we timed out and we don't have ownership then flush this
+                     * lock from both the waiters and owners while under the
+                     * lock table latch.  See SR 10103.
+                     */
+                    if (validateOwnership(nid, locker, type,
+                                          lockerTimedOut ||
+                                          thisLockTimedOut ||
+                                          isRestart,
+                                          memoryBudget)) {
+                        break;
+                    } else {
+
+                        /*
+                         * After a restart conflict the lock will not be held.
+                         */
+                        if (isRestart) {
+                            throw rangeRestartException;
+                        }
+
+                        if (thisLockTimedOut) {
+                            locker.setOnlyAbortable();
+                            DeadlockException DE =
+                                makeTimeoutMsg("Lock", locker, nodeId, type,
+                                               result.lockGrant,
+                                               result.useLock,
+                                               timeout, startTime, now,
+                                               database);
+                            throw DE;
+                        }
+
+                        if (lockerTimedOut) {
+                            locker.setOnlyAbortable();
+                            DeadlockException DE =
+                                makeTimeoutMsg("Transaction", locker,
+                                               nodeId, type,
+                                               result.lockGrant,
+                                               result.useLock,
+                                               locker.getTxnTimeout(),
+                                               locker.getTxnStartMillis(),
+                                               now, database);
+                            throw DE;
+                        }
+                    }
+                }
+            } finally {
+                locker.setWaitingFor(null);
+                assert EnvironmentImpl.maybeForceYield();
+            }
+
+            locker.addLock(nid, type, result.lockGrant);
+
+            return result.lockGrant;
+        }
+    }
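+
+    /*
+     * Illustrative call (a sketch, not lifted from a JE call site): a
+     * blocking write-lock request with no lock timeout would look like
+     *
+     *   LockGrantType grant =
+     *       lockManager.lock(nodeId, locker, LockType.WRITE,
+     *                        0,      // block indefinitely
+     *                        false,  // blocking request
+     *                        database);
+     *
+     * and the caller must be prepared for DeadlockException if the txn
+     * timeout elapses first.
+     */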
+
+    abstract protected Lock lookupLock(Long nodeId)
+        throws DatabaseException;
+
+    protected Lock lookupLockInternal(Long nodeId, int lockTableIndex)
+        throws DatabaseException {
+
+        /* Get the target lock. */
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(nodeId);
+        return useLock;
+    }
+
+    abstract protected LockAttemptResult
+        attemptLock(Long nodeId,
+                    Locker locker,
+                    LockType type,
+                    boolean nonBlockingRequest)
+        throws DatabaseException;
+
+    protected LockAttemptResult
+        attemptLockInternal(Long nodeId,
+                            Locker locker,
+                            LockType type,
+                            boolean nonBlockingRequest,
+                            int lockTableIndex)
+        throws DatabaseException {
+
+        nRequests++;
+
+        /* Get the target lock. */
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(nodeId);
+        if (useLock == null) {
+            useLock = new ThinLockImpl();
+            lockTable.put(nodeId, useLock);
+            memoryBudget.updateLockMemoryUsage
+                (TOTAL_THINLOCKIMPL_OVERHEAD, lockTableIndex);
+        }
+
+        /*
+         * Attempt to lock.  Possible return values are NEW, PROMOTION, DENIED,
+         * EXISTING, WAIT_NEW, WAIT_PROMOTION, WAIT_RESTART.
+         */
+        LockAttemptResult lar = useLock.lock(type, locker, nonBlockingRequest,
+                                             memoryBudget, lockTableIndex);
+        if (lar.useLock != useLock) {
+            /* The lock mutated from ThinLockImpl to LockImpl. */
+            useLock = lar.useLock;
+            lockTable.put(nodeId, useLock);
+            /* We still have the overhead of the hashtable (locktable). */
+            memoryBudget.updateLockMemoryUsage
+                (THINLOCK_MUTATE_OVERHEAD, lockTableIndex);
+        }
+        LockGrantType lockGrant = lar.lockGrant;
+        boolean success = false;
+
+        /* Was the attempt successful? */
+        if ((lockGrant == LockGrantType.NEW) ||
+            (lockGrant == LockGrantType.PROMOTION)) {
+            locker.addLock(nodeId, type, lockGrant);
+            success = true;
+        } else if (lockGrant == LockGrantType.EXISTING) {
+            success = true;
+        } else if (lockGrant == LockGrantType.DENIED) {
+            /* Locker.lock will throw LockNotGrantedException. */
+        } else {
+            nWaits++;
+        }
+        return new LockAttemptResult(useLock, lockGrant, success);
+    }
+
+    /**
+     * Create an informative lock or txn timeout message.
+     */
+    protected abstract DeadlockException
+        makeTimeoutMsg(String lockOrTxn,
+                       Locker locker,
+                       long nodeId,
+                       LockType type,
+                       LockGrantType grantType,
+                       Lock useLock,
+                       long timeout,
+                       long start,
+                       long now,
+                       DatabaseImpl database)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of creating a lock or txn timeout message.
+     */
+    protected DeadlockException
+        makeTimeoutMsgInternal(String lockOrTxn,
+                               Locker locker,
+                               long nodeId,
+                               LockType type,
+                               LockGrantType grantType,
+                               Lock useLock,
+                               long timeout,
+                               long start,
+                               long now,
+                               DatabaseImpl database) {
+
+        /*
+         * Because we're accessing parts of the lock, we need protected
+         * access to the lock table, since things can change out from
+         * underneath us.  This is a big hammer to grab for so long while we
+         * traverse the graph, but it's only taken when we have a deadlock
+         * and we're creating a debugging message.
+         *
+         * The alternative would be to handle ConcurrentModificationExceptions
+         * and retry until none of them happen.
+         */
+        if (lockTableDump) {
+            System.out.println("++++++++++ begin lock table dump ++++++++++");
+            for (int i = 0; i < nLockTables; i++) {
+                boolean success = false;
+                for (int j = 0; j < 3 && !success; j++) {
+                    try {
+                        StringBuilder sb = new StringBuilder();
+                        dumpToStringNoLatch(sb, i);
+                        System.out.println(sb.toString());
+                        success = true;
+                        break; // for j...
+                    } catch (ConcurrentModificationException CME) {
+                        continue;
+                    }
+                }
+                if (!success) {
+                    System.out.println("Couldn't dump locktable " + i);
+                }
+            }
+            System.out.println("++++++++++ end lock table dump ++++++++++");
+        }
+
+        StringBuilder sb = new StringBuilder();
+        sb.append(lockOrTxn);
+        sb.append(" expired. Locker ").append(locker);
+        sb.append(": waited for lock");
+
+        if (database != null) {
+            sb.append(" on database=").append(database.getDebugName());
+        }
+        sb.append(" LockAddr:").append(System.identityHashCode(useLock));
+        sb.append(" node=").append(nodeId);
+        sb.append(" type=").append(type);
+        sb.append(" grant=").append(grantType);
+        sb.append(" timeoutMillis=").append(timeout);
+        sb.append(" startTime=").append(start);
+        sb.append(" endTime=").append(now);
+        Set<LockInfo> owners = useLock.getOwnersClone();
+        List<LockInfo> waiters = useLock.getWaitersListClone();
+        sb.append("\nOwners: ").append(owners);
+        sb.append("\nWaiters: ").append(waiters).append("\n");
+        StringBuilder deadlockInfo = findDeadlock(useLock, locker);
+        if (deadlockInfo != null) {
+            sb.append(deadlockInfo);
+        }
+        DeadlockException ret = new DeadlockException(sb.toString());
+        ret.setOwnerTxnIds(getTxnIds(owners));
+        ret.setWaiterTxnIds(getTxnIds(waiters));
+        ret.setTimeoutMillis(timeout);
+        return ret;
+    }
+
+    private long[] getTxnIds(Collection<LockInfo> c) {
+        long[] ret = new long[c.size()];
+        Iterator<LockInfo> iter = c.iterator();
+        int i = 0;
+        while (iter.hasNext()) {
+            LockInfo info = iter.next();
+            ret[i++] = info.getLocker().getId();
+        }
+
+        return ret;
+    }
+
+    /**
+     * Release a lock and possibly notify any waiters that they have been
+     * granted the lock.
+     *
+     * @param nodeId The node ID of the lock to release.
+     *
+     * @return true if the lock is released successfully, false if
+     * the lock is not currently being held.
+     */
+    public boolean release(long nodeId, Locker locker)
+        throws DatabaseException {
+
+        synchronized (locker) {
+            Set<Locker> newOwners = 
+                releaseAndFindNotifyTargets(nodeId, locker);
+
+            if (newOwners == null) {
+                return false;
+            }
+
+            if (newOwners.size() > 0) {
+
+                /*
+                 * There is a new set of owners and/or there are restart
+                 * waiters that should be notified.
+                 */
+                Iterator<Locker> iter = newOwners.iterator();
+
+                while (iter.hasNext()) {
+                    Locker lockerToNotify = iter.next();
+
+                    /* Use notifyAll to support multiple threads per txn. */
+                    synchronized (lockerToNotify) {
+                        lockerToNotify.notifyAll();
+                    }
+
+                    assert EnvironmentImpl.maybeForceYield();
+                }
+            }
+
+            return true;
+        }
+    }
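+
+    /*
+     * The handshake with lock() above: the waiter synchronizes on its own
+     * Locker before checking ownership and calling wait(), and this method
+     * synchronizes on each new owner before notifyAll(), so a grant cannot
+     * be signalled in the window between the waiter's ownership check and
+     * its wait.
+     */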
+
+    /**
+     * Release the lock, and return the set of new owners to notify, if any.
+     *
+     * @return
+     * null if the lock does not exist or the given locker was not the owner,
+     * a non-empty set if owners should be notified after releasing,
+     * an empty set if no notification is required.
+     */
+    protected abstract Set<Locker>
+        releaseAndFindNotifyTargets(long nodeId, Locker locker)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of releaseAndFindNotifyTargets
+     */
+    protected Set<Locker>
+        releaseAndFindNotifyTargetsInternal(long nodeId,
+                                            Locker locker,
+                                            int lockTableIndex)
+        throws DatabaseException {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(Long.valueOf(nodeId));
+
+        if (useLock == null) {
+            /* Lock doesn't exist. */
+            return null;
+        }
+
+        Set<Locker> lockersToNotify =
+            useLock.release(locker, memoryBudget, lockTableIndex);
+        if (lockersToNotify == null) {
+            /* Not owner. */
+            return null;
+        }
+
+        /* If it's not in use at all, remove it from the lock table. */
+        if ((useLock.nWaiters() == 0) &&
+            (useLock.nOwners() == 0)) {
+            lockTables[lockTableIndex].remove(nodeId);
+            if (useLock.isThin()) {
+                memoryBudget.updateLockMemoryUsage
+                    (REMOVE_TOTAL_THINLOCKIMPL_OVERHEAD, lockTableIndex);
+            } else {
+                memoryBudget.updateLockMemoryUsage
+                    (REMOVE_TOTAL_LOCKIMPL_OVERHEAD, lockTableIndex);
+            }
+        }
+
+        return lockersToNotify;
+    }
+
+    /**
+     * Transfer ownership of a lock from one locker to another. We're not
+     * sending any notification to the waiters on the lock table, and the past
+     * and present owner should be ready for the transfer.
+     */
+    abstract void transfer(long nodeId,
+                           Locker owningLocker,
+                           Locker destLocker,
+                           boolean demoteToRead)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of transfer
+     */
+    protected void transferInternal(long nodeId,
+                                    Locker owningLocker,
+                                    Locker destLocker,
+                                    boolean demoteToRead,
+                                    int lockTableIndex)
+        throws DatabaseException {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(Long.valueOf(nodeId));
+
+        assert useLock != null : "Transfer, lock " + nodeId + " was null";
+        if (demoteToRead) {
+            useLock.demote(owningLocker);
+        }
+        Lock newLock =
+            useLock.transfer(nodeId, owningLocker, destLocker,
+                             memoryBudget, lockTableIndex);
+        if (newLock != useLock) {
+            /* The lock mutated from ThinLockImpl to LockImpl. */
+            lockTable.put(nodeId, newLock);
+            /* We still have the overhead of the hashtable (locktable). */
+            memoryBudget.updateLockMemoryUsage
+                (THINLOCK_MUTATE_OVERHEAD, lockTableIndex);
+        }
+        owningLocker.removeLock(nodeId);
+    }
+
+    /**
+     * Transfer ownership of a lock from one locker to a set of other txns,
+     * cloning the lock as necessary. This will always be demoted to read, as
+     * we can't have multiple locker owners any other way.  We're not sending
+     * any notification to the waiters on the lock table, and the past and
+     * present owners should be ready for the transfer.
+     */
+    abstract void transferMultiple(long nodeId,
+                                   Locker owningLocker,
+                                   Locker[] destLockers)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of transferMultiple
+     */
+    protected void transferMultipleInternal(long nodeId,
+                                            Locker owningLocker,
+                                            Locker[] destLockers,
+                                            int lockTableIndex)
+        throws DatabaseException {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(Long.valueOf(nodeId));
+
+        assert useLock != null : "Transfer, lock " + nodeId + " was null";
+        useLock.demote(owningLocker);
+
+        Lock newLock =
+            useLock.transferMultiple(nodeId, owningLocker, destLockers,
+                                     memoryBudget, lockTableIndex);
+        if (newLock != useLock) {
+            /* The lock mutated from ThinLockImpl to LockImpl. */
+            lockTable.put(nodeId, newLock);
+            /* We still have the overhead of the hashtable (locktable). */
+            memoryBudget.updateLockMemoryUsage
+                (THINLOCK_MUTATE_OVERHEAD, lockTableIndex);
+        }
+
+        owningLocker.removeLock(nodeId);
+    }
+
+    /**
+     * Demote a lock from write to read. Call back to the owning locker to
+     * move this to its read collection.
+     * @param nodeId The node ID of the lock to demote.
+     * @param locker The locker that holds the write lock.
+     */
+    abstract void demote(long nodeId, Locker locker)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of demote.
+     */
+    protected void demoteInternal(long nodeId,
+                                  Locker locker,
+                                  int lockTableIndex)
+        throws DatabaseException {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock useLock = lockTable.get(Long.valueOf(nodeId));
+        useLock.demote(locker);
+        locker.moveWriteToReadLock(nodeId, useLock);
+    }
+
+    /**
+     * Test the status of the lock on nodeId.  If any transaction holds any
+     * lock on it, true is returned.  If no transaction holds a lock on it,
+     * false is returned.
+     *
+     * This method is only used by unit tests.
+     *
+     * @param nodeId The NodeId to check.
+     * @return true if any transaction holds any lock on the node ID, false
+     * if no lock is held by any transaction.
+     */
+    abstract boolean isLocked(Long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of isLocked.
+     */
+    protected boolean isLockedInternal(Long nodeId, int lockTableIndex) {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock entry = lockTable.get(nodeId);
+        if (entry == null) {
+            return false;
+        }
+
+        return entry.nOwners() != 0;
+    }
+
+    /**
+     * Return true if this locker owns a lock of this type on the given node.
+     *
+     * This method is only used by unit tests.
+     */
+    abstract boolean isOwner(Long nodeId, Locker locker, LockType type)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of isOwner.
+     */
+    protected boolean isOwnerInternal(Long nodeId,
+                                      Locker locker,
+                                      LockType type,
+                                      int lockTableIndex) {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock entry = lockTable.get(nodeId);
+        if (entry == null) {
+            return false;
+        }
+
+        return entry.isOwner(locker, type);
+    }
+
+    /**
+     * Return true if this locker is waiting on this lock.
+     *
+     * This method is only used by unit tests.
+     */
+    abstract boolean isWaiter(Long nodeId, Locker locker)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of isWaiter.
+     */
+    protected boolean isWaiterInternal(Long nodeId,
+                                       Locker locker,
+                                       int lockTableIndex) {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock entry = lockTable.get(nodeId);
+        if (entry == null) {
+            return false;
+        }
+
+        return entry.isWaiter(locker);
+    }
+
+    /**
+     * Return the number of waiters for this lock.
+     */
+    abstract int nWaiters(Long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of nWaiters.
+     */
+    protected int nWaitersInternal(Long nodeId, int lockTableIndex) {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock entry = lockTable.get(nodeId);
+        if (entry == null) {
+            return -1;
+        }
+
+        return entry.nWaiters();
+    }
+
+    /**
+     * Return the number of owners of this lock.
+     */
+    abstract int nOwners(Long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of nOwners.
+     */
+    protected int nOwnersInternal(Long nodeId, int lockTableIndex) {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock entry = lockTable.get(nodeId);
+        if (entry == null) {
+            return -1;
+        }
+
+        return entry.nOwners();
+    }
+
+    /**
+     * @return the locker that owns the write lock for this node, if any.
+     */
+    abstract Locker getWriteOwnerLocker(Long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of getWriteOwnerLocker.
+     */
+    protected Locker getWriteOwnerLockerInternal(Long nodeId,
+                                                 int lockTableIndex)
+        throws DatabaseException {
+
+        Map<Long,Lock> lockTable = lockTables[lockTableIndex];
+        Lock lock = lockTable.get(nodeId);
+        if (lock == null) {
+            return null;
+        } else if (lock.nOwners() > 1) {
+            /* More than one owner means these are read locks. */
+            return null;
+        } else {
+            return lock.getWriteOwnerLocker();
+        }
+    }
+
+    /*
+     * Check if we got ownership while we were waiting.  If we didn't get
+     * ownership, and we timed out, remove this locker from the set of
+     * waiters. Do this in a critical section to prevent any orphaning of the
+     * lock -- we must be in a critical section between the time that we check
+     * ownership and when we flush any waiters (SR #10103)
+     * @return true if you are the owner.
+     */
+    abstract protected boolean validateOwnership(Long nodeId,
+                                                 Locker locker,
+                                                 LockType type,
+                                                 boolean flushFromWaiters,
+                                                 MemoryBudget mb)
+        throws DatabaseException;
+
+    /*
+     * Do the real work of validateOwnership.
+     */
+    protected boolean validateOwnershipInternal(Long nodeId,
+                                                Locker locker,
+                                                LockType type,
+                                                boolean flushFromWaiters,
+                                                MemoryBudget mb,
+                                                int lockTableIndex)
+        throws DatabaseException {
+
+        if (isOwnerInternal(nodeId, locker, type, lockTableIndex)) {
+            return true;
+        }
+
+        if (flushFromWaiters) {
+            Lock entry = lockTables[lockTableIndex].get(nodeId);
+            if (entry != null) {
+                entry.flushWaiter(locker, mb, lockTableIndex);
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Statistics
+     */
+    public LockStats lockStat(StatsConfig config)
+        throws DatabaseException {
+
+        LockStats stats = new LockStats();
+        stats.setNRequests(nRequests);
+        stats.setNWaits(nWaits);
+        if (config.getClear()) {
+            nWaits = 0;
+            nRequests = 0;
+        }
+
+        for (int i = 0; i < nLockTables; i++) {
+            LatchStats latchStats =
+                lockTableLatches[i].getLatchStats();
+            stats.accumulateLockTableLatchStats(latchStats);
+        }
+
+        /* Dump info about the lock table. */
+        if (!config.getFast()) {
+            dumpLockTable(stats);
+        }
+        return stats;
+    }
+
+    /**
+     * Dump the lock table to the lock stats.
+     */
+    abstract protected void dumpLockTable(LockStats stats)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of dumpLockTable.
+     */
+    protected void dumpLockTableInternal(LockStats stats, int i) {
+        Map<Long,Lock> lockTable = lockTables[i];
+        stats.accumulateNTotalLocks(lockTable.size());
+        Iterator<Lock> iter = lockTable.values().iterator();
+        while (iter.hasNext()) {
+            Lock lock = iter.next();
+            stats.setNWaiters(stats.getNWaiters() +
+                              lock.nWaiters());
+            stats.setNOwners(stats.getNOwners() +
+                             lock.nOwners());
+
+            /* Go through all the owners for a lock. */
+            Iterator<LockInfo> ownerIter = lock.getOwnersClone().iterator();
+            while (ownerIter.hasNext()) {
+                LockInfo info = ownerIter.next();
+                if (info.getLockType().isWriteLock()) {
+                    stats.setNWriteLocks(stats.getNWriteLocks() + 1);
+                } else {
+                    stats.setNReadLocks(stats.getNReadLocks() + 1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Debugging
+     */
+    public void dump()
+        throws DatabaseException {
+
+        System.out.println(dumpToString());
+    }
+
+    public String dumpToString()
+        throws DatabaseException {
+
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < nLockTables; i++) {
+            lockTableLatches[i].acquire();
+            try {
+                dumpToStringNoLatch(sb, i);
+            } finally {
+                lockTableLatches[i].release();
+            }
+        }
+        return sb.toString();
+    }
+
+    private void dumpToStringNoLatch(StringBuilder sb, int whichTable) {
+        Map<Long,Lock> lockTable = lockTables[whichTable];
+        Iterator<Map.Entry<Long,Lock>> entries = 
+            lockTable.entrySet().iterator();
+
+        while (entries.hasNext()) {
+            Map.Entry<Long,Lock> entry = entries.next();
+            Long nid = entry.getKey();
+            Lock lock = entry.getValue();
+            sb.append("---- Node Id: ").append(nid).append("----\n");
+            sb.append(lock);
+            sb.append('\n');
+        }
+    }
+
+    private boolean checkNoLatchesHeld(boolean nonBlockingRequest) {
+        if (nonBlockingRequest) {
+            return true; // don't check if it's a non-blocking request.
+        } else {
+            return (LatchSupport.countLatchesHeld() == 0);
+        }
+    }
+
+    private StringBuilder findDeadlock(Lock lock, Locker rootLocker) {
+
+        Set<Locker> ownerSet = new HashSet<Locker>();
+        ownerSet.add(rootLocker);
+        return findDeadlock1(ownerSet, lock, rootLocker);
+    }
+
+    private StringBuilder findDeadlock1(Set<Locker> ownerSet,
+                                        Lock lock,
+                                        Locker rootLocker) {
+
+        Iterator<LockInfo> ownerIter = lock.getOwnersClone().iterator();
+        while (ownerIter.hasNext()) {
+            LockInfo info = ownerIter.next();
+            Locker locker = info.getLocker();
+            Lock waitsFor = locker.getWaitingFor();
+            if (ownerSet.contains(locker) ||
+                locker == rootLocker) {
+                /* Found a cycle. */
+                StringBuilder ret = new StringBuilder();
+                ret.append("Transaction ").append(locker.toString());
+                ret.append(" owns LockAddr:").
+                    append(System.identityHashCode(lock));
+                ret.append(" ").append(info).append("\n");
+                ret.append("Transaction ").append(locker.toString());
+                ret.append(" waits for");
+                if (waitsFor == null) {
+                    ret.append(" nothing");
+                } else {
+                    ret.append(" LockAddr:");
+                    ret.append(System.identityHashCode(waitsFor));
+                }
+                ret.append("\n");
+                return ret;
+            }
+            if (waitsFor != null) {
+                ownerSet.add(locker);
+                StringBuilder sb = findDeadlock1(ownerSet, waitsFor,
+                                                 rootLocker);
+                if (sb != null) {
+                    String waitInfo =
+                        "Transaction " + locker + " waits for " +
+                        waitsFor + "\n";
+                    sb.insert(0, waitInfo);
+                    return sb;
+                }
+                ownerSet.remove(locker); // backtrack so other paths can revisit
+            }
+        }
+
+        return null;
+    }
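+
+    /*
+     * Worked example of the search above (hypothetical lockers): txn A
+     * owns lock L1 and waits for L2, while txn B owns L2 and waits for L1.
+     * findDeadlock(L2, A) examines L2's owner B, follows B's waitsFor edge
+     * to L1, and finds A (the rootLocker) among L1's owners, reporting the
+     * cycle A waits for L2, owned by B, which waits for L1, owned by A.
+     */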
+}
diff --git a/src/com/sleepycat/je/txn/LockResult.java b/src/com/sleepycat/je/txn/LockResult.java
new file mode 100644
index 0000000000000000000000000000000000000000..83620a4432dcbb8f9fdc3a70aa6ed26cf8b7819e
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockResult.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockResult.java,v 1.18.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * This class is a container to encapsulate a LockGrantType and a WriteLockInfo
+ * so that they can both be returned from writeLock.
+ */
+public class LockResult {
+    private LockGrantType grant;
+    private WriteLockInfo info;
+    private LN ln;
+
+    /* Made public for unit tests. */
+    public LockResult(LockGrantType grant, WriteLockInfo info) {
+	this.grant = grant;
+	this.info = info;
+    }
+
+    public LN getLN() {
+	return ln;
+    }
+
+    public void setLN(LN ln) {
+	this.ln = ln;
+    }
+
+    public LockGrantType getLockGrant() {
+	return grant;
+    }
+
+    public void setAbortLsn(long abortLsn, boolean abortKnownDeleted) {
+	setAbortLsnInternal(abortLsn, abortKnownDeleted, false);
+    }
+
+    public void setAbortLsn(long abortLsn,
+			    boolean abortKnownDeleted,
+			    boolean createdThisTxn) {
+	setAbortLsnInternal(abortLsn, abortKnownDeleted, createdThisTxn);
+    }
+
+    private void setAbortLsnInternal(long abortLsn,
+				     boolean abortKnownDeleted,
+				     boolean createdThisTxn) {
+	/* info can be null if this is called on behalf of a BasicLocker. */
+	if (info != null &&
+	    info.neverLocked) {
+	    /* Only set if not null, otherwise keep NULL_LSN as abortLsn. */
+	    if (abortLsn != DbLsn.NULL_LSN) {
+		info.abortLsn = abortLsn;
+		info.abortKnownDeleted = abortKnownDeleted;
+	    }
+	    info.createdThisTxn = createdThisTxn;
+	    info.neverLocked = false;
+	}
+    }
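+
+    /*
+     * Sketch of the intended call pattern (a hypothetical caller): the
+     * first write lock a txn takes on a record captures the pre-txn state:
+     *
+     *   LockResult lr = locker.lock(...);         // info.neverLocked is true
+     *   lr.setAbortLsn(oldLsn, oldKnownDeleted);  // recorded for abort
+     *
+     * Later setAbortLsn calls in the same txn are no-ops because
+     * neverLocked has been cleared.
+     */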
+
+    public void copyAbortInfo(WriteLockInfo fromInfo) {
+        if (info != null) {
+            info.copyAbortInfo(fromInfo);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockType.java b/src/com/sleepycat/je/txn/LockType.java
new file mode 100644
index 0000000000000000000000000000000000000000..27cfb287d93078c05b66c23a3a09f9285b679311
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockType.java
@@ -0,0 +1,206 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockType.java,v 1.23.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * LockType is a type safe enumeration of all lock types.  Methods on LockType
+ * objects can be used to determine whether a type conflicts with another
+ * type or can be upgraded to another type.
+ */
+public class LockType {
+
+    /**
+     * Lock types.  Indexes must be kept manually synchronized with the
+     * matrices below.
+     */
+    public static final LockType READ =
+	new LockType(0, false, "READ");
+    public static final LockType WRITE =
+	new LockType(1, true, "WRITE");
+    public static final LockType RANGE_READ =
+	new LockType(2, false, "RANGE_READ");
+    public static final LockType RANGE_WRITE =
+	new LockType(3, true, "RANGE_WRITE");
+    public static final LockType RANGE_INSERT =
+	new LockType(4, false, "RANGE_INSERT");
+
+    /**
+     * NONE is used for requesting a dirty read and does not appear in the
+     * conflict or upgrade matrices.
+     */
+    public static final LockType NONE =
+	new LockType(5, false, "NONE");
+
+    /**
+     * RESTART is used for waiting for a restart and does not appear in the
+     * conflict or upgrade matrices.
+     */
+    public static final LockType RESTART =
+	new LockType(6, false, "RESTART");
+
+    /**
+     * Whenever the conflict matrix is changed, be sure to update this.  For
+     * every type that can cause a RESTART result, call setCausesRestart.  This
+     * could have been determined programmatically but I chose to maintain it
+     * manually to avoid extra code size.
+     */
+    static {
+        RANGE_READ.setCausesRestart();
+        RANGE_WRITE.setCausesRestart();
+    }
+
+    /**
+     * Lock conflict matrix.
+     * @see #getConflict
+     */
+    private static LockConflict[][] conflictMatrix = {
+        { // READ is held and there is a request for:
+            LockConflict.ALLOW,   // READ
+            LockConflict.BLOCK,   // WRITE
+            LockConflict.ALLOW,   // RANGE_READ
+            LockConflict.BLOCK,   // RANGE_WRITE
+            LockConflict.ALLOW,   // RANGE_INSERT
+        },
+        { // WRITE is held and there is a request for:
+            LockConflict.BLOCK,   // READ
+            LockConflict.BLOCK,   // WRITE
+            LockConflict.BLOCK,   // RANGE_READ
+            LockConflict.BLOCK,   // RANGE_WRITE
+            LockConflict.ALLOW,   // RANGE_INSERT
+        },
+        { // RANGE_READ is held and there is a request for:
+            LockConflict.ALLOW,   // READ
+            LockConflict.BLOCK,   // WRITE
+            LockConflict.ALLOW,   // RANGE_READ
+            LockConflict.BLOCK,   // RANGE_WRITE
+            LockConflict.BLOCK,   // RANGE_INSERT
+        },
+        { // RANGE_WRITE is held and there is a request for:
+            LockConflict.BLOCK,   // READ
+            LockConflict.BLOCK,   // WRITE
+            LockConflict.BLOCK,   // RANGE_READ
+            LockConflict.BLOCK,   // RANGE_WRITE
+            LockConflict.BLOCK,   // RANGE_INSERT
+        },
+        { // RANGE_INSERT is held and there is a request for:
+            LockConflict.ALLOW,   // READ
+            LockConflict.ALLOW,   // WRITE
+            LockConflict.RESTART, // RANGE_READ
+            LockConflict.RESTART, // RANGE_WRITE
+            LockConflict.ALLOW,   // RANGE_INSERT
+        },
+    };
+
+    /**
+     * Lock upgrade matrix.
+     * @see #getUpgrade
+     */
+    private static LockUpgrade[][] upgradeMatrix = {
+        { // READ is held and there is a request for:
+            LockUpgrade.EXISTING,                  // READ
+            LockUpgrade.WRITE_PROMOTE,             // WRITE
+            LockUpgrade.RANGE_READ_IMMED,          // RANGE_READ
+            LockUpgrade.RANGE_WRITE_PROMOTE,       // RANGE_WRITE
+            LockUpgrade.ILLEGAL,                   // RANGE_INSERT
+        },
+        { // WRITE is held and there is a request for:
+            LockUpgrade.EXISTING,                  // READ
+            LockUpgrade.EXISTING,                  // WRITE
+            LockUpgrade.RANGE_WRITE_IMMED,         // RANGE_READ
+            LockUpgrade.RANGE_WRITE_IMMED,         // RANGE_WRITE
+            LockUpgrade.ILLEGAL,                   // RANGE_INSERT
+        },
+        { // RANGE_READ is held and there is a request for:
+            LockUpgrade.EXISTING,                  // READ
+            LockUpgrade.RANGE_WRITE_PROMOTE,       // WRITE
+            LockUpgrade.EXISTING,                  // RANGE_READ
+            LockUpgrade.RANGE_WRITE_PROMOTE,       // RANGE_WRITE
+            LockUpgrade.ILLEGAL,                   // RANGE_INSERT
+        },
+        { // RANGE_WRITE is held and there is a request for:
+            LockUpgrade.EXISTING,                  // READ
+            LockUpgrade.EXISTING,                  // WRITE
+            LockUpgrade.EXISTING,                  // RANGE_READ
+            LockUpgrade.EXISTING,                  // RANGE_WRITE
+            LockUpgrade.ILLEGAL,                   // RANGE_INSERT
+        },
+        { // RANGE_INSERT is held and there is a request for:
+            LockUpgrade.ILLEGAL,                   // READ
+            LockUpgrade.ILLEGAL,                   // WRITE
+            LockUpgrade.ILLEGAL,                   // RANGE_READ
+            LockUpgrade.ILLEGAL,                   // RANGE_WRITE
+            LockUpgrade.EXISTING,                  // RANGE_INSERT
+        },
+    };
+
+    private int index;
+    private boolean write;
+    private String name;
+    private boolean causesRestart;
+
+    /**
+     * No lock types can be defined outside this class.
+     */
+    private LockType(int index, boolean write, String name) {
+        this.index = index;
+        this.write = write;
+        this.name = name;
+    }
+
+    /**
+     * Returns true if this is a WRITE or RANGE_WRITE lock.  For RANGE_INSERT,
+     * false is returned because RANGE_INSERT is used to lock the key following
+     * the insertion key, not the insertion key itself.
+     */
+    public final boolean isWriteLock() {
+        return write;
+    }
+
+    /**
+     * Specifies that when this type is requested it can result in
+     * LockGrantType.RESTART.
+     */
+    private void setCausesRestart() {
+        causesRestart = true;
+    }
+
+    /**
+     * Returns whether when this type is requested it can result in
+     * LockGrantType.RESTART.
+     */
+    final boolean getCausesRestart() {
+        return causesRestart;
+    }
+
+    /**
+     * Returns the LockConflict that results when this lock type is held and the
+     * given lock type is requested by another locker.
+     */
+    LockConflict getConflict(LockType requestedType) {
+        return conflictMatrix[index][requestedType.index];
+    }
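+
+    /*
+     * Example, reading the conflict matrix above: with READ held and WRITE
+     * requested by another locker,
+     *
+     *   LockType.READ.getConflict(LockType.WRITE)         // BLOCK
+     *
+     * whereas a RANGE_INSERT holder does not block another locker's WRITE:
+     *
+     *   LockType.RANGE_INSERT.getConflict(LockType.WRITE) // ALLOW
+     */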
+
+    /**
+     * Returns the LockUpgrade that results when this lock type is held and the
+     * given lock type is requested by the same locker.
+     *
+     * <p>For the returned LockUpgrade object, getIllegal will never return
+     * true because this method fires an assertion if getIllegal returns true.
+     */
+    LockUpgrade getUpgrade(LockType requestedType) {
+        LockUpgrade upgrade = upgradeMatrix[index][requestedType.index];
+        assert !upgrade.getIllegal() : toString() + " to " + requestedType;
+        return upgrade;
+    }
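+
+    /*
+     * Example, reading the upgrade matrix above: a locker holding READ
+     * that requests WRITE gets
+     *
+     *   LockType.READ.getUpgrade(LockType.WRITE)   // WRITE_PROMOTE
+     *
+     * i.e. an upgrade to LockType.WRITE with getPromotion() == true, so
+     * the request may have to wait out conflicting readers.
+     */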
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockUpgrade.java b/src/com/sleepycat/je/txn/LockUpgrade.java
new file mode 100644
index 0000000000000000000000000000000000000000..21c973532092ab86732b16f631118f67d1df6cd4
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockUpgrade.java
@@ -0,0 +1,78 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockUpgrade.java,v 1.7.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * LockUpgrade is a type safe enumeration of lock upgrade types.  Methods on
+ * LockUpgrade objects are used to determine whether an upgrade is needed and,
+ * if so, how it should be handled.
+ */
+class LockUpgrade {
+
+    static final LockUpgrade ILLEGAL = new LockUpgrade
+                   (null, false, true);
+    static final LockUpgrade EXISTING = new LockUpgrade
+                   (null, false, false);
+    static final LockUpgrade WRITE_PROMOTE = new LockUpgrade
+                   (LockType.WRITE, true, false);
+    static final LockUpgrade RANGE_READ_IMMED = new LockUpgrade
+                   (LockType.RANGE_READ, false, false);
+    static final LockUpgrade RANGE_WRITE_IMMED = new LockUpgrade
+                   (LockType.RANGE_WRITE, false, false);
+    static final LockUpgrade RANGE_WRITE_PROMOTE = new LockUpgrade
+                   (LockType.RANGE_WRITE, true, false);
+
+    private LockType upgrade;
+    private boolean promotion;
+    private boolean illegal;
+
+    /**
+     * No upgrade types can be defined outside this class.
+     */
+    private LockUpgrade(LockType upgrade, boolean promotion, boolean illegal) {
+        this.upgrade = upgrade;
+        this.promotion = promotion;
+        this.illegal = illegal;
+    }
+
+    /**
+     * This method is called to determine whether the upgrade is illegal.
+     * If true is returned, an internal error has occurred.  This should never
+     * happen since RANGE_INSERT should never be requested along with other
+     * locks by the same locker; a separate locker is used for RANGE_INSERT
+     * locks.
+     */
+    boolean getIllegal() {
+        return illegal;
+    }
+
+    /**
+     * This method is called first to determine whether an upgrade to a new
+     * lock type is needed, and what the new lock type should be.  If null is
+     * returned, the existing lock should be unchanged and no upgrade is
+     * needed.  If non-null is returned, an upgrade to the returned type should
+     * be performed; in this case, call getPromotion to determine how to do the
+     * upgrade.
+     */
+    LockType getUpgrade() {
+        return upgrade;
+    }
+
+    /**
+     * This method is called when getUpgrade returns non-null to determine
+     * whether the upgrade is a true promotion or can be granted immediately.
+     * A true promotion is a change from read to write locking, and may require
+     * waiting if the write lock conflicts with a lock held by another locker.
+     * An upgrade that is not a promotion is just a type change, and never
+     * causes a lock conflict.
+     */
+    boolean getPromotion() {
+        return promotion;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/Locker.java b/src/com/sleepycat/je/txn/Locker.java
new file mode 100644
index 0000000000000000000000000000000000000000..11b493ca7cdb08eca1fec09bbb17d3a9d49dc8bc
--- /dev/null
+++ b/src/com/sleepycat/je/txn/Locker.java
@@ -0,0 +1,822 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Locker.java,v 1.122.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.LockNotGrantedException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.BINReference;
+import com.sleepycat.je.tree.Key;
+
+/**
+ * Locker instances are JE's route to locking and transactional support.  This
+ * class is the abstract base class for BasicLocker, ThreadLocker, Txn,
+ * MasterTxn and ReadonlyTxn.  A Locker instance is in fact only a transaction
+ * shell for reaching the lock manager, and does not by itself guarantee
+ * transactional semantics.  Txn (including Txns marked autoTxn), MasterTxn and
+ * ReadonlyTxn instances are truly transactional.  They have potentially
+ * different transaction begin and end behaviors.
+ */
+public abstract class Locker {
+    @SuppressWarnings("unused")
+    private static final String DEBUG_NAME = Locker.class.getName();
+    protected EnvironmentImpl envImpl;
+    protected LockManager lockManager;
+
+    protected long id;                        // transaction id
+    protected boolean readUncommittedDefault; // read-uncommitted is default
+
+    /* Timeouts */
+    protected boolean defaultNoWait;      // true for non-blocking
+    private long lockTimeoutMillis;       // timeout period for lock, in ms
+    private long txnTimeoutMillis;        // timeout period for txns, in ms
+    private long txnStartMillis;          // for txn timeout determination
+
+    private Lock waitingFor;              // The lock that this txn is
+                                          // waiting for.
+
+    /*
+     * DeleteInfo refers to BINReferences that should be sent to the
+     * INCompressor for asynchronous compressing after the transaction ends.
+     */
+    protected Map<Long,BINReference> deleteInfo;
+
+    /*
+     * To support handle lock transfers, each txn keeps a map of handle locks
+     * to database handles. The key is the handle lock id and the value is a
+     * set of database handles that correspond to that handle lock. This is a
+     * 1-many relationship because a single handle lock can cover multiple
+     * database handles opened by the same transaction.
+     *
+     * These tables need synchronization, so they are Hashtables, not
+     * HashMaps.
+     */
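+
+    /*
+     * Illustrative example (not part of the original source): if one txn
+     * opens the same database twice, both Database handles share a single
+     * handle lock, so the maps would hold, e.g.:
+     *
+     *    handleLockToHandleMap: { 17L -> {dbHandleA, dbHandleB} }
+     *    handleToHandleLockMap: { dbHandleA -> 17L, dbHandleB -> 17L }
+     */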
+
+    /* 1-many, used for commits. */
+    protected Map<Long,Set<Database>> handleLockToHandleMap;
+    /*  1-1, used for aborts. */
+    protected Map<Database,Long> handleToHandleLockMap;
+
+    /**
+     * The thread that created this locker.  Used for debugging, and by the
+     * ThreadLocker subclass. Note that thread may be null if the Locker is
+     * instantiated by reading the log.
+     */
+    protected Thread thread;
+
+    /**
+     * Set to false when close() is called.  After that point no other locker
+     * operations should occur.  We can "assert isOpen" in all methods to check
+     * that close() is only called once.
+     */
+    private boolean isOpen = true;
+
+    /**
+     * True if there is no APIReadLock associated with this Locker.
+     */
+    private boolean noAPIReadLock = false;
+
+    /**
+     * Create a locker. This constructor is called very often, so it should
+     * be as streamlined as possible. It should never be called directly,
+     * because the mandatedId mechanism only works if the generateId() method
+     * is overridden to use the mandatedId value.
+     *
+     * @param envImpl the environment for this locker
+     * @param readUncommittedDefault if true, this transaction does
+     * read-uncommitted by default
+     * @param noWait if true, non-blocking lock requests are used.
+     */
+    protected Locker(EnvironmentImpl envImpl,
+                     boolean readUncommittedDefault,
+                     boolean noWait,
+                     long mandatedId)
+        throws DatabaseException {
+
+        initLocker(envImpl, readUncommittedDefault, noWait, false, mandatedId);
+    }
+
+    /**
+     * Create a locker. This constructor is called very often, so it should
+     * be as streamlined as possible. It should never be called directly,
+     * because the mandatedId mechanism only works if the generateId() method
+     * is overridden to use the mandatedId value.
+     *
+     * @param envImpl the environment for this locker
+     * @param readUncommittedDefault if true, this transaction does
+     * read-uncommitted by default
+     * @param noWait if true, non-blocking lock requests are used.
+     * @param noAPIReadLock if true, the API read lock is not acquired.
+     */
+    protected Locker(EnvironmentImpl envImpl,
+                     boolean readUncommittedDefault,
+                     boolean noWait,
+                     boolean noAPIReadLock,
+                     long mandatedId)
+        throws DatabaseException {
+
+        initLocker(envImpl, readUncommittedDefault, noWait,
+             noAPIReadLock, mandatedId);
+    }
+
+    private void initLocker(EnvironmentImpl envImpl,
+                            boolean readUncommittedDefault,
+                            boolean noWait,
+                            boolean noAPIReadLock,
+                            long mandatedId)
+        throws DatabaseException {
+
+        TxnManager txnManager = envImpl.getTxnManager();
+        this.lockManager = txnManager.getLockManager();
+        this.id = generateId(txnManager, mandatedId);
+        this.envImpl = envImpl;
+        this.readUncommittedDefault = readUncommittedDefault;
+        this.waitingFor = null;
+
+        /* get the default lock timeout. */
+        defaultNoWait = noWait;
+        lockTimeoutMillis = envImpl.getLockTimeout();
+
+        /*
+         * Check the default txn timeout. If non-zero, remember the txn start
+         * time.
+         */
+        txnTimeoutMillis = envImpl.getTxnTimeout();
+
+        if (txnTimeoutMillis != 0) {
+            txnStartMillis = System.currentTimeMillis();
+        } else {
+            txnStartMillis = 0;
+        }
+
+        /* Save the thread used to create the locker. */
+        thread = Thread.currentThread();
+
+        this.noAPIReadLock = noAPIReadLock;
+
+        /*
+         * Do lazy initialization of deleteInfo and handle lock maps, to
+         * conserve memory.
+         */
+    }
+
+    /**
+     * For reading from the log.
+     */
+    Locker() {
+    }
+
+    /**
+     * A Locker has to generate its next id. Some subtypes, like BasicLocker,
+     * have a single id for all instances because they are never used for
+     * recovery. Other subtypes ask the txn manager for an id or use a
+     * specific, mandated id.
+     */
+    protected abstract long generateId(TxnManager txnManager, long mandatedId);
+
+    /**
+     * @return the transaction's id.
+     */
+    public long getId() {
+        return id;
+    }
+
+    /**
+     * @return the default no-wait (non-blocking) setting.
+     */
+    public boolean getDefaultNoWait() {
+        return defaultNoWait;
+    }
+
+    /**
+     * Get the lock timeout period for this locker, in milliseconds.
+     *
+     * WARNING: Be sure to always access the timeout with this accessor, since
+     * it is overridden in BuddyLocker.
+     */
+    public synchronized long getLockTimeout() {
+        return lockTimeoutMillis;
+    }
+
+    /**
+     * Set the lock timeout period for any locks in this transaction,
+     * in milliseconds.
+     *
+     * @param timeout The timeout value for locks in this transaction, in
+     * milliseconds. A value of 0 disables timeouts for the transaction.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative
+     */
+    public synchronized void setLockTimeout(long timeout) {
+
+        if (timeout < 0) {
+            throw new IllegalArgumentException
+                ("the timeout value cannot be negative");
+        } else if (timeout > Math.pow(2, 32)) {
+            throw new IllegalArgumentException
+                ("the timeout value cannot be greater than 2^32");
+        }
+
+        lockTimeoutMillis = timeout;
+    }
+
+    /**
+     * Set the timeout period for this transaction, in milliseconds.
+     *
+     * @param timeout The timeout value for the transaction lifetime, in
+     * milliseconds. A value of 0 disables timeouts for the transaction.
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative.
+     */
+    public synchronized void setTxnTimeout(long timeout) {
+
+        if (timeout < 0) {
+            throw new IllegalArgumentException
+                ("the timeout value cannot be negative");
+        } else if (timeout > Math.pow(2, 32)) {
+            throw new IllegalArgumentException
+                ("the timeout value cannot be greater than 2^32");
+        }
+
+        txnTimeoutMillis = timeout;
+        if (txnTimeoutMillis != 0) {
+            txnStartMillis = System.currentTimeMillis();
+        } else {
+            txnStartMillis = 0;
+        }
+    }
+
+    /**
+     * @return true if the transaction was created with read-uncommitted as
+     * its default.
+     */
+    public boolean isReadUncommittedDefault() {
+        return readUncommittedDefault;
+    }
+
+    Lock getWaitingFor() {
+        return waitingFor;
+    }
+
+    void setWaitingFor(Lock lock) {
+        waitingFor = lock;
+    }
+
+    /**
+     * Set the state of a transaction to ONLY_ABORTABLE.
+     */
+    public void setOnlyAbortable() {
+        /* no-op unless Txn. */
+    }
+
+    public void initApiReadLock()
+        throws DatabaseException {
+
+        if (!noAPIReadLock) {
+            envImpl.acquireAPIReadLock(this);
+        }
+    }
+
+    protected abstract void checkState(boolean ignoreCalledByAbort)
+        throws DatabaseException;
+
+    /*
+     * Obtain and release locks.
+     */
+
+    /**
+     * Abstract method to request a blocking or non-blocking lock of the given
+     * type on the given nodeId.  Unlike the lock() method, this method does
+     * not throw LockNotGrantedException and can therefore be used by
+     * nonBlockingLock to probe for a lock without the overhead of an
+     * exception stack trace.
+     *
+     * @param nodeId is the node to lock.
+     *
+     * @param lockType is the type of lock to request.
+     *
+     * @param noWait is true to override the defaultNoWait setting.  If true,
+     * or if defaultNoWait is true, DENIED is returned rather than blocking if
+     * the lock cannot be granted without waiting.
+     *
+     * @param database is the database containing nodeId.
+     *
+     * @throws DeadlockException if acquiring a blocking lock would result in a
+     * deadlock.
+     */
+    abstract LockResult lockInternal(long nodeId,
+                                     LockType lockType,
+                                     boolean noWait,
+                                     DatabaseImpl database)
+        throws DeadlockException, DatabaseException;
+
+    /**
+     * Request a blocking or non-blocking lock of the given type on the given
+     * nodeId.
+     *
+     * @param nodeId is the node to lock.
+     *
+     * @param lockType is the type of lock to request.
+     *
+     * @param noWait is true to override the defaultNoWait setting.  If true,
+     * or if defaultNoWait is true, throws LockNotGrantedException if the lock
+     * cannot be granted without waiting.
+     *
+     * @param database is the database containing nodeId.
+     *
+     * @throws LockNotGrantedException if a non-blocking lock was denied.
+     *
+     * @throws DeadlockException if acquiring a blocking lock would result in a
+     * deadlock.
+     */
+    public LockResult lock(long nodeId,
+                           LockType lockType,
+                           boolean noWait,
+                           DatabaseImpl database)
+        throws LockNotGrantedException, DeadlockException, DatabaseException {
+
+        assert isOpen;
+
+        LockResult result = lockInternal(nodeId, lockType, noWait, database);
+
+        if (result.getLockGrant() == LockGrantType.DENIED) {
+            /* DENIED can only be returned for a non-blocking lock. */
+            throw new LockNotGrantedException("Non-blocking lock was denied.");
+        } else {
+            return result;
+        }
+    }
+
+    /**
+     * Request a non-blocking lock of the given type on the given nodeId.
+     *
+     * <p>Unlike lock(), this method returns LockGrantType.DENIED if the lock
+     * is denied rather than throwing LockNotGrantedException.  This method
+     * should therefore not be used as the final lock for a user operation,
+     * since in that case LockNotGrantedException should be thrown for a denied
+     * lock.  It is normally used only to probe for a lock, and other recourse
+     * is taken if the lock is denied.</p>
+     *
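+     * <p>Illustrative probe pattern (a sketch, not part of the original
+     * source); the recourse taken on denial is assumed:</p>
+     * <pre>
+     *    LockResult result = locker.nonBlockingLock(nodeId, LockType.READ,
+     *                                               database);
+     *    if (result.getLockGrant() == LockGrantType.DENIED) {
+     *        // lock unavailable without waiting; take other recourse
+     *    }
+     * </pre>
+     *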
+     * @param nodeId is the node to lock.
+     *
+     * @param lockType is the type of lock to request.
+     *
+     * @param database is the database containing nodeId.
+     */
+    public LockResult nonBlockingLock(long nodeId,
+                                      LockType lockType,
+                                      DatabaseImpl database)
+        throws DatabaseException {
+
+        assert isOpen;
+
+        return lockInternal(nodeId, lockType, true, database);
+    }
+
+    /**
+     * Release the lock on this LN and remove it from the transaction's
+     * owning set.
+     */
+    public boolean releaseLock(long nodeId)
+        throws DatabaseException {
+
+        assert isOpen;
+
+        boolean ret = lockManager.release(nodeId, this);
+        removeLock(nodeId);
+        return ret;
+    }
+
+    /**
+     * Revert this lock from a write lock to a read lock.
+     */
+    public void demoteLock(long nodeId)
+        throws DatabaseException {
+
+        assert isOpen;
+
+        /*
+         * If successful, the lock manager will call back to the transaction
+         * and adjust the location of the lock in the lock collection.
+         */
+        lockManager.demote(nodeId, this);
+    }
+
+    /**
+     * Returns whether this locker is transactional.
+     */
+    public abstract boolean isTransactional();
+
+    /**
+     * Returns whether the isolation level of this locker is serializable.
+     */
+    public abstract boolean isSerializableIsolation();
+
+    /**
+     * Returns whether the isolation level of this locker is read-committed.
+     */
+    public abstract boolean isReadCommittedIsolation();
+
+    /**
+     * Returns the underlying Txn if the locker is transactional, or null if
+     * the locker is non-transactional.  For a Txn-based locker, this method
+     * returns 'this'.  For a BuddyLocker, this method may return the buddy.
+     */
+    public abstract Txn getTxnLocker();
+
+    /**
+     * Creates a fresh non-transactional locker, while retaining any
+     * transactional locks held by this locker.  This method is called when the
+     * cursor for this locker is cloned.
+     *
+     * <p>This method must return a locker that shares locks with this
+     * locker, e.g., a ThreadLocker.</p>
+     *
+     * <p>In general, transactional lockers return 'this' when this method is
+     * called, while non-transactional lockers return a new instance.</p>
+     */
+    public abstract Locker newNonTxnLocker()
+        throws DatabaseException;
+
+    /**
+     * Releases any non-transactional locks held by this locker.  This method
+     * is called when the cursor moves to a new position or is closed.
+     *
+     * <p>In general, transactional lockers do nothing when this method is
+     * called, while non-transactional lockers release all locks as if
+     * operationEnd were called.</p>
+     */
+    public abstract void releaseNonTxnLocks()
+        throws DatabaseException;
+
+    /**
+     * Releases locks and closes the locker at the end of a non-transactional
+     * cursor operation.  For a transactional cursor this method should do
+     * nothing, since locks must be held until transaction end.
+     */
+    public abstract void nonTxnOperationEnd()
+        throws DatabaseException;
+
+    /**
+     * Returns whether this locker can share locks with the given locker.
+     *
+     * <p>All lockers share locks with a BuddyLocker whose buddy is this
+     * locker.  To support BuddyLocker when overriding this method, always
+     * return true if this implementation (super.sharesLocksWith(...)) returns
+     * true.</p>
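+     *
+     * <p>Illustrative override pattern (a sketch, not part of the original
+     * source; the subclass-specific test is assumed):</p>
+     * <pre>
+     *    public boolean sharesLocksWith(Locker other) {
+     *        if (super.sharesLocksWith(other)) {
+     *            return true;
+     *        }
+     *        return sharesWithSameSubclass(other); // assumed helper
+     *    }
+     * </pre>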
+     */
+    public boolean sharesLocksWith(Locker other) {
+        if (other instanceof BuddyLocker) {
+            BuddyLocker buddy = (BuddyLocker) other;
+            return buddy.getBuddy() == this;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * The equivalent of calling operationEnd(true).
+     */
+    public final void operationEnd()
+        throws DatabaseException {
+
+        operationEnd(true);
+    }
+
+    /**
+     * A SUCCESS status is the equivalent of operationOK being true.
+     */
+    public final void operationEnd(OperationStatus status)
+        throws DatabaseException {
+
+        operationEnd(status == OperationStatus.SUCCESS);
+    }
+
+    /**
+     * Different types of transactions do different things when the operation
+     * ends. Txn does nothing, auto Txn commits or aborts, and BasicLocker (and
+     * its subclasses) just releases locks.
+     *
+     * @param operationOK is whether the operation succeeded, since
+     * that may impact ending behavior (e.g., for an auto Txn).
+     */
+    public abstract void operationEnd(boolean operationOK)
+        throws DatabaseException;
+
+    /**
+     * Should be called by all subclasses when the locker is no longer used.
+     * For Txns and auto Txns this is at commit or abort.  For
+     * non-transactional lockers it is at operationEnd.
+     */
+    void close()
+        throws DatabaseException {
+
+        if (!noAPIReadLock) {
+            envImpl.releaseAPIReadLock(this);
+        }
+
+        isOpen = false;
+    }
+
+    /**
+     * We're at the end of an operation. Move this handle lock to the
+     * appropriate owner.
+     */
+    public abstract void setHandleLockOwner(boolean operationOK,
+                                            Database dbHandle,
+                                            boolean dbIsClosing)
+        throws DatabaseException;
+
+    /**
+     * Tell this transaction about a cursor.
+     */
+    public abstract void registerCursor(CursorImpl cursor)
+        throws DatabaseException;
+
+    /**
+     * Remove a cursor from this txn.
+     */
+    public abstract void unRegisterCursor(CursorImpl cursor)
+        throws DatabaseException;
+
+    /*
+     * Transactional support
+     */
+
+    /**
+     * @return the abort LSN for this node.
+     */
+    public abstract long getAbortLsn(long nodeId)
+        throws DatabaseException;
+
+    /**
+     * @return the WriteLockInfo for this node.
+     */
+    public abstract WriteLockInfo getWriteLockInfo(long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Database operations like remove and truncate leave behind
+     * residual DatabaseImpls that must be purged at transaction
+     * commit or abort.
+     */
+    public abstract void markDeleteAtTxnEnd(DatabaseImpl db,
+                                            boolean deleteAtCommit)
+        throws DatabaseException;
+
+    /**
+     * Add delete information, to be added to the INCompressor queue
+     * when the transaction ends.
+     */
+    public void addDeleteInfo(BIN bin, Key deletedKey)
+        throws DatabaseException {
+
+        synchronized (this) {
+            /* Maintain only one binRef per node. */
+            if (deleteInfo == null) {
+                deleteInfo = new HashMap<Long,BINReference>();
+            }
+            Long nodeId = Long.valueOf(bin.getNodeId());
+            BINReference binRef = deleteInfo.get(nodeId);
+            if (binRef == null) {
+                binRef = bin.createReference();
+                deleteInfo.put(nodeId, binRef);
+            }
+            binRef.addDeletedKey(deletedKey);
+        }
+    }
+
+    /*
+     * Manage locks owned by this transaction. Note that transactions that will
+     * be multithreaded must override these methods and provide synchronized
+     * implementations.
+     */
+
+    /**
+     * Add a lock to set owned by this transaction.
+     */
+    protected abstract void addLock(Long nodeId,
+                                    LockType type,
+                                    LockGrantType grantStatus)
+        throws DatabaseException;
+
+    /**
+     * @return true if this transaction created this node,
+     * for an operation with transactional semantics.
+     */
+    public abstract boolean createdNode(long nodeId)
+        throws DatabaseException;
+
+    /**
+     * Remove the lock from the set owned by this transaction. If specified to
+     * LockManager.release, the lock manager will call this when it is
+     * releasing a lock.
+     */
+    abstract void removeLock(long nodeId)
+        throws DatabaseException;
+
+    /**
+     * A lock is being demoted. Move it from the write collection into the read
+     * collection.
+     */
+    abstract void moveWriteToReadLock(long nodeId, Lock lock);
+
+    /**
+     * Get lock count, for per-transaction lock stats and internal debugging.
+     */
+    public abstract LockStats collectStats(LockStats stats)
+        throws DatabaseException;
+
+    /*
+     * Check txn timeout, if set. Called by the lock manager when blocking on a
+     * lock.
+     */
+    public boolean isTimedOut()
+        throws DatabaseException {
+
+        long timeout = getTxnTimeout();
+        if (timeout != 0) {
+            long diff = System.currentTimeMillis() - txnStartMillis;
+            if (diff > timeout) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Get the transaction timeout period for this locker, in milliseconds.
+     *
+     * public for jca/ra/JELocalTransaction.
+     *
+     * WARNING: Be sure to always access the timeout with this accessor, since
+     * it is overridden in BuddyLocker.
+     */
+    public synchronized long getTxnTimeout() {
+        return txnTimeoutMillis;
+    }
+
+    long getTxnStartMillis() {
+        return txnStartMillis;
+    }
+
+    /**
+     * Remove this Database from the protected Database handle set.
+     */
+    void unregisterHandle(Database dbHandle) {
+
+        /*
+         * handleToHandleLockMap may be null if the db handle was never really
+         * added. This might be the case because of an unregisterHandle that
+         * comes from a finally clause, where the db handle was never
+         * successfully opened.
+         */
+        if (handleToHandleLockMap != null) {
+            handleToHandleLockMap.remove(dbHandle);
+        }
+    }
+
+    /**
+     * Remember how handle locks and handles match up.
+     */
+    public void addToHandleMaps(Long handleLockId,
+                                Database databaseHandle) {
+        Set<Database> dbHandleSet = null;
+        if (handleLockToHandleMap == null) {
+
+            /*
+             * We do lazy initialization of the maps, since they're used
+             * infrequently.
+             */
+            handleLockToHandleMap = new Hashtable<Long,Set<Database>>();
+            handleToHandleLockMap = new Hashtable<Database,Long>();
+        } else {
+            dbHandleSet = handleLockToHandleMap.get(handleLockId);
+        }
+
+        if (dbHandleSet == null) {
+            dbHandleSet = new HashSet<Database>();
+            handleLockToHandleMap.put(handleLockId, dbHandleSet);
+        }
+
+        /* Map handle lockIds -> 1 or more database handles. */
+        dbHandleSet.add(databaseHandle);
+        /* Map database handles -> handle lock id */
+        handleToHandleLockMap.put(databaseHandle, handleLockId);
+    }
+
+    /**
+     * @return true if this txn is willing to give up the handle lock to
+     * another txn before this txn ends.
+     */
+    public boolean isHandleLockTransferrable() {
+        return true;
+    }
+
+    /**
+     * The currentTxn passes responsibility for this db handle lock to a txn
+     * owned by the Database object.
+     */
+    void transferHandleLockToHandle(Database dbHandle)
+        throws DatabaseException {
+
+        /*
+         * Transfer responsibility for this db lock from this txn to a new
+         * protector.
+         */
+        Locker holderTxn = BasicLocker.createBasicLocker(envImpl, false, true);
+        transferHandleLock(dbHandle, holderTxn, true);
+    }
+
+    /**
+     * Transfer responsibility for dbHandle's handle lock from this locker to
+     * destLocker, optionally demoting the lock to a read lock.
+     */
+    public void transferHandleLock(Database dbHandle,
+                                   Locker destLocker,
+                                   boolean demoteToRead)
+        throws DatabaseException {
+
+        /*
+         * Transfer responsibility for dbHandle's handle lock from this txn to
+         * destLocker. If the dbHandle's databaseImpl is null, this handle
+         * wasn't opened successfully.
+         */
+        if (DbInternal.dbGetDatabaseImpl(dbHandle) != null) {
+            Long handleLockId = handleToHandleLockMap.get(dbHandle);
+            if (handleLockId != null) {
+                /* We have a handle lock for this db. */
+                long nodeId = handleLockId.longValue();
+
+                /* Move this lock to the destination txn. */
+                lockManager.transfer(nodeId, this, destLocker, demoteToRead);
+
+                /*
+                 * Make the destination txn remember that it now owns this
+                 * handle lock.
+                 */
+                destLocker.addToHandleMaps(handleLockId, dbHandle);
+
+                /* Take this out of the handle lock map. */
+                Set<Database> dbHandleSet =
+                    handleLockToHandleMap.get(handleLockId);
+                Iterator<Database> iter = dbHandleSet.iterator();
+                while (iter.hasNext()) {
+                    if ((iter.next()) == dbHandle) {
+                        iter.remove();
+                        break;
+                    }
+                }
+                if (dbHandleSet.size() == 0) {
+                    handleLockToHandleMap.remove(handleLockId);
+                }
+
+                /*
+                 * This Database must remember which txn owns its handle lock.
+                 */
+                DbInternal.dbSetHandleLocker(dbHandle, destLocker);
+            }
+        }
+    }
+
+    /*
+     * Helpers
+     */
+    @Override
+    public String toString() {
+        String className = getClass().getName();
+        className = className.substring(className.lastIndexOf('.') + 1);
+
+        return System.identityHashCode(this) + " " + Long.toString(id) + "_" +
+               ((thread == null) ? "" : thread.getName()) + "_" +
+               className;
+    }
+
+    /**
+     * Dump lock table, for debugging
+     */
+    public void dumpLockTable()
+        throws DatabaseException {
+
+        lockManager.dump();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockerFactory.java b/src/com/sleepycat/je/txn/LockerFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..0c71248c7ca94b07312b2465031ceb2897cfae53
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockerFactory.java
@@ -0,0 +1,262 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockerFactory.java,v 1.18.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.ReplicationContext;
+
+/**
+ * Factory of static methods for creating Locker objects.
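+ *
+ * <p>Illustrative usage sketch (not part of the original source); env,
+ * userTxn and dbIsTransactional are assumed to be supplied by the
+ * caller:</p>
+ * <pre>
+ *    Locker locker = LockerFactory.getWritableLocker
+ *        (env, userTxn, dbIsTransactional,
+ *         false);  // autoTxnIsReplicated
+ *    try {
+ *        // do the write operation under 'locker'
+ *        locker.operationEnd(true);
+ *    } catch (DatabaseException e) {
+ *        locker.operationEnd(false);
+ *        throw e;
+ *    }
+ * </pre>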
+ */
+public class LockerFactory {
+
+    /**
+     * Get a locker for a writable operation, checking whether the db and
+     * environment are transactional. Must return a non-null locker.
+     */
+    public static Locker getWritableLocker(Environment env,
+                                           Transaction userTxn,
+                                           boolean dbIsTransactional,
+                                           boolean autoTxnIsReplicated)
+        throws DatabaseException {
+
+        return getWritableLocker
+	    (env, userTxn, dbIsTransactional, false /*retainNonTxnLocks*/,
+	     autoTxnIsReplicated, null /*autoCommitConfig*/);
+    }
+
+    /**
+     * Get a locker for a writable operation, also specifying whether to retain
+     * non-transactional locks when a new locker must be created.
+     *
+     * @param retainNonTxnLocks is true for DbTree operations, so that the
+     * handle lock may be transferred out of the locker when the operation is
+     * complete.
+     *
+     * @param autoTxnIsReplicated is true if this transaction is
+     * executed on a rep group master, and needs to be broadcast.
+     * Currently, all application-created transactions are of the type
+     * com.sleepycat.je.txn.Txn, and are replicated if the parent
+     * environment is replicated. Auto Txns are trickier because they may
+     * be created for a local write operation, such as log cleaning.
+     */
+    public static Locker getWritableLocker(Environment env,
+                                           Transaction userTxn,
+                                           boolean dbIsTransactional,
+                                           boolean retainNonTxnLocks,
+                                           boolean autoTxnIsReplicated,
+                                           TransactionConfig autoCommitConfig)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        boolean envIsTransactional = envImpl.isTransactional();
+
+	if (userTxn == null) {
+	    Transaction xaLocker = env.getThreadTransaction();
+	    if (xaLocker != null) {
+		return DbInternal.getLocker(xaLocker);
+	    }
+	}
+
+        if (dbIsTransactional && userTxn == null) {
+
+            if (autoCommitConfig == null) {
+                autoCommitConfig = DbInternal.getDefaultTxnConfig(env);
+            }
+
+            return Txn.createAutoTxn(envImpl,
+                                     autoCommitConfig,
+                                     false, /*noAPIReadLock*/
+                                     (autoTxnIsReplicated ?
+                                     ReplicationContext.MASTER :
+                                     ReplicationContext.NO_REPLICATE));
+
+        } else if (userTxn == null) {
+
+            if (retainNonTxnLocks) {
+                return BasicLocker.createBasicLocker(envImpl);
+            } else {
+                return ThreadLocker.createThreadLocker(envImpl);
+            }
+
+        } else {
+
+            /*
+             * The user provided a transaction, so the environment and the
+             * database must have been opened transactionally.
+             */
+            if (!envIsTransactional) {
+                throw new DatabaseException
+		    ("A Transaction cannot be used because the"+
+		     " environment was opened" +
+		     " non-transactionally");
+            }
+            if (!dbIsTransactional) {
+                throw new DatabaseException
+		    ("A Transaction cannot be used because the" +
+		     " database was opened" +
+		     " non-transactionally");
+            }
+
+            /*
+             * Use the locker for the given transaction.  For read-committed,
+             * wrap the given transactional locker in a special locker for that
+             * isolation level.  But if retainNonTxnLocks we cannot use
+             * read-committed, since retainNonTxnLocks is used for handle locks
+             * that must be retained across operations.
+             */
+            Locker locker = DbInternal.getLocker(userTxn);
+            if (locker.isReadCommittedIsolation() && !retainNonTxnLocks) {
+                return ReadCommittedLocker.
+		    createReadCommittedLocker(envImpl, locker);
+            } else {
+                return locker;
+            }
+        }
+    }
+
+    /**
+     * Get a locker for a read or cursor operation.
+     * See getWritableLocker for an explanation of retainNonTxnLocks.
+     */
+    public static Locker getReadableLocker(Environment env,
+                                           Transaction userTxn,
+                                           boolean dbIsTransactional,
+                                           boolean retainNonTxnLocks,
+                                           boolean readCommittedIsolation)
+        throws DatabaseException {
+
+        if (userTxn != null && !dbIsTransactional) {
+            throw new DatabaseException
+                ("A Transaction cannot be used because the" +
+                 " database was opened" +
+                 " non-transactionally");
+        }
+
+        Locker locker = null;
+	if (userTxn != null) {
+
+            /*
+             * Use the locker for the given transaction.  Request read-committed
+             * if that isolation level is configured for the transaction, or if
+             * true is passed for the parameter (this is the case when
+             * read-committed is configured for the cursor).
+             */
+            locker = DbInternal.getLocker(userTxn);
+            if (locker.isReadCommittedIsolation()) {
+                readCommittedIsolation = true;
+            }
+        }
+
+        return getReadableLocker(env, locker, retainNonTxnLocks,
+				 readCommittedIsolation);
+    }
+
+    /**
+     * Get a locker for this database handle for a read or cursor operation.
+     * See getWritableLocker for an explanation of retainNonTxnLocks.
+     */
+    public static Locker getReadableLocker(Environment env,
+                                           Database dbHandle,
+                                           Locker locker,
+                                           boolean retainNonTxnLocks,
+                                           boolean readCommittedIsolation)
+        throws DatabaseException {
+
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(dbHandle);
+        if (!dbImpl.isTransactional() &&
+            locker != null &&
+            locker.isTransactional()) {
+            throw new DatabaseException
+                ("A Transaction cannot be used because the" +
+                 " database was opened" +
+                 " non-transactionally");
+        }
+
+        /*
+         * Don't reuse a non-transactional locker unless retaining
+         * non-transactional locks was requested.
+         */
+        if (locker != null &&
+            !locker.isTransactional() &&
+            !retainNonTxnLocks) {
+            locker = null;
+        }
+
+        /*
+         * Request read-committed if that isolation level is configured for the
+         * locker being reused, or if true is passed for the parameter (this is
+         * the case when read-committed is configured for the cursor).
+         */
+        if (locker != null && locker.isReadCommittedIsolation()) {
+            readCommittedIsolation = true;
+        }
+
+        return getReadableLocker(env, locker, retainNonTxnLocks,
+				 readCommittedIsolation);
+    }
+
+    /**
+     * Get a locker for a read or cursor operation.
+     * See getWritableLocker for an explanation of retainNonTxnLocks.
+     */
+    private static Locker getReadableLocker(Environment env,
+                                            Locker locker,
+                                            boolean retainNonTxnLocks,
+                                            boolean readCommittedIsolation)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+	if (locker == null) {
+	    Transaction xaTxn = env.getThreadTransaction();
+	    if (xaTxn != null) {
+		return DbInternal.getLocker(xaTxn);
+	    }
+	}
+
+        if (locker == null) {
+
+            /*
+             * A non-transactional locker is requested.  If we're retaining
+             * non-transactional locks across operations, use a BasicLocker
+             * since the locker may be used across threads; this is used when
+             * acquiring handle locks internally (open, close, remove, etc.).
+             * Otherwise, use a ThreadLocker to avoid self-deadlocks within
+             * the same thread; this is used for ordinary user operations.
+             */
+            if (retainNonTxnLocks) {
+                locker = BasicLocker.createBasicLocker(envImpl);
+            } else {
+                locker = ThreadLocker.createThreadLocker(envImpl);
+            }
+        } else {
+
+            /*
+             * Use the given locker.  For read-committed, wrap the given
+             * transactional locker in a special locker for that isolation
+             * level.  But if retainNonTxnLocks we cannot use read-committed,
+             * since retainNonTxnLocks is used for handle locks that must be
+             * retained across operations.
+             */
+            if (readCommittedIsolation && !retainNonTxnLocks) {
+                locker = ReadCommittedLocker.
+		    createReadCommittedLocker(envImpl, locker);
+            }
+        }
+        return locker;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/PreparedTxn.java b/src/com/sleepycat/je/txn/PreparedTxn.java
new file mode 100644
index 0000000000000000000000000000000000000000..df344f73c51d1481dd40b67ac159c92a0749467f
--- /dev/null
+++ b/src/com/sleepycat/je/txn/PreparedTxn.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PreparedTxn.java,v 1.6.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.ReplicationContext;
+
+/**
+ * A PreparedTxn is used at recovery for processing a TXN_PREPARE log entry. It
+ * provides essentially the same functionality as a Txn but lets the calling
+ * code set the transaction id.
+ */
+public class PreparedTxn extends Txn {
+
+    private PreparedTxn(EnvironmentImpl envImpl,
+                       TransactionConfig config,
+                       long mandatedId)
+        throws DatabaseException {
+
+        super(envImpl, config, true /* noAPIReadLock */, mandatedId);
+        setRepContext(ReplicationContext.NO_REPLICATE);
+    }
+
+    public static PreparedTxn createPreparedTxn(EnvironmentImpl envImpl,
+						TransactionConfig config,
+						long mandatedId)
+        throws DatabaseException {
+
+	PreparedTxn ret = null;
+	try {
+	    ret = new PreparedTxn(envImpl, config, mandatedId);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    /* Guard against NPE: the constructor itself may have thrown. */
+	    if (ret != null) {
+		ret.close(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
+
+    /**
+     * PreparedTxns use the mandated id.
+     */
+    @Override
+    protected long generateId(TxnManager txnManager,
+                              long mandatedId) {
+        return mandatedId;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/ReadCommittedLocker.java b/src/com/sleepycat/je/txn/ReadCommittedLocker.java
new file mode 100644
index 0000000000000000000000000000000000000000..d1dfe1a72b18b983403b28f947b2ca8cb2692bc7
--- /dev/null
+++ b/src/com/sleepycat/je/txn/ReadCommittedLocker.java
@@ -0,0 +1,194 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReadCommittedLocker.java,v 1.13.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.Key;
+
+/**
+ * Extends BuddyLocker to acquire write locks using the buddy locker (the
+ * transaction locker).  This is used for ReadCommitted (Degree 2) isolation.
+ */
+public class ReadCommittedLocker extends BuddyLocker {
+
+    /**
+     * Creates a ReadCommittedLocker.
+     * @param buddy is a transactional locker that will be used for acquiring
+     * write locks.
+     */
+    private ReadCommittedLocker(EnvironmentImpl env, Locker buddy)
+        throws DatabaseException {
+
+        /*
+         * If the buddy param is a read-committed locker, reach in to get its
+         * transactional buddy locker.
+         */
+        super(env,
+              (buddy instanceof ReadCommittedLocker) ?
+              ((ReadCommittedLocker) buddy).getBuddy() : buddy);
+
+        assert getBuddy().isTransactional();
+    }
+
+    public static
+	ReadCommittedLocker createReadCommittedLocker(EnvironmentImpl env,
+						      Locker buddy)
+        throws DatabaseException {
+
+	ReadCommittedLocker ret = null;
+	try {
+	    ret = new ReadCommittedLocker(env, buddy);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    /* Guard against NPE: the constructor itself may have thrown. */
+	    if (ret != null) {
+		ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
+
+    /**
+     * Returns a new ReadCommittedLocker that shares locks with this locker by
+     * virtue of both lockers only holding READ locks.  The buddy locker
+     * underlying both ReadCommittedLocker lockers is the same transactional
+     * locker, so WRITE locks are also shared.
+     */
+    @Override
+    public Locker newNonTxnLocker()
+        throws DatabaseException {
+
+        /*
+         * getBuddy().newNonTxnLocker() will return the transactional buddy
+         * locker itself (same as getBuddy), but we call newNonTxnLocker for
+         * consistency.
+         */
+        return ReadCommittedLocker.createReadCommittedLocker
+	    (envImpl, getBuddy().newNonTxnLocker());
+    }
+
+    /**
+     * Forwards write locks to the buddy locker (the transaction locker).
+     *
+     * @see Locker#lockInternal
+     */
+    @Override
+    LockResult lockInternal(long nodeId,
+			    LockType lockType,
+                            boolean noWait,
+                            DatabaseImpl database)
+        throws DatabaseException {
+
+        if (lockType.isWriteLock()) {
+            return getBuddy().lockInternal(nodeId, lockType, noWait, database);
+        } else {
+            return super.lockInternal(nodeId, lockType, noWait, database);
+        }
+    }
+
+    /**
+     * Releases the lock from this locker, or if not owned by this locker then
+     * releases it from the buddy locker.
+     */
+    @Override
+    public boolean releaseLock(long nodeId)
+        throws DatabaseException {
+
+	boolean ret = true;
+        if (!lockManager.release(nodeId, this)) {
+            ret = lockManager.release(nodeId, getBuddy());
+        }
+	removeLock(nodeId);
+	return ret;
+    }
+
+    /**
+     * Forwards this method to the transactional buddy.  Since the buddy
+     * handles write locks, it knows whether this transaction created the node.
+     */
+    @Override
+    public boolean createdNode(long nodeId)
+        throws DatabaseException {
+
+        return getBuddy().createdNode(nodeId);
+    }
+
+    /**
+     * Forwards this method to the transactional buddy.  The buddy handles
+     * write locks and therefore handles abort information.
+     */
+    @Override
+    public long getAbortLsn(long nodeId)
+        throws DatabaseException {
+
+        return getBuddy().getAbortLsn(nodeId);
+    }
+
+    /**
+     * @return the WriteLockInfo for this node.
+     */
+    @Override
+    public WriteLockInfo getWriteLockInfo(long nodeId)
+	throws DatabaseException {
+
+	return getBuddy().getWriteLockInfo(nodeId);
+    }
+
+    /**
+     * Forwards this method to the transactional buddy.  The buddy handles
+     * write locks and therefore handles delete information.
+     */
+    @Override
+    public void addDeleteInfo(BIN bin, Key deletedKey)
+        throws DatabaseException {
+
+        getBuddy().addDeleteInfo(bin, deletedKey);
+    }
+
+    /**
+     * Forwards this method to the transactional buddy.  The buddy Txn tracks
+     * cursors.
+     */
+    @Override
+    public void registerCursor(CursorImpl cursor)
+        throws DatabaseException {
+
+        getBuddy().registerCursor(cursor);
+    }
+
+    /**
+     * Forwards this method to the transactional buddy.  The buddy Txn tracks
+     * cursors.
+     */
+    @Override
+    public void unRegisterCursor(CursorImpl cursor)
+        throws DatabaseException {
+
+        getBuddy().unRegisterCursor(cursor);
+    }
+
+    /**
+     * Is always transactional because the buddy locker is transactional.
+     */
+    @Override
+    public boolean isTransactional() {
+        return true;
+    }
+
+    /**
+     * Is always read-committed isolation.
+     */
+    @Override
+    public boolean isReadCommittedIsolation() {
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/SyncedLockManager.java b/src/com/sleepycat/je/txn/SyncedLockManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..8c960a913dca38b57d804353f6ca8d540e09b0e8
--- /dev/null
+++ b/src/com/sleepycat/je/txn/SyncedLockManager.java
@@ -0,0 +1,237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SyncedLockManager.java,v 1.18.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Set;
+
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * SyncedLockManager uses the synchronized keyword to implement its critical
+ * sections.
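+ *
+ * <p>Every operation hashes the node id to a lock-table stripe and
+ * synchronizes on that stripe's latch before calling the corresponding
+ * ...Internal method, e.g. (the pattern used throughout this class):</p>
+ * <pre>
+ *    int lockTableIndex = getLockTableIndex(nodeId);
+ *    synchronized(lockTableLatches[lockTableIndex]) {
+ *        return isLockedInternal(nodeId, lockTableIndex);
+ *    }
+ * </pre>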
+ */
+public class SyncedLockManager extends LockManager {
+
+    public SyncedLockManager(EnvironmentImpl envImpl)
+    	throws DatabaseException {
+
+        super(envImpl);
+    }
+
+    /**
+     * @see LockManager#lookupLock
+     */
+    protected Lock lookupLock(Long nodeId)
+	throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+	synchronized(lockTableLatches[lockTableIndex]) {
+	    return lookupLockInternal(nodeId, lockTableIndex);
+	}
+    }
+
+    /**
+     * @see LockManager#attemptLock
+     */
+    protected LockAttemptResult attemptLock(Long nodeId,
+                                            Locker locker,
+                                            LockType type,
+                                            boolean nonBlockingRequest)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return attemptLockInternal
+		(nodeId, locker, type, nonBlockingRequest,
+		 lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#makeTimeoutMsg
+     */
+    protected DeadlockException makeTimeoutMsg(String lockOrTxn,
+					       Locker locker,
+					       long nodeId,
+					       LockType type,
+					       LockGrantType grantType,
+					       Lock useLock,
+					       long timeout,
+					       long start,
+					       long now,
+					       DatabaseImpl database) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return makeTimeoutMsgInternal(lockOrTxn, locker, nodeId, type,
+                                          grantType, useLock, timeout,
+                                          start, now, database);
+        }
+    }
+
+    /**
+     * @see LockManager#releaseAndFindNotifyTargets
+     */
+    protected Set<Locker> releaseAndFindNotifyTargets(long nodeId, 
+                                                      Locker locker)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return releaseAndFindNotifyTargetsInternal
+		(nodeId, locker, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#transfer
+     */
+    void transfer(long nodeId,
+                  Locker owningLocker,
+                  Locker destLocker,
+                  boolean demoteToRead)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            transferInternal(nodeId, owningLocker, destLocker,
+			     demoteToRead, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#transferMultiple
+     */
+    void transferMultiple(long nodeId,
+                          Locker owningLocker,
+                          Locker[] destLockers)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            transferMultipleInternal(nodeId, owningLocker,
+				     destLockers, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#demote
+     */
+    void demote(long nodeId, Locker locker)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            demoteInternal(nodeId, locker, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#isLocked
+     */
+    boolean isLocked(Long nodeId) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return isLockedInternal(nodeId, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#isOwner
+     */
+    boolean isOwner(Long nodeId, Locker locker, LockType type) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return isOwnerInternal(nodeId, locker, type, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#isWaiter
+     */
+    boolean isWaiter(Long nodeId, Locker locker) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return isWaiterInternal(nodeId, locker, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#nWaiters
+     */
+    int nWaiters(Long nodeId) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return nWaitersInternal(nodeId, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#nOwners
+     */
+    int nOwners(Long nodeId) {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return nOwnersInternal(nodeId, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#getWriteOwnerLocker
+     */
+    Locker getWriteOwnerLocker(Long nodeId)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return getWriteOwnerLockerInternal(nodeId, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#validateOwnership
+     */
+    protected boolean validateOwnership(Long nodeId,
+                                        Locker locker,
+                                        LockType type,
+                                        boolean flushFromWaiters,
+					MemoryBudget mb)
+        throws DatabaseException {
+
+	int lockTableIndex = getLockTableIndex(nodeId);
+        synchronized(lockTableLatches[lockTableIndex]) {
+            return validateOwnershipInternal
+		(nodeId, locker, type, flushFromWaiters, mb, lockTableIndex);
+        }
+    }
+
+    /**
+     * @see LockManager#dumpLockTable
+     */
+    protected void dumpLockTable(LockStats stats)
+        throws DatabaseException {
+
+	for (int i = 0; i < nLockTables; i++) {
+	    synchronized(lockTableLatches[i]) {
+		dumpLockTableInternal(stats, i);
+	    }
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/txn/ThinLockImpl.java b/src/com/sleepycat/je/txn/ThinLockImpl.java
new file mode 100644
index 0000000000000000000000000000000000000000..51e16078aa17e456378c43ef116f20a7c6a1064f
--- /dev/null
+++ b/src/com/sleepycat/je/txn/ThinLockImpl.java
@@ -0,0 +1,215 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ThinLockImpl.java,v 1.6.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * Implements a lightweight Lock with no waiters and only a single owner.  If,
+ * during an operation (lock, transfer, or transferMultiple), more than one
+ * owner or waiter is required, then this will mutate to a LockImpl, perform
+ * the requested operation, and return the new LockImpl to the caller.
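+ *
+ * <p>Illustrative sketch (not part of the original source) of the mutation
+ * contract: callers must continue with the returned lock, which may no
+ * longer be this ThinLockImpl:</p>
+ * <pre>
+ *    lock = lock.transfer(nodeId, currentLocker, destLocker, mb,
+ *                         lockTableIndex);
+ *    // 'lock' may now be a LockImpl with multiple owners or waiters
+ * </pre>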
+ *
+ * Public for Sizeof.
+ */
+public class ThinLockImpl extends LockInfo implements Lock {
+
+    /**
+     * Create a Lock.  Public for Sizeof.
+     */
+    public ThinLockImpl() {
+	super(null, null);
+    }
+
+    public List<LockInfo> getWaitersListClone() {
+        return Collections.emptyList();
+    }
+
+    public void flushWaiter(Locker locker,
+			    MemoryBudget mb,
+			    int lockTableIndex) {
+
+	/* Do nothing: thin locks never have waiters. */
+    }
+
+    public Set<LockInfo> getOwnersClone() {
+
+	Set<LockInfo> ret = new HashSet<LockInfo>();
+	if (locker != null) {
+	    ret.add(this);
+	}
+	return ret;
+    }
+
+    public boolean isOwner(Locker locker, LockType lockType) {
+
+	if (locker != this.locker) {
+	    return false;
+	}
+
+	if (lockType == this.lockType) {
+	    return true;
+	}
+
+	if (this.lockType != null) {
+	    LockUpgrade upgrade = this.lockType.getUpgrade(lockType);
+	    /* A non-promotion upgrade means the held lock already covers it. */
+	    if (!upgrade.getPromotion()) {
+		return true;
+	    }
+	}
+	return false;
+    }
+
+    public boolean isOwnedWriteLock(Locker locker) {
+
+	if (locker != this.locker) {
+	    return false;
+	}
+
+	if (this.lockType != null) {
+	    return this.lockType.isWriteLock();
+	} else {
+	    return false;
+	}
+    }
+
+    public boolean isWaiter(Locker locker) {
+
+	/* There can never be waiters on Thin Locks. */
+        return false;
+    }
+
+    public int nWaiters() {
+        return 0;
+    }
+
+    public int nOwners() {
+        return (locker == null ? 0 : 1);
+    }
+
+    public LockAttemptResult lock(LockType requestType,
+				  Locker locker,
+				  boolean nonBlockingRequest,
+				  MemoryBudget mb,
+				  int lockTableIndex)
+	throws DatabaseException {
+
+	if (this.locker != null &&
+	    this.locker != locker) {
+	    /* Lock is already held by someone else so mutate. */
+	    Lock newLock =
+		new LockImpl(new LockInfo(this.locker, this.lockType));
+	    return newLock.lock(requestType, locker, nonBlockingRequest,
+				mb, lockTableIndex);
+	}
+
+	LockGrantType grant = null;
+	if (this.locker == null) {
+	    this.locker = locker;
+	    this.lockType = requestType;
+	    grant = LockGrantType.NEW;
+	} else {
+
+	    /* The requestor holds this lock.  Check for upgrades. */
+	    LockUpgrade upgrade = lockType.getUpgrade(requestType);
+	    if (upgrade.getUpgrade() == null) {
+		grant = LockGrantType.EXISTING;
+	    } else {
+		LockType upgradeType = upgrade.getUpgrade();
+		assert upgradeType != null;
+		this.lockType = upgradeType;
+		grant = (upgrade.getPromotion() ?
+			 LockGrantType.PROMOTION : LockGrantType.EXISTING);
+	    }
+	}
+	return new LockAttemptResult(this, grant, false);
+    }
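+
+    /*
+     * Editor's sketch (not part of the original source): a caller of lock()
+     * must install the mutated lock when a ThinLockImpl grows into a
+     * LockImpl.  Here `lockTable` is a hypothetical Map<Long, Lock>, and the
+     * sketch assumes LockAttemptResult exposes the resulting lock as
+     * useLock:
+     *
+     *     Lock useLock = lockTable.get(nodeId);            // may be thin
+     *     LockAttemptResult result =
+     *         useLock.lock(type, locker, false, mb, lockTableIndex);
+     *     if (result.useLock != useLock) {
+     *         lockTable.put(nodeId, result.useLock);       // now a LockImpl
+     *     }
+     */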
+
+    public Set<Locker> release(Locker locker, 
+                               MemoryBudget mb, 
+                               int lockTableIndex) {
+
+	if (locker == this.locker) {
+	    this.locker = null;
+	    this.lockType = null;
+	    return Collections.emptySet();
+	} else {
+	    return null;
+	}
+    }
+
+    public void demote(Locker locker) {
+
+	if (this.lockType.isWriteLock()) {
+	    this.lockType = (lockType == LockType.RANGE_WRITE) ?
+		LockType.RANGE_READ : LockType.READ;
+	}
+    }
+
+    public Lock transfer(Long nodeId,
+			 Locker currentLocker,
+			 Locker destLocker,
+			 MemoryBudget mb,
+			 int lockTableIndex)
+        throws DatabaseException {
+
+	Lock newLock = new LockImpl(new LockInfo(this.locker, this.lockType));
+	return newLock.transfer(nodeId, currentLocker, destLocker,
+				mb, lockTableIndex);
+    }
+
+    public Lock transferMultiple(Long nodeId,
+				 Locker currentLocker,
+				 Locker[] destLockers,
+				 MemoryBudget mb,
+				 int lockTableIndex)
+        throws DatabaseException {
+
+	Lock newLock = new LockImpl(new LockInfo(this.locker, this.lockType));
+	return newLock.transferMultiple(nodeId, currentLocker, destLockers,
+					mb, lockTableIndex);
+    }
+
+    public Locker getWriteOwnerLocker() {
+
+	if (lockType != null &&
+	    lockType.isWriteLock()) {
+	    return locker;
+	} else {
+	    return null;
+	}
+    }
+
+    public boolean isThin() {
+	return true;
+    }
+
+    @Override
+    public String toString() {
+
+        StringBuffer sb = new StringBuffer();
+        sb.append(" ThinLockAddr:").append(System.identityHashCode(this));
+        sb.append(" Owner:");
+        if (nOwners() == 0) {
+            sb.append(" (none)");
+        } else {
+	    sb.append(locker);
+        }
+
+        sb.append(" Waiters: (none)");
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/ThreadLocker.java b/src/com/sleepycat/je/txn/ThreadLocker.java
new file mode 100644
index 0000000000000000000000000000000000000000..7146c9aadbb108e7abf5a7469f8ca84974b7a053
--- /dev/null
+++ b/src/com/sleepycat/je/txn/ThreadLocker.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ThreadLocker.java,v 1.20.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Extends BasicLocker to share locks among all lockers for the same thread.
+ * This locker is used when a JE entry point is called with a null transaction
+ * parameter.
+ */
+public class ThreadLocker extends BasicLocker {
+
+    /**
+     * Creates a ThreadLocker.
+     */
+    private ThreadLocker(EnvironmentImpl env)
+        throws DatabaseException {
+
+        super(env);
+    }
+
+    public static ThreadLocker createThreadLocker(EnvironmentImpl env)
+        throws DatabaseException {
+
+	ThreadLocker ret = null;
+	try {
+	    ret = new ThreadLocker(env);
+	    ret.initApiReadLock();
+	} catch (DatabaseException DE) {
+	    /* The constructor itself may have thrown, leaving ret null. */
+	    if (ret != null) {
+	        ret.operationEnd(false);
+	    }
+	    throw DE;
+	}
+	return ret;
+    }
+
+    /**
+     * Check that this txn is not used in the wrong thread.
+     */
+    @Override
+    protected void checkState(boolean ignoreCalledByAbort)
+        throws DatabaseException {
+
+        if (thread != Thread.currentThread()) {
+            throw new DatabaseException
+		("Non-transactional Cursors may not be used in multiple " +
+                 "threads; Cursor was created in " + thread +
+		 " but used in " + Thread.currentThread());
+        }
+    }
+
+    /**
+     * Returns a new non-transactional locker that shares locks with this
+     * locker by virtue of being a ThreadLocker for the same thread.
+     */
+    @Override
+    public Locker newNonTxnLocker()
+        throws DatabaseException {
+
+        checkState(false);
+        return ThreadLocker.createThreadLocker(envImpl);
+    }
+
+    /**
+     * Returns whether this locker can share locks with the given locker.
+     * Locks are shared when both txns are ThreadLocker instances for the
+     * same thread.
+     */
+    @Override
+    public boolean sharesLocksWith(Locker other) {
+
+        if (super.sharesLocksWith(other)) {
+            return true;
+        } else if (other instanceof ThreadLocker) {
+            return thread == ((ThreadLocker) other).thread;
+        } else {
+            return false;
+        }
+    }
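+
+    /*
+     * Editor's sketch (not part of the original source): two ThreadLockers
+     * created on the same thread share locks, so non-transactional
+     * operations in a single thread do not self-deadlock:
+     *
+     *     Locker a = ThreadLocker.createThreadLocker(envImpl);
+     *     Locker b = a.newNonTxnLocker();  // ThreadLocker, same thread
+     *     assert a.sharesLocksWith(b);     // true
+     *     // A ThreadLocker created in another thread would return false.
+     */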
+}
diff --git a/src/com/sleepycat/je/txn/Txn.java b/src/com/sleepycat/je/txn/Txn.java
new file mode 100644
index 0000000000000000000000000000000000000000..7b7c977c06ba94e0de5d91418e9a1b32a22ab093
--- /dev/null
+++ b/src/com/sleepycat/je/txn/Txn.java
@@ -0,0 +1,1871 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Txn.java,v 1.198.2.5 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.transaction.xa.XAResource;
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.recovery.RecoveryManager;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * A Txn is the internal representation of a transaction created by a call to
+ * Environment.txnBegin.  This class must support multi-threaded use.
+ */
+public class Txn extends Locker implements Loggable {
+
+    @SuppressWarnings("unused")
+    private static final String DEBUG_NAME =
+        Txn.class.getName();
+
+    private byte txnState;
+
+    /*
+     * Cursors opened under this txn. Implemented as a simple linked list to
+     * conserve on memory.
+     */
+    private CursorImpl cursorSet;
+
+    /* txnState bits. */
+    private static final byte USABLE = 0;
+    private static final byte CLOSED = 1;
+    private static final byte ONLY_ABORTABLE = 2;
+    private static final byte STATE_BITS = 3;
+    /* Set if prepare() has been called on this transaction. */
+    private static final byte IS_PREPARED = 4;
+    /* Set if xa_end(TMSUSPEND) has been called on this transaction. */
+    private static final byte XA_SUSPENDED = 8;
+
+    /*
+     * A Txn can be used by multiple threads. Modification to the read and
+     * write lock collections is done by synchronizing on the txn.
+     */
+    private Set<Long> readLocks;    // Set<Long> (nodeIds)
+    private Map<Long, WriteLockInfo> writeInfo;    // key=nodeid
+
+    private final int READ_LOCK_OVERHEAD = MemoryBudget.HASHSET_ENTRY_OVERHEAD;
+    private final int WRITE_LOCK_OVERHEAD =
+        MemoryBudget.HASHMAP_ENTRY_OVERHEAD +
+        MemoryBudget.WRITE_LOCKINFO_OVERHEAD;
+
+    /*
+     * We have to keep a set of DatabaseCleanupInfo objects so after commit or
+     * abort of Environment.truncateDatabase() or Environment.removeDatabase(),
+     * we can appropriately purge the unneeded MapLN and DatabaseImpl.
+     * Synchronize access to this set on this object.
+     */
+    private Set<DatabaseCleanupInfo> deletedDatabases;
+
+    /*
+     * We need a map of the latest databaseImpl objects to drive the undo
+     * during an abort, because it's too hard to look up the database object in
+     * the mapping tree. (The normal code paths want to take locks, add
+     * cursors, etc.)
+     */
+    protected Map<DatabaseId, DatabaseImpl> undoDatabases;
+
+    /* Last LSN logged for this transaction. */
+    protected long lastLoggedLsn = DbLsn.NULL_LSN;
+
+    /*
+     * First LSN logged for this transaction -- used for keeping track of the
+     * first active LSN point, for checkpointing. This field is not persistent.
+     */
+    protected long firstLoggedLsn = DbLsn.NULL_LSN;
+
+    /* The configured durability at the time the transaction was created. */
+    private Durability defaultDurability;
+
+    /* The durability used for the actual commit. */
+    private Durability commitDurability;
+
+    /* Whether to use Serializable isolation (prevent phantoms). */
+    private boolean serializableIsolation;
+
+    /* Whether to use Read-Committed isolation. */
+    private boolean readCommittedIsolation;
+
+    /*
+     * In-memory size, in bytes. A Txn tracks the memory needed for itself and
+     * the readlock, writeInfo, undoDatabases, and deletedDatabases
+     * collections, including the cost of each collection entry. However, the
+     * actual Lock object memory cost is maintained within the Lock class.
+     */
+    private int inMemorySize;
+
+    /*
+     * Accumulated memory budget delta.  Once this exceeds ACCUMULATED_LIMIT we
+     * inform the MemoryBudget that a change has occurred.
+     */
+    private int accumulatedDelta = 0;
+
+    /*
+     * Max allowable accumulation of memory budget changes before MemoryBudget
+     * should be updated. This allows for consolidating multiple calls to
+     * updateXXXMemoryBudget() into one call.  Not declared final so that unit
+     * tests can modify this.  See SR 12273.
+     */
+    public static int ACCUMULATED_LIMIT = 10000;
+
+    /*
+     * Each Txn instance has a handle on a ReplicationContext instance for use
+     * in logging TxnCommit or TxnAbort log entries.
+     */
+    protected ReplicationContext repContext;
+
+    /*
+     * Used to track mixed-mode (sync/durability) transaction API usage. When
+     * the sync-based API is removed, these tracking instance variables can be
+     * removed as well.
+     */
+    private boolean explicitSyncConfigured = false;
+    private boolean explicitDurabilityConfigured = false;
+
+    /* Determines whether the transaction is auto-commit */
+    private boolean isAutoCommit = false;
+
+    /**
+     * Constructor for reading from log.
+     */
+    public Txn() {
+        lastLoggedLsn = DbLsn.NULL_LSN;
+    }
+
+    /**
+     * Create a transaction from Environment.txnBegin. Should only be used in
+     * cases where we are sure we'll set the repContext field before the
+     * transaction is ended. For example, should not be used to create
+     * standalone Txns for a unit test.
+     */
+    protected Txn(EnvironmentImpl envImpl, TransactionConfig config)
+        throws DatabaseException {
+
+        /*
+         * Initialize using the config but don't hold a reference to it, since
+         * it has not been cloned.
+         */
+        super(envImpl, config.getReadUncommitted(), config.getNoWait(), 0);
+        initTxn(envImpl, config);
+    }
+
+    static Txn createTxn(EnvironmentImpl envImpl, TransactionConfig config)
+        throws DatabaseException {
+
+        Txn ret = null;
+        try {
+            ret = envImpl.isReplicated() ?
+                  envImpl.getReplicator().createRepTxn(envImpl, config) :
+                  new Txn(envImpl, config);
+            ret.initApiReadLock();
+        } catch (DatabaseException DE) {
+            if (ret != null) {
+                ret.close(false);
+            }
+            throw DE;
+        }
+        return ret;
+    }
+
+    /**
+     * This is only for use by subtypes which arbitrarily impose a transaction
+     * id value onto the transaction. This is done by implementing a version of
+     * Locker.generateId() which uses the proposed id.
+     */
+    protected Txn(EnvironmentImpl envImpl,
+                  TransactionConfig config,
+                  boolean noAPIReadLock,
+                  long mandatedId)
+        throws DatabaseException {
+
+        /*
+         * Initialize using the config but don't hold a reference to it, since
+         * it has not been cloned.
+         */
+        super(envImpl,
+              config.getReadUncommitted(),
+              config.getNoWait(),
+              noAPIReadLock,
+              mandatedId);
+        initTxn(envImpl, config);
+    }
+
+    /**
+     * Create a Txn for use in a unit test, where we won't need an auto Txn or
+     * a com.sleepycat.je.Transaction. In a real-life transaction, we don't
+     * know a priori at the time of Txn construction whether the transaction
+     * needs to be replicated.
+     */
+    public Txn(EnvironmentImpl envImpl,
+               TransactionConfig config,
+               ReplicationContext repContext)
+        throws DatabaseException {
+
+        /*
+         * Initialize using the config but don't hold a reference to it, since
+         * it has not been cloned.
+         */
+        super(envImpl, config.getReadUncommitted(), config.getNoWait(), 0);
+        initTxn(envImpl, config);
+        setRepContext(repContext);
+    }
+
+    public static Txn createTxn(EnvironmentImpl envImpl,
+                                TransactionConfig config,
+                                ReplicationContext repContext)
+        throws DatabaseException {
+
+        Txn ret = null;
+        try {
+            ret = envImpl.isReplicated() ?
+                  envImpl.getReplicator().createRepTxn(envImpl,
+                                                       config,
+                                                       repContext) :
+                  new Txn(envImpl, config, repContext);
+            ret.initApiReadLock();
+        } catch (DatabaseException DE) {
+            if (ret != null) {
+                ret.close(false);
+            }
+            throw DE;
+        }
+        return ret;
+    }
+
+    public static Txn createAutoTxn(EnvironmentImpl envImpl,
+                                    TransactionConfig config,
+                                    boolean noAPIReadLock,
+                                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        Txn ret = null;
+        try {
+            ret = envImpl.isReplicated() ?
+                envImpl.getReplicator().createRepTxn(envImpl,
+                                                     config,
+                                                     noAPIReadLock,
+                                                     0 /* mandatedId */) :
+                new Txn(envImpl, config, noAPIReadLock, 0 /* mandatedId */);
+            ret.isAutoCommit = true;
+            ret.setRepContext(repContext);
+            ret.initApiReadLock();
+        } catch (DatabaseException DE) {
+            if (ret != null) {
+                ret.close(false);
+            }
+            throw DE;
+        }
+        return ret;
+    }
+
+    private void initTxn(EnvironmentImpl envImpl,
+                         TransactionConfig config)
+        throws DatabaseException {
+
+        serializableIsolation = config.getSerializableIsolation();
+        readCommittedIsolation = config.getReadCommitted();
+        defaultDurability = config.getDurability();
+        if (defaultDurability == null) {
+            explicitDurabilityConfigured = false;
+            defaultDurability = config.getDurabilityFromSync();
+        } else {
+            explicitDurabilityConfigured = true;
+        }
+        explicitSyncConfigured =
+            config.getSync() || config.getNoSync() || config.getWriteNoSync();
+
+        assert(!(explicitDurabilityConfigured && explicitSyncConfigured));
+
+        lastLoggedLsn = DbLsn.NULL_LSN;
+        firstLoggedLsn = DbLsn.NULL_LSN;
+
+        txnState = USABLE;
+
+        txnBeginHook(config);
+
+        /*
+         * Note: readLocks, writeInfo, undoDatabases, and deletedDatabases are
+         * initialized lazily in order to conserve memory. WriteInfo and
+         * undoDatabases are treated as a package deal, because they are both
+         * only needed if a transaction does writes.
+         *
+         * When a lock is added to this transaction, we add the collection
+         * entry overhead to the memory cost, but don't add the lock
+         * itself. That's taken care of by the Lock class.
+         */
+        updateMemoryUsage(MemoryBudget.TXN_OVERHEAD);
+
+        this.envImpl.getTxnManager().registerTxn(this);
+    }
+
+    /**
+     * UserTxns get a new unique id for each instance.
+     */
+    protected long generateId(TxnManager txnManager,
+                              long ignore /* mandatedId */) {
+        return txnManager.getNextTxnId();
+    }
+
+    /**
+     * Access to last LSN.
+     */
+    public long getLastLsn() {
+        return lastLoggedLsn;
+    }
+
+    /**
+     * Returns the durability used for the commit operation. It's only
+     * available after a commit operation has been initiated.
+     *
+     * @return the durability associated with the commit, or null if the
+     * commit has not yet been initiated.
+     */
+    public Durability getCommitDurability() {
+        return commitDurability;
+    }
+
+    /**
+     * Returns the durability associated with the transaction at the time it
+     * was first created.
+     *
+     * @return the durability associated with the transaction at creation.
+     */
+    public Durability getDefaultDurability() {
+        return defaultDurability;
+    }
+
+    public boolean getPrepared() {
+        return (txnState & IS_PREPARED) != 0;
+    }
+
+    public void setPrepared(boolean prepared) {
+        if (prepared) {
+            txnState |= IS_PREPARED;
+        } else {
+            txnState &= ~IS_PREPARED;
+        }
+    }
+
+    public void setSuspended(boolean suspended) {
+        if (suspended) {
+            txnState |= XA_SUSPENDED;
+        } else {
+            txnState &= ~XA_SUSPENDED;
+        }
+    }
+
+    public boolean isSuspended() {
+        return (txnState & XA_SUSPENDED) != 0;
+    }
+
+    /**
+     * Gets a lock on this nodeId and, if it is a write lock, saves an abort
+     * LSN.  Caller will set the abortLsn later, after the write lock has been
+     * obtained.
+     *
+     * @see Locker#lockInternal
+     */
+    @Override
+    LockResult lockInternal(long nodeId,
+                            LockType lockType,
+                            boolean noWait,
+                            DatabaseImpl database)
+        throws DatabaseException {
+
+        long timeout = 0;
+        boolean useNoWait = noWait || defaultNoWait;
+        synchronized (this) {
+            checkState(false);
+            if (!useNoWait) {
+                timeout = getLockTimeout();
+            }
+        }
+
+        /* Ask for the lock. */
+        LockGrantType grant = lockManager.lock
+            (nodeId, this, lockType, timeout, useNoWait, database);
+
+        WriteLockInfo info = null;
+        if (writeInfo != null) {
+            if (grant != LockGrantType.DENIED && lockType.isWriteLock()) {
+                synchronized (this) {
+                    info = writeInfo.get(Long.valueOf(nodeId));
+                    /* Save the latest version of this database for undoing. */
+                    undoDatabases.put(database.getId(), database);
+                }
+            }
+        }
+
+        return new LockResult(grant, info);
+    }
+
+    public int prepare(Xid xid)
+        throws DatabaseException {
+
+        if ((txnState & IS_PREPARED) != 0) {
+            throw new DatabaseException
+                ("prepare() has already been called for Transaction " +
+                 id + ".");
+        }
+        synchronized (this) {
+            checkState(false);
+            if (checkCursorsForClose()) {
+                throw new DatabaseException
+                    ("Transaction " + id +
+                     " prepare failed because there were open cursors.");
+            }
+
+            setPrepared(true);
+            envImpl.getTxnManager().notePrepare();
+            if (writeInfo == null) {
+                return XAResource.XA_RDONLY;
+            }
+
+            SingleItemEntry prepareEntry =
+                new SingleItemEntry(LogEntryType.LOG_TXN_PREPARE,
+                                    new TxnPrepare(id, xid));
+            /* Flush required. */
+            LogManager logManager = envImpl.getLogManager();
+            logManager.logForceFlush(prepareEntry,
+                                     true,  // fsyncRequired
+                                     ReplicationContext.NO_REPLICATE);
+        }
+        return XAResource.XA_OK;
+    }
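+
+    /*
+     * Editor's sketch (not part of the original source): the XA two-phase
+     * flow through this class, with `xid` supplied by the transaction
+     * manager:
+     *
+     *     int vote = txn.prepare(xid);  // logs LOG_TXN_PREPARE and fsyncs
+     *     if (vote == XAResource.XA_OK) {
+     *         txn.commit(xid);          // phase two: commit record
+     *     }
+     *     // XA_RDONLY means nothing was written; the transaction manager
+     *     // skips phase two for this resource.
+     */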
+
+    public void commit(Xid xid)
+        throws DatabaseException {
+
+        commit(TransactionConfig.SYNC);
+        envImpl.getTxnManager().unRegisterXATxn(xid, true);
+        return;
+    }
+
+    public void abort(Xid xid)
+        throws DatabaseException {
+
+        abort(true /* forceFlush */);
+        envImpl.getTxnManager().unRegisterXATxn(xid, false);
+        return;
+    }
+
+    /**
+     * Call commit() with the transaction's default durability.
+     */
+    public long commit()
+        throws DatabaseException {
+
+        return commit(defaultDurability);
+    }
+
+    /**
+     * Commit this transaction:
+     * 1. Releases read locks.
+     * 2. Writes a txn commit record into the log.
+     * 3. Flushes the log to disk.
+     * 4. Adds deleted LN info to the IN compressor queue.
+     * 5. Releases all write locks.
+     *
+     * If any step of this fails, we must convert this transaction to an abort.
+     */
+    public long commit(Durability durability)
+        throws DatabaseException {
+
+        /*
+         * A replication exception after which the transaction cannot be
+         * aborted, since the commit record has already been written.
+         */
+        DatabaseException repNoAbortException = null;
+
+        /* A replication exception requiring a transaction abort. */
+        DatabaseException repAbortException = null;
+
+        this.commitDurability = durability;
+
+        try {
+            long commitLsn = DbLsn.NULL_LSN;
+            synchronized (this) {
+                checkState(false);
+                if (checkCursorsForClose()) {
+                    throw new DatabaseException
+                        ("Transaction " + id +
+                         " commit failed because there were open cursors.");
+                }
+
+                /*
+                 * Save transferred write locks, if any.  Their abort LSNs are
+                 * counted as obsolete further below.  Create the list lazily
+                 * to avoid creating it in the normal case (no handle locks).
+                 */
+                List<WriteLockInfo> transferredWriteLockInfo = null;
+
+                /* Transfer handle locks to their owning handles. */
+                if (handleLockToHandleMap != null) {
+                    Iterator<Map.Entry<Long, Set<Database>>> handleLockIter =
+                        handleLockToHandleMap.entrySet().iterator();
+                    while (handleLockIter.hasNext()) {
+                        Map.Entry<Long, Set<Database>> entry =
+                            handleLockIter.next();
+                        Long nodeId = entry.getKey();
+                        if (writeInfo != null) {
+                            WriteLockInfo info = writeInfo.get(nodeId);
+                            if (info != null) {
+                                if (transferredWriteLockInfo == null) {
+                                    transferredWriteLockInfo =
+                                        new ArrayList<WriteLockInfo>();
+                                }
+                                transferredWriteLockInfo.add(info);
+                            }
+                        }
+                        transferHandleLockToHandleSet(nodeId, 
+                                                      entry.getValue());
+                    }
+                }
+
+                /*
+                 * Release all read locks, clear lock collection. Optimize for
+                 * the case where there are no read locks.
+                 */
+                int numReadLocks = clearReadLocks();
+
+                /*
+                 * Log the commit if we ever logged any modifications for this
+                 * txn. Refraining from logging empty commits is more efficient
+                 * and makes for fewer edge cases for HA. Note that this is not
+                 * the same as the question of whether we have held any write
+                 * locks. Various scenarios, like RMW txns and
+                 * Cursor.putNoOverwrite, can take write locks without having
+                 * actually made any modifications.
+                 * 
+                 * If we have outstanding write locks, we must release them
+                 * even if we haven't logged a commit. With dbhandle write
+                 * locks, we may have held the write lock but then had it
+                 * transferred away.
+                 */
+                int numWriteLocks = 0;
+                if (writeInfo != null) {
+                    numWriteLocks = writeInfo.size();
+
+                    /* 
+                     * If nothing was written to log for this txn, no need to
+                     * log a commit. 
+                     */
+                    if (firstLoggedLsn != DbLsn.NULL_LSN) {
+
+                        try {
+                            preLogCommitHook();
+                        } catch (DatabaseException preCommitException) {
+                            repAbortException = preCommitException;
+                            throw preCommitException;
+                        }
+
+                        try {
+                            commitLsn = 
+                                logCommitEntry(durability.getLocalSync());
+                        } catch (DatabaseException e) {
+                            /* Cleanup and propagate the exception. */
+                            postLogAbortHook();
+                            throw e;
+                        }
+
+                        try {
+                            postLogCommitHook();
+                        } catch (DatabaseException postCommitException) {
+                            repNoAbortException = postCommitException;
+                        }
+                    }
+
+                    /*
+                     * Set database state for deletes before releasing any
+                     * write locks.
+                     */
+                    setDeletedDatabaseState(true);
+
+                    /*
+                     * Used to prevent double-counting abort LSNs if there is
+                     * more than one node with the same abortLSN in this txn.
+                     * Two nodes with the same abortLSN occur when a deleted
+                     * slot is reused in the same txn.
+                     */
+                    Set<Long> alreadyCountedLsnSet = new HashSet<Long>();
+
+                    /* Release all write locks, clear lock collection. */
+                    Iterator<Map.Entry<Long, WriteLockInfo>> iter =
+                        writeInfo.entrySet().iterator();
+                    while (iter.hasNext()) {
+                        Map.Entry<Long, WriteLockInfo> entry = iter.next();
+                        Long nodeId = entry.getKey();
+                        lockManager.release(nodeId, this);
+                        /* Count obsolete LSNs for released write locks. */
+                        countWriteAbortLSN(entry.getValue(),
+                                           alreadyCountedLsnSet);
+                    }
+                    writeInfo = null;
+
+                    /* Count obsolete LSNs for transferred write locks. */
+                    if (transferredWriteLockInfo != null) {
+                        for (int i = 0;
+                             i < transferredWriteLockInfo.size();
+                             i += 1) {
+                            WriteLockInfo info =
+                                transferredWriteLockInfo.get(i);
+                            countWriteAbortLSN(info, alreadyCountedLsnSet);
+                        }
+                    }
+
+                    /* Unload delete info, but don't wake up the compressor. */
+                    if ((deleteInfo != null) && deleteInfo.size() > 0) {
+                        envImpl.addToCompressorQueue(deleteInfo.values(),
+                                                     false); // don't wakeup
+                        deleteInfo.clear();
+                    }
+                }
+
+                traceCommit(numWriteLocks, numReadLocks);
+            }
+
+            /*
+             * Purge any databaseImpls not needed as a result of the commit.
+             * Be sure to do this outside the synchronization block, to avoid
+             * conflict w/checkpointer.
+             */
+            cleanupDatabaseImpls(true);
+
+            /*
+             * Unregister this txn. Be sure to do this outside the
+             * synchronization block, to avoid conflict w/checkpointer.
+             */
+            close(true);
+            if (repNoAbortException == null) {
+                return commitLsn;
+            }
+        } catch (RunRecoveryException e) {
+
+            /* May have received a thread interrupt. */
+            throw e;
+        } catch (Error e) {
+            envImpl.invalidate(e);
+            throw e;
+        } catch (Exception t) {
+            try {
+
+                /*
+                 * If the exception thrown is a DatabaseException it indicates
+                 * that the write() call hit an IOException, probably out of
+                 * disk space, and attempted to rewrite all commit records as
+                 * abort records.  Since the abort records are already
+                 * rewritten (or at least attempted to be rewritten), there is
+                 * no reason to have abort attempt to write an abort record
+                 * again.  See [11271].
+                 */
+                /*
+                 * TODO: We need an explicit indication for an IOException in
+                 * the HA release.  Replication hooks may throw
+                 * DatabaseException instances that do not represent
+                 * IOExceptions.
+                 */
+                abortInternal(durability.getLocalSync() ==
+                              Durability.SyncPolicy.SYNC,
+                              ((repAbortException != null) ||
+                               !(t instanceof DatabaseException)));
+                Tracer.trace(envImpl, "Txn", "commit",
+                             "Commit of transaction " + id + " failed", t);
+            } catch (Error e) {
+                envImpl.invalidate(e);
+                throw e;
+            } catch (Exception abortT2) {
+                throw new DatabaseException
+                    ("Failed while attempting to commit transaction " +
+                     id +
+                     ". The attempt to abort and clean up also failed. " +
+                     "The original exception seen from commit = " +
+                     t.getMessage() +
+                     " The exception from the cleanup = " +
+                     abortT2.getMessage(),
+                     t);
+            }
+
+            if (t == repAbortException) {
+                /*
+                 * Don't wrap the replication exception, since the application
+                 * may need to catch and handle it; it's already a Database
+                 * exception.
+                 */
+                throw repAbortException;
+            }
+            /* Now throw an exception that shows the commit problem. */
+            throw new DatabaseException
+                ("Failed while attempting to commit transaction " + id +
+                 ", aborted instead. Original exception = " +
+                 t.getMessage(), t);
+        }
+        assert(repNoAbortException != null);
+        /* Rethrow any pending post-commit replication exceptions. */
+        throw repNoAbortException;
+    }
+
+    /**
+     * Creates and logs the txn commit entry, enforcing the flush/Sync behavior.
+     *
+     * @param flushSyncBehavior the local durability requirements
+     *
+     * @return the LSN denoting the commit log entry
+     *
+     * @throws DatabaseException
+     */
+    private long logCommitEntry(Durability.SyncPolicy flushSyncBehavior)
+            throws DatabaseException {
+
+        LogManager logManager = envImpl.getLogManager();
+        SingleItemEntry commitEntry =
+            new SingleItemEntry(LogEntryType.LOG_TXN_COMMIT,
+                                new TxnCommit(id, lastLoggedLsn,
+                                              getReplicatorNodeId()));
+
+        switch (flushSyncBehavior) {
+
+            case SYNC:
+                return logManager.logForceFlush(commitEntry,
+                                                true, // fsyncRequired
+                                                repContext);
+
+            case WRITE_NO_SYNC:
+                return logManager.logForceFlush(commitEntry,
+                                                false, // fsyncRequired
+                                                repContext);
+
+            default:
+                return logManager.log(commitEntry, repContext);
+        }
+    }
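+
+    /*
+     * Editor's note (not part of the original source): the switch above,
+     * summarized.  SYNC flushes and fsyncs, WRITE_NO_SYNC flushes to the
+     * file system without fsync, and NO_SYNC (the default case) leaves the
+     * record in the in-memory log buffer.  Assuming the three-argument
+     * Durability constructor, a local-only policy might look like:
+     *
+     *     Durability d = new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+     *                                   null, null);
+     *     txn.commit(d);  // commit record reaches the OS, no fsync
+     */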
+
+    /**
+     * Count the abortLSN as obsolete.  Do not count if a slot with a deleted
+     * LN was reused (abortKnownDeleted), to avoid double counting.  And count
+     * each abortLSN only once.
+     */
+    private void countWriteAbortLSN(WriteLockInfo info,
+                                    Set<Long> alreadyCountedLsnSet)
+        throws DatabaseException {
+
+        if (info.abortLsn != DbLsn.NULL_LSN &&
+            !info.abortKnownDeleted) {
+            Long longLsn = Long.valueOf(info.abortLsn);
+            if (!alreadyCountedLsnSet.contains(longLsn)) {
+                envImpl.getLogManager().countObsoleteNode
+                    (info.abortLsn, null, info.abortLogSize, info.abortDb);
+                alreadyCountedLsnSet.add(longLsn);
+            }
+        }
+    }
+
+    /**
+     * Abort this transaction. Steps are:
+     * 1. Release LN read locks.
+     * 2. Write a txn abort entry to the log. This is only for log
+     *    file cleaning optimization and there's no need to guarantee a
+     *    flush to disk.
+     * 3. Find the last LN log entry written for this txn, and use that
+     *    to traverse the log looking for nodes to undo. For each node,
+     *    use the same undo logic as recovery to rollback the transaction. Note
+     *    that we walk the log in order to undo in reverse order of the
+     *    actual operations. For example, suppose the txn did this:
+     *       delete K1/D1 (in LN 10)
+     *       create K1/D1 (in LN 20)
+     *    If we process LN10 before LN 20, we'd inadvertently create a
+     *    duplicate tree of "K1", which would be fatal for the mapping tree.
+     * 4. Release the write lock for this LN.
+     */
+    public long abort(boolean forceFlush)
+        throws DatabaseException {
+
+        return abortInternal(forceFlush,
+                             true);     // writeAbortRecord
+    }
+
+    private long abortInternal(boolean forceFlush,
+                               boolean writeAbortRecord)
+        throws DatabaseException {
+
+        try {
+            long abortLsn;
+
+            synchronized (this) {
+                checkState(true);
+
+                /* Log the abort. */
+                abortLsn = DbLsn.NULL_LSN;
+                try {
+                    if (writeInfo != null) {
+                        if (writeAbortRecord) {
+                            SingleItemEntry abortEntry = new SingleItemEntry
+                                (LogEntryType.LOG_TXN_ABORT,
+                                 new TxnAbort(id, lastLoggedLsn,
+                                              getReplicatorNodeId()));
+                            if (forceFlush) {
+                                abortLsn = envImpl.getLogManager().
+                                    logForceFlush(abortEntry,
+                                                  true /*fsyncRequired*/,
+                                                  repContext);
+                            } else {
+                                abortLsn = envImpl.getLogManager().
+                                    log(abortEntry, repContext);
+                            }
+                        }
+                    }
+                } finally {
+
+                    /* Undo the changes. */
+                    undo();
+                }
+            }
+
+            /*
+             * Purge any databaseImpls not needed as a result of the abort.  Be
+             * sure to do this outside the synchronization block, to avoid
+             * conflict w/checkpointer.
+             */
+            cleanupDatabaseImpls(false);
+
+            synchronized (this) {
+                boolean openCursors = checkCursorsForClose();
+                Tracer.trace(Level.FINE,
+                             envImpl,
+                             "Abort:id = " + id +
+                             " openCursors= " + openCursors);
+                if (openCursors) {
+                    throw new DatabaseException
+                        ("Transaction " + id +
+                         " detected open cursors while aborting");
+                }
+                /* Unload any db handles protected by this txn. */
+                if (handleToHandleLockMap != null) {
+                    Iterator<Database> handleIter =
+                        handleToHandleLockMap.keySet().iterator();
+                    while (handleIter.hasNext()) {
+                        Database handle = handleIter.next();
+                        DbInternal.dbInvalidate(handle);
+                    }
+                }
+
+                return abortLsn;
+            }
+        } finally {
+
+            /*
+             * Unregister this txn, must be done outside synchronization block
+             * to avoid conflict w/checkpointer.
+             */
+            close(false);
+        }
+    }
+
+    /**
+     * Roll back the changes to this txn's write-locked nodes.
+     */
+    protected void undo()
+        throws DatabaseException {
+
+        Long nodeId = null;
+        long undoLsn = lastLoggedLsn;
+        LogManager logManager = envImpl.getLogManager();
+
+        try {
+            Set<Long> alreadyUndone = new HashSet<Long>();
+            TreeLocation location = new TreeLocation();
+            while (undoLsn != DbLsn.NULL_LSN) {
+
+                LNLogEntry undoEntry =
+                    (LNLogEntry) logManager.getLogEntry(undoLsn);
+                LN undoLN = undoEntry.getLN();
+                nodeId = Long.valueOf(undoLN.getNodeId());
+
+                /*
+                 * Only process this if this is the first time we've seen this
+                 * node. All log entries for a given node have the same
+                 * abortLsn, so we don't need to undo it multiple times.
+                 */
+                if (!alreadyUndone.contains(nodeId)) {
+                    alreadyUndone.add(nodeId);
+                    DatabaseId dbId = undoEntry.getDbId();
+                    DatabaseImpl db = undoDatabases.get(dbId);
+                    undoLN.postFetchInit(db, undoLsn);
+                    long abortLsn = undoEntry.getAbortLsn();
+                    boolean abortKnownDeleted =
+                        undoEntry.getAbortKnownDeleted();
+                    RecoveryManager.undo(Level.FINER, db, location, undoLN,
+                                         undoEntry.getKey(),
+                                         undoEntry.getDupKey(), undoLsn,
+                                         abortLsn, abortKnownDeleted,
+                                         null, false);
+
+                    /*
+                     * The LN undone is counted as obsolete if it is not a
+                     * deleted LN.  Deleted LNs are counted as obsolete when
+                     * they are logged.
+                     */
+                    if (!undoLN.isDeleted()) {
+                        logManager.countObsoleteNode
+                            (undoLsn,
+                             null,  // type
+                             undoLN.getLastLoggedSize(), db);
+                    }
+                }
+
+                /* Move on to the previous log entry for this txn. */
+                undoLsn = undoEntry.getUserTxn().getLastLsn();
+            }
+        } catch (RuntimeException e) {
+            throw new DatabaseException("Txn undo for node=" + nodeId +
+                                        " LSN=" +
+                                        DbLsn.getNoFormatString(undoLsn), e);
+        } catch (DatabaseException e) {
+            Tracer.trace(envImpl, "Txn", "undo",
+                         "for node=" + nodeId + " LSN=" +
+                         DbLsn.getNoFormatString(undoLsn), e);
+            throw e;
+        }
+
+        /*
+         * Release all read locks after the undo (since the undo may need to
+         * read in mapLNs).
+         */
+        if (readLocks != null) {
+            clearReadLocks();
+        }
+
+        /* Set database state for deletes before releasing any write locks. */
+        setDeletedDatabaseState(false);
+
+        /* Throw away write lock collection. */
+        if (writeInfo != null) {
+            clearWriteLocks();
+        }
+
+        /*
+         * Let the delete-related info (BINReferences and dbs) get GC'd. Don't
+         * explicitly iterate and clear -- that's far less efficient and gives
+         * the GC the wrong input.
+         */
+        deleteInfo = null;
+    }
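+
+    /*
+     * Editor's note (not part of the original source): a worked example of
+     * the backward walk in undo().  Each LNLogEntry records the txn's
+     * previous LSN, forming a per-txn chain:
+     *
+     *     lastLoggedLsn -> LN 20 (create K1/D1), prev = LSN of LN 10
+     *                   -> LN 10 (delete K1/D1), prev = NULL_LSN (stop)
+     *
+     * Undoing LN 20 before LN 10 removes the re-created K1 entry before the
+     * original delete is rolled back, avoiding the duplicate-tree hazard
+     * described in the abort() javadoc.
+     */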
+
+    private int clearWriteLocks()
+        throws DatabaseException {
+
+        int numWriteLocks = writeInfo.size();
+        /* Release all write locks, clear lock collection. */
+        Iterator<Map.Entry<Long, WriteLockInfo>> iter =
+            writeInfo.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<Long, WriteLockInfo> entry = iter.next();
+            Long nodeId = entry.getKey();
+            lockManager.release(nodeId, this);
+        }
+        writeInfo = null;
+        return numWriteLocks;
+    }
+
+    private int clearReadLocks()
+        throws DatabaseException {
+
+        int numReadLocks = 0;
+        if (readLocks != null) {
+            numReadLocks = readLocks.size();
+            Iterator<Long> iter = readLocks.iterator();
+            while (iter.hasNext()) {
+                Long rLockNid = iter.next();
+                lockManager.release(rLockNid, this);
+            }
+            readLocks = null;
+        }
+        return numReadLocks;
+    }
+
+    /**
+     * Called by the recovery manager when logging a transaction aware object.
+     * This method is synchronized by the caller, by being called within the
+     * log latch. Record the last LSN for this transaction, to create the
+     * transaction chain, and also record the LSN in the write info for abort
+     * logic.
+     */
+    public void addLogInfo(long lastLsn)
+        throws DatabaseException {
+
+        synchronized (this) {
+            /* Save the last LSN  for maintaining the transaction LSN chain. */
+            lastLoggedLsn = lastLsn;
+
+            /* Save handle to LSN for aborts. */
+
+            /*
+             * If this is the first LSN, save it for calculating the first LSN
+             * of any active txn, for checkpointing.
+             */
+            if (firstLoggedLsn == DbLsn.NULL_LSN) {
+                firstLoggedLsn = lastLsn;
+            }
+        }
+    }
+
+    /**
+     * @return first logged LSN, to aid recovery rollback.
+     */
+    long getFirstActiveLsn()
+        throws DatabaseException {
+
+        synchronized (this) {
+            return firstLoggedLsn;
+        }
+    }
+
+    /**
+     * @param dbImpl databaseImpl to remove
+     * @param deleteAtCommit true if this databaseImpl should be cleaned on
+     *    commit, false if it should be cleaned on abort.
+     */
+    public void markDeleteAtTxnEnd(DatabaseImpl dbImpl, boolean deleteAtCommit)
+        throws DatabaseException {
+
+        synchronized (this) {
+            int delta = 0;
+            if (deletedDatabases == null) {
+                deletedDatabases = new HashSet<DatabaseCleanupInfo>();
+                delta += MemoryBudget.HASHSET_OVERHEAD;
+            }
+
+            deletedDatabases.add(new DatabaseCleanupInfo(dbImpl,
+                                                         deleteAtCommit));
+            delta += MemoryBudget.HASHSET_ENTRY_OVERHEAD +
+                MemoryBudget.OBJECT_OVERHEAD;
+            updateMemoryUsage(delta);
+
+            /* releaseDb will be called by cleanupDatabaseImpls. */
+        }
+    }
+
+    /*
+     * Leftover databaseImpls that are a by-product of database operations like
+     * removeDatabase() and truncateDatabase() will be deleted after the write
+     * locks are released. However, do set the database state appropriately
+     * before the locks are released.
+     */
+    private void setDeletedDatabaseState(boolean isCommit)
+        throws DatabaseException {
+
+        if (deletedDatabases != null) {
+            Iterator<DatabaseCleanupInfo> iter = deletedDatabases.iterator();
+            while (iter.hasNext()) {
+                DatabaseCleanupInfo info = iter.next();
+                if (info.deleteAtCommit == isCommit) {
+                    info.dbImpl.startDeleteProcessing();
+                }
+            }
+        }
+    }
+
+    /**
+     * Cleanup leftover databaseImpls that are a by-product of database
+     * operations like removeDatabase(), truncateDatabase().
+     *
+     * This method must be called outside the synchronization on this txn,
+     * because it calls finishDeleteProcessing, which gets the TxnManager's
+     * allTxns latch. The checkpointer also gets the allTxns latch, and within
+     * that latch, needs to synchronize on individual txns, so we must avoid a
+     * latching hierarchy conflict.
+     */
+    protected void cleanupDatabaseImpls(boolean isCommit)
+        throws DatabaseException {
+
+        if (deletedDatabases != null) {
+            /* Make a copy of the deleted databases while synchronized. */
+            DatabaseCleanupInfo[] infoArray;
+            synchronized (this) {
+                infoArray = new DatabaseCleanupInfo[deletedDatabases.size()];
+                deletedDatabases.toArray(infoArray);
+            }
+            for (int i = 0; i < infoArray.length; i += 1) {
+                DatabaseCleanupInfo info = infoArray[i];
+                if (info.deleteAtCommit == isCommit) {
+                    /* releaseDb will be called by finishDeleteProcessing. */
+                    info.dbImpl.finishDeleteProcessing();
+                } else {
+                    envImpl.getDbTree().releaseDb(info.dbImpl);
+                }
+            }
+            deletedDatabases = null;
+        }
+    }
+
+    /**
+     * Add lock to the appropriate queue.
+     */
+    protected void addLock(Long nodeId,
+                           LockType type,
+                           LockGrantType grantStatus)
+        throws DatabaseException {
+
+        synchronized (this) {
+            int delta = 0;
+            if (type.isWriteLock()) {
+                if (writeInfo == null) {
+                    writeInfo = new HashMap<Long, WriteLockInfo>();
+                    undoDatabases = new HashMap<DatabaseId, DatabaseImpl>();
+                    delta += MemoryBudget.TWOHASHMAPS_OVERHEAD;
+                }
+
+                writeInfo.put(nodeId, new WriteLockInfo());
+                delta += WRITE_LOCK_OVERHEAD;
+
+                if ((grantStatus == LockGrantType.PROMOTION) ||
+                    (grantStatus == LockGrantType.WAIT_PROMOTION)) {
+                    readLocks.remove(nodeId);
+                    delta -= READ_LOCK_OVERHEAD;
+                }
+                updateMemoryUsage(delta);
+            } else {
+                addReadLock(nodeId);
+            }
+        }
+    }
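+
+    /*
+     * Editor's sketch (not part of the original source): bookkeeping for a
+     * read-to-write promotion.  The nodeId moves between the two collections
+     * and the memory budget is adjusted by the net delta:
+     *
+     *     addLock(n, LockType.READ, LockGrantType.NEW);
+     *         // n added to readLocks, delta = +READ_LOCK_OVERHEAD
+     *     addLock(n, LockType.WRITE, LockGrantType.PROMOTION);
+     *         // n moved to writeInfo,
+     *         // delta = WRITE_LOCK_OVERHEAD - READ_LOCK_OVERHEAD
+     */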
+
+    private void addReadLock(Long nodeId) {
+        int delta = 0;
+        if (readLocks == null) {
+            readLocks = new HashSet<Long>();
+            delta = MemoryBudget.HASHSET_OVERHEAD;
+        }
+
+        readLocks.add(nodeId);
+        delta += READ_LOCK_OVERHEAD;
+        updateMemoryUsage(delta);
+    }
+
+    /**
+     * Remove the lock from the set owned by this transaction. If specified to
+     * LockManager.release, the lock manager will call this when it is
+     * releasing a lock. Usually done because the transaction doesn't really
+     * need to keep the lock, e.g., for a deleted record.
+     */
+    void removeLock(long nodeId)
+        throws DatabaseException {
+
+        /*
+         * We could optimize by passing the lock type so we know which
+         * collection to look in. Be careful of demoted locks, which have
+         * shifted collection.
+         *
+         * Don't bother updating memory utilization here -- we'll update at
+         * transaction end.
+         */
+        synchronized (this) {
+            if ((readLocks != null) &&
+                readLocks.remove(nodeId)) {
+                updateMemoryUsage(0 - READ_LOCK_OVERHEAD);
+            } else if ((writeInfo != null) &&
+                       (writeInfo.remove(nodeId) != null)) {
+                updateMemoryUsage(0 - WRITE_LOCK_OVERHEAD);
+            }
+        }
+    }
+
+    /**
+     * A lock is being demoted. Move it from the write collection into the read
+     * collection.
+     */
+    void moveWriteToReadLock(long nodeId, Lock lock) {
+
+        boolean found = false;
+        synchronized (this) {
+            if ((writeInfo != null) &&
+                (writeInfo.remove(nodeId) != null)) {
+                found = true;
+                updateMemoryUsage(0 - WRITE_LOCK_OVERHEAD);
+            }
+
+            assert found : "Couldn't find lock for Node " + nodeId +
+                " in writeInfo Map.";
+            addReadLock(nodeId);
+        }
+    }
+
+    private void updateMemoryUsage(int delta) {
+        inMemorySize += delta;
+        accumulatedDelta += delta;
+        if (accumulatedDelta > ACCUMULATED_LIMIT ||
+            accumulatedDelta < -ACCUMULATED_LIMIT) {
+            envImpl.getMemoryBudget().updateTxnMemoryUsage(accumulatedDelta);
+            accumulatedDelta = 0;
+        }
+    }
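+
+    /*
+     * Editor's note (not part of the original source): with the default
+     * ACCUMULATED_LIMIT of 10000, three successive deltas of +4000 bytes
+     * reach the shared MemoryBudget in a single update:
+     *
+     *     updateMemoryUsage(4000);  // accumulatedDelta = 4000
+     *     updateMemoryUsage(4000);  // accumulatedDelta = 8000
+     *     updateMemoryUsage(4000);  // 12000 > 10000: flush 12000, reset to 0
+     */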
+
+    /**
+     * Returns the amount of memory currently budgeted for this transaction.
+     */
+    int getBudgetedMemorySize() {
+        return inMemorySize - accumulatedDelta;
+    }
+
+    /**
+     * @return true if this transaction created this node. We know that this
+     * is true if the node is write locked and has a null abort LSN.
+     */
+    public boolean createdNode(long nodeId)
+        throws DatabaseException {
+
+        boolean created = false;
+        synchronized (this) {
+            if (writeInfo != null) {
+                WriteLockInfo info = writeInfo.get(nodeId);
+                if (info != null) {
+                    created = info.createdThisTxn;
+                }
+            }
+        }
+        return created;
+    }
+
+    /**
+     * @return the abortLsn for this node.
+     */
+    public long getAbortLsn(long nodeId)
+        throws DatabaseException {
+
+        WriteLockInfo info = null;
+        synchronized (this) {
+            if (writeInfo != null) {
+                info = writeInfo.get(nodeId);
+            }
+        }
+
+        if (info == null) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return info.abortLsn;
+        }
+    }
+
+    /**
+     * @return the WriteLockInfo for this node.
+     */
+    public WriteLockInfo getWriteLockInfo(long nodeId)
+        throws DatabaseException {
+
+        WriteLockInfo wli = null;
+        synchronized (this) {
+            if (writeInfo != null) {
+                wli = writeInfo.get(nodeId);
+            }
+        }
+
+        if (wli == null) {
+            throw new UnsupportedOperationException
+                ("writeInfo is null in Txn.getWriteLockInfo");
+        }
+        return wli;
+    }
+
+    /**
+     * A Txn is always transactional.
+     */
+    public boolean isTransactional() {
+        return true;
+    }
+
+    /**
+     * Determines whether this is an auto transaction.
+     */
+    public boolean isAutoTxn() {
+        return isAutoCommit;
+    }
+
+    /**
+     * Returns true if serializable isolation was configured.
+     */
+    public boolean isSerializableIsolation() {
+        return serializableIsolation;
+    }
+
+    /**
+     * Returns true if read-committed isolation was configured.
+     */
+    public boolean isReadCommittedIsolation() {
+        return readCommittedIsolation;
+    }
+
+    /**
+     * @hidden
+     *
+     * Returns true if the sync API was used for configuration.
+     */
+    public boolean getExplicitSyncConfigured() {
+        return explicitSyncConfigured;
+    }
+
+    /**
+     * @hidden
+     * Returns true if the durability API was used for configuration.
+     */
+    public boolean getExplicitDurabilityConfigured() {
+        return explicitDurabilityConfigured;
+    }
+
+    /**
+     * This is a transactional locker.
+     */
+    public Txn getTxnLocker() {
+        return this;
+    }
+
+    /**
+     * Returns 'this', since this locker holds no non-transactional locks.
+     * Since this is returned, sharing of locks is obviously supported.
+     */
+    public Locker newNonTxnLocker()
+        throws DatabaseException {
+
+        return this;
+    }
+
+    /**
+     * This locker holds no non-transactional locks.
+     */
+    public void releaseNonTxnLocks()
+        throws DatabaseException {
+    }
+
+    /**
+     * Created transactions do nothing at the end of the operation.
+     */
+    public void nonTxnOperationEnd()
+        throws DatabaseException {
+    }
+
+    /*
+     * @see com.sleepycat.je.txn.Locker#operationEnd(boolean)
+     */
+    public void operationEnd(boolean operationOK)
+        throws DatabaseException {
+
+        if (!isAutoCommit) {
+            /* Created transactions do nothing at the end of the operation. */
+            return;
+        }
+
+        if (operationOK) {
+            commit();
+        } else {
+            abort(false);    // no sync required
+        }
+    }
+
+    /*
+     * @see com.sleepycat.je.txn.Locker#setHandleLockOwner
+     * (boolean, com.sleepycat.je.Database, boolean)
+     */
+    public void setHandleLockOwner(boolean operationOK,
+                                   Database dbHandle,
+                                   boolean dbIsClosing)
+        throws DatabaseException {
+
+        if (isAutoCommit) {
+            /* Transfer locks on an auto commit */
+            if (operationOK) {
+                if (!dbIsClosing) {
+                    transferHandleLockToHandle(dbHandle);
+                }
+                unregisterHandle(dbHandle);
+            }
+            /* Done if auto commit */
+            return;
+        }
+
+        /* Created transactions don't transfer locks until commit. */
+        if (dbIsClosing) {
+
+            /*
+             * If the Database handle is closing, take it out of both the
+             * handle lock map and the handle map. We don't need to do any
+             * transfers at commit time, and we don't need to do any
+             * invalidations at abort time.
+             */
+            Long handleLockId = handleToHandleLockMap.get(dbHandle);
+            if (handleLockId != null) {
+                Set<Database> dbHandleSet =
+                    handleLockToHandleMap.get(handleLockId);
+                boolean removed = dbHandleSet.remove(dbHandle);
+                assert removed :
+                    "Can't find " + dbHandle + " in dbHandleSet";
+                if (dbHandleSet.size() == 0) {
+                    Object foo = handleLockToHandleMap.remove(handleLockId);
+                    assert (foo != null) :
+                        "Can't find " + handleLockId +
+                        " in handleLockToHandleMap.";
+                }
+            }
+
+            unregisterHandle(dbHandle);
+
+        } else {
+
+            /*
+             * If the db is still open, make sure the db knows this txn is its
+             * handle lock protector and that this txn knows it owns this db
+             * handle.
+             */
+            if (dbHandle != null) {
+                DbInternal.dbSetHandleLocker(dbHandle, this);
+            }
+        }
+    }
+
+    /**
+     * Cursors operating under this transaction are added to the collection.
+     */
+    public void registerCursor(CursorImpl cursor)
+        throws DatabaseException {
+
+        synchronized(this) {
+            /* Add to the head of the list. */
+            cursor.setLockerNext(cursorSet);
+            if (cursorSet != null) {
+                cursorSet.setLockerPrev(cursor);
+            }
+            cursorSet = cursor;
+        }
+    }
+
+    /**
+     * Remove a cursor from the collection.
+     */
+    public void unRegisterCursor(CursorImpl cursor)
+        throws DatabaseException {
+
+        synchronized (this) {
+            CursorImpl prev = cursor.getLockerPrev();
+            CursorImpl next = cursor.getLockerNext();
+            if (prev == null) {
+                cursorSet = next;
+            } else {
+                prev.setLockerNext(next);
+            }
+
+            if (next != null) {
+                next.setLockerPrev(prev);
+            }
+            cursor.setLockerPrev(null);
+            cursor.setLockerNext(null);
+        }
+    }
+
+    /**
+     * @return true if this txn is willing to give up the handle lock to
+     * another txn before this txn ends.
+     */
+    @Override
+    public boolean isHandleLockTransferrable() {
+        return false;
+    }
+
+    /**
+     * Check whether any cursors associated with the txn are still open.
+     * @return true if open cursors exist
+     */
+    private boolean checkCursorsForClose()
+        throws DatabaseException {
+
+        CursorImpl c = cursorSet;
+        while (c != null) {
+            if (!c.isClosed()) {
+                return true;
+            }
+            c = c.getLockerNext();
+        }
+
+        return false;
+    }
+
+    /**
+     * Collect lock statistics for this transaction.
+     */
+    public LockStats collectStats(LockStats stats)
+        throws DatabaseException {
+
+        synchronized (this) {
+            int nReadLocks = (readLocks == null) ? 0 : readLocks.size();
+            stats.setNReadLocks(stats.getNReadLocks() + nReadLocks);
+            int nWriteLocks = (writeInfo == null) ? 0 : writeInfo.size();
+            stats.setNWriteLocks(stats.getNWriteLocks() + nWriteLocks);
+            stats.accumulateNTotalLocks(nReadLocks + nWriteLocks);
+        }
+
+        return stats;
+    }
+
+    /**
+     * Set the state of a transaction to ONLY_ABORTABLE.
+     */
+    @Override
+    public void setOnlyAbortable() {
+        txnState &= ~STATE_BITS;
+        txnState |= ONLY_ABORTABLE;
+    }
+
+    /**
+     * Return whether the transaction is in the ONLY_ABORTABLE state.
+     */
+    public boolean getOnlyAbortable() {
+        return (txnState & ONLY_ABORTABLE) != 0;
+    }
+
+    /**
+     * Throw an exception if the transaction is not open.
+     *
+     * If calledByAbort is true, it means we're being called
+     * from abort().
+     *
+     * Caller must invoke with "this" synchronized.
+     */
+    protected void checkState(boolean calledByAbort)
+        throws DatabaseException {
+
+        byte state = (byte) (txnState & STATE_BITS);
+        boolean ok = (state == USABLE);
+        boolean onlyAbortable = (state == ONLY_ABORTABLE);
+
+        if (!calledByAbort && onlyAbortable) {
+
+            /*
+             * It's ok for FindBugs to whine about id not being synchronized.
+             */
+            throw new IllegalStateException
+                ("Transaction " + id + " must be aborted.");
+        }
+
+        if (ok ||
+            (calledByAbort && onlyAbortable)) {
+            return;
+        }
+
+        /*
+         * It's ok for FindBugs to whine about id not being synchronized.
+         */
+        throw new IllegalStateException
+            ("Transaction " + id + " has been closed.");
+    }
+
+    /**
+     * Different subclasses find a repContext at different times, depending on
+     * when they have the context to know whether a transaction should be
+     * replicated. Auto Txns set this at construction time, Txns set this when
+     * the transaction is configured, ReplicatedTxns set it when the txn commit
+     * or abort arrives.
+     */
+    public void setRepContext(ReplicationContext repContext) {
+        this.repContext = repContext;
+    }
+
+    /**
+     * Mark this txn as closed and unregister it from the TxnManager.
+     */
+    protected void close(boolean isCommit)
+        throws DatabaseException {
+
+        synchronized (this) {
+            txnState &= ~STATE_BITS;
+            txnState |= CLOSED;
+        }
+
+        /*
+         * UnregisterTxn must be called outside the synchronization on this
+         * txn, because it gets the TxnManager's allTxns latch. The
+         * checkpointer also gets the allTxns latch, and within that latch,
+         * needs to synchronize on individual txns, so we must avoid a latching
+         * hierarchy conflict.
+         */
+        envImpl.getTxnManager().unRegisterTxn(this, isCommit);
+
+        /* Close this Locker. */
+        close();
+    }
+
+    public boolean isClosed() {
+        return ((txnState & CLOSED) != 0);
+    }
+
+    /* Non replicated txns don't use a node id. */
+    protected int getReplicatorNodeId() {
+        return 0;
+    }
+
+    /*
+     * Log support
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getPackedLongLogSize(lastLoggedLsn);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    /*
+     * It's ok for FindBugs to whine about id not being synchronized.
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writePackedLong(logBuffer, lastLoggedLsn);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     *
+     * It's ok for FindBugs to whine about id not being synchronized.
+     */
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion) {
+        id = LogUtils.readLong(logBuffer, (entryVersion < 6));
+        lastLoggedLsn = LogUtils.readLong(logBuffer, (entryVersion < 6));
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<txn id=\"");
+        sb.append(getId());
+        sb.append("\">");
+        sb.append(DbLsn.toString(lastLoggedLsn));
+        sb.append("</txn>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return getId();
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof Txn)) {
+            return false;
+        }
+
+        return id == ((Txn) other).id;
+    }
+
+    /**
+     * Transfer a single handle lock to the set of corresponding handles at
+     * commit time.
+     */
+    private void transferHandleLockToHandleSet(Long handleLockId,
+                                               Set<Database> dbHandleSet)
+        throws DatabaseException {
+
+        /* Create a set of destination transactions */
+        int numHandles = dbHandleSet.size();
+        Database[] dbHandles = new Database[numHandles];
+        dbHandles = dbHandleSet.toArray(dbHandles);
+        Locker[] destTxns = new Locker[numHandles];
+        for (int i = 0; i < numHandles; i++) {
+            destTxns[i] = BasicLocker.createBasicLocker(envImpl);
+        }
+
+        /* Move this lock to the destination txns. */
+        long nodeId = handleLockId.longValue();
+        lockManager.transferMultiple(nodeId, this, destTxns);
+
+        for (int i = 0; i < numHandles; i++) {
+
+            /*
+             * Make this handle and its handle protector txn remember each
+             * other.
+             */
+            destTxns[i].addToHandleMaps(handleLockId, dbHandles[i]);
+            DbInternal.dbSetHandleLocker(dbHandles[i], destTxns[i]);
+        }
+    }
+
+    /**
+     * Send trace messages to the java.util.logging logger. Don't rely on the
+     * logger alone to conditionalize whether we send this message; we don't
+     * even want to construct the message if the level is not enabled, since
+     * string construction can be frequent enough to show up on a performance
+     * profile.
+     */
+    private void traceCommit(int numWriteLocks, int numReadLocks) {
+        Logger logger = envImpl.getLogger();
+        if (logger.isLoggable(Level.FINE)) {
+            StringBuffer sb = new StringBuffer();
+            sb.append(" Commit:id = ").append(id);
+            sb.append(" numWriteLocks=").append(numWriteLocks);
+            sb.append(" numReadLocks = ").append(numReadLocks);
+            Tracer.trace(Level.FINE, envImpl, sb.toString());
+        }
+    }
+
+    /**
+     * Store information about a DatabaseImpl that will have to be
+     * purged at transaction commit or abort. This handles cleanup after
+     * operations like Environment.truncateDatabase and
+     * Environment.removeDatabase. Cleanup like this is done outside the
+     * usual transaction commit or node undo processing, because
+     * the mapping tree is always auto Txn'ed to avoid deadlock and is
+     * essentially non-transactional.
+     */
+    private static class DatabaseCleanupInfo {
+        DatabaseImpl dbImpl;
+
+        /* if true, clean on commit. If false, clean on abort. */
+        boolean deleteAtCommit;
+
+        DatabaseCleanupInfo(DatabaseImpl dbImpl,
+                            boolean deleteAtCommit) {
+            this.dbImpl = dbImpl;
+            this.deleteAtCommit = deleteAtCommit;
+        }
+    }
+
+    /* Transaction hooks used for replication support. */
+
+    /**
+     * A replicated environment introduces some new considerations when entering
+     * a transaction scope via an Environment.transactionBegin() operation.
+     *
+     * On a Replica, the transactionBegin() operation must wait until the
+     * Replica has synched up to where it satisfies the ConsistencyPolicy that
+     * is in effect.
+     *
+     * On a Master, the transactionBegin() must wait until the Feeder has
+     * sufficient connections to ensure that it can satisfy the
+     * ReplicaAckPolicy, since if it does not, it will fail at commit() and the
+     * work done in the transaction will need to be undone.
+     *
+     * This hook provides the mechanism for implementing the above support for
+     * replicated transactions. It ignores all non-replicated transactions.
+     *
+     * The hook throws ReplicaStateException if a Master switches to a
+     * Replica state while waiting for its Replica connections. Changes from a
+     * Replica to a Master are handled transparently to the application.
+     * Exceptions manifest themselves as DatabaseException at the interface to
+     * minimize use of replication-based exceptions in core JE.
+     *
+     * @param config the transaction config that applies to the txn
+     *
+     * @throws DatabaseException if there is a failure
+     */
+    protected void txnBeginHook(TransactionConfig config)
+        throws DatabaseException {
+        /* Overridden by Txn subclasses when appropriate */
+    }
+
+    /**
+     * This hook is invoked before the commit of a transaction that made changes
+     * to a replicated environment. It's invoked for transactions
+     * executed on the master or replica, but is only relevant to transactions
+     * being done on the master. When invoked for a transaction on a replica the
+     * implementation just returns.
+     *
+     * The hook is invoked at a very specific point in the normal commit
+     * sequence: immediately before the commit log entry is written to the log.
+     * It represents the last chance to abort the transaction and provides an
+     * opportunity to make some final checks before allowing the commit to go
+     * ahead. Note that it must still be possible to abort the transaction at
+     * the time the hook is invoked.
+     *
+     * After invocation of the "pre" hook, exactly one of the "post" hooks
+     * (postLogCommitHook or postLogAbortHook) must be invoked.
+     *
+     * Exceptions thrown by this hook result in the transaction being aborted
+     * and the exception being propagated back to the application.
+     *
+     * @throws DatabaseException if there was a problem and the transaction
+     * should be aborted.
+     */
+    protected void preLogCommitHook()
+        throws DatabaseException {
+        /* Overridden by Txn subclasses when appropriate */
+    }
+
+    /**
+     * This hook is invoked after the commit record has been written to the
+     * log, but before write locks have been released, so that other
+     * transactions cannot yet see the changes made by the transaction. At
+     * this point the transaction has been committed by the Master.
+     *
+     * Exceptions thrown by this hook result in the transaction being
+     * completed on the Master, that is, locks are released, etc., and the
+     * exception is propagated back to the application.
+     *
+     * @throws DatabaseException to indicate that there was a replication
+     * related problem that needs to be communicated back to the application.
+     */
+    protected void postLogCommitHook() throws DatabaseException {
+        /* Overridden by Txn subclasses when appropriate */
+    }
+
+    /**
+     * Invoked if the transaction associated with the preLogCommitHook was
+     * subsequently aborted, for example due to a lack of disk space. This
+     * method is responsible for any cleanup that may need to be done as a
+     * result of the abort.
+     *
+     * Note that only one of the "post" hooks (commit or abort) is invoked
+     * following the invocation of the "pre" hook.
+     */
+    protected void postLogAbortHook() {
+        /* Overridden by Txn subclasses when appropriate */
+    }
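+
+    /*
+     * Illustrative only, not JE's actual commit code: the hook contract
+     * above implies a call sequence along these lines, where
+     * logCommitRecord() is a hypothetical stand-in for writing the commit
+     * log entry:
+     *
+     *     preLogCommitHook();        // last chance to veto the commit
+     *     try {
+     *         logCommitRecord();
+     *         postLogCommitHook();   // exactly one of the "post" hooks
+     *     } catch (DatabaseException e) {
+     *         postLogAbortHook();    // ... follows the "pre" hook
+     *         throw e;
+     *     }
+     */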
+}
diff --git a/src/com/sleepycat/je/txn/TxnAbort.java b/src/com/sleepycat/je/txn/TxnAbort.java
new file mode 100644
index 0000000000000000000000000000000000000000..1fb85e547737b4ea770d53e6fbb0fc57f3b86d34
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnAbort.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnAbort.java,v 1.26.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * This class writes out a transaction abort record.
+ */
+public class TxnAbort extends TxnEnd {
+    public TxnAbort(long id, long lastLsn, int masterId) {
+        super(id, lastLsn, masterId);
+    }
+
+    /**
+     * For constructing from the log.
+     */
+    public TxnAbort() {
+    }
+
+    /*
+     * Log support
+     */
+
+    protected String getTagName() {
+        return "TxnAbort";
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof TxnAbort))
+            return false;
+
+        TxnAbort otherAbort = (TxnAbort) other;
+
+        return ((id == otherAbort.id) && 
+                (repMasterNodeId == otherAbort.repMasterNodeId));
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnCommit.java b/src/com/sleepycat/je/txn/TxnCommit.java
new file mode 100644
index 0000000000000000000000000000000000000000..a73918c91c83db2c4e99c904540e2794a8450952
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnCommit.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnCommit.java,v 1.28.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * This class writes out a transaction commit record.
+ */
+public class TxnCommit extends TxnEnd {
+    public TxnCommit(long id, long lastLsn, int masterId) {
+        super(id, lastLsn, masterId);
+    }
+
+    /**
+     * For constructing from the log.
+     */
+    public TxnCommit() {
+    }
+
+    /*
+     * Log support
+     */
+
+    protected String getTagName() {
+        return "TxnCommit";
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof TxnCommit))
+            return false;
+
+        TxnCommit otherCommit = (TxnCommit) other;
+
+        return ((id == otherCommit.id) && 
+                (repMasterNodeId == otherCommit.repMasterNodeId));
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnEnd.java b/src/com/sleepycat/je/txn/TxnEnd.java
new file mode 100644
index 0000000000000000000000000000000000000000..8105d68c5efcda37e236bb5e6a0b9ff393d14e9f
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnEnd.java
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnEnd.java,v 1.41.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Base class for transaction end log records: commit, abort and prepare.
+ */
+public abstract class TxnEnd implements Loggable {
+
+    protected long id;
+    protected Timestamp time;
+    private long lastLsn;
+
+    /* For replication - master node which wrote this record. */
+    int repMasterNodeId;
+
+    TxnEnd(long id, long lastLsn, int repMasterNodeId) {
+        this.id = id;
+        time = new Timestamp(System.currentTimeMillis());
+        this.lastLsn = lastLsn;
+        this.repMasterNodeId = repMasterNodeId;
+    }
+
+    /**
+     * For constructing from the log
+     */
+    public TxnEnd() {
+        lastLsn = DbLsn.NULL_LSN;
+    }
+
+    /*
+     * Accessors.
+     */
+    public long getId() {
+        return id;
+    }
+
+    public Timestamp getTime() {
+        return time;
+    }
+
+    long getLastLsn() {
+        return lastLsn;
+    }
+
+    public int getMasterNodeId() {
+        return repMasterNodeId;
+    }
+
+    protected abstract String getTagName();
+
+    /*
+     * Log support for writing.
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getTimestampLogSize(time) +
+            LogUtils.getPackedLongLogSize(lastLsn) +
+            LogUtils.getPackedIntLogSize(repMasterNodeId);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writeTimestamp(logBuffer, time);
+        LogUtils.writePackedLong(logBuffer, lastLsn);
+        LogUtils.writePackedInt(logBuffer, repMasterNodeId);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion) {
+
+        /* The versions < 6 are unpacked. */
+        boolean isUnpacked = (entryVersion < 6);
+        id = LogUtils.readLong(logBuffer, isUnpacked);
+        time = LogUtils.readTimestamp(logBuffer, isUnpacked);
+        lastLsn = LogUtils.readLong(logBuffer, isUnpacked);
+
+        if (entryVersion >= 6) {
+            repMasterNodeId = LogUtils.readInt(logBuffer,
+                                               false /* unpacked */);
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<").append(getTagName());
+        sb.append(" id=\"").append(id);
+        sb.append("\" time=\"").append(time);
+        sb.append("\" master=\"").append(repMasterNodeId);
+        sb.append("\">");
+        sb.append(DbLsn.toString(lastLsn));
+        sb.append("</").append(getTagName()).append(">");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return id;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnManager.java b/src/com/sleepycat/je/txn/TxnManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..edcc6e2fe675b44f6828d035ea03e330fa411fb1
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnManager.java
@@ -0,0 +1,403 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnManager.java,v 1.81.2.4 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.TransactionStats;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Class to manage transactions.  Essentially a set of all active
+ * transactions, with add and remove methods and a latch protecting the set.
+ */
+public class TxnManager {
+
+    /*
+     * All NullTxns share the same id so as not to eat from the id number
+     * space.
+     *
+     * Negative transaction ids are used by the master node of a replication
+     * group. That sequence begins at -10 to avoid conflict with the
+     * NULL_TXN_ID and leave room for other special purpose ids.
+     */
+    static final long NULL_TXN_ID = -1;
+    private static final long FIRST_NEGATIVE_ID = -10;
+    private LockManager lockManager;
+    private EnvironmentImpl envImpl;
+    private Set<Txn> allTxns;
+    /* Maps Xids to Txns. */
+    private Map<Xid, Txn> allXATxns;
+    /* Maps Threads to Transactions when there are thread-implied transactions. */
+    private Map<Thread, Transaction> thread2Txn;
+
+    /*
+     * Positive and negative transaction ids are used in a replicated system
+     * to let replicated transactions intermingle with local transactions.
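+     *
+     * Illustratively, from a fresh environment the local sequence hands out
+     * ids 1, 2, 3, ... (incrementAndGet) and the replicated sequence hands
+     * out -11, -12, -13, ... (decrementAndGet from FIRST_NEGATIVE_ID).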
+     */
+    private AtomicLong lastUsedLocalTxnId;
+    private AtomicLong lastUsedReplicatedTxnId;
+    private int nActiveSerializable;
+
+    /* Locker Stats */
+    private long numBegins;
+    private long numCommits;
+    private long numAborts;
+    private long numXAPrepares;
+    private long numXACommits;
+    private long numXAAborts;
+
+    public TxnManager(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        if (EnvironmentImpl.getFairLatches()) {
+            lockManager = new LatchedLockManager(envImpl);
+        } else {
+            lockManager = new SyncedLockManager(envImpl);
+        }
+
+        if (envImpl.isNoLocking()) {
+            lockManager = new DummyLockManager(envImpl, lockManager);
+        }
+
+        this.envImpl = envImpl;
+        allTxns = new HashSet<Txn>();
+        allXATxns = Collections.synchronizedMap(new HashMap<Xid, Txn>());
+        thread2Txn = new ConcurrentHashMap<Thread, Transaction>();
+
+        numBegins = 0;
+        numCommits = 0;
+        numAborts = 0;
+        numXAPrepares = 0;
+        numXACommits = 0;
+        numXAAborts = 0;
+        lastUsedLocalTxnId = new AtomicLong(0);
+        lastUsedReplicatedTxnId = new AtomicLong(FIRST_NEGATIVE_ID);
+    }
+
+    /**
+     * Set the txn id sequence.
+     */
+    public void setLastTxnId(long lastReplicatedTxnId, long lastLocalId) {
+        lastUsedReplicatedTxnId.set(lastReplicatedTxnId);
+        lastUsedLocalTxnId.set(lastLocalId);
+    }
+
+    /**
+     * Get the last used id, for checkpoint info.
+     */
+    public long getLastLocalTxnId() {
+        return lastUsedLocalTxnId.get();
+    }
+
+    public long getLastReplicatedTxnId() {
+        return lastUsedReplicatedTxnId.get();
+    }
+
+    public long getNextReplicatedTxnId() {
+        return lastUsedReplicatedTxnId.decrementAndGet();
+    }
+
+    /**
+     * Get the next transaction id to use.
+     */
+    long getNextTxnId() {
+        assert(!(envImpl.isReplicated() && envImpl.getReplicator().isMaster()));
+        return lastUsedLocalTxnId.incrementAndGet();
+    }
+
+    /*
+     * Only set the replicated txn id if the replayTxnId represents a
+     * newer, later value in the replication stream.
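+     *
+     * For example, if lastUsedReplicatedTxnId is currently -15, a replay txn
+     * id of -20 is newer (more negative) and replaces it, while a replay txn
+     * id of -12 leaves -15 in place.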
+     */
+    public void updateFromReplay(long replayTxnId) {
+
+        assert replayTxnId < 0 :
+            "replay txn id is unexpectedly positive " + replayTxnId;
+
+        while (true) {
+            long currentVal = lastUsedReplicatedTxnId.get();
+            if (replayTxnId < currentVal) {
+
+                /*
+                 * This replayTxnId is newer than any other replicatedTxnId
+                 * known by this node.
+                 */
+                boolean ok = lastUsedReplicatedTxnId.weakCompareAndSet
+                    (currentVal, replayTxnId);
+                if (ok) {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+    }
+
+    /**
+     * Create a new transaction.
+     * @param parent for nested transactions, not yet supported
+     * @param txnConfig specifies txn attributes
+     * @return the new txn
+     */
+    public Txn txnBegin(Transaction parent, TransactionConfig txnConfig)
+        throws DatabaseException {
+
+        if (parent != null) {
+            throw new DatabaseException
+                ("Nested transactions are not supported yet.");
+        }
+
+        return Txn.createTxn(envImpl, txnConfig);
+    }
+
+    /**
+     * Give transactions and environment access to lock manager.
+     */
+    public LockManager getLockManager() {
+        return lockManager;
+    }
+
+    /**
+     * Called when txn is created.
+     */
+    void registerTxn(Txn txn)
+        throws DatabaseException {
+
+        synchronized (allTxns) {
+            allTxns.add(txn);
+            if (txn.isSerializableIsolation()) {
+                nActiveSerializable++;
+            }
+            numBegins++;
+        }
+    }
+
+    /**
+     * Called when txn ends.
+     */
+    void unRegisterTxn(Txn txn, boolean isCommit)
+        throws DatabaseException {
+
+        synchronized (allTxns) {
+            allTxns.remove(txn);
+
+            /* Remove any accumulated MemoryBudget delta for the Txn. */
+            envImpl.getMemoryBudget().
+                updateTxnMemoryUsage(0 - txn.getBudgetedMemorySize());
+            if (isCommit) {
+                numCommits++;
+            } else {
+                numAborts++;
+            }
+            if (txn.isSerializableIsolation()) {
+                nActiveSerializable--;
+            }
+        }
+    }
+
+    /**
+     * Called when txn is created.
+     */
+    public void registerXATxn(Xid xid, Txn txn, boolean isPrepare)
+        throws DatabaseException {
+
+        if (!allXATxns.containsKey(xid)) {
+            allXATxns.put(xid, txn);
+            envImpl.getMemoryBudget().updateTxnMemoryUsage
+                (MemoryBudget.HASHMAP_ENTRY_OVERHEAD);
+        }
+
+        if (isPrepare) {
+            numXAPrepares++;
+        }
+    }
+
+    /**
+     * Called when XATransaction is prepared.
+     */
+    public void notePrepare() {
+        numXAPrepares++;
+    }
+
+    /**
+     * Called when txn ends.
+     */
+    void unRegisterXATxn(Xid xid, boolean isCommit)
+        throws DatabaseException {
+
+        if (allXATxns.remove(xid) == null) {
+            throw new DatabaseException
+                ("XA Transaction " + xid +
+                 " can not be unregistered.");
+        }
+        envImpl.getMemoryBudget().updateTxnMemoryUsage
+            (0 - MemoryBudget.HASHMAP_ENTRY_OVERHEAD);
+        if (isCommit) {
+            numXACommits++;
+        } else {
+            numXAAborts++;
+        }
+    }
+
+    /**
+     * Retrieve a Txn object from an Xid.
+     */
+    public Txn getTxnFromXid(Xid xid)
+        throws DatabaseException {
+
+        return allXATxns.get(xid);
+    }
+
+    /**
+     * Called when txn is assoc'd with this thread.
+     */
+    public void setTxnForThread(Transaction txn) {
+
+        Thread curThread = Thread.currentThread();
+        thread2Txn.put(curThread, txn);
+    }
+
+    /**
+     * Called when the txn is dissociated from this thread.
+     */
+    public Transaction unsetTxnForThread()
+        throws DatabaseException {
+
+        Thread curThread = Thread.currentThread();
+        return thread2Txn.remove(curThread);
+    }
+
+    /**
+     * Retrieve a Txn object for this Thread.
+     */
+    public Transaction getTxnForThread()
+        throws DatabaseException {
+
+        return thread2Txn.get(Thread.currentThread());
+    }
+
+    public Xid[] XARecover()
+        throws DatabaseException {
+
+        Set<Xid> xidSet = allXATxns.keySet();
+        Xid[] ret = new Xid[xidSet.size()];
+        ret = xidSet.toArray(ret);
+
+        return ret;
+    }
+
+    /**
+     * Returns whether there are any active serializable transactions,
+     * excluding the transaction given (if non-null).  This is intentionally
+     * returned without latching, since latching would not make the act of
+     * reading an integer more atomic than it already is.
+     */
+    public boolean
+        areOtherSerializableTransactionsActive(Locker excludeLocker) {
+        int exclude =
+            (excludeLocker != null &&
+             excludeLocker.isSerializableIsolation()) ?
+            1 : 0;
+        return (nActiveSerializable - exclude > 0);
+    }
+
+    /**
+     * Get the earliest LSN of all the active transactions, for checkpoint.
+     * Returns NULL_LSN if no transaction is currently active.
+     */
+    public long getFirstActiveLsn()
+        throws DatabaseException {
+
+        /*
+         * Note that the latching hierarchy calls for synchronizing on
+         * allTxns first, then synchronizing on individual txns.
+         */
+        long firstActive = DbLsn.NULL_LSN;
+        synchronized (allTxns) {
+            Iterator<Txn> iter = allTxns.iterator();
+            while (iter.hasNext()) {
+                long txnFirstActive = iter.next().getFirstActiveLsn();
+                if (firstActive == DbLsn.NULL_LSN) {
+                    firstActive = txnFirstActive;
+                } else if (txnFirstActive != DbLsn.NULL_LSN) {
+                    if (DbLsn.compareTo(txnFirstActive, firstActive) < 0) {
+                        firstActive = txnFirstActive;
+                    }
+                }
+            }
+        }
+        return firstActive;
+    }
+
+    /*
+     * Statistics
+     */
+
+    /**
+     * Collect transaction related stats.
+     */
+    public TransactionStats txnStat(StatsConfig config)
+        throws DatabaseException {
+
+        TransactionStats stats = new TransactionStats();
+        synchronized (allTxns) {
+            stats.setNBegins(numBegins);
+            stats.setNCommits(numCommits);
+            stats.setNAborts(numAborts);
+            stats.setNXAPrepares(numXAPrepares);
+            stats.setNXACommits(numXACommits);
+            stats.setNXAAborts(numXAAborts);
+            stats.setNActive(allTxns.size());
+            TransactionStats.Active[] activeSet =
+                new TransactionStats.Active[stats.getNActive()];
+            stats.setActiveTxns(activeSet);
+            Iterator<Txn> iter = allTxns.iterator();
+            int i = 0;
+            while (iter.hasNext()) {
+                Locker txn = iter.next();
+                activeSet[i] = new TransactionStats.Active
+                    (txn.toString(), txn.getId(), 0);
+                i++;
+            }
+            if (config.getClear()) {
+                numCommits = 0;
+                numAborts = 0;
+                numXACommits = 0;
+                numXAAborts = 0;
+            }
+        }
+        return stats;
+    }
+
+    /**
+     * Collect lock related stats.
+     */
+    public LockStats lockStat(StatsConfig config)
+        throws DatabaseException {
+
+        return lockManager.lockStat(config);
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnPrepare.java b/src/com/sleepycat/je/txn/TxnPrepare.java
new file mode 100644
index 0000000000000000000000000000000000000000..334bea32115e6a7e53a866d3e8836e5854e8f16f
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnPrepare.java
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnPrepare.java,v 1.17.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.nio.ByteBuffer;
+
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * This class writes out a transaction prepare record.
+ */
+public class TxnPrepare extends TxnEnd implements Loggable {
+
+    private Xid xid;
+
+    public TxnPrepare(long id, Xid xid) {
+        /* LastLSN is never used. */
+        super(id, DbLsn.NULL_LSN, 0 /* masterNodeId, never replicated. */);
+        this.xid = xid;
+    }
+
+    /**
+     * For constructing from the log.
+     */
+    public TxnPrepare() {
+    }
+
+    public Xid getXid() {
+        return xid;
+    }
+
+    /*
+     * Log support
+     */
+
+    protected String getTagName() {
+        return "TxnPrepare";
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getTimestampLogSize(time) +
+            LogUtils.getXidSize(xid);
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writeTimestamp(logBuffer, time);
+        LogUtils.writeXid(logBuffer, xid);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer logBuffer, byte entryVersion) {
+        boolean unpacked = (entryVersion < 6);
+        id = LogUtils.readLong(logBuffer, unpacked);
+        time = LogUtils.readTimestamp(logBuffer, unpacked);
+        xid = LogUtils.readXid(logBuffer);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    @Override
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<").append(getTagName());
+        sb.append(" id=\"").append(id);
+        sb.append("\" time=\"").append(time);
+        sb.append("\">");
+        sb.append(xid); // xid already formatted as xml
+        sb.append("</").append(getTagName()).append(">");
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/WriteLockInfo.java b/src/com/sleepycat/je/txn/WriteLockInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..d880d4fe1cda6f234d63615502dee9d2f0a32ad3
--- /dev/null
+++ b/src/com/sleepycat/je/txn/WriteLockInfo.java
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: WriteLockInfo.java,v 1.19.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.utilint.DbLsn;
+
+/*
+ * Lock and abort LSN kept for each write locked node. Allows us to log with
+ * the correct abort LSN. For example, when a transaction overwrites a record,
+ * the record's pre-update LSN is saved here as the abortLsn so that an abort
+ * can restore the original version.
+ */
+public class WriteLockInfo {
+
+    /*
+     * The original LSN. This is stored in the LN log entry.  May be NULL_LSN
+     * if the node was created by this transaction.
+     */
+    long abortLsn = DbLsn.NULL_LSN;
+
+    /*
+     * The original setting of the knownDeleted flag.  It parallels abortLsn.
+     */
+    boolean abortKnownDeleted;
+
+    /*
+     * Size of the original log entry, or zero if abortLsn is NULL_LSN or if
+     * the size is not known.  Used for obsolete counting during a commit.
+     */
+    int abortLogSize;
+
+    /*
+     * The database of the node, or null if abortLsn is NULL_LSN.  Used for
+     * obsolete counting during a commit.
+     */
+    DatabaseImpl abortDb;
+
+    /*
+     * True if the node has never been locked before. Used so we can determine
+     * when to set abortLsn.
+     */
+    boolean neverLocked;
+
+    /*
+     * True if the node was created this transaction.
+     */
+    boolean createdThisTxn;
+
+    static final WriteLockInfo basicWriteLockInfo =
+        new WriteLockInfo();
+
+    public // for Sizeof
+    WriteLockInfo() {
+        abortLsn = DbLsn.NULL_LSN;
+        abortKnownDeleted = false;
+        neverLocked = true;
+        createdThisTxn = false;
+    }
+
+    public boolean getAbortKnownDeleted() {
+        return abortKnownDeleted;
+    }
+
+    public long getAbortLsn() {
+        return abortLsn;
+    }
+
+    public void setAbortInfo(DatabaseImpl db, int logSize) {
+        abortDb = db;
+        abortLogSize = logSize;
+    }
+
+    public void copyAbortInfo(WriteLockInfo fromInfo) {
+        abortDb = fromInfo.abortDb;
+        abortLogSize = fromInfo.abortLogSize;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/package.html b/src/com/sleepycat/je/txn/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..0857c5844e0de184302ca024777e3c3e13151457
--- /dev/null
+++ b/src/com/sleepycat/je/txn/package.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+
+ $Id: package.html,v 1.8.2.2 2010/01/04 15:30:37 cwl Exp $
+-->
+</head>
+<body bgcolor="white">
+
+Provides classes and interfaces for transactions in JE.
+
+
+<h2>Package Specification</h2>
+
+(None)
+
+<!-- Put @see and @since tags down here. -->
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/util/DbBackup.java b/src/com/sleepycat/je/util/DbBackup.java
new file mode 100644
index 0000000000000000000000000000000000000000..00c6c773760bf94885e3b9151f70752f85d36f86
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbBackup.java
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbBackup.java,v 1.15.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * DbBackup is a helper class for stopping and restarting JE background
+ * activity in an open environment in order to simplify backup operations. It
+ * also lets the application create a backup which can support restoring the
+ * environment to a specific point in time.
+ * <p>
+ * <b>Backing up without DbBackup:</b>
+ * Because JE has an append-only log file architecture, it is always possible
+ * to do a hot backup without the use of DbBackup by copying all log files
+ * (.jdb files) to your archival location. As long as the log files are copied
+ * in alphabetical order (numerical in effect), <i>and</i> all log files are
+ * copied, the environment can be successfully backed up without any need to
+ * stop database operations or background activity. This means that your
+ * backup operation must loop (as sketched below) to check for the creation of
+ * new log files before deciding that the backup is finished. For example:
+ * <pre>
+ * time    files in                    activity
+ *         environment
+ *
+ *  t0     000000001.jdb     Backup starts copying file 1
+ *         000000003.jdb
+ *         000000004.jdb
+ *
+ *  t1     000000001.jdb     JE log cleaner migrates portion of file 3 to newly
+ *         000000004.jdb     created file 5 and deletes file 3. Backup finishes
+ *         000000005.jdb     file 1, starts copying file 4. Backup MUST include
+ *                           file 5 for a consistent backup!
+ *
+ *  t2     000000001.jdb     Backup finishes copying file 4, starts and finishes
+ *         000000004.jdb     file 5, has caught up. Backup ends.
+ *         000000005.jdb
+ *</pre>
+ * <p>
+ * In the example above, the backup operation must be sure to copy file 5,
+ * which came into existence after the backup had started. If the backup
+ * stopped operations at file 4, the backup set would include only file 1 and
+ * 4, omitting file 3, which would be an inconsistent set.
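+ * <p>
+ * A minimal sketch of such a copy loop, where listSortedJdbFiles and
+ * copyToArchive are hypothetical application helpers rather than JE APIs:
+ * <pre>
+ *    java.util.Set copied = new java.util.HashSet();
+ *    boolean foundNew = true;
+ *    while (foundNew) {
+ *        foundNew = false;
+ *        // Re-list the directory on each pass; new files may have appeared.
+ *        for (String file : listSortedJdbFiles(envHome)) {
+ *            if (copied.add(file)) {
+ *                copyToArchive(file);
+ *                foundNew = true;
+ *            }
+ *        }
+ *    }
+ * </pre>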
+ * <p>
+ * Also note that log file 5 may not have filled up before it was copied to
+ * archival storage. On the next backup, there might be a newer, larger version
+ * of file 5, and that newer version should replace the older file 5 in archive
+ * storage.
+ * <p>
+ * <b>Backing up with DbBackup</b>
+ * <p>
+ * DbBackup helps simplify application backup by defining the set of files that
+ * must be copied for each backup operation. If the environment directory has
+ * read/write protection, the application must pass DbBackup an open,
+ * read/write environment handle.
+ * <p>
+ * When entering backup mode, JE
+ * determines the set of log files needed for a consistent backup, and freezes
+ * all changes to those files. The application can copy that defined set of
+ * files and finish operation without checking for the ongoing creation of new
+ * files. Also, there will be no need to check for a newer version of the last
+ * file on the next backup.
+ * <p>
+ * In the example above, if DbBackup was used at t0, the application
+ * would only have to copy files 1, 3 and 4 to back up. On a subsequent backup,
+ * the application could start its copying at file 5. There would be no need
+ * to check for a newer version of file 4.
+ * <p>
+ * An example usage:
+ * <pre>
+ *
+ *    Environment env = new Environment(...);
+ *    DbBackup backupHelper = new DbBackup(env);
+ *
+ *    // Find the file number of the last file in the previous backup
+ *    // persistently, by either checking the backup archive, or saving
+ *    // state in a persistent file.
+ *    long lastFileCopiedInPrevBackup =  ...
+ *
+ *    // Start backup, find out what needs to be copied.
+ *    backupHelper.startBackup();
+ *    try {
+ *        String[] filesForBackup =
+ *             backupHelper.getLogFilesInBackupSet(lastFileCopiedInPrevBackup);
+ *
+ *        // Copy the files to archival storage.
+ *        myApplicationCopyMethod(filesForBackup);
+ *
+ *        // Update our knowledge of the last file saved in the backup set,
+ *        // so we can copy less on the next backup
+ *        lastFileCopiedInPrevBackup = backupHelper.getLastFileInBackupSet();
+ *        myApplicationSaveLastFile(lastFileCopiedInPrevBackup);
+ *    } finally {
+ *        // Remember to exit backup mode, or all log files won't be cleaned
+ *        // and disk usage will bloat.
+ *       backupHelper.endBackup();
+ *   }
+ */
+public class DbBackup {
+
+    private EnvironmentImpl envImpl;
+    private boolean backupStarted;
+    private long lastFileInBackupSet = -1;
+    private boolean envIsReadOnly;
+
+    /**
+     * DbBackup must be created with an open, valid environment handle.
+     * If the environment directory has read/write permissions, the environment
+     * handle must be configured for read/write.
+     */
+    public DbBackup(Environment env)
+        throws DatabaseException {
+
+        /* Check that the Environment is open. */
+        env.checkHandleIsValid();
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+        FileManager fileManager = envImpl.getFileManager();
+
+        /*
+         * If the environment is writable, we need a r/w environment handle
+         * in order to flip the file.
+         */
+        envIsReadOnly = fileManager.checkEnvHomePermissions(true);
+        if ((!envIsReadOnly) && envImpl.isReadOnly()) {
+            throw new DatabaseException(this.getClass().getName() +
+                                " requires a read/write Environment handle");
+        }
+    }
+
+    /**
+     * Start backup mode in order to determine the definitive backup set needed
+     * for this point in time. After calling this method, log cleaning will be
+     * disabled until endBackup() is called. Be sure to call endBackup() to
+     * re-enable log cleaning or disk space usage will bloat.
+     */
+    public synchronized void startBackup()
+        throws DatabaseException {
+
+        if (backupStarted) {
+            throw new DatabaseException(this.getClass().getName() +
+                                         ".startBackup was already called");
+        }
+
+        backupStarted = true;
+
+        try {
+            /* Prevent any file deletions. */
+            envImpl.getCleaner().setDeleteProhibited();
+
+            FileManager fileManager = envImpl.getFileManager();
+
+            /*
+             * Flip the log so that we can know that the list of files
+             * corresponds to a given point.
+             */
+            if (envIsReadOnly) {
+                lastFileInBackupSet = fileManager.getLastFileNum().longValue();
+            } else {
+                long newFileNum =  envImpl.forceLogFileFlip();
+                lastFileInBackupSet = DbLsn.getFileNumber(newFileNum) - 1;
+            }
+        } catch (DatabaseException e) {
+            backupStarted = false;
+            throw e;
+        }
+    }
+
+    /**
+     * End backup mode, thereby re-enabling normal JE log cleaning.
+     */
+    public synchronized void endBackup()
+        throws DatabaseException {
+
+        checkBackupStarted();
+
+        try {
+            envImpl.getCleaner().clearDeleteProhibited();
+        } finally {
+            backupStarted = false;
+        }
+    }
+
+    /**
+     * Can only be called in backup mode, after startBackup() has been called.
+     *
+     * @return the file number of the last file in the current backup set.
+     * Save this value to reduce the number of files that must be copied at
+     * the next backup session.
+     */
+    public synchronized long getLastFileInBackupSet()
+        throws DatabaseException {
+
+        checkBackupStarted();
+        return lastFileInBackupSet;
+    }
+
+    /**
+     * Get the list of all files that are needed for the environment at the
+     * point of time when backup mode started.  Can only be called in backup
+     * mode, after startBackup() has been called.
+     *
+     * @return the names of all files in the backup set, sorted in alphabetical
+     * order.
+     */
+    public synchronized String[] getLogFilesInBackupSet()
+        throws DatabaseException {
+
+        checkBackupStarted();
+        return envImpl.getFileManager().listFiles(0, lastFileInBackupSet);
+    }
+
+    /**
+     * Get the minimum list of files that must be copied for this backup. This
+     * consists of the set of backup files that are greater than the last file
+     * copied in the previous backup session.  Can only be called in backup
+     * mode, after startBackup() has been called.
+     *
+     * @param lastFileCopiedInPrevBackup file number of last file copied in the
+     * last backup session, obtained from getLastFileInBackupSet().
+     *
+     * @return the names of all the files in the backup set that come after
+     * lastFileCopiedInPrevBackup.
+     */
+    public synchronized
+        String[] getLogFilesInBackupSet(long lastFileCopiedInPrevBackup)
+        throws DatabaseException {
+
+        checkBackupStarted();
+        FileManager fileManager = envImpl.getFileManager();
+        return fileManager.listFiles(lastFileCopiedInPrevBackup + 1,
+                                      lastFileInBackupSet);
+    }
+
+    private void checkBackupStarted()
+        throws DatabaseException {
+
+        if (!backupStarted) {
+            throw new DatabaseException(this.getClass().getName() +
+                                        ".startBackup was not called");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbCacheSize.java b/src/com/sleepycat/je/util/DbCacheSize.java
new file mode 100644
index 0000000000000000000000000000000000000000..a202639b1c3d06fca5747cf588d7e0740f707cc8
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbCacheSize.java
@@ -0,0 +1,620 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DbCacheSize.java,v 1.16 2008/01/24 14:59:29 linda Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.math.BigInteger;
+import java.text.NumberFormat;
+import java.util.Random;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * Estimating JE in-memory sizes as a function of key and data size is not
+ * straightforward for two reasons. There is some fixed overhead for each btree
+ * internal node, so tree fanout and degree of node sparseness impact memory
+ * consumption. In addition, JE compresses some of the internal nodes where
+ * possible, but compression depends on on-disk layouts.
+ * <p>
+ * DbCacheSize is an aid for estimating cache sizes. To get an estimate of the
+ * in-memory footprint for a given database, specify the number of records and
+ * record characteristics and DbCacheSize will return a minimum and maximum
+ * estimate of the cache size required for holding the database in memory.
+ * If the user specifies the record's data size, the utility will return both
+ * values for holding just the internal nodes of the btree, and for holding the
+ * entire database in cache.
+ *
+ * <p>
+ * Note that "cache size" is a percentage more than "btree size", to cover
+ * general environment resources like log buffers. Each invocation of the
+ * utility returns an estimate for a single database in an environment.  For an
+ * environment with multiple databases, run the utility for each database, add
+ * up the btree sizes, and then add 10 percent.
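+ * For example (illustrative numbers only), two databases with btree sizes of
+ * 30MB and 20MB would call for roughly (30 + 20) * 1.10 = 55MB of cache.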
+ *
+ * <p>
+ * Note that the utility does not yet cover duplicate records and the API is
+ * subject to change release to release.
+ *
+ * <p>
+ * The only required parameters are the number of records and key size.
+ * Data size, non-tree cache overhead, btree fanout, and other parameters
+ * can also be provided. For example:
+ *
+ * <pre>
+ * $ java DbCacheSize -records 554719 -key 16 -data 100
+ * Inputs: records=554719 keySize=16 dataSize=100 nodeMax=128 density=80%
+ * overhead=10%
+ *
+ *     Cache Size      Btree Size  Description
+ * --------------  --------------  -----------
+ *     30,547,440      27,492,696  Minimum, internal nodes only
+ *     41,460,720      37,314,648  Maximum, internal nodes only
+ *    114,371,644     102,934,480  Minimum, internal nodes and leaf nodes
+ *    125,284,924     112,756,432  Maximum, internal nodes and leaf nodes
+ *
+ * Btree levels: 3
+ * </pre>
+ *
+ * <p>
+ * This says that the minimum cache size to hold only the internal nodes of the
+ * btree in cache is approximately 30MB. The maximum size to hold the entire
+ * database in cache, both internal nodes and data records, is 125MB.
+ * See {@link DbCacheSize#main} for full parameter descriptions.
+ */
+public class DbCacheSize {
+
+    private static final NumberFormat INT_FORMAT =
+        NumberFormat.getIntegerInstance();
+
+    private static final String HEADER =
+        "    Cache Size      Btree Size  Description\n" +
+        "--------------  --------------  -----------";
+    //   12345678901234  12345678901234
+    //                 12
+    private static final int COLUMN_WIDTH = 14;
+    private static final int COLUMN_SEPARATOR = 2;
+
+    private long records;
+    private int keySize;
+    private int dataSize;
+    private int nodeMax;
+    private int density;
+    private long overhead;
+    private long minInBtreeSize;
+    private long maxInBtreeSize;
+    private long minInCacheSize;
+    private long maxInCacheSize;
+    private long maxInBtreeSizeWithData;
+    private long maxInCacheSizeWithData;
+    private long minInBtreeSizeWithData;
+    private long minInCacheSizeWithData;
+    private int nLevels = 1;
+
+    public DbCacheSize(long records,
+                       int keySize,
+                       int dataSize,
+                       int nodeMax,
+                       int density,
+                       long overhead) {
+        this.records = records;
+        this.keySize = keySize;
+        this.dataSize = dataSize;
+        this.nodeMax = nodeMax;
+        this.density = density;
+        this.overhead = overhead;
+    }
+
+    public long getMinCacheSizeInternalNodesOnly() {
+        return minInCacheSize;
+    }
+
+    public long getMaxCacheSizeInternalNodesOnly() {
+        return maxInCacheSize;
+    }
+
+    public long getMinBtreeSizeInternalNodesOnly() {
+        return minInBtreeSize;
+    }
+
+    public long getMaxBtreeSizeInternalNodesOnly() {
+        return maxInBtreeSize;
+    }
+
+    public long getMinCacheSizeWithData() {
+        return minInCacheSizeWithData;
+    }
+
+    public long getMaxCacheSizeWithData() {
+        return maxInCacheSizeWithData;
+    }
+
+    public long getMinBtreeSizeWithData() {
+        return minInBtreeSizeWithData;
+    }
+
+    public long getMaxBtreeSizeWithData() {
+        return maxInBtreeSizeWithData;
+    }
+
+    public int getNLevels() {
+        return nLevels;
+    }
+
+    public static void main(String[] args) {
+
+        try {
+            long records = 0;
+            int keySize = 0;
+            int dataSize = -1;
+            int nodeMax = 128;
+            int density = 80;
+            long overhead = 0;
+            File measureDir = null;
+            boolean measureRandom = false;
+
+            for (int i = 0; i < args.length; i += 1) {
+                String name = args[i];
+                String val = null;
+                if (i < args.length - 1 && !args[i + 1].startsWith("-")) {
+                    i += 1;
+                    val = args[i];
+                }
+                if (name.equals("-records")) {
+                    if (val == null) {
+                        usage("No value after -records");
+                    }
+                    try {
+                        records = Long.parseLong(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (records <= 0) {
+                        usage(val + " is not a positive integer");
+                    }
+                } else if (name.equals("-key")) {
+                    if (val == null) {
+                        usage("No value after -key");
+                    }
+                    try {
+                        keySize = Integer.parseInt(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (keySize <= 0) {
+                        usage(val + " is not a positive integer");
+                    }
+                } else if (name.equals("-data")) {
+                    if (val == null) {
+                        usage("No value after -data");
+                    }
+                    try {
+                        dataSize = Integer.parseInt(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (dataSize < 0) {
+                        usage(val + " is not a non-negative integer");
+                    }
+                } else if (name.equals("-nodemax")) {
+                    if (val == null) {
+                        usage("No value after -nodemax");
+                    }
+                    try {
+                        nodeMax = Integer.parseInt(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (nodeMax <= 0) {
+                        usage(val + " is not a positive integer");
+                    }
+                } else if (name.equals("-density")) {
+                    if (val == null) {
+                        usage("No value after -density");
+                    }
+                    try {
+                        density = Integer.parseInt(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (density < 1 || density > 100) {
+                        usage(val + " is not betwen 1 and 100");
+                    }
+                } else if (name.equals("-overhead")) {
+                    if (val == null) {
+                        usage("No value after -overhead");
+                    }
+                    try {
+                        overhead = Long.parseLong(val);
+                    } catch (NumberFormatException e) {
+                        usage(val + " is not a number");
+                    }
+                    if (overhead < 0) {
+                        usage(val + " is not a non-negative integer");
+                    }
+                } else if (name.equals("-measure")) {
+                    if (val == null) {
+                        usage("No value after -measure");
+                    }
+                    measureDir = new File(val);
+                } else if (name.equals("-measurerandom")) {
+                    measureRandom = true;
+                } else {
+                    usage("Unknown arg: " + name);
+                }
+            }
+
+            if (records == 0) {
+                usage("-records not specified");
+            }
+
+            if (keySize == 0) {
+                usage("-key not specified");
+            }
+
+	    DbCacheSize dbCacheSize = new DbCacheSize
+		(records, keySize, dataSize, nodeMax, density, overhead);
+	    dbCacheSize.calculateCacheSizes();
+	    dbCacheSize.printCacheSizes(System.out);
+
+            if (measureDir != null) {
+                measure(System.out, measureDir, records, keySize, dataSize,
+                        nodeMax, measureRandom);
+            }
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+        }
+    }
+
+    private static void usage(String msg) {
+
+        if (msg != null) {
+            System.out.println(msg);
+        }
+
+        System.out.println
+            ("usage:" +
+             "\njava "  + CmdUtil.getJavaCommand(DbCacheSize.class) +
+             "\n   -records <count>" +
+             "\n      # Total records (key/data pairs); required" +
+             "\n   -key <bytes> " +
+             "\n      # Average key bytes per record; required" +
+             "\n  [-data <bytes>]" +
+             "\n      # Average data bytes per record; if omitted no leaf" +
+             "\n      # node sizes are included in the output" +
+             "\n  [-nodemax <entries>]" +
+             "\n      # Number of entries per Btree node; default: 128" +
+             "\n  [-density <percentage>]" +
+             "\n      # Percentage of node entries occupied; default: 80" +
+             "\n  [-overhead <bytes>]" +
+             "\n      # Overhead of non-Btree objects (log buffers, locks," +
+             "\n      # etc); default: 10% of total cache size" +
+             "\n  [-measure <environmentHomeDirectory>]" +
+             "\n      # An empty directory used to write a database to find" +
+             "\n      # the actual cache size; default: do not measure" +
+             "\n  [-measurerandom" +
+             "\n      # With -measure insert randomly generated keys;" +
+             "\n      # default: insert sequential keys");
+
+        System.exit(2);
+    }
+
+    private void calculateCacheSizes() {
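+        /*
+         * Each BIN holds nodeAvg of its nodeMax slots on average, so scale
+         * the record count up to slot count and divide into full nodes,
+         * rounding up.
+         */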
+        int nodeAvg = (nodeMax * density) / 100;
+        long nBinEntries = (records * nodeMax) / nodeAvg;
+        long nBinNodes = (nBinEntries + nodeMax - 1) / nodeMax;
+
+        long nInNodes = 0;
+	long lnSize = 0;
+
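+        /*
+         * Accumulate the node count level by level, starting at the BINs;
+         * each higher level has roughly 1/nodeMax as many nodes, up to the
+         * root.
+         */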
+        for (long n = nBinNodes; n > 0; n /= nodeMax) {
+            nInNodes += n;
+            nLevels += 1;
+        }
+
+        minInBtreeSize = nInNodes *
+	    calcInSize(nodeMax, nodeAvg, keySize, true);
+        maxInBtreeSize = nInNodes *
+	    calcInSize(nodeMax, nodeAvg, keySize, false);
+	minInCacheSize = calculateOverhead(minInBtreeSize, overhead);
+	maxInCacheSize = calculateOverhead(maxInBtreeSize, overhead);
+
+        if (dataSize >= 0) {
+            lnSize = records * calcLnSize(dataSize);
+        }
+
+	maxInBtreeSizeWithData = maxInBtreeSize + lnSize;
+	maxInCacheSizeWithData = calculateOverhead(maxInBtreeSizeWithData,
+						    overhead);
+	minInBtreeSizeWithData = minInBtreeSize + lnSize;
+	minInCacheSizeWithData = calculateOverhead(minInBtreeSizeWithData,
+						    overhead);
+    }
+
+    private void printCacheSizes(PrintStream out) {
+	
+        out.println("Inputs:" +
+                    " records=" + records +
+                    " keySize=" + keySize +
+                    " dataSize=" + dataSize +
+                    " nodeMax=" + nodeMax +
+                    " density=" + density + '%' +
+                    " overhead=" + ((overhead > 0) ? overhead : 10) + "%");
+
+        out.println();
+        out.println(HEADER);
+        out.println(line(minInBtreeSize, minInCacheSize,
+			 "Minimum, internal nodes only"));
+        out.println(line(maxInBtreeSize, maxInCacheSize,
+			 "Maximum, internal nodes only"));
+        if (dataSize >= 0) {
+            out.println(line(minInBtreeSizeWithData,
+			     minInCacheSizeWithData,
+			     "Minimum, internal nodes and leaf nodes"));
+            out.println(line(maxInBtreeSizeWithData,
+			     maxInCacheSizeWithData,
+                        "Maximum, internal nodes and leaf nodes"));
+        } else {
+            out.println("\nTo get leaf node sizing specify -data");
+        }
+
+        out.println("\nBtree levels: " + nLevels);
+    }
+
+    private int calcInSize(int nodeMax,
+			   int nodeAvg,
+			   int keySize,
+			   boolean lsnCompression) {
+
+        /* Fixed overhead */
+        int size = MemoryBudget.IN_FIXED_OVERHEAD;
+
+        /* Byte state array plus keys and nodes arrays */
+        size += MemoryBudget.byteArraySize(nodeMax) +
+                (nodeMax * (2 * MemoryBudget.OBJECT_ARRAY_ITEM_OVERHEAD));
+
+        /* LSN array */
+	if (lsnCompression) {
+	    size += MemoryBudget.byteArraySize(nodeMax * 2);
+	} else {
+	    size += MemoryBudget.ARRAY_OVERHEAD +
+                    (nodeMax * MemoryBudget.LONG_OVERHEAD);
+	}
+
+        /* Keys for populated entries plus the identifier key */
+        size += (nodeAvg + 1) * MemoryBudget.byteArraySize(keySize);
+
+        return size;
+    }
+
+    private int calcLnSize(int dataSize) {
+
+        return MemoryBudget.LN_OVERHEAD +
+               MemoryBudget.byteArraySize(dataSize);
+    }
+
+    private long calculateOverhead(long btreeSize, long overhead) {
+        long cacheSize;
+        if (overhead == 0) {
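+            /* By default the btree is 90% of the cache; overhead is 10%. */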
+            cacheSize = (100 * btreeSize) / 90;
+        } else {
+            cacheSize = btreeSize + overhead;
+        }
+	return cacheSize;
+    }
+
+    private String line(long btreeSize,
+			long cacheSize,
+			String comment) {
+
+        StringBuffer buf = new StringBuffer(100);
+
+        column(buf, INT_FORMAT.format(cacheSize));
+        column(buf, INT_FORMAT.format(btreeSize));
+        column(buf, comment);
+
+        return buf.toString();
+    }
+
+    private void column(StringBuffer buf, String str) {
+
+        int start = buf.length();
+
+        while (buf.length() - start + str.length() < COLUMN_WIDTH) {
+            buf.append(' ');
+        }
+
+        buf.append(str);
+
+        for (int i = 0; i < COLUMN_SEPARATOR; i += 1) {
+            buf.append(' ');
+        }
+    }
+
+    private static void measure(PrintStream out,
+                                File dir,
+                                long records,
+                                int keySize,
+                                int dataSize,
+                                int nodeMax,
+                                boolean randomKeys)
+        throws DatabaseException {
+
+        String[] fileNames = dir.list();
+        if (fileNames != null && fileNames.length > 0) {
+            usage("Directory is not empty: " + dir);
+        }
+
+        Environment env = openEnvironment(dir, true);
+        Database db = openDatabase(env, nodeMax, true);
+
+        try {
+            out.println("\nMeasuring with cache size: " +
+                        INT_FORMAT.format(env.getConfig().getCacheSize()));
+            insertRecords(out, env, db, records, keySize, dataSize, randomKeys);
+            printStats(out, env,
+                       "Stats for internal and leaf nodes (after insert)");
+
+            db.close();
+            env.close();
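+            /* Reopen and preload so that only internal nodes are cached. */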
+            env = openEnvironment(dir, false);
+            db = openDatabase(env, nodeMax, false);
+
+            out.println("\nPreloading with cache size: " +
+                        INT_FORMAT.format(env.getConfig().getCacheSize()));
+            preloadRecords(out, db);
+            printStats(out, env,
+                       "Stats for internal nodes only (after preload)");
+        } finally {
+            try {
+                db.close();
+                env.close();
+            } catch (Exception e) {
+                out.println("During close: " + e);
+            }
+        }
+    }
+
+    private static Environment openEnvironment(File dir, boolean allowCreate)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(allowCreate);
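+        /* Give the cache most of the heap so the measured btree can fit. */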
+        envConfig.setCachePercent(90);
+        return new Environment(dir, envConfig);
+    }
+
+    private static Database openDatabase(Environment env, int nodeMax,
+                                         boolean allowCreate)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(allowCreate);
+        dbConfig.setNodeMaxEntries(nodeMax);
+        return env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private static void insertRecords(PrintStream out,
+                                      Environment env,
+                                      Database db,
+                                      long records,
+                                      int keySize,
+                                      int dataSize,
+                                      boolean randomKeys)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+        BigInteger bigInt = BigInteger.ZERO;
+        Random rnd = new Random(123);
+
+        for (int i = 0; i < records; i += 1) {
+
+            if (randomKeys) {
+                byte[] a = new byte[keySize];
+                rnd.nextBytes(a);
+                key.setData(a);
+            } else {
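+                /*
+                 * Sequential keys: the counter's big-endian bytes, left-
+                 * padded with zeros to exactly keySize bytes.
+                 */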
+                bigInt = bigInt.add(BigInteger.ONE);
+                byte[] a = bigInt.toByteArray();
+                if (a.length < keySize) {
+                    byte[] a2 = new byte[keySize];
+                    System.arraycopy(a, 0, a2, a2.length - a.length, a.length);
+                    a = a2;
+                } else if (a.length > keySize) {
+                    out.println("*** Key doesn't fit value=" + bigInt +
+                                " byte length=" + a.length);
+                    return;
+                }
+                key.setData(a);
+            }
+
+            OperationStatus status = db.putNoOverwrite(null, key, data);
+            if (status == OperationStatus.KEYEXIST && randomKeys) {
+                i -= 1;
+                out.println("Random key already exists -- retrying");
+                continue;
+            }
+            if (status != OperationStatus.SUCCESS) {
+                out.println("*** " + status);
+                return;
+            }
+
+            if (i % 10000 == 0) {
+                EnvironmentStats stats = env.getStats(null);
+                if (stats.getNNodesScanned() > 0) {
+                    out.println("*** Ran out of cache memory at record " + i +
+                                " -- try increasing the Java heap size ***");
+                    return;
+                }
+                out.print(".");
+                out.flush();
+            }
+        }
+    }
+
+    private static void preloadRecords(final PrintStream out,
+                                       final Database db)
+        throws DatabaseException {
+
+        Thread thread = new Thread() {
+            public void run() {
+                while (true) {
+                    try {
+                        out.print(".");
+                        out.flush();
+                        Thread.sleep(5 * 1000);
+                    } catch (InterruptedException e) {
+                        break;
+                    }
+                }
+            }
+        };
+        thread.start();
+        /* Use default values for preload */
+        db.preload(null); 
+        thread.interrupt();
+        try {
+            thread.join();
+        } catch (InterruptedException e) {
+            e.printStackTrace(out);
+        }
+    }
+
+    private static void printStats(PrintStream out,
+                                   Environment env,
+                                   String msg)
+        throws DatabaseException {
+
+        out.println();
+        out.println(msg + ':');
+
+        EnvironmentStats stats = env.getStats(null);
+
+        out.println("CacheSize=" +
+                    INT_FORMAT.format(stats.getCacheTotalBytes()) +
+                    " BtreeSize=" +
+                    INT_FORMAT.format(stats.getDataBytes()) +
+                    " NCacheMiss=" +
+                    INT_FORMAT.format(stats.getNCacheMiss()));
+
+        if (stats.getNNodesScanned() > 0) {
+            out.println("*** All records did not fit in the cache ***");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbDump.java b/src/com/sleepycat/je/util/DbDump.java
new file mode 100644
index 0000000000000000000000000000000000000000..5575e3f7f3194df18ec7fb48eb0caa3abe57f730
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbDump.java
@@ -0,0 +1,384 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbDump.java,v 1.56.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Iterator;
+import java.util.List;
+import java.util.logging.Level;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Dump the contents of a database. This utility may be used programmatically
+ * or from the command line.
+ *<pre>
+ * java com.sleepycat.je.util.DbDump
+ *   -h &lt;dir&gt;           # environment home directory
+ *  [-f &lt;fileName&gt;]     # output file, for non -rR dumps
+ *  [-l]                # list databases in the environment
+ *  [-p]                # output printable characters
+ *  [-r]                # salvage mode
+ *  [-R]                # aggressive salvage mode
+ *  [-d] &lt;directory&gt;    # directory for *.dump files (salvage mode)
+ *  [-s &lt;databaseName&gt;] # database to dump
+ *  [-v]                # verbose in salvage mode
+ *  [-V]                # print JE version number
+ *</pre>
+ * See {@link DbDump#main} for a full description of the
+ * command line arguments. To dump a database to a stream in code:
+ *
+ * <pre>
+ *    DbDump dump = new DbDump(env, databaseName, outputStream,
+ *                             formatUsingPrintable);
+ *    dump.dump();
+ * </pre>
+ *
+ *<p>
+ * Because a <code>DATA=END</code> marker is used to terminate the dump of
+ * each database, multiple databases can be dumped and loaded using a single
+ * stream. The {@link DbDump#dump} method leaves the stream positioned after
+ * the last line written and the {@link DbLoad#load} method leaves the stream
+ * positioned after the last line read.</p>
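+ *
+ * <p>An illustrative dump in bytevalue format (the two hex records are
+ * made-up examples, not output from a real database):</p>
+ * <pre>
+ *    VERSION=3
+ *    format=bytevalue
+ *    type=btree
+ *    dupsort=0
+ *    HEADER=END
+ *     6b6579
+ *     64617461
+ *    DATA=END
+ * </pre>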
+ */
+public class DbDump {
+    private static final int VERSION = 3;
+
+    protected File envHome = null;
+    protected Environment env;
+    protected String dbName = null;
+    protected boolean formatUsingPrintable;
+    private boolean dupSort;
+    private String outputFileName = null;
+    protected String outputDirectory = null;
+    protected PrintStream outputFile = null;
+    protected boolean doScavengerRun = false;
+    protected boolean doAggressiveScavengerRun = false;
+    protected boolean verbose = false;
+
+    private static final String usageString =
+	"usage: " + CmdUtil.getJavaCommand(DbDump.class) + "\n" +
+        "  -h <dir> # environment home directory\n" +
+        "  [-f <fileName>]     # output file, for non -rR dumps\n" +
+        "  [-l]                # list databases in the environment\n" +
+        "  [-p]                # output printable characters\n" +
+        "  [-r]                # salvage mode\n" +
+        "  [-R]                # aggressive salvage mode\n" +
+        "  [-d] <directory>    # directory for *.dump files (salvage mode)\n" +
+        "  [-s <databaseName>] # database to dump\n" +
+        "  [-v]                # verbose in salvage mode\n" +
+        "  [-V]                # print JE version number\n";
+
+    private DbDump() {
+    }
+
+    /**
+     * @deprecated Please use the 4-arg ctor without outputDirectory instead.
+     */
+    public DbDump(Environment env,
+		  String dbName,
+		  PrintStream outputFile,
+		  String outputDirectory,
+		  boolean formatUsingPrintable) {
+	init(env, dbName, outputFile, formatUsingPrintable);
+    }
+
+    /**
+     * Create a DbDump object for a specific environment and database.
+     *
+     * @param env The Environment containing the database to dump.
+     * @param dbName The name of the database to dump.
+     * @param outputFile The output stream to dump the database to.
+     * @param formatUsingPrintable true if the dump should use printable
+     * characters.
+     */
+    public DbDump(Environment env,
+		  String dbName,
+		  PrintStream outputFile,
+		  boolean formatUsingPrintable) {
+	init(env, dbName, outputFile, formatUsingPrintable);
+    }
+
+    private void init(Environment env,
+		      String dbName,
+		      PrintStream outputFile,
+		      boolean formatUsingPrintable) {
+        try {
+            this.envHome = env.getHome();
+        } catch (DatabaseException e) {
+            IllegalArgumentException iae = new IllegalArgumentException();
+            iae.initCause(e);
+            throw iae;
+        }
+	this.env = env;
+	this.dbName = dbName;
+	this.outputFile = outputFile;
+	this.formatUsingPrintable = formatUsingPrintable;
+    }
+
+    /**
+     * The main used by the DbDump utility.
+     *
+     * @param argv The arguments accepted by the DbDump utility.
+     *
+     * <pre>
+     * usage: java { com.sleepycat.je.util.DbDump | -jar
+     * je-&lt;version&gt;.jar DbDump }
+     *             [-f output-file] [-l] [-p] [-V]
+     *             [-s database] -h dbEnvHome [-rR] [-v]
+     *             [-d directory]
+     * </pre>
+     *
+     * <p>
+     * -f - the file to dump to.<br>
+     * -l - list the databases in the environment.<br>
+     * -p - If characters in either the key or data items
+     * are printing characters (as defined by isprint(3)), use printing
+     * characters in the file to represent them. This option permits users to
+     * use standard text editors and tools to modify the contents of
+     * databases.<br>
+     * -V - display the version of the JE library.<br>
+     * -s database - the database to dump.<br>
+     * -h dbEnvHome - the directory containing the database environment.<br>
+     * -r - Salvage data from a possibly corrupt file. When used on an
+     * uncorrupted database, this option should return equivalent data to a
+     * normal dump, but most likely in a different order. The data for each
+     * database is saved into &lt;databaseName&gt;.dump files in the current
+     * directory. <br>
+     * -d directory - the output directory for *.dump files (salvage mode)
+     * <br>
+     * -R - Aggressively salvage data from a possibly corrupt file. The -R flag
+     * differs from the -r option in that it will return all possible data
+     * from the file at the risk of also returning already deleted or
+     * otherwise nonsensical items. Data dumped in this fashion will almost
+     * certainly have to be edited by hand or other means before the data is
+     * ready for reload into another database. The data for each database is
+     * saved into &lt;databaseName&gt;.dump files in the current directory.
+     * <br>
+     * -v - print progress information to stdout for -r or -R mode. <br></p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public static void main(String argv[])
+	throws DatabaseException, IOException {
+
+	DbDump dumper = new DbDump();
+	boolean listDbs = dumper.parseArgs(argv);
+	if (dumper.doScavengerRun) {
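+	    /* Salvage mode hands off to DbScavenger, reusing parsed options. */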
+	    dumper.openEnv(false);
+	    dumper = new DbScavenger(dumper.env,
+				     dumper.outputDirectory,
+                                     dumper.formatUsingPrintable,
+                                     dumper.doAggressiveScavengerRun,
+                                     dumper.verbose);
+	    ((DbScavenger) dumper).setDumpCorruptedBounds(true);
+	}
+
+	if (listDbs) {
+	    dumper.listDbs();
+	    System.exit(0);
+	}
+
+	try {
+	    dumper.dump();
+	} catch (Throwable T) {
+	    T.printStackTrace();
+	} finally {
+	    dumper.env.close();
+	    if (dumper.outputFile != null &&
+		dumper.outputFile != System.out) {
+		dumper.outputFile.close();
+	    }
+	}
+    }
+
+    private void listDbs()
+	throws DatabaseException {
+
+	openEnv(true);
+
+	List<String> dbNames = env.getDatabaseNames();
+	Iterator<String> iter = dbNames.iterator();
+	while (iter.hasNext()) {
+	    String name = iter.next();
+	    System.out.println(name);
+	}
+    }
+
+    protected void printUsage(String msg) {
+	System.err.println(msg);
+	System.err.println(usageString);
+	System.exit(-1);
+    }
+
+    protected boolean parseArgs(String argv[])
+	throws IOException {
+
+	int argc = 0;
+	int nArgs = argv.length;
+	boolean listDbs = false;
+	while (argc < nArgs) {
+	    String thisArg = argv[argc++];
+	    if (thisArg.equals("-p")) {
+		formatUsingPrintable = true;
+	    } else if (thisArg.equals("-V")) {
+		System.out.println(JEVersion.CURRENT_VERSION);
+		System.exit(0);
+	    } else if (thisArg.equals("-l")) {
+		listDbs = true;
+	    } else if (thisArg.equals("-r")) {
+		doScavengerRun = true;
+	    } else if (thisArg.equals("-R")) {
+		doScavengerRun = true;
+		doAggressiveScavengerRun = true;
+	    } else if (thisArg.equals("-f")) {
+		if (argc < nArgs) {
+		    outputFileName = argv[argc++];
+		} else {
+		    printUsage("-f requires an argument");
+		}
+	    } else if (thisArg.equals("-h")) {
+		if (argc < nArgs) {
+		    String envDir = argv[argc++];
+                    envHome = new File(envDir);
+		} else {
+		    printUsage("-h requires an argument");
+		}
+	    } else if (thisArg.equals("-d")) {
+		if (argc < nArgs) {
+		    outputDirectory = argv[argc++];
+		} else {
+		    printUsage("-d requires an argument");
+		}
+	    } else if (thisArg.equals("-s")) {
+		if (argc < nArgs) {
+		    dbName = argv[argc++];
+		} else {
+		    printUsage("-s requires an argument");
+		}
+	    } else if (thisArg.equals("-v")) {
+                verbose = true;
+            } else {
+                printUsage(thisArg + " is not a valid option.");
+	    }
+	}
+
+	if (envHome == null) {
+	    printUsage("-h is a required argument");
+	}
+
+	if (!listDbs &&
+	    !doScavengerRun) {
+	    if (dbName == null) {
+		printUsage("Must supply a database name if -l not supplied.");
+	    }
+	}
+
+	if (outputFileName == null) {
+	    outputFile = System.out;
+	} else {
+	    outputFile = new PrintStream(new FileOutputStream(outputFileName));
+	}
+
+	return listDbs;
+    }
+
+    /*
+     * Begin DbDump API.  From here on there should be no calls to printUsage,
+     * System.xxx.print, or System.exit.
+     */
+    protected void openEnv(boolean doRecovery)
+	throws DatabaseException {
+
+	if (env == null) {
+            EnvironmentConfig envConfiguration = new EnvironmentConfig();
+            envConfiguration.setReadOnly(true);
+	    /* Don't run recovery. */
+	    envConfiguration.setConfigParam
+		(EnvironmentParams.ENV_RECOVERY.getName(),
+		 doRecovery ? "true" : "false");
+	    env = new Environment(envHome, envConfiguration);
+	}
+    }
+
+    /**
+     * Perform the dump.
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public void dump()
+	throws IOException, DatabaseException {
+
+	openEnv(true);
+
+	Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+		     "DbDump.dump of " + dbName + " starting");
+
+	DatabaseEntry foundKey = new DatabaseEntry();
+	DatabaseEntry foundData = new DatabaseEntry();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setReadOnly(true);
+        DbInternal.setUseExistingConfig(dbConfig, true);
+        Database db = env.openDatabase(null, dbName, dbConfig);
+	dupSort = db.getConfig().getSortedDuplicates();
+
+	printHeader(outputFile, dupSort, formatUsingPrintable);
+
+	Cursor cursor = db.openCursor(null, null);
+	while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+	    dumpOne(outputFile, foundKey.getData(), formatUsingPrintable);
+	    dumpOne(outputFile, foundData.getData(), formatUsingPrintable);
+	}
+	cursor.close();
+	db.close();
+	outputFile.println("DATA=END");
+
+	Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+		     "DbDump.dump of " + dbName + " ending");
+    }
+
+    protected void printHeader(PrintStream o,
+			       boolean dupSort,
+			       boolean formatUsingPrintable) {
+	o.println("VERSION=" + VERSION);
+	if (formatUsingPrintable) {
+	    o.println("format=print");
+	} else {
+	    o.println("format=bytevalue");
+	}
+	o.println("type=btree");
+	o.println("dupsort=" + (dupSort ? "1" : "0"));
+	o.println("HEADER=END");
+    }
+
+    protected void dumpOne(PrintStream o, byte[] ba,
+			   boolean formatUsingPrintable) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(' ');
+        CmdUtil.formatEntry(sb, ba, formatUsingPrintable);
+        o.println(sb.toString());
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbLoad.java b/src/com/sleepycat/je/util/DbLoad.java
new file mode 100644
index 0000000000000000000000000000000000000000..8955d8bb2fecd30fe8fb88d301471588cfd05516
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbLoad.java
@@ -0,0 +1,620 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbLoad.java,v 1.52.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.Date;
+import java.util.logging.Level;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Loads a database from a dump file generated by {@link DbDump}. 
+ * This utility may be used programmatically or from the command line.
+ *<pre>
+ * java com.sleepycat.je.util.DbLoad
+ *     -h &lt;dir&gt;            # environment home directory
+ *    [-f &lt;fileName&gt;]      # input file
+ *    [-n ]                # no overwrite mode
+ *    [-T]                 # input file is in text mode
+ *    [-I]                 # ignore unknown parameters
+ *    [-c name=value]      # config values
+ *    [-s &lt;databaseName&gt; ] # database to load
+ *    [-v]                 # show progress
+ *    [-V]                 # print JE version number
+ *</pre>
+ * See {@link DbLoad#main} for a full description of the
+ * command line arguments. To load a database from a stream in code:
+ *
+ * <pre>
+ *    DbLoad loader = new DbLoad();
+ *    loader.setEnv(env);
+ *    loader.setDbName(dbName);
+ *    loader.setInputReader(reader);
+ *    loader.setNoOverwrite(noOvrwr);
+ *    loader.setTextFileMode(tfm);
+ *    loader.load();
+ * </pre>
+ *
+ * <p>Because a <code>DATA=END</code> marker is used to terminate the dump of
+ * each database, multiple databases can be dumped and loaded using a single
+ * stream. The {@link DbDump#dump} method leaves the stream positioned after
+ * the last line written and the {@link DbLoad#load} method leaves the stream
+ * positioned after the last line read.</p>
+ */
+public class DbLoad {
+    private static final boolean DEBUG = false;
+
+    protected Environment env;
+    private boolean formatUsingPrintable;
+    private String dbName;
+    private BufferedReader reader;
+    private boolean noOverwrite;
+    private boolean textFileMode;
+    private boolean dupSort;
+    private boolean ignoreUnknownConfig;
+    private boolean commandLine;
+    private long progressInterval;
+    private long totalLoadBytes;
+
+    private static final String usageString =
+	"usage: " + CmdUtil.getJavaCommand(DbLoad.class) + "\n" +
+        "       -h <dir>             # environment home directory\n" +
+        "       [-f <fileName>]      # input file\n" +
+        "       [-n ]                # no overwrite mode\n" +
+        "       [-T]                 # input file is in text mode\n" +
+        "       [-I]                 # ignore unknown parameters\n" +
+	"       [-c name=value]      # config values\n" +
+        "       [-s <databaseName> ] # database to load\n" +
+        "       [-v]                 # show progress\n" +
+        "       [-V]                 # print JE version number";
+
+    /**
+     * The main used by the DbLoad utility.
+     *
+     * @param argv The arguments accepted by the DbLoad utility.
+     *
+     * <pre>
+     * usage: java { com.sleepycat.je.util.DbLoad | -jar
+     * je-&lt;version&gt;.jar DbLoad }
+     *             [-f input-file] [-n] [-V] [-v] [-T] [-I]
+     *             [-c name=value]
+     *             [-s database] -h dbEnvHome
+     * </pre>
+     *
+     * <p>-f - the file to load from (in DbDump format)<br>
+     * -n - no overwrite mode.  Do not overwrite existing data.<br>
+     * -V - display the version of the JE library.<br>
+     * -T - input file is in Text mode.<br>
+     * -I - ignore unknown parameters in the config file.<br></p>
+     *
+     * <p>The -T option allows JE applications to easily load text files into
+     * databases.</p>
+     *
+     * <p>The -I option allows loading databases that were dumped with the
+     * Berkeley DB C product, when the dump file contains parameters not known
+     * to JE.</p>
+     *
+     * <p>The input must be paired lines of text, where the first line of the
+     * pair is the key item, and the second line of the pair is its
+     * corresponding data item.</p>
+     *
+     * <p>A simple escape mechanism, where newline and backslash (\) characters
+     * are special, is applied to the text input. Newline characters are
+     * interpreted as record separators. Backslash characters in the text will
+     * be interpreted in one of two ways: If the backslash character precedes
+     * another backslash character, the pair will be interpreted as a literal
+     * backslash. If the backslash character precedes any other character, the
+     * two characters following the backslash will be interpreted as a
+     * hexadecimal specification of a single character; for example, \0a is a
+     * newline character in the ASCII character set.</p>
+     *
+     * <p>For this reason, any backslash or newline characters that naturally
+     * occur in the text input must be escaped to avoid misinterpretation by
+     * db_load.</p>
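+     *
+     * <p>For example, the following text-mode input describes two records
+     * and must still end with the DATA=END marker; the values are
+     * illustrative, and the last data item decodes to "a\b":</p>
+     * <pre>
+     * color
+     * blue
+     * path
+     * a\\b
+     * DATA=END
+     * </pre>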
+     *
+     * <p>-c name=value - Specify configuration options, overriding any value
+     * they may have in the input. The command-line format is name=value. See
+     * the Supported Keywords section below for a list of keywords supported
+     * by the -c option.</p>
+     *
+     * <p>-s database - the database to load.<br>
+     * -h dbEnvHome - the directory containing the database environment.<br>
+     * -v - report progress <br></p>
+     *
+     * <p>Supported Keywords<br>
+     * version=N - specify the version of the input file.  Currently only
+     * version 3 is supported.<br>
+     * format - specify the format of the file.  Allowable values are "print"
+     * and "bytevalue".<br>
+     * dupsort - specify whether the database allows duplicates or not.
+     * Allowable values are "true" and "false".<br>
+     * type - specifies the type of database.  Only "btree" is allowed.<br>
+     * database - specifies the name of the database to be loaded.<br></p>
+     *
+     * @throws DatabaseException if a failure occurs.
+     */
+    public static void main(String argv[])
+	throws DatabaseException, IOException {
+
+	DbLoad loader = parseArgs(argv);
+
+        try {
+            loader.load();
+        } catch (Throwable e) {
+            e.printStackTrace();
+        }
+
+	loader.env.close();
+    }
+
+    private static void printUsage(String msg) {
+	System.err.println(msg);
+	System.err.println(usageString);
+	System.exit(-1);
+    }
+
+    private static DbLoad parseArgs(String argv[])
+	throws IOException, DatabaseException {
+
+	boolean noOverwrite = false;
+	boolean textFileMode = false;
+	boolean ignoreUnknownConfig = false;
+	boolean showProgressInterval = false;
+
+	int argc = 0;
+	int nArgs = argv.length;
+	String inputFileName = null;
+	File envHome = null;
+	String dbName = null;
+        long progressInterval = 0;
+	DbLoad ret = new DbLoad();
+        ret.setCommandLine(true);
+
+	while (argc < nArgs) {
+	    String thisArg = argv[argc++].trim();
+	    if (thisArg.equals("-n")) {
+		noOverwrite = true;
+	    } else if (thisArg.equals("-T")) {
+		textFileMode = true;
+	    } else if (thisArg.equals("-I")) {
+		ignoreUnknownConfig = true;
+	    } else if (thisArg.equals("-V")) {
+		System.out.println(JEVersion.CURRENT_VERSION);
+		System.exit(0);
+	    } else if (thisArg.equals("-f")) {
+		if (argc < nArgs) {
+		    inputFileName = argv[argc++];
+		} else {
+		    printUsage("-f requires an argument");
+		}
+	    } else if (thisArg.equals("-h")) {
+		if (argc < nArgs) {
+		    envHome = new File(argv[argc++]);
+		} else {
+		    printUsage("-h requires an argument");
+		}
+	    } else if (thisArg.equals("-s")) {
+		if (argc < nArgs) {
+		    dbName = argv[argc++];
+		} else {
+		    printUsage("-s requires an argument");
+		}
+	    } else if (thisArg.equals("-c")) {
+		if (argc < nArgs) {
+                    try {
+                        ret.loadConfigLine(argv[argc++]);
+                    } catch (IllegalArgumentException e) {
+                        printUsage("-c: " + e.getMessage());
+                    }
+		} else {
+		    printUsage("-c requires an argument");
+		}
+	    } else if (thisArg.equals("-v")) {
+		showProgressInterval = true;
+            }
+	}
+
+	if (envHome == null) {
+	    printUsage("-h is a required argument");
+	}
+
+	long totalLoadBytes = 0;
+	InputStream is;
+	if (inputFileName == null) {
+	    is = System.in;
+            if (showProgressInterval) {
+
+                /*
+                 * Can't show progress if we don't know how big the stream
+                 * is.
+                 */
+                printUsage("-v requires -f");
+            }
+	} else {
+	    is = new FileInputStream(inputFileName);
+	    if (showProgressInterval) {
+		totalLoadBytes = ((FileInputStream) is).getChannel().size();
+		/* Use 5% intervals. */
+		progressInterval = totalLoadBytes / 20;
+	    }
+	}
+	BufferedReader reader = new BufferedReader(new InputStreamReader(is));
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+	Environment env = new Environment(envHome, envConfig);
+	ret.setEnv(env);
+	ret.setDbName(dbName);
+	ret.setInputReader(reader);
+	ret.setNoOverwrite(noOverwrite);
+	ret.setTextFileMode(textFileMode);
+	ret.setIgnoreUnknownConfig(ignoreUnknownConfig);
+        ret.setProgressInterval(progressInterval);
+        ret.setTotalLoadBytes(totalLoadBytes);
+	return ret;
+    }
+
+    /*
+     * Begin DbLoad API.  From here on there should be no calls to printUsage,
+     * System.xxx.print, or System.exit.
+     */
+
+    /**
+     * Creates a DbLoad object.
+     */
+    public DbLoad() {
+    }
+
+    /**
+     * If true, enables output of warning messages.  Command line behavior is
+     * not available via the public API.
+     */
+    private void setCommandLine(boolean commandLine) {
+        this.commandLine = commandLine;
+    }
+
+    /**
+     * Sets the Environment to load from.
+     *
+     * @param env The environment.
+     */
+    public void setEnv(Environment env) {
+	this.env = env;
+    }
+
+    /**
+     * Sets the database name to load.
+     *
+     * @param dbName database name
+     */
+    public void setDbName(String dbName) {
+	this.dbName = dbName;
+    }
+
+    /**
+     * Sets the BufferedReader to load from.
+     *
+     * @param reader The BufferedReader.
+     */
+    public void setInputReader(BufferedReader reader) {
+	this.reader = reader;
+    }
+
+    /**
+     * Sets whether the load should overwrite existing data or not.
+     *
+     * @param noOverwrite True if existing data should not be overwritten.
+     */
+    public void setNoOverwrite(boolean noOverwrite) {
+	this.noOverwrite = noOverwrite;
+    }
+
+    /**
+     * Sets whether the load data is in text file format.
+     *
+     * @param textFileMode True if the load data is in text file format.
+     */
+    public void setTextFileMode(boolean textFileMode) {
+	this.textFileMode = textFileMode;
+    }
+
+    /**
+     * Sets whether to ignore unknown parameters in the config file. This
+     * allows loading databases that were dumped with the Berkeley DB C
+     * product, when the dump file contains parameters not known to JE.
+     *
+     * @param ignoreUnknownConfigMode True to ignore unknown parameters in
+     * the config file.
+     */
+    public void setIgnoreUnknownConfig(boolean ignoreUnknownConfigMode) {
+	this.ignoreUnknownConfig = ignoreUnknownConfigMode;
+    }
+
+    /**
+     * If progressInterval is set, progress status messages are generated to
+     * stdout at set percentages of the load.
+     *
+     * @param progressInterval Specifies the percentage intervals for status
+     * messages. If 0, no messages are generated.
+     */
+    public void setProgressInterval(long progressInterval) {
+        this.progressInterval = progressInterval;
+    }
+
+    /**
+     * Used for progress status messages. Must be set to greater than
+     * 0 if the progressInterval is greater than 0.
+     *
+     * @param totalLoadBytes number of input bytes to be loaded.
+     */
+    public void setTotalLoadBytes(long totalLoadBytes) {
+        this.totalLoadBytes = totalLoadBytes;
+    }
+
+    public boolean load()
+	throws IOException, DatabaseException {
+
+	Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+		     "DbLoad.load of " + dbName + " starting");
+
+        if (progressInterval > 0) {
+            System.out.println("Load start: " + new Date());
+        }
+
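+        /* Text-mode input has no header and always uses printable format. */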
+        if (textFileMode) {
+            formatUsingPrintable = true;
+        } else {
+            loadHeader();
+        }
+
+        if (dbName == null) {
+            throw new IllegalArgumentException
+                ("Must supply a database name if -l not supplied.");
+        }
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(dupSort);
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, dbName, dbConfig);
+
+        loadData(db);
+
+        db.close();
+
+        Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+                     "DbLoad.load of " + dbName + " ending.");
+
+        if (progressInterval > 0) {
+            System.out.println("Load end: " + new Date());
+        }
+
+        return true;
+    }
+
+    private void loadConfigLine(String line)
+	throws DatabaseException {
+
+	int equalsIdx = line.indexOf('=');
+	if (equalsIdx < 0) {
+	    throw new IllegalArgumentException
+                ("Invalid header parameter: " + line);
+	}
+
+	String keyword = line.substring(0, equalsIdx).trim().toLowerCase();
+	String value = line.substring(equalsIdx + 1).trim();
+
+	if (keyword.equals("version")) {
+	    if (DEBUG) {
+		System.out.println("Found version: " + line);
+	    }
+	    if (!value.equals("3")) {
+		throw new IllegalArgumentException
+                    ("Version " + value + " is not supported.");
+	    }
+	} else if (keyword.equals("format")) {
+	    value = value.toLowerCase();
+	    if (value.equals("print")) {
+		formatUsingPrintable = true;
+	    } else if (value.equals("bytevalue")) {
+		formatUsingPrintable = false;
+	    } else {
+		throw new IllegalArgumentException
+		    (value + " is an unknown value for the format keyword");
+	    }
+	    if (DEBUG) {
+		System.out.println("Found format: " + formatUsingPrintable);
+	    }
+	} else if (keyword.equals("dupsort")) {
+	    value = value.toLowerCase();
+	    if (value.equals("true") ||
+		value.equals("1")) {
+		dupSort = true;
+	    } else if (value.equals("false") ||
+		       value.equals("0")) {
+		dupSort = false;
+	    } else {
+		throw new IllegalArgumentException
+		    (value + " is an unknown value for the dupsort keyword");
+	    }
+	    if (DEBUG) {
+		System.out.println("Found dupsort: " + dupSort);
+	    }
+	} else if (keyword.equals("type")) {
+	    value = value.toLowerCase();
+	    if (!value.equals("btree")) {
+		throw new IllegalArgumentException
+                    (value + " is not a supported database type.");
+	    }
+	    if (DEBUG) {
+		System.out.println("Found type: " + line);
+	    }
+	} else if (keyword.equals("database")) {
+	    if (dbName == null) {
+		dbName = value;
+	    }
+	    if (DEBUG) {
+		System.out.println("DatabaseImpl: " + dbName);
+	    }
+	} else if (!ignoreUnknownConfig) {
+	    throw new IllegalArgumentException
+                ("'" + line + "' is not understood.");
+	}
+    }
+
+    private void loadHeader()
+	throws IOException, DatabaseException {
+
+	if (DEBUG) {
+	    System.out.println("loading header");
+	}
+	String line = reader.readLine();
+	while (line != null &&
+	       !line.equals("HEADER=END")) {
+	    loadConfigLine(line);
+	    line = reader.readLine();
+	}
+    }
+
+    private void loadData(Database db)
+	throws DatabaseException, IOException {
+
+	String keyLine = reader.readLine();
+	String dataLine = null;
+        int count = 0;
+	long totalBytesRead = 0;
+        long lastTime = System.currentTimeMillis();
+	long bytesReadThisInterval = 0;
+
+	while (keyLine != null &&
+	       !keyLine.equals("DATA=END")) {
+	    dataLine = reader.readLine();
+            if (dataLine == null) {
+                throw new DatabaseException("No data to match key " +
+                                            keyLine);
+            }
+	    /* Add one for \n or \r. */
+	    bytesReadThisInterval += dataLine.length() + 1;
+	    byte[] keyBytes = loadLine(keyLine.trim());
+	    byte[] dataBytes = loadLine(dataLine.trim());
+
+	    DatabaseEntry key = new DatabaseEntry(keyBytes);
+	    DatabaseEntry data = new DatabaseEntry(dataBytes);
+
+	    if (noOverwrite) {
+		if (db.putNoOverwrite(null, key, data) ==
+		    OperationStatus.KEYEXIST) {
+                    /* Calling println is OK only from command line. */
+                    if (commandLine) {
+                        System.err.println("Key exists: " + key);
+                    }
+		}
+	    } else {
+		db.put(null, key, data);
+	    }
+
+            count++;
+            if ((progressInterval > 0) &&
+		(bytesReadThisInterval > progressInterval)) {
+		totalBytesRead += bytesReadThisInterval;
+		bytesReadThisInterval -= progressInterval;
+                long now = System.currentTimeMillis();
+                System.out.println("loaded " + count + " records  " +
+                                   (now - lastTime) + " ms - % completed: " +
+				   ((100 * totalBytesRead) / totalLoadBytes));
+                lastTime = now;
+            }
+
+	    keyLine = reader.readLine();
+	    if (keyLine == null) {
+		throw new DatabaseException("No \"DATA=END\"");
+	    }
+	    bytesReadThisInterval += keyLine.length() + 1;
+	}
+    }
+
+    private byte[] loadLine(String line)
+	throws DatabaseException {
+
+	if (formatUsingPrintable) {
+	    return readPrintableLine(line);
+	}
+	int nBytes = line.length() / 2;
+	byte[] ret = new byte[nBytes];
+	int charIdx = 0;
+	for (int i = 0; i < nBytes; i++, charIdx += 2) {
+	    int b2 = Character.digit(line.charAt(charIdx), 16);
+	    b2 <<= 4;
+	    b2 += Character.digit(line.charAt(charIdx + 1), 16);
+	    ret[i] = (byte) b2;
+	}
+	return ret;
+    }
+
+    private static final byte backSlashValue = (byte) '\\';
+
+    private byte[] readPrintableLine(String line)
+	throws DatabaseException {
+
+	/* nBytes is the max number of bytes that this line could turn into. */
+	int maxNBytes = line.length();
+	byte[] ba = new byte[maxNBytes];
+	int actualNBytes = 0;
+
+	for (int charIdx = 0; charIdx < maxNBytes; charIdx++) {
+	    char c = line.charAt(charIdx);
+	    if (c == '\\') {
+		if (++charIdx < maxNBytes) {
+		    char c1 = line.charAt(charIdx);
+		    if (c1 == '\\') {
+			ba[actualNBytes++] = backSlashValue;
+		    } else {
+			if (++charIdx < maxNBytes) {
+			    char c2 = line.charAt(charIdx);
+			    int b = Character.digit(c1, 16);
+			    b <<= 4;
+			    b += Character.digit(c2, 16);
+			    ba[actualNBytes++] = (byte) b;
+			} else {
+			    throw new DatabaseException("Corrupted file");
+			}
+		    }
+		} else {
+		    throw new DatabaseException("Corrupted file");
+		}
+	    } else {
+		ba[actualNBytes++] = (byte) (c & 0xff);
+	    }
+	}
+
+	if (maxNBytes == actualNBytes) {
+	    return ba;
+	} else {
+	    byte[] ret = new byte[actualNBytes];
+	    System.arraycopy(ba, 0, ret, 0, actualNBytes);
+	    return ret;
+	}
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbPrintLog.java b/src/com/sleepycat/je/util/DbPrintLog.java
new file mode 100644
index 0000000000000000000000000000000000000000..63c48459ce3378dfb8f7168918a68c9fb7a0ca83
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbPrintLog.java
@@ -0,0 +1,208 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbPrintLog.java,v 1.46.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.DumpFileReader;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.PrintFileReader;
+import com.sleepycat.je.log.StatsFileReader;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Dumps the contents of the log in XML format to System.out.
+ *
+ * <p>To print an environment log:</p>
+ *
+ * <pre>
+ *      DbPrintLog.main(argv);
+ * </pre>
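+ *
+ * <p>Or from the command line (the environment directory and file numbers
+ * below are only examples):</p>
+ * <pre>
+ *      java com.sleepycat.je.util.DbPrintLog -h /my/env -s 0 -e 2
+ * </pre>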
+ */
+public class DbPrintLog {
+
+    /**
+     * Dump a JE log into human readable form.
+     */
+    private void dump(File envHome,
+		      String entryTypes,
+		      String txnIds,
+		      long startLsn,
+		      long endLsn,
+		      boolean verbose,
+                      boolean stats)
+        throws IOException, DatabaseException {
+
+        EnvironmentImpl env =
+	    CmdUtil.makeUtilityEnvironment(envHome, true);
+        FileManager fileManager = env.getFileManager();
+        fileManager.setIncludeDeletedFiles(true);
+        int readBufferSize =
+            env.getConfigManager().getInt
+            (EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+
+        /* Make a reader. */
+        DumpFileReader reader = null;
+        if (stats) {
+            reader = new StatsFileReader(env, readBufferSize, startLsn, endLsn,
+                                         entryTypes, txnIds, verbose);
+        } else {
+            reader =  new PrintFileReader(env, readBufferSize, startLsn,
+					  endLsn, entryTypes, txnIds, verbose);
+        }
+
+        /* Enclose the output in a tag to keep proper XML syntax. */
+        System.out.println("<DbPrintLog>");
+        while (reader.readNextEntry()) {
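+            /*
+             * The reader prints or tallies each entry as a side effect, so
+             * the loop body is intentionally empty.
+             */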
+        }
+        reader.summarize();
+        System.out.println("</DbPrintLog>");
+        env.close();
+    }
+
+    /**
+     * The main used by the DbPrintLog utility.
+     *
+     * @param argv An array of command line arguments to the DbPrintLog
+     * utility.
+     *
+     * <pre>
+     * usage: java { com.sleepycat.je.util.DbPrintLog | -jar
+     * je-&lt;version&gt;.jar DbPrintLog }
+     *  -h &lt;envHomeDir&gt;
+     *  -e  &lt;end file number or LSN, in hex&gt;
+     *  -k  &lt;binary|hex|text|obfuscate&gt; (format for dumping the key)
+     *  -s  &lt;start file number or LSN, in hex&gt;
+     *  -tx &lt;targeted txn ids, comma separated&gt;
+     *  -ty &lt;targeted entry types, comma separated&gt;
+     *  -S  show a summary of the log entries
+     *  -q  print the concise version of each entry
+     *      (default: the full, verbose version)
+     * All arguments are optional
+     * </pre>
+     */
+    public static void main(String[] argv) {
+        try {
+            int whichArg = 0;
+            String entryTypes = null;
+            String txnIds = null;
+            long startLsn = DbLsn.NULL_LSN;
+            long endLsn = DbLsn.NULL_LSN;
+            boolean verbose = true;
+            boolean stats = false;
+
+            /* Default to looking in current directory. */
+            File envHome = new File(".");
+            Key.DUMP_TYPE = DumpType.BINARY;
+
+            while (whichArg < argv.length) {
+                String nextArg = argv[whichArg];
+                if (nextArg.equals("-h")) {
+                    whichArg++;
+                    envHome = new File(CmdUtil.getArg(argv, whichArg));
+                } else if (nextArg.equals("-ty")) {
+                    whichArg++;
+                    entryTypes = CmdUtil.getArg(argv, whichArg);
+                } else if (nextArg.equals("-tx")) {
+                    whichArg++;
+                    txnIds = CmdUtil.getArg(argv, whichArg);
+                } else if (nextArg.equals("-s")) {
+                    whichArg++;
+		    String arg = CmdUtil.getArg(argv, whichArg);
+		    int slashOff = arg.indexOf("/");
+		    if (slashOff < 0) {
+			long startFileNum = CmdUtil.readLongNumber(arg);
+			startLsn = DbLsn.makeLsn(startFileNum, 0);
+		    } else {
+			long startFileNum =
+			    CmdUtil.readLongNumber(arg.substring(0, slashOff));
+			long startOffset = CmdUtil.readLongNumber
+			    (arg.substring(slashOff + 1));
+			startLsn = DbLsn.makeLsn(startFileNum, startOffset);
+		    }
+                } else if (nextArg.equals("-e")) {
+                    whichArg++;
+		    String arg = CmdUtil.getArg(argv, whichArg);
+		    int slashOff = arg.indexOf("/");
+		    if (slashOff < 0) {
+			long endFileNum = CmdUtil.readLongNumber(arg);
+			endLsn = DbLsn.makeLsn(endFileNum, 0);
+		    } else {
+			long endFileNum =
+			    CmdUtil.readLongNumber(arg.substring(0, slashOff));
+			long endOffset = CmdUtil.readLongNumber
+			    (arg.substring(slashOff + 1));
+			endLsn = DbLsn.makeLsn(endFileNum, endOffset);
+		    }
+                } else if (nextArg.equals("-k")) {
+                    whichArg++;
+                    String dumpType = CmdUtil.getArg(argv, whichArg);
+                    if (dumpType.equalsIgnoreCase("text")) {
+                        Key.DUMP_TYPE = DumpType.TEXT;
+                    } else if (dumpType.equalsIgnoreCase("hex")) {
+			Key.DUMP_TYPE = DumpType.HEX;
+                    } else if (dumpType.equalsIgnoreCase("binary")) {
+			Key.DUMP_TYPE = DumpType.BINARY;
+                    } else if (dumpType.equalsIgnoreCase("obfuscate")) {
+			Key.DUMP_TYPE = DumpType.OBFUSCATE;
+		    } else {
+			System.err.println
+			    (dumpType +
+			     " is not a supported dump format type.");
+		    }
+                } else if (nextArg.equals("-q")) {
+                    whichArg++;
+                    verbose = false;
+                } else if (nextArg.equals("-S")) {
+                    whichArg++;
+                    stats = true;
+                } else {
+		    System.err.println
+                        (nextArg + " is not a supported option.");
+                    usage();
+		    System.exit(-1);
+                }
+                whichArg++;
+            }
+
+            DbPrintLog printer = new DbPrintLog();
+            printer.dump(envHome, entryTypes, txnIds,
+			 startLsn, endLsn, verbose, stats);
+
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.out.println(e.getMessage());
+            usage();
+            System.exit(1);
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: " +
+                           CmdUtil.getJavaCommand(DbPrintLog.class));
+        System.out.println(" -h  <envHomeDir>");
+        System.out.println(" -e  <end file number or LSN, in hex>");
+        System.out.println(" -k  <binary|text|hex|obfuscate> " +
+			   "(format for dumping the key)");
+        System.out.println(" -s  <start file number or LSN, in hex>");
+        System.out.println(" -tx <targetted txn ids, comma separated>");
+        System.out.println(" -ty <targetted entry types, comma separated>");
+        System.out.println(" -S  show Summary of log entries");
+        System.out.println(" -q  if specified, concise version is printed");
+        System.out.println("     (Default is verbose version.)");
+        System.out.println("All arguments are optional");
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbRecover.java b/src/com/sleepycat/je/util/DbRecover.java
new file mode 100644
index 0000000000000000000000000000000000000000..a3036ddb68ac55f9d73363a16a6003c9c9b3709a
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbRecover.java
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbRecover.java,v 1.17.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * DbRecover is a utility that allows the user to resume use of the environment
+ * from a given time point. Not for general use yet!
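+ *
+ * Example invocation (hypothetical values; truncates the log at file 0x3,
+ * offset 0x1f00):
+ *
+ *   java com.sleepycat.je.util.DbRecover -h /env/home -f 0x3 -o 0x1f00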
+ */
+public class DbRecover {
+
+    public static void main(String[] argv) {
+        try {
+            int whichArg = 0;
+            boolean seenFile = false;
+            boolean seenOffset = false;
+
+            long truncateFileNum = -1;
+            long truncateOffset = -1;
+
+            /*
+             * Usage: -h <envHomeDir>
+             *        -f <file number, in hex>
+             *        -o <offset, in hex. The log is truncated at the position
+             *            including this offset>
+             */
+            File envHome = new File("."); // default to current directory
+            while (whichArg < argv.length) {
+                String nextArg = argv[whichArg];
+
+                if (nextArg.equals("-h")) {
+                    whichArg++;
+                    envHome = new File(CmdUtil.getArg(argv, whichArg));
+                } else if (nextArg.equals("-f")) {
+                    whichArg++;
+                    truncateFileNum =
+                        CmdUtil.readLongNumber(CmdUtil.getArg(argv, whichArg));
+                    seenFile = true;
+                } else if (nextArg.equals("-o")) {
+                    whichArg++;
+                    truncateOffset =
+                        CmdUtil.readLongNumber(CmdUtil.getArg(argv, whichArg));
+                    seenOffset = true;
+                } else {
+                    throw new IllegalArgumentException
+                        (nextArg + " is not a supported option.");
+                }
+                whichArg++;
+            }
+
+            if ((!seenFile) || (!seenOffset)) {
+                usage();
+                System.exit(1);
+            }
+
+            /* Make a read/write environment */
+            EnvironmentImpl env =
+		CmdUtil.makeUtilityEnvironment(envHome, false);
+
+            /* Go through the file manager to get the JE file. Truncate. */
+            env.getFileManager().truncateLog(truncateFileNum, truncateOffset);
+
+            env.close();
+        } catch (Exception e) {
+	    e.printStackTrace();
+            System.out.println(e.getMessage());
+            usage();
+            System.exit(1);
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: " +
+                           CmdUtil.getJavaCommand(DbRecover.class));
+        System.out.println("                 -h <environment home>");
+        System.out.println("(optional)");
+        System.out.println("                 -f <file number, in hex>");
+        System.out.println("                 -o <offset, in hex>");
+        System.out.println("Log file is truncated at position starting at" +
+                           " and inclusive of the offset. Beware, not " +
+                           " for general purpose use yet!");
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbRunAction.java b/src/com/sleepycat/je/util/DbRunAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..19569ded0e2b32e6a134c5cff601921a6c19d699
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbRunAction.java
@@ -0,0 +1,401 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbRunAction.java,v 1.46.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.text.DecimalFormat;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * @hidden
+ * DbRunAction is a debugging aid that can invoke a JE operation or background
+ * activity from the command line.
+ *
+ *   batchClean calls Environment.cleanLog() in a loop
+ *   checkpoint calls Environment.checkpoint() with force=true
+ *   compress calls Environment.compress()
+ *   evict scans (preloads) the given database; the evictMemory step is
+ *     currently unused
+ *   removeDb calls Environment.removeDatabase, but doesn't do any cleaning
+ *   removeDbAndClean calls removeDatabase, then cleanLog in a loop
+ *   activateCleaner wakes up the cleaner, and then the main thread waits
+ *     until you type "y" to the console before calling Environment.close().
+ *     The control provided by the prompt is necessary for daemon activities,
+ *     because daemon threads often check whether the environment is closed
+ *     and bail out if it is.
+ *   verifyUtilization calls CleanerTestUtils.verifyUtilization() to compare
+ *     utilization as calculated by UtilizationProfile to utilization as
+ *     calculated by UtilizationFileReader.
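+ *
+ *   Example invocation (hypothetical path, for illustration only):
+ *
+ *     java com.sleepycat.je.util.DbRunAction -h /env/home -a batchClean -stats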
+ */
+public class DbRunAction {
+
+    private static final int BATCH_CLEAN = 1;   // app-driven batch cleaning
+    private static final int COMPRESS = 2;
+    private static final int EVICT = 3;
+    private static final int CHECKPOINT = 4;
+    private static final int REMOVEDB = 5;
+    private static final int REMOVEDB_AND_CLEAN = 6;
+    private static final int ACTIVATE_CLEANER_THREADS = 7;
+                                           // wake up cleaner threads
+    private static final int VERIFY_UTILIZATION = 8;
+
+    public static void main(String[] argv) {
+
+        long recoveryStart = 0;
+        long actionStart = 0;
+        long actionEnd = 0;
+
+        try {
+            int whichArg = 0;
+            if (argv.length == 0) {
+                usage();
+                System.exit(1);
+            }
+
+            String dbName = null;
+            int doAction = 0;
+            String envHome = ".";
+            boolean readOnly = false;
+            boolean printStats = false;
+
+            while (whichArg < argv.length) {
+                String nextArg = argv[whichArg];
+
+                if (nextArg.equals("-h")) {
+                    whichArg++;
+                    envHome = CmdUtil.getArg(argv, whichArg);
+                } else if (nextArg.equals("-a")) {
+                    whichArg++;
+                    String action = CmdUtil.getArg(argv, whichArg);
+                    if (action.equalsIgnoreCase("batchClean")) {
+                        doAction = BATCH_CLEAN;
+                    } else if (action.equalsIgnoreCase("compress")) {
+                        doAction = COMPRESS;
+                    } else if (action.equalsIgnoreCase("checkpoint")) {
+                        doAction = CHECKPOINT;
+                    } else if (action.equalsIgnoreCase("evict")) {
+                        doAction = EVICT;
+                    } else if (action.equalsIgnoreCase("removedb")) {
+                        doAction = REMOVEDB;
+                    } else if (action.equalsIgnoreCase("removedbAndClean")) {
+                        doAction = REMOVEDB_AND_CLEAN;
+                    } else if (action.equalsIgnoreCase("activateCleaner")) {
+                        doAction = ACTIVATE_CLEANER_THREADS;
+                    } else if (action.equalsIgnoreCase("verifyUtilization")) {
+                        doAction = VERIFY_UTILIZATION;
+                    } else {
+                        usage();
+                        System.exit(1);
+                    }
+                } else if (nextArg.equals("-ro")) {
+                    readOnly = true;
+                } else if (nextArg.equals("-s")) {
+                    dbName = argv[++whichArg];
+                } else if (nextArg.equals("-stats")) {
+                    printStats = true;
+                } else {
+                    throw new IllegalArgumentException
+                        (nextArg + " is not a supported option.");
+                }
+                whichArg++;
+            }
+
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+
+            /* Do debug log to the console. */
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_CONSOLE.getName(), "true");
+
+            /* Don't debug log to the database log. */
+            if (readOnly) {
+                envConfig.setConfigParam
+                    (EnvironmentParams.JE_LOGGING_DBLOG.getName(), "false");
+
+                envConfig.setReadOnly(true);
+            }
+
+            /*
+             * If evicting, scan the given database first and don't run the
+             * background evictor.
+             */
+            if (doAction == EVICT) {
+                envConfig.setConfigParam
+                    (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+                envConfig.setConfigParam
+                    (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE.getName(),
+                     "1000");
+            }
+
+            recoveryStart = System.currentTimeMillis();
+
+            Environment env =
+		new Environment(new File(envHome), envConfig);
+
+            CheckpointConfig forceConfig = new CheckpointConfig();
+            forceConfig.setForce(true);
+
+            Thread statsPrinter = null;
+            if (printStats) {
+                statsPrinter = new StatsPrinter(env);
+                statsPrinter.start();
+            }
+            
+            boolean promptForShutdown = false;
+            actionStart = System.currentTimeMillis();
+            switch(doAction) {
+            case BATCH_CLEAN:
+                /* Since this is batch cleaning, repeat until no progress. */
+                while (true) {
+                    int nFiles = env.cleanLog();
+                    System.out.println("Files cleaned: " + nFiles);
+                    if (nFiles == 0) {
+                        break;
+                    }
+                }
+                env.checkpoint(forceConfig);
+                break;
+            case COMPRESS:
+                env.compress();
+                break;
+            case CHECKPOINT:
+                env.checkpoint(forceConfig);
+                break;
+            case EVICT:
+                preload(env, dbName);
+                break;
+            case REMOVEDB:
+                removeAndClean(env, dbName, false);
+                break;
+            case REMOVEDB_AND_CLEAN:
+                removeAndClean(env, dbName, true);
+                break;
+            case ACTIVATE_CLEANER_THREADS:
+                EnvironmentImpl envImpl =
+                    DbInternal.envGetEnvironmentImpl(env);
+                envImpl.getCleaner().wakeup();
+                promptForShutdown = true;
+                break;
+            case VERIFY_UTILIZATION:
+                EnvironmentImpl envImpl2 =
+                    DbInternal.envGetEnvironmentImpl(env);
+                VerifyUtils.verifyUtilization
+                    (envImpl2,
+                     true,  // expectAccurateObsoleteLNCount
+                     true,  // expectAccurateObsoleteLNSize
+                     true); // expectAccurateDbUtilization
+                break;
+            }
+            actionEnd = System.currentTimeMillis();
+
+            if (promptForShutdown) {
+
+                /*
+                 * If the requested action is a daemon driven one, we don't
+                 * want the main thread to shutdown the environment until we
+                 * say we're ready
+                 */
+                waitForShutdown();
+            }
+            if (statsPrinter != null) {
+                statsPrinter.interrupt();
+                statsPrinter.join();
+            }
+            env.close();
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.out.println(e.getMessage());
+            usage();
+            System.exit(1);
+        } finally {
+            DecimalFormat f = new DecimalFormat();
+            f.setMaximumFractionDigits(2);
+
+            long recoveryDuration = actionStart - recoveryStart;
+            System.out.println("\nrecovery time = " +
+                               f.format(recoveryDuration) +
+                               " millis " +
+                               f.format((double)recoveryDuration/60000) +
+                               " minutes");
+
+            long actionDuration = actionEnd - actionStart;
+            System.out.println("action time = " +
+                               f.format(actionDuration) +
+                               " millis " +
+                               f.format((double) actionDuration / 60000) +
+                               " minutes");
+        }
+    }
+
+    private static void removeAndClean(Environment env,
+                                       String name,
+                                       boolean doCleaning)
+        throws DatabaseException {
+
+        long a, c, d, e, f;
+
+        Transaction txn = null;
+        CheckpointConfig force = new CheckpointConfig();
+        force.setForce(true);
+
+        a = System.currentTimeMillis();
+        env.removeDatabase(txn, name);
+        c = System.currentTimeMillis();
+
+        int cleanedCount = 0;
+        if (doCleaning) {
+            while (env.cleanLog() > 0) {
+                cleanedCount++;
+            }
+        }
+        d = System.currentTimeMillis();
+
+        System.out.println("cleanedCount=" + cleanedCount);
+        e = 0;
+        f = 0;
+        if (cleanedCount > 0) {
+            e = System.currentTimeMillis();
+            env.checkpoint(force);
+            f = System.currentTimeMillis();
+        }
+
+        System.out.println("Remove of " + name  +
+                           " remove: " + getSecs(a, c) +
+                           " clean: " + getSecs(c, d) +
+                           " checkpoint: " + getSecs(e, f));
+    }
+
+    private static String getSecs(long start, long end) {
+        return (end-start) / 1000 + " secs";
+    }
+
+    private static void preload(Environment env, String dbName)
+        throws DatabaseException {
+
+        System.out.println("Preload starting");
+        Database db = env.openDatabase(null, dbName, null);
+        Cursor cursor = db.openCursor(null, null);
+        try {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            int count = 0;
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                count++;
+                if ((count % 50000) == 0) {
+                    System.out.println(count + "...");
+                }
+            }
+            System.out.println("Preloaded " + count + " records");
+        } finally {
+            cursor.close();
+            db.close();
+        }
+    }
+
+    @SuppressWarnings("unused")
+    private static void doEvict(Environment env)
+        throws DatabaseException {
+    	
+        /* Push the cache size down by half to force eviction. */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        long cacheUsage = envImpl.getMemoryBudget().getCacheMemoryUsage();
+        EnvironmentMutableConfig c = new EnvironmentMutableConfig();
+        c.setCacheSize(cacheUsage/2);
+        env.setMutableConfig(c);
+
+        long start = System.currentTimeMillis();
+        env.evictMemory();
+        long end = System.currentTimeMillis();
+
+        DecimalFormat f = new DecimalFormat();
+        f.setMaximumFractionDigits(2);
+        System.out.println("evict time=" + f.format(end-start));
+    }
+
+    private static void waitForShutdown()
+        throws IOException {
+
+        System.out.println
+	    ("Wait for daemon activity to run. When ready to stop, type (y)");
+        BufferedReader reader =
+            new BufferedReader(new InputStreamReader(System.in));
+        do {
+            String val = reader.readLine();
+            if (val != null &&
+		(val.equalsIgnoreCase("y") ||
+                 val.equalsIgnoreCase("yes"))) {
+                break;
+            } else {
+                System.out.println("Shutdown? (y)");
+            }
+        } while (true);
+    }
+
+    private static class StatsPrinter extends Thread {
+
+        private Environment env;
+
+        StatsPrinter(Environment env) {
+            this.env = env;
+        }
+
+        @Override
+        public void run() {
+
+            StatsConfig clearConfig = new StatsConfig();
+            clearConfig.setClear(true);
+
+            while (true) {
+                try {
+                    synchronized (this) {
+                        wait(30 * 1000);
+                    }
+                    EnvironmentStats stats = env.getStats(clearConfig);
+                    System.out.println("\n" + stats + "\n");
+                } catch (DatabaseException e) {
+                    e.printStackTrace();
+                    break;
+                } catch (InterruptedException e) {
+                    break;
+                }
+            }
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: \n " +
+			   CmdUtil.getJavaCommand(DbRunAction.class));
+	System.out.println("  -h <environment home> ");
+        System.out.println("  -a <batchClean|compress|evict|checkpoint|" +
+                           "removeDb|removeDbAndClean|activateCleaner|" +
+                           "verifyUtilization>");
+        System.out.println("  -ro (read-only - defaults to read-write)");
+        System.out.println("  -s <dbName> (for removeDb)");
+        System.out.println("  -stats (print every 30 seconds)");
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbScavenger.java b/src/com/sleepycat/je/util/DbScavenger.java
new file mode 100644
index 0000000000000000000000000000000000000000..b7563b36f21e446ab64546fc046029b15ed2350d
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbScavenger.java
@@ -0,0 +1,446 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbScavenger.java,v 1.24.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LastFileReader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.ScavengerFileReader;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.NameLN;
+import com.sleepycat.je.utilint.BitMap;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Used to retrieve as much data as possible from a corrupted environment. 
+ * This utility is meant to be used programmatically, and is the equivalent
+ * of the -R or -r options for {@link DbDump}.
+ * <p>
+ * To scavenge a database:
+ *<pre>
+ *  DbScavenger scavenger =
+ *      new DbScavenger(env, outputDirectory, formatUsingPrintable,
+ *                      doAggressiveScavengerRun, verbose);
+ *  scavenger.dump();
+ *</pre> 
+ *
+ *<p>
+ * The recovered databases will be placed in the outputDirectory with ".dump"
+ * file suffixes.  The format of the .dump files will be suitable for use with
+ * DbLoad.
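+ * <p>
+ * Scavenging runs in two passes: pass 1 recovers database names and dup-sort
+ * settings from the dbtree entries, and pass 2 dumps the data records
+ * themselves.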
+ */
+
+public class DbScavenger extends DbDump {
+    private static final int FLUSH_INTERVAL = 100;
+    private int readBufferSize;
+    private EnvironmentImpl envImpl;
+
+    /*
+     * Set of committed txn ids that have been seen so far.
+     */
+    private BitMap committedTxnIdsSeen;
+
+    /*
+     * Set of LN Node Ids that have been seen so far.
+     */
+    private BitMap nodeIdsSeen;
+
+    /*
+     * Map of database id to database names.
+     */
+    private Map<Integer, String> dbIdToName;
+
+    /*
+     * Map of database id to Boolean (dupSort).
+     */
+    private Map<Integer, Boolean> dbIdToDupSort;
+
+    /*
+     * Map of database id to the .dump file output stream for that database.
+     */
+    private Map<Integer, PrintStream> dbIdToOutputStream;
+
+    private boolean dumpCorruptedBounds = false;
+
+    private int flushCounter = 0;
+    private long lastTime;
+
+    /**
+     * Create a DbScavenger object for a specific environment.
+     * <p>
+     * @param env The Environment containing the database to dump.
+     * @param outputDirectory The directory to create the .dump files in.
+     * @param formatUsingPrintable true if the dump should use printable 
+     * characters.
+     * @param doAggressiveScavengerRun if true, all data records are dumped,
+     * regardless of whether they are the latest version.
+     * @param verbose true if status output should be written to System.out
+     * during scavenging.
+     */
+    public DbScavenger(Environment env,
+		       String outputDirectory,
+                       boolean formatUsingPrintable,
+                       boolean doAggressiveScavengerRun,
+                       boolean verbose) {
+	super(env, null, null, formatUsingPrintable);
+
+        this.doAggressiveScavengerRun = doAggressiveScavengerRun;
+	this.dbIdToName = new HashMap<Integer, String>();
+	this.dbIdToDupSort = new HashMap<Integer, Boolean>();
+	this.dbIdToOutputStream = new HashMap<Integer, PrintStream>();
+        this.verbose = verbose;
+	this.outputDirectory = outputDirectory;
+    }
+
+    /**
+     * Set to true if corrupted boundaries should be dumped out.
+     */
+    public void setDumpCorruptedBounds(boolean dumpCorruptedBounds) {
+	this.dumpCorruptedBounds = dumpCorruptedBounds;
+    }
+
+    /**
+     * Start the scavenger run.
+     */
+    @Override
+    public void dump()
+	throws IOException, DatabaseException {
+
+	openEnv(false);
+
+	envImpl = DbInternal.envGetEnvironmentImpl(env);
+	DbConfigManager cm = envImpl.getConfigManager();
+	try {
+	    readBufferSize =
+		cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+	} catch (DatabaseException DBE) {
+	    readBufferSize = 8192;
+	}
+
+	/*
+	 * Find the end of the log.
+	 */
+	LastFileReader reader = new LastFileReader(envImpl, readBufferSize);
+	while (reader.readNextEntry()) {
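+	    /* Empty loop body: just advance to the end of the log. */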
+	}
+
+	/* Tell the fileManager where the end of the log is. */
+	long lastUsedLsn = reader.getLastValidLsn();
+	long nextAvailableLsn = reader.getEndOfLog();
+	envImpl.getFileManager().setLastPosition(nextAvailableLsn,
+						 lastUsedLsn,
+						 reader.getPrevOffset());
+
+        try {
+            /* Pass 1: Scavenge the dbtree. */
+            if (verbose) {
+                System.out.println("Pass 1: " + new Date());
+            }
+            scavengeDbTree(lastUsedLsn, nextAvailableLsn);
+
+            /* Pass 2: Scavenge the databases. */
+            if (verbose) {
+                System.out.println("Pass 2: " + new Date());
+            }
+            scavenge(lastUsedLsn, nextAvailableLsn);
+
+            if (verbose) {
+                System.out.println("End: " + new Date());
+            }
+        } finally {
+            closeOutputStreams();
+        }
+    }
+
+    /*
+     * Scan the log looking for records that are relevant for scavenging the db
+     * tree.
+     */
+    private void scavengeDbTree(long lastUsedLsn, long nextAvailableLsn)
+	throws IOException, DatabaseException {
+
+	committedTxnIdsSeen = new BitMap();
+	nodeIdsSeen = new BitMap();
+
+	final ScavengerFileReader scavengerReader =
+	    new ScavengerFileReader(envImpl, readBufferSize, lastUsedLsn,
+				    DbLsn.NULL_LSN, nextAvailableLsn) {
+		protected void processEntryCallback(LogEntry entry,
+						    LogEntryType entryType)
+		    throws DatabaseException {
+
+		    processDbTreeEntry(entry, entryType);
+		}
+	    };
+
+        scavengerReader.setTargetType(LogEntryType.LOG_MAPLN_TRANSACTIONAL);
+        scavengerReader.setTargetType(LogEntryType.LOG_MAPLN);
+        scavengerReader.setTargetType(LogEntryType.LOG_NAMELN_TRANSACTIONAL);
+        scavengerReader.setTargetType(LogEntryType.LOG_NAMELN);
+        scavengerReader.setTargetType(LogEntryType.LOG_TXN_COMMIT);
+        scavengerReader.setTargetType(LogEntryType.LOG_TXN_ABORT);
+	lastTime = System.currentTimeMillis();
+	long fileNum = -1;
+	while (scavengerReader.readNextEntry()) {
+	    fileNum = reportProgress(fileNum,
+				     scavengerReader.getLastLsn());
+	}
+    }
+
+    private long reportProgress(long fileNum, long lastLsn) {
+
+        long currentFile = DbLsn.getFileNumber(lastLsn);
+        if (verbose) {
+            if (currentFile != fileNum) {
+                long now = System.currentTimeMillis();
+                System.out.println("processing file " +
+                                   FileManager.getFileName(currentFile,
+                                                           ".jdb  ") +
+                                   (now-lastTime) + " ms");
+                lastTime = now;
+            }
+        }
+
+        return currentFile;
+    }
+
+    /*
+     * Look at an entry and determine if it should be processed for scavenging.
+     */
+    private boolean checkProcessEntry(LogEntry entry,
+				      LogEntryType entryType,
+				      boolean pass2) {
+	boolean isTransactional = entryType.isTransactional();
+
+	/*
+	 * If entry is txnal...
+	 *  if a commit record, add to committed txn id set
+	 *  if an abort record, ignore it and don't process.
+	 *  if an LN, check if it's in the committed txn id set.
+	 *     If it is, continue processing, otherwise ignore it.
+	 */
+	if (isTransactional) {
+	    long txnId = entry.getTransactionId();
+	    if (entryType.equals(LogEntryType.LOG_TXN_COMMIT)) {
+		committedTxnIdsSeen.set(txnId);
+		/* No need to process this entry further. */
+		return false;
+	    }
+
+	    if (entryType.equals(LogEntryType.LOG_TXN_ABORT)) {
+		/* No need to process this entry further. */
+		return false;
+	    }
+
+	    if (!committedTxnIdsSeen.get(txnId)) {
+		return false;
+	    }
+	}
+
+	/*
+	 * Check the nodeid to see if we've already seen it or not.
+	 */
+	if (entry instanceof LNLogEntry) {
+	    LNLogEntry lnEntry = (LNLogEntry) entry;
+	    LN ln = lnEntry.getLN();
+	    long nodeId = ln.getNodeId();
+	    boolean isDelDupLN =
+		entryType.equals(LogEntryType.
+				 LOG_DEL_DUPLN_TRANSACTIONAL) ||
+		entryType.equals(LogEntryType.LOG_DEL_DUPLN);
+
+	    /*
+	     * If aggressive, don't worry about whether this node has been
+	     * dumped already.
+	     */
+	    if (pass2 && doAggressiveScavengerRun) {
+		return !isDelDupLN;
+	    }
+	    if (nodeIdsSeen.get(nodeId)) {
+		return false;
+	    } else {
+		nodeIdsSeen.set(nodeId);
+		if (isDelDupLN) {
+
+		    /*
+		     * For deleted LNs, note the NodeId as having been
+		     * processed, but don't output them.
+		     */
+		    return false;
+		} else {
+		    return true;
+		}
+	    }
+	}
+
+	return false;
+    }
+
+    /*
+     * Called once for each log entry during pass 1 (scavenging the dbtree).
+     */
+    private void processDbTreeEntry(LogEntry entry, LogEntryType entryType)
+	throws DatabaseException {
+
+	boolean processThisEntry =
+	    checkProcessEntry(entry, entryType, false);
+
+	if (processThisEntry &&
+	    (entry instanceof LNLogEntry)) {
+	    LNLogEntry lnEntry = (LNLogEntry) entry;
+	    LN ln = lnEntry.getLN();
+	    if (ln instanceof NameLN) {
+		String name = new String(lnEntry.getKey());
+		Integer dbId = Integer.valueOf(((NameLN) ln).getId().getId());
+		if (dbIdToName.containsKey(dbId) &&
+		    !dbIdToName.get(dbId).equals(name)) {
+		    throw new DatabaseException
+			("Already name mapped for dbId: " + dbId +
+			 " changed from " + dbIdToName.get(dbId) +
+			 " to " + name);
+		} else {
+		    dbIdToName.put(dbId, name);
+		}
+	    }
+
+	    if (ln instanceof MapLN) {
+		DatabaseImpl db = ((MapLN) ln).getDatabase();
+		Integer dbId = Integer.valueOf(db.getId().getId());
+		Boolean dupSort = Boolean.valueOf(db.getSortedDuplicates());
+		if (dbIdToDupSort.containsKey(dbId)) {
+		    throw new DatabaseException
+			("Already saw dupSort entry for dbId: " + dbId);
+		} else {
+		    dbIdToDupSort.put(dbId, dupSort);
+		}
+	    }
+	}
+    }
+
+    /*
+     * Pass 2: scavenge the regular (non-dbtree) environment.
+     */
+    private void scavenge(long lastUsedLsn, long nextAvailableLsn)
+	throws IOException, DatabaseException {
+
+	final ScavengerFileReader scavengerReader =
+	    new ScavengerFileReader(envImpl, readBufferSize, lastUsedLsn,
+				    DbLsn.NULL_LSN, nextAvailableLsn) {
+		protected void processEntryCallback(LogEntry entry,
+						    LogEntryType entryType)
+		    throws DatabaseException {
+
+		    processRegularEntry(entry, entryType);
+		}
+	    };
+
+	/*
+	 * Note: committed transaction id map has been created already, no
+	 * need to read TXN_COMMITS on this pass.
+	 */
+        scavengerReader.setTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+        scavengerReader.setTargetType(LogEntryType.LOG_LN);
+        scavengerReader.setTargetType
+	    (LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        scavengerReader.setTargetType(LogEntryType.LOG_DEL_DUPLN);
+	scavengerReader.setDumpCorruptedBounds(dumpCorruptedBounds);
+
+	long progressFileNum = -1;
+	while (scavengerReader.readNextEntry()) {
+	    progressFileNum = reportProgress(progressFileNum,
+                                             scavengerReader.getLastLsn());
+	}
+    }
+
+    /*
+     * Process an entry during pass 2.
+     */
+    private void processRegularEntry(LogEntry entry, LogEntryType entryType)
+	throws DatabaseException {
+
+	boolean processThisEntry =
+	    checkProcessEntry(entry, entryType, true);
+
+	if (processThisEntry) {
+	    LNLogEntry lnEntry = (LNLogEntry) entry;
+	    Integer dbId = Integer.valueOf(lnEntry.getDbId().getId());
+	    PrintStream out = getOutputStream(dbId);
+
+            LN ln = lnEntry.getLN();
+            byte[] keyData = lnEntry.getKey();
+            byte[] data = ln.getData();
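+            /* A null data value indicates a deleted LN; don't dump it. */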
+            if (data != null) {
+                dumpOne(out, keyData, formatUsingPrintable);
+                dumpOne(out, data, formatUsingPrintable);
+		if ((++flushCounter % FLUSH_INTERVAL) == 0) {
+		    out.flush();
+		    flushCounter = 0;
+		}
+            }
+	}
+    }
+
+    /*
+     * Return the output stream for the .dump file for database with id dbId.
+     * If an output stream has not already been created, then create one.
+     */
+    private PrintStream getOutputStream(Integer dbId)
+	throws DatabaseException {
+
+	try {
+	    PrintStream ret = dbIdToOutputStream.get(dbId);
+	    if (ret != null) {
+		return ret;
+	    }
+	    String name = dbIdToName.get(dbId);
+	    if (name == null) {
+		name = "db" + dbId;
+	    }
+	    File file = new File(outputDirectory, name + ".dump");
+	    ret = new PrintStream(new FileOutputStream(file), false);
+	    dbIdToOutputStream.put(dbId, ret);
+	    Boolean dupSort = dbIdToDupSort.get(dbId);
+	    if (dupSort == null) {
+		dupSort = Boolean.valueOf(false);
+	    }
+	    printHeader(ret, dupSort.booleanValue(), formatUsingPrintable);
+	    return ret;
+	} catch (IOException IOE) {
+	    throw new DatabaseException(IOE);
+	}
+    }
+
+    private void closeOutputStreams() {
+
+        Iterator<PrintStream> iter = dbIdToOutputStream.values().iterator();
+        while (iter.hasNext()) {
+            PrintStream s = iter.next();
+            s.println("DATA=END");
+            s.close();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbSpace.java b/src/com/sleepycat/je/util/DbSpace.java
new file mode 100644
index 0000000000000000000000000000000000000000..c47854447db24ffd42c5de44d469cca075318820
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbSpace.java
@@ -0,0 +1,294 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbSpace.java,v 1.32.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.cleaner.FileSummary;
+import com.sleepycat.je.cleaner.UtilizationProfile;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.UtilizationFileReader;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * DbSpace displays the disk space utilization for an environment.
+ * <pre>
+ * java com.sleepycat.je.util.DbSpace
+ *          -h &lt;dir&gt;# environment home directory
+ *         [-q]     # quiet, print grand totals only
+ *         [-u]     # sort by utilization
+ *         [-d]     # dump file summary details
+ *         [-r]     # recalculate utilization (reads entire log)
+ *         [-V]     # print JE version number
+ * </pre>
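+ *
+ * DbSpace may also be used programmatically; a minimal sketch using the
+ * public constructor:
+ * <pre>
+ * DbSpace space = new DbSpace(env, quiet, details, sorted);
+ * space.print(System.out);
+ * </pre>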
+ */
+
+public class DbSpace {
+
+    private static final String USAGE =
+	"usage: " + CmdUtil.getJavaCommand(DbSpace.class) + "\n" +
+        "       -h <dir> # environment home directory\n" +
+        "       [-q]     # quiet, print grand totals only\n" +
+        "       [-u]     # sort by utilization\n" +
+        "       [-d]     # dump file summary details\n" +
+        "       [-r]     # recalculate utilization (reads entire log)\n" +
+        "       [-V]     # print JE version number";
+
+    public static void main(String argv[])
+	throws DatabaseException {
+
+	DbSpace space = new DbSpace();
+	space.parseArgs(argv);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setReadOnly(true);
+        Environment env = new Environment(space.envHome, envConfig);
+        space.envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+	try {
+	    space.print(System.out);
+	} catch (Throwable e) {
+            e.printStackTrace(System.err);
+            System.exit(1);
+	} finally {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                e.printStackTrace(System.err);
+                System.exit(1);
+            }
+	}
+    }
+
+    private File envHome = null;
+    private EnvironmentImpl envImpl;
+    private boolean quiet = false;
+    private boolean sorted = false;
+    private boolean details = false;
+    private boolean recalc = false;
+
+    private DbSpace() {
+    }
+
+    public DbSpace(Environment env,
+		   boolean quiet,
+                   boolean details,
+                   boolean sorted) {
+        this(DbInternal.envGetEnvironmentImpl(env), quiet, details, sorted);
+    }
+
+    public DbSpace(EnvironmentImpl envImpl,
+		   boolean quiet,
+                   boolean details,
+                   boolean sorted) {
+	this.envImpl = envImpl;
+	this.quiet = quiet;
+        this.details = details;
+	this.sorted = sorted;
+    }
+
+    private void printUsage(String msg) {
+        if (msg != null) {
+            System.err.println(msg);
+        }
+	System.err.println(USAGE);
+	System.exit(-1);
+    }
+
+    private void parseArgs(String argv[]) {
+
+	int argc = 0;
+	int nArgs = argv.length;
+
+        if (nArgs == 0) {
+	    printUsage(null);
+        }
+
+	while (argc < nArgs) {
+	    String thisArg = argv[argc++];
+	    if (thisArg.equals("-q")) {
+		quiet = true;
+            } else if (thisArg.equals("-u")) {
+		sorted = true;
+            } else if (thisArg.equals("-d")) {
+		details = true;
+            } else if (thisArg.equals("-r")) {
+		recalc = true;
+	    } else if (thisArg.equals("-V")) {
+		System.out.println(JEVersion.CURRENT_VERSION);
+		System.exit(0);
+	    } else if (thisArg.equals("-h")) {
+		if (argc < nArgs) {
+		    envHome = new File(argv[argc++]);
+		} else {
+		    printUsage("-h requires an argument");
+		}
+	    }
+	}
+
+	if (envHome == null) {
+	    printUsage("-h is a required argument");
+	}
+    }
+
+    public void print(PrintStream out)
+	throws IOException, DatabaseException {
+
+        UtilizationProfile profile = envImpl.getUtilizationProfile();
+        SortedMap<Long,FileSummary> map = profile.getFileSummaryMap(true);
+        Map<Long, FileSummary> recalcMap =
+            recalc ? UtilizationFileReader.calcFileSummaryMap(envImpl)
+                   : null;
+        int fileIndex = 0;
+
+        Summary totals = new Summary();
+        Summary[] summaries = null;
+        if (!quiet) {
+            summaries = new Summary[map.size()];
+        }
+
+	Iterator<Map.Entry<Long,FileSummary>> iter = map.entrySet().iterator();
+	while (iter.hasNext()) {
+	    Map.Entry<Long,FileSummary> entry = iter.next();
+	    Long fileNum = entry.getKey();
+	    FileSummary fs = entry.getValue();
+            FileSummary recalcFs = null;
+            if (recalcMap != null) {
+                 recalcFs = recalcMap.get(fileNum);
+            }
+            Summary summary = new Summary(fileNum, fs, recalcFs);
+            if (summaries != null) {
+                summaries[fileIndex] = summary;
+            }
+            if (details) {
+                out.println
+                    ("File 0x" + Long.toHexString(fileNum.longValue()) +
+                     ": " + fs);
+                if (recalcMap != null) {
+                    out.println
+                        ("Recalculated File 0x" +
+                         Long.toHexString(fileNum.longValue()) +
+                         ": " + recalcFs);
+                }
+            }
+            totals.add(summary);
+            fileIndex += 1;
+        }
+
+        if (details) {
+            out.println();
+        }
+        out.println(recalc ? Summary.RECALC_HEADER : Summary.HEADER);
+
+        if (summaries != null) {
+            if (sorted) {
+                Arrays.sort(summaries, new Comparator<Summary>() {
+                    public int compare(Summary s1, Summary s2) {
+                        return s1.utilization() - s2.utilization();
+                    }
+                });
+            }
+            for (int i = 0; i < summaries.length; i += 1) {
+                summaries[i].print(out, recalc);
+            }
+        }
+
+        totals.print(out, recalc);
+    }
+
+    private static class Summary {
+
+        static final String HEADER = "  File    Size (KB)  % Used\n" +
+                                     "--------  ---------  ------";
+                                   // 12345678  123456789     123
+                                   //         12         12345
+                                   // TOTALS:
+
+        static final String RECALC_HEADER =
+                   "  File    Size (KB)  % Used  % Used (recalculated)\n" +
+                   "--------  ---------  ------  ------";
+                 // 12345678  123456789     123     123
+                 //         12         12345   12345
+                 // TOTALS:
+
+        Long fileNum;
+        long totalSize;
+        long obsoleteSize;
+        long recalcObsoleteSize;
+
+        Summary() {}
+
+        Summary(Long fileNum, FileSummary summary, FileSummary recalcSummary)
+            throws DatabaseException {
+
+            this.fileNum = fileNum;
+            totalSize = summary.totalSize;
+            obsoleteSize = summary.getObsoleteSize();
+            if (recalcSummary != null) {
+                recalcObsoleteSize = recalcSummary.getObsoleteSize();
+            }
+        }
+
+        void add(Summary o) {
+            totalSize += o.totalSize;
+            obsoleteSize += o.obsoleteSize;
+            recalcObsoleteSize += o.recalcObsoleteSize;
+        }
+
+        void print(PrintStream out, boolean recalc) {
+            if (fileNum != null) {
+                pad(out, Long.toHexString(fileNum.longValue()), 8, '0');
+            } else {
+                out.print(" TOTALS ");
+            }
+            int kb = (int) (totalSize / 1024);
+            out.print("  ");
+            pad(out, Integer.toString(kb), 9, ' ');
+            out.print("     ");
+            pad(out, Integer.toString(utilization()), 3, ' ');
+            if (recalc) {
+                out.print("     ");
+                pad(out, Integer.toString(recalcUtilization()), 3, ' ');
+            }
+            out.println();
+        }
+
+        int utilization() {
+            return UtilizationProfile.utilization(obsoleteSize, totalSize);
+        }
+
+        int recalcUtilization() {
+            return UtilizationProfile.utilization
+                (recalcObsoleteSize, totalSize);
+        }
+
+        private void pad(PrintStream out, String val, int digits,
+                           char padChar) {
+            int padSize = digits - val.length();
+            for (int i = 0; i < padSize; i += 1) {
+                out.print(padChar);
+            }
+            out.print(val);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbStat.java b/src/com/sleepycat/je/util/DbStat.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e343a45ab848f574664ff4bab73e1a02d288186
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbStat.java
@@ -0,0 +1,144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbStat.java,v 1.27.2.2 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.util.logging.Level;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.utilint.Tracer;
+
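+/**
+ * DbStat prints statistics for a single database. A minimal programmatic
+ * sketch (the environment and database name are assumed to exist):
+ * <pre>
+ *  DbStat stat = new DbStat(env, dbName);
+ *  stat.stats(System.err);
+ * </pre>
+ */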
+class DbStat extends DbVerify {
+    /*
+    private String usageString =
+	"usage: " + CmdUtil.getJavaCommand(DbStat.class) + "\n" +
+	"               [-V] -s database -h dbEnvHome [-v progressInterval]\n";
+    */
+
+    private int progressInterval = 0;
+
+    static public void main(String argv[])
+	throws DatabaseException {
+
+	DbStat stat = new DbStat();
+	stat.parseArgs(argv);
+
+	int ret = 0;
+	try {
+	    if (!stat.stats(System.err)) {
+		ret = 1;
+	    }
+	} catch (Throwable T) {
+	    ret = 1;
+	    T.printStackTrace(System.err);
+	}
+
+	try {
+	    stat.env.close();
+	} catch (Throwable ignored) {
+
+	    /*
+	     * Klockwork - ok
+	     * Don't say anything about exceptions here.
+	     */
+	}
+	System.exit(ret);
+    }
+
+    DbStat() {
+    }
+
+    public DbStat(Environment env, String dbName) {
+	super(env, dbName, false);
+    }
+
+    @Override
+    void parseArgs(String argv[]) {
+
+	int argc = 0;
+	int nArgs = argv.length;
+	while (argc < nArgs) {
+	    String thisArg = argv[argc++];
+	    if (thisArg.equals("-V")) {
+		System.out.println(JEVersion.CURRENT_VERSION);
+		System.exit(0);
+	    } else if (thisArg.equals("-h")) {
+		if (argc < nArgs) {
+		    envHome = new File(argv[argc++]);
+		} else {
+		    printUsage("-h requires an argument");
+		}
+	    } else if (thisArg.equals("-s")) {
+		if (argc < nArgs) {
+		    dbName = argv[argc++];
+		} else {
+		    printUsage("-s requires an argument");
+		}
+	    } else if (thisArg.equals("-v")) {
+		if (argc < nArgs) {
+		    progressInterval = Integer.parseInt(argv[argc++]);
+		    if (progressInterval <= 0) {
+			printUsage("-v requires a positive argument");
+		    }
+		} else {
+		    printUsage("-v requires an argument");
+		}
+	    }
+	}
+
+	if (envHome == null) {
+	    printUsage("-h is a required argument");
+	}
+
+	if (dbName == null) {
+	    printUsage("-s is a required argument");
+	}
+    }
+
+    public boolean stats(PrintStream out)
+	throws DatabaseException {
+
+	try {
+	    openEnv();
+
+	    Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+			 "DbStat.stats of " + dbName + " starting");
+
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setReadOnly(true);
+	    dbConfig.setAllowCreate(false);
+	    DbInternal.setUseExistingConfig(dbConfig, true);
+	    Database db = env.openDatabase(null, dbName, dbConfig);
+
+	    StatsConfig statsConfig = new StatsConfig();
+	    if (progressInterval > 0) {
+		statsConfig.setShowProgressInterval(progressInterval);
+		statsConfig.setShowProgressStream(out);
+	    }
+
+	    DatabaseStats stats = db.getStats(statsConfig);
+	    out.println(stats);
+
+	    db.close();
+	    Tracer.trace(Level.INFO, DbInternal.envGetEnvironmentImpl(env),
+			 "DbStat.stats of " + dbName + " ending");
+	} catch (DatabaseException DE) {
+	    return false;
+	}
+	return true;
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbVerify.java b/src/com/sleepycat/je/util/DbVerify.java
new file mode 100644
index 0000000000000000000000000000000000000000..215fd00f0a0579ca6986858e4173d61ce36eb27a
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbVerify.java
@@ -0,0 +1,388 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbVerify.java,v 1.50.2.4 2010/01/04 15:30:37 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.logging.Level;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Verifies the internal structures of a database.
+ *
+ * <p>To verify a database and write the errors to a stream:</p>
+ *
+ * <pre>
+ *    DbVerify verifier = new DbVerify(env, dbName, quiet);
+ *    verifier.verify();
+ * </pre>
+ */
+public class DbVerify {
+
+    private static final String usageString =
+	"usage: " + CmdUtil.getJavaCommand(DbVerify.class) + "\n" +
+        "       -h <dir>             # environment home directory\n" +
+        "       [-c ]                # check cleaner metadata\n" +
+        "       [-q ]                # quiet, exit with success or failure\n" +
+        "       [-s <databaseName> ] # database to verify\n" +
+        "       [-v <interval>]      # progress notification interval\n" +
+        "       [-V]                 # print JE version number";
+
+    File envHome = null;
+    Environment env;
+    String dbName = null;
+    boolean quiet = false;
+    boolean checkLsns = false;
+    boolean openReadOnly = true;
+    private boolean doClose;
+
+    private int progressInterval = 0;
+
+    /**
+     * The main used by the DbVerify utility.
+     *
+     * @param argv The arguments accepted by the DbVerify utility.
+     *
+     * <pre>
+     * usage: java { com.sleepycat.je.util.DbVerify | -jar
+     * je-&lt;version&gt;.jar DbVerify }
+     *             [-q] [-V] -s database -h dbEnvHome [-v progressInterval]
+     * </pre>
+     *
+     * <p>
+     * -V - show the version of the JE library.<br>
+     * -s - specify the database to verify<br>
+     * -h - specify the environment directory<br>
+     * -q - work quietly and don't display errors<br>
+     * -v - report intermediate statistics every progressInterval leaf
+     *  nodes
+     * </p>
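+     *
+     * <p>Example (hypothetical values):</p>
+     * <pre>
+     * java com.sleepycat.je.util.DbVerify -h /env/home -s myDb -v 1000
+     * </pre>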
+     * @throws DatabaseException if a failure occurs.
+     */
+    public static void main(String argv[])
+	throws DatabaseException {
+
+	DbVerify verifier = new DbVerify();
+	verifier.parseArgs(argv);
+
+        boolean ret = false;
+	try {
+            ret = verifier.verify(System.err);
+	} catch (Throwable T) {
+	    if (!verifier.quiet) {
+		T.printStackTrace(System.err);
+	    }
+	} finally {
+
+	    verifier.closeEnv();
+
+            /*
+             * Show the status. Omit it only if the user asked for a quiet
+             * run and didn't specify a progress interval, in which case we
+             * can assume that they really don't want any status output.
+             *
+             * If the user runs this from the command line, presumably they'd
+             * like to see the status.
+             */
+            if ((!verifier.quiet) || (verifier.progressInterval > 0)) {
+                System.err.println("Exit status = " + ret);
+            }
+
+	    System.exit(ret ? 0 : -1);
+	}
+    }
+
+    DbVerify() {
+        doClose = true;
+    }
+
+    /**
+     * Creates a DbVerify object for a specific environment and database.
+     *
+     * @param env The Environment containing the database to verify.
+     *
+     * @param dbName The name of the database to verify.
+     *
+     * @param quiet true if the verification should not produce errors to the
+     * output stream
+     */
+    public DbVerify(Environment env,
+		    String dbName,
+		    boolean quiet) {
+	this.env = env;
+	this.dbName = dbName;
+	this.quiet = quiet;
+        doClose = false;
+    }
+
+    void printUsage(String msg) {
+	System.err.println(msg);
+	System.err.println(usageString);
+	System.exit(-1);
+    }
+
+    void parseArgs(String argv[]) {
+
+	int argc = 0;
+	int nArgs = argv.length;
+	while (argc < nArgs) {
+	    String thisArg = argv[argc++];
+	    if (thisArg.equals("-q")) {
+		quiet = true;
+	    } else if (thisArg.equals("-V")) {
+		System.out.println(JEVersion.CURRENT_VERSION);
+		System.exit(0);
+	    } else if (thisArg.equals("-h")) {
+		if (argc < nArgs) {
+		    envHome = new File(argv[argc++]);
+		} else {
+		    printUsage("-h requires an argument");
+		}
+	    } else if (thisArg.equals("-s")) {
+		if (argc < nArgs) {
+		    dbName = argv[argc++];
+		} else {
+		    printUsage("-s requires an argument");
+		}
+	    } else if (thisArg.equals("-v")) {
+		if (argc < nArgs) {
+		    progressInterval = Integer.parseInt(argv[argc++]);
+		    if (progressInterval <= 0) {
+			printUsage("-v requires a positive argument");
+		    }
+		} else {
+		    printUsage("-v requires an argument");
+		}
+            } else if (thisArg.equals("-c")) {
+                checkLsns = true;
+            } else if (thisArg.equals("-rw")) {
+
+                /*
+                 * Unadvertised option. Open the environment read/write so that
+                 * a checkLsns pass gets an accurate root LSN to start from in
+                 * the event that a recovery split the root.  A read-only
+                 * environment open will keep any logging in the log buffers,
+                 * and the LSNs stored in the INs will be converted to
+                 * DbLsn.NULL_LSN.
+                 */
+                openReadOnly = false;
+            }
+	}
+
+	if (envHome == null) {
+	    printUsage("-h is a required argument");
+	}
+    }
+
+    void openEnv()
+	throws DatabaseException {
+
+	if (env == null) {
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+            envConfig.setReadOnly(openReadOnly);
+	    env = new Environment(envHome, envConfig);
+	}
+    }
+
+    void closeEnv()
+	throws DatabaseException {
+
+	try {
+	    if (env != null) {
+	        env.close();
+	    }
+        } finally {
+            env = null;
+	}
+    }
+
+    /**
+     * Verifies a database and writes any errors found to a stream.
+     *
+     * @param out The stream to write errors to.
+     *
+     * @return true if the verification found no errors.
+     */
+    public boolean verify(PrintStream out)
+	throws DatabaseException {
+
+	boolean ret = true;
+	try {
+            VerifyConfig verifyConfig = new VerifyConfig();
+            verifyConfig.setPrintInfo(!quiet);
+            if (progressInterval > 0) {
+                verifyConfig.setShowProgressInterval(progressInterval);
+                verifyConfig.setShowProgressStream(out);
+            }
+
+	    openEnv();
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+            /* If no database is specified, verify all. */
+            List<String> dbNameList = null;
+            List<String> internalDbs = null;
+            DbTree dbMapTree = envImpl.getDbTree();
+
+            if (dbName == null) {
+                dbNameList = env.getDatabaseNames();
+
+                dbNameList.addAll(dbMapTree.getInternalDbNames());
+                internalDbs = dbMapTree.getInternalNoLookupDbNames();
+            } else {
+                dbNameList = new ArrayList<String>();
+                dbNameList.add(dbName);
+                internalDbs = new ArrayList<String>();
+            }
+
+            /* Check application data. */
+            Iterator<String> iter = dbNameList.iterator();
+            while (iter.hasNext()) {
+                String targetDb = iter.next();
+                Tracer.trace(Level.INFO, envImpl,
+                             "DbVerify.verify of " + targetDb + " starting");
+
+                DatabaseConfig dbConfig = new DatabaseConfig();
+                dbConfig.setReadOnly(true);
+                dbConfig.setAllowCreate(false);
+                DbInternal.setUseExistingConfig(dbConfig, true);
+                Database db = env.openDatabase(null, targetDb, dbConfig);
+
+                try {
+                    if (!verifyOneDbImpl(DbInternal.dbGetDatabaseImpl(db),
+                                         targetDb,
+                                         verifyConfig,
+                                         out)) {
+                        ret = false;
+                    }
+                } finally {
+                    if (db != null) {
+                        db.close();
+                    }
+                    Tracer.trace(Level.INFO, envImpl,
+                                 "DbVerify.verify of " + targetDb + " ending");
+                }
+            }
+
+            /*
+             * Check internal databases, which don't have to be opened
+             * through a Database handle.
+             */
+            iter = internalDbs.iterator();
+            while (iter.hasNext()) {
+                String targetDb = iter.next();
+                Tracer.trace(Level.INFO, envImpl,
+                             "DbVerify.verify of " + targetDb + " starting");
+
+                try {
+                    DatabaseImpl dbImpl = dbMapTree.getDb(null, targetDb,
+                                                          null);
+                    try {
+                        if (!verifyOneDbImpl(dbImpl,  targetDb,
+                                             verifyConfig, out)) {
+                            ret = false;
+                        }
+                    } finally {
+                        dbMapTree.releaseDb(dbImpl);
+                    }
+                } finally {
+                    Tracer.trace(Level.INFO, envImpl,
+                                 "DbVerify.verify of " + targetDb + " ending");
+                }
+            }
+
+            if (doClose) {
+                closeEnv();
+            }
+        } catch (DatabaseException DE) {
+	    ret = false;
+            try {
+                closeEnv();
+	    } catch (Throwable ignored) {
+
+		/*
+		 * Klockwork - ok
+		 * Don't say anything about exceptions here.
+		 */
+	    }
+	    throw DE;
+        }
+
+	return ret;
+    }
+
+    private boolean verifyOneDbImpl(DatabaseImpl dbImpl,
+                                    String name,
+                                    VerifyConfig verifyConfig,
+                                    PrintStream out)
+        throws DatabaseException {
+        boolean status = true;
+
+        if (verifyConfig.getPrintInfo()) {
+            out.println("Verifying database " + name);
+        }
+
+        if (checkLsns) {
+            /* Check the obsolete lsns */
+            if (verifyConfig.getPrintInfo()) {
+                out.println("Checking obsolete offsets for " + name);
+            }
+            try {
+                VerifyUtils.checkLsns(dbImpl, out);
+            } catch (DatabaseException e) {
+                if (verifyConfig.getPrintInfo()) {
+                    out.println("Problem from checkLsns: " + e);
+                }
+                status = false;
+            }
+        } else {
+
+            /*
+             * Check the tree. Use DatabaseImpl.verify so we can get a status
+             * return.
+             */
+            if (verifyConfig.getPrintInfo()) {
+                out.println("Checking tree for " + name);
+            }
+            DatabaseStats stats = dbImpl.getEmptyStats();
+            status = dbImpl.verify(verifyConfig, stats);
+            if (verifyConfig.getPrintInfo()) {
+
+                /*
+                 * Intentionally use print, not println, because
+                 * stats.toString() puts in a newline too.
+                 */
+                out.print(stats);
+            }
+        }
+
+        if (verifyConfig.getPrintInfo()) {
+            out.println();
+        }
+
+        return status;
+    }
+}
diff --git a/src/com/sleepycat/je/util/package.html b/src/com/sleepycat/je/util/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..8c123879244637fd599ee7c86ab592b4f3bf2bd8
--- /dev/null
+++ b/src/com/sleepycat/je/util/package.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+ See the file LICENSE for redistribution information.
+
+ Copyright (c) 2002,2010 Oracle.  All rights reserved.
+
+ $Id: package.html,v 1.13.2.2 2010/01/04 15:30:37 cwl Exp $
+
+-->
+</head>
+<body>
+Supporting utilities.
+
+<h2>Package Specification</h2>
+This package provides support for activities like
+loading and dumping data. Most utilities can be used as command line
+tools or called programmatically.
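+<p>
+For example, a database can be dumped from the command line with (a sketch;
+the -h and -s flags shown are those accepted by the DbDump utility):
+<pre>
+java -jar je.jar DbDump -h &lt;environment home&gt; -s &lt;database name&gt;
+</pre>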
+
+@see <a href="{@docRoot}/../GettingStartedGuide/commandlinetools.html"
+        target="_top">[Getting Started Guide]</a>
+
+</body>
+</html>
diff --git a/src/com/sleepycat/je/utilint/Adler32.java b/src/com/sleepycat/je/utilint/Adler32.java
new file mode 100644
index 0000000000000000000000000000000000000000..d41757aab47bd53b1eb26dc3221c350caf1027b2
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/Adler32.java
@@ -0,0 +1,151 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Adler32.java,v 1.16.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.zip.Checksum;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Adler32 checksum implementation.
+ *
+ * This class is used rather than the native java.util.zip.Adler32 class
+ * because we have seen a JIT problem when calling the Adler32 class using
+ * the Server JVM on Linux and Solaris.  Specifically, we suspect this may
+ * be Bug Parade number 4965907.  See SR [#9376].  We also believe that this
+ * bug is fixed in Java 5 and therefore only use this class conditionally
+ * if we find that we're in a 1.4 JVM. [#13354].
+ *
+ * The Adler32 checksum is discussed in RFC1950.  The sample implementation
+ * from this RFC is shown below:
+ *
+ * <pre>
+ *    #define BASE 65521  largest prime smaller than 65536
+ *    unsigned long update_adler32(unsigned long adler,
+ *       unsigned char *buf, int len)
+ *    {
+ *      unsigned long s1 = adler & 0xffff;
+ *      unsigned long s2 = (adler >> 16) & 0xffff;
+ *      int n;
+ *
+ *      for (n = 0; n < len; n++) {
+ *        s1 = (s1 + buf[n]) % BASE;
+ *        s2 = (s2 + s1)     % BASE;
+ *      }
+ *      return (s2 << 16) + s1;
+ *    }
+ *
+ *    unsigned long adler32(unsigned char *buf, int len)
+ *    {
+ *      return update_adler32(1L, buf, len);
+ *    }
+ * </pre>
+ *
+ * The NMAX optimization is so that we don't have to do modulo calculations
+ * on every iteration.  NMAX is the max number of additions to make
+ * before you have to perform the modulo calculation.
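+ *
+ * A minimal usage sketch (buf is an arbitrary byte array); makeChecksum
+ * returns either this implementation or the java.util.zip version,
+ * depending on the JVM:
+ *
+ * <pre>
+ *    Checksum cksum = Adler32.makeChecksum();
+ *    cksum.update(buf, 0, buf.length);
+ *    long value = cksum.getValue();
+ * </pre>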
+ */
+public class Adler32 implements Checksum {
+
+    /* This class and the ctor are public for the unit tests. */
+    public static class ChunkingAdler32 extends java.util.zip.Adler32 {
+	int adler32ChunkSize = 0;
+
+	public ChunkingAdler32(int adler32ChunkSize) {
+	    this.adler32ChunkSize = adler32ChunkSize;
+	}
+
+        @Override
+	public void update(byte[] b, int off, int len) {
+	    if (len < adler32ChunkSize) {
+		super.update(b, off, len);
+		return;
+	    }
+
+	    int i = 0;
+	    while (i < len) {
+		int bytesRemaining = len - i;
+		int nBytesThisChunk =
+		    Math.min(bytesRemaining, adler32ChunkSize);
+		super.update(b, off + i, nBytesThisChunk);
+		i += nBytesThisChunk;
+	    }
+	}
+    }
+
+    public static Checksum makeChecksum() {
+        if (EnvironmentImpl.USE_JAVA5_ADLER32) {
+	    int adler32ChunkSize = EnvironmentImpl.getAdler32ChunkSize();
+	    if (adler32ChunkSize > 0) {
+		return new ChunkingAdler32(adler32ChunkSize);
+	    } else {
+		return new java.util.zip.Adler32();
+	    }
+	} else {
+	    return new Adler32();
+	}
+    }
+
+    private long adler = 1;
+
+    /*
+     * BASE is the largest prime number smaller than 65536
+     * NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+     */
+    private static final int BASE = 65521;
+    private static final int NMAX = 5552;
+
+    /**
+     * Update current Adler-32 checksum given the specified byte.
+     */
+    public void update(int b) {
+        long s1 = adler & 0xffff;
+        long s2 = (adler >> 16) & 0xffff;
+        s1 = (s1 + (b & 0xff)) % BASE;
+        s2 = (s1 + s2) % BASE;
+        adler = (s2 << 16) | s1;
+    }
+
+    /**
+     * Update current Adler-32 checksum given the specified byte array.
+     */
+    public void update(byte[] b, int off, int len) {
+        long s1 = adler & 0xffff;
+        long s2 = (adler >> 16) & 0xffff;
+
+        while (len > 0) {
+            int k = len < NMAX ? len : NMAX;
+            len -= k;
+
+	    /* This does not benefit from loop unrolling. */
+            while (k-- > 0) {
+                s1 += (b[off++] & 0xff);
+                s2 += s1;
+            }
+
+            s1 %= BASE;
+            s2 %= BASE;
+        }
+        adler = (s2 << 16) | s1;
+    }
+
+    /**
+     * Reset Adler-32 checksum to initial value.
+     */
+    public void reset() {
+        adler = 1;
+    }
+
+    /**
+     * Returns current checksum value.
+     */
+    public long getValue() {
+        return adler;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/BitMap.java b/src/com/sleepycat/je/utilint/BitMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..1d8a8f69aa978272793dd7cd5be49f09e62e0c27
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/BitMap.java
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BitMap.java,v 1.10.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Bitmap which supports indexing with long arguments. java.util.BitSet
+ * provides all the functionality and performance we need, but requires integer
+ * indexing.
+ *
+ * Long indexing is implemented by keeping a Map of java.util.BitSets, where
+ * each bitset covers 2^16 bits worth of values. The Bitmap may be sparse, in
+ * that each segment is only instantiated when needed.
+ *
+ * Note that this class is currently not thread safe; adding a new bitset
+ * segment is not protected.
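+ *
+ * A minimal usage sketch:
+ *
+ * <pre>
+ *    BitMap map = new BitMap();
+ *    map.set(0x123456789L);                    // index may exceed int range
+ *    boolean isSet = map.get(0x123456789L);    // true
+ * </pre>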
+ */
+public class BitMap {
+
+    private static final int SEGMENT_SIZE = 16;
+    private static final int SEGMENT_MASK = 0xffff;
+
+    /*
+     * Map of segment value -> bitset, where the segment value is index >> 16.
+     */
+    private Map<Long, BitSet> bitSegments;
+
+    public BitMap() {
+        bitSegments = new HashMap<Long, BitSet>();
+    }
+
+    /**
+     * @throws IndexOutOfBoundsException if index is negative.
+     */
+    public void set(long index)
+        throws IndexOutOfBoundsException {
+
+        if (index < 0) {
+            throw new IndexOutOfBoundsException(index + " is negative.");
+        }
+
+        BitSet bitset = getBitSet(index, true);
+	if (bitset == null) {
+	    throw new IllegalArgumentException(index + " is out of bounds");
+	}
+        int useIndex = getIntIndex(index);
+        bitset.set(useIndex);
+    }
+
+    /**
+     * @throws IndexOutOfBoundsException if index is negative.
+     */
+    public boolean get(long index)
+        throws IndexOutOfBoundsException {
+
+        if (index < 0) {
+            throw new IndexOutOfBoundsException(index + " is negative.");
+        }
+
+        BitSet bitset = getBitSet(index, false);
+        if (bitset == null) {
+            return false;
+        }
+
+        int useIndex = getIntIndex(index);
+        return bitset.get(useIndex);
+    }
+
+    /*
+     * Since the BitMap is implemented by a collection of BitSets, return
+     * the one which covers the numeric range for this index.
+     *
+     * @param index the bit we want to access
+     * @param allowCreate if true, return the BitSet that would hold this
+     * index even if it wasn't previously set. If false, return null
+     * if the bit has not been set.
+     */
+    private BitSet getBitSet(long index, boolean allowCreate) {
+
+        Long segmentId = Long.valueOf(index >> SEGMENT_SIZE);
+
+        BitSet bitset = bitSegments.get(segmentId);
+        if (allowCreate) {
+            if (bitset == null) {
+                bitset = new BitSet();
+                bitSegments.put(segmentId, bitset);
+            }
+        }
+
+        return bitset;
+    }
+
+    private int getIntIndex(long index) {
+        return (int) (index & SEGMENT_MASK);
+    }
+
+    /* For unit testing. */
+    int getNumSegments() {
+        return bitSegments.size();
+    }
+
+    /*
+     * Currently for unit testing, though note that java.util.BitSet does
+     * support cardinality().
+     */
+    int cardinality() {
+        int count = 0;
+        Iterator<BitSet> iter = bitSegments.values().iterator();
+        while (iter.hasNext()) {
+            BitSet b = iter.next();
+            count += b.cardinality();
+        }
+        return count;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/CmdUtil.java b/src/com/sleepycat/je/utilint/CmdUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..b58ab5be472a2fc91da1eb7b68749898c3df3231
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/CmdUtil.java
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CmdUtil.java,v 1.28.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Convenience methods for command line utilities.
+ */
+public class CmdUtil {
+    public static String getArg(String[] argv, int whichArg)
+        throws IllegalArgumentException {
+
+        if (whichArg < argv.length) {
+            return argv[whichArg];
+        } else {
+            throw new IllegalArgumentException();
+        }
+    }
+
+    /**
+     * Parse a string into a long. If the string starts with 0x, this is a hex
+     * number, else it's decimal.
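+     * For example, readLongNumber("0x10") and readLongNumber("16") both
+     * return 16.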
+     */
+    public static long readLongNumber(String longVal) {
+        if (longVal.startsWith("0x")) {
+            return Long.parseLong(longVal.substring(2), 16);
+        } else {
+            return Long.parseLong(longVal);
+        }
+    }
+
+    private static final String printableChars =
+	"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+	"[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~";
+
+    public static void formatEntry(StringBuffer sb,
+                                   byte[] entryData,
+                                   boolean formatUsingPrintable) {
+	for (int i = 0; i < entryData.length; i++) {
+	    int b = entryData[i] & 0xff;
+	    if (formatUsingPrintable) {
+		if (isPrint(b)) {
+		    if (b == 0134) {  /* backslash */
+			sb.append('\\');
+		    }
+		    sb.append(printableChars.charAt(b - 33));
+		} else {
+		    sb.append('\\');
+		    String hex = Integer.toHexString(b);
+		    if (b < 16) {
+			sb.append('0');
+		    }
+		    sb.append(hex);
+		}
+	    } else {
+		String hex = Integer.toHexString(b);
+		if (b < 16) {
+		    sb.append('0');
+		}
+		sb.append(hex);
+	    }
+	}
+    }
+
+    private static boolean isPrint(int b) {
+	return (b < 0177) && (040 < b);
+    }
+
+    /**
+     * Create an environment suitable for utilities. Utilities should in
+     * general send trace output to the console and not to the db log.
+     */
+    public static EnvironmentImpl makeUtilityEnvironment(File envHome,
+							 boolean readOnly)
+        throws DatabaseException {
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setReadOnly(readOnly);
+
+        /* Don't debug log to the database log. */
+        config.setConfigParam(EnvironmentParams.JE_LOGGING_DBLOG.getName(),
+			      "false");
+        /* Do debug log to the console. */
+        config.setConfigParam(EnvironmentParams.JE_LOGGING_CONSOLE.getName(),
+			      "true");
+
+        /* Set logging level to only show errors. */
+        config.setConfigParam(EnvironmentParams.JE_LOGGING_LEVEL.getName(),
+			      "SEVERE");
+
+        /* Don't run recovery. */
+        config.setConfigParam(EnvironmentParams.ENV_RECOVERY.getName(),
+			      "false");
+
+	EnvironmentImpl envImpl =
+            new EnvironmentImpl(envHome,
+                                config,
+                                null,   // sharedCacheEnv
+                                false); // replicationIntended
+	return envImpl;
+    }
+
+    /**
+     * Returns a description of the java command for running a utility, without
+     * arguments.  For utilities, the last part of the class name can be
+     * specified when "-jar je.jar" is used.
+     */
+    public static String getJavaCommand(Class<?> cls) {
+
+        String clsName = cls.getName();
+        String lastName = clsName.substring(clsName.lastIndexOf('.') + 1);
+
+        return "java { " + cls.getName() + " | -jar je-<version>.jar " + lastName + " }";
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/DaemonRunner.java b/src/com/sleepycat/je/utilint/DaemonRunner.java
new file mode 100644
index 0000000000000000000000000000000000000000..9ca36edae7b9dc32dddfab30c78fbf034999ad59
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/DaemonRunner.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DaemonRunner.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import com.sleepycat.je.ExceptionListener;
+
+/**
+ * An object capable of running (run/pause/shutdown/etc) a daemon thread.
+ * See DaemonThread for details.
+ */
+public interface DaemonRunner {
+    void setExceptionListener(ExceptionListener exceptionListener);
+    void runOrPause(boolean run);
+    void requestShutdown();
+    void shutdown();
+    int getNWakeupRequests();
+}
diff --git a/src/com/sleepycat/je/utilint/DaemonThread.java b/src/com/sleepycat/je/utilint/DaemonThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..beb45b2f2a9093015a761eaf22845d0bf02192c1
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/DaemonThread.java
@@ -0,0 +1,265 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DaemonThread.java,v 1.64.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.ExceptionListener;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.RunRecoveryException;
+
+/**
+ * A daemon thread.
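+ *
+ * Subclasses implement onWakeup to do one unit of work each time the thread
+ * wakes up.  A minimal sketch (the subclass name is illustrative):
+ *
+ * <pre>
+ *    class CleanerDaemon extends DaemonThread {
+ *        CleanerDaemon(long waitTime, EnvironmentImpl envImpl) {
+ *            super(waitTime, "CleanerDaemon", envImpl);
+ *        }
+ *        protected void onWakeup() throws DatabaseException {
+ *            // do one unit of work
+ *        }
+ *    }
+ * </pre>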
+ */
+public abstract class DaemonThread implements DaemonRunner, Runnable {
+    private static final int JOIN_MILLIS = 10;
+    private long waitTime;
+    private Object synchronizer = new Object();
+    private Thread thread;
+    private ExceptionListener exceptionListener;
+    protected String name;
+    protected int nWakeupRequests;
+    protected boolean stifleExceptionChatter = false;
+
+    /* Fields shared between threads must be 'volatile'. */
+    private volatile boolean shutdownRequest = false;
+    private volatile boolean paused = false;
+
+    /* This is not volatile because it is only an approximation. */
+    private boolean running = false;
+
+    /* Fields for DaemonErrorListener, enabled only during testing. */
+    private EnvironmentImpl envImpl;
+    private static final String ERROR_LISTENER = "setErrorListener";
+
+    public DaemonThread(long waitTime, String name, EnvironmentImpl envImpl) {
+        this.waitTime = waitTime;
+        this.name = name;
+        this.envImpl = envImpl;
+    }
+
+    public void setExceptionListener(ExceptionListener exceptionListener) {
+        this.exceptionListener = exceptionListener;
+    }
+
+    /**
+     * For testing.
+     */
+    public ExceptionListener getExceptionListener() {
+        return exceptionListener;
+    }
+
+    /**
+     * For testing.
+     */
+    public Thread getThread() {
+        return thread;
+    }
+
+    /**
+     * If run is true, starts the thread if not started or unpauses it
+     * if already started; if run is false, pauses the thread if
+     * started or does nothing if not started.
+     */
+    public void runOrPause(boolean run) {
+        if (run) {
+            paused = false;
+            if (thread != null) {
+                wakeup();
+            } else {
+                thread = new Thread(this, name);
+                thread.setDaemon(true);
+                thread.start();
+            }
+        } else {
+            paused = true;
+        }
+    }
+
+    public void requestShutdown() {
+	shutdownRequest = true;
+    }
+
+    /**
+     * Requests shutdown and calls join() to wait for the thread to stop.
+     */
+    public void shutdown() {
+        if (thread != null) {
+            shutdownRequest = true;
+            while (thread.isAlive()) {
+                synchronized (synchronizer) {
+                    synchronizer.notifyAll();
+                }
+                try {
+                    thread.join(JOIN_MILLIS);
+                } catch (InterruptedException e) {
+
+		    /*
+		     * Klockwork - ok
+		     * Don't say anything about exceptions here.
+		     */
+		}
+            }
+            thread = null;
+        }
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("<DaemonThread name=\"").append(name).append("\"/>");
+        return sb.toString();
+    }
+
+    public void wakeup() {
+        if (!paused) {
+            synchronized (synchronizer) {
+                synchronizer.notifyAll();
+            }
+        }
+    }
+
+    public void run() {
+        while (!shutdownRequest) {
+            try {
+                /* Do a unit of work. */
+                int numTries = 0;
+                long maxRetries = nDeadlockRetries();
+                while (numTries <= maxRetries &&
+                       !shutdownRequest &&
+                       !paused) {
+                    try {
+                        nWakeupRequests++;
+                        running = true;
+                        onWakeup();
+                        break;
+                    } catch (DeadlockException e) {
+                    } finally {
+                        running = false;
+                    }
+                    numTries++;
+                }
+                /* Wait for notify, timeout or interrupt. */
+                if (!shutdownRequest) {
+                    synchronized (synchronizer) {
+                        if (waitTime == 0 || paused) {
+                            synchronizer.wait();
+                        } else {
+                            synchronizer.wait(waitTime);
+                        }
+                    }
+                }
+            } catch (InterruptedException IE) {
+                if (exceptionListener != null) {
+                    exceptionListener.exceptionThrown
+                        (DbInternal.makeExceptionEvent(IE, name));
+                }
+		if (!stifleExceptionChatter) {
+		    System.err.println
+			("Shutting down " + this + " due to exception: " + IE);
+		}
+                shutdownRequest = true;
+
+		assert checkErrorListener(IE);
+            } catch (Exception E) {
+                if (exceptionListener != null) {
+                    exceptionListener.exceptionThrown
+                        (DbInternal.makeExceptionEvent(E, name));
+                }
+		if (!stifleExceptionChatter) {
+		    System.err.println(this + " caught exception: " + E);
+		    E.printStackTrace(System.err);
+
+                    /*
+                     * If the exception caused the environment to become
+                     * invalid, then shutdownRequest will have been set to true
+                     * by EnvironmentImpl.invalidate, which is called by the
+                     * RunRecoveryException ctor.
+                     */
+                    System.err.println
+                        (shutdownRequest ? "Exiting" : "Continuing");
+                }
+
+		assert checkErrorListener(E);
+	    } catch (Error ERR) {
+                assert checkErrorListener(ERR);
+                throw ERR;
+            }
+        }
+    }
+
+    /*
+     * If the daemon thread throws an error or exception, this method (called
+     * from the catch blocks above, under an assert) reports it and
+     * instantiates a RunRecoveryException to invalidate the environment and
+     * fail the test.  It takes effect only when the "setErrorListener"
+     * system property is set, i.e. during testing.
+     */
+    public boolean checkErrorListener(Throwable t) {
+        if (Boolean.getBoolean(ERROR_LISTENER)) {
+            System.err.println(name + " " + Tracer.getStackTrace(t));
+
+	    /*
+	     * We don't throw the RunRecoveryException; we only instantiate
+	     * it.  Instantiation alone is enough to invalidate the
+	     * environment, and not throwing it avoids changing the signature
+	     * of the run() method.
+	     */
+            new RunRecoveryException(envImpl, "Daemon Thread Failed");
+	}
+
+	return true;
+    }
+
+    /**
+     * Returns the number of retries to perform when Deadlock Exceptions
+     * occur.
+     */
+    protected long nDeadlockRetries()
+        throws DatabaseException {
+
+        return 0;
+    }
+
+    /**
+     * onWakeup implementations are synchronized to ensure that multiple
+     * concurrent invocations of the DaemonThread's work aren't made.
+     */
+    abstract protected void onWakeup()
+        throws DatabaseException;
+
+    /**
+     * Returns whether shutdown has been requested.  This method should be
+     * used to terminate daemon loops.
+     */
+    protected boolean isShutdownRequested() {
+        return shutdownRequest;
+    }
+
+    /**
+     * Returns whether the daemon is currently paused/disabled.  This method
+     * should be used to terminate daemon loops.
+     */
+    protected boolean isPaused() {
+        return paused;
+    }
+
+    /**
+     * Returns whether the onWakeup method is currently executing.  This is
+     * only an approximation and is used to avoid unnecessary wakeups.
+     */
+    public boolean isRunning() {
+        return running;
+    }
+
+    /**
+     * For unit testing.
+     */
+    public int getNWakeupRequests() {
+        return nWakeupRequests;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/DatabaseUtil.java b/src/com/sleepycat/je/utilint/DatabaseUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..fe8335dc2f4245dd41efdcf69dab9e31dcaa931e
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/DatabaseUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseUtil.java,v 1.4.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Utils for use in the db package.
+ */
+public class DatabaseUtil {
+
+    /**
+     * Throw an exception if the parameter is null.
+     */
+    static public void checkForNullParam(Object param, String name) {
+        if (param == null) {
+            throw new NullPointerException(name + " cannot be null");
+        }
+    }
+
+    /**
+     * Throw an exception if the dbt is null or the data field is not set.
+     */
+    static public void checkForNullDbt(DatabaseEntry dbt,
+				String name,
+				boolean checkData) {
+        if (dbt == null) {
+            throw new NullPointerException
+		("DatabaseEntry " + name + " cannot be null");
+        }
+
+        if (checkData) {
+            if (dbt.getData() == null) {
+                throw new NullPointerException
+		    ("Data field for DatabaseEntry " +
+		     name + " cannot be null");
+            }
+        }
+    }
+
+    /**
+     * Throw an exception if the key dbt has the partial flag set.  This method
+     * should be called for all put() operations.
+     */
+    static public void checkForPartialKey(DatabaseEntry dbt) {
+        if (dbt.getPartial()) {
+            throw new IllegalArgumentException
+		("A partial key DatabaseEntry is not allowed");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/DbLsn.java b/src/com/sleepycat/je/utilint/DbLsn.java
new file mode 100644
index 0000000000000000000000000000000000000000..291f9c0ca3c5207a75b819875ed865395a4e938f
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/DbLsn.java
@@ -0,0 +1,185 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbLsn.java,v 1.56.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.Arrays;
+
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.TreeUtils;
+
+/**
+ * DbLsn is a class that operates on Log Sequence Numbers (LSNs). An LSN is a
+ * long composed of a file number (32 bits) and an offset within that file
+ * (32 bits), which together reference a unique record in the database
+ * environment log.  While LSNs are represented as longs, we operate on them
+ * through this abstraction so that we don't have to worry about Java's lack
+ * of unsigned quantities.
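+ *
+ * For example:
+ *
+ * <pre>
+ *    long lsn = DbLsn.makeLsn(3, 0x1a2b);    // file 3, offset 0x1a2b
+ *    DbLsn.getFileNumber(lsn);               // returns 3
+ *    DbLsn.getFileOffset(lsn);               // returns 0x1a2b
+ * </pre>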
+ */
+public class DbLsn {
+    static final long INT_MASK = 0xFFFFFFFFL;
+
+    public static final long MAX_FILE_OFFSET = 0xFFFFFFFFL;
+
+    public static final long NULL_LSN = -1;
+
+    private DbLsn() {
+    }
+
+    public static long makeLsn(long fileNumber, long fileOffset) {
+	return (fileOffset & INT_MASK) |
+	    ((fileNumber & INT_MASK) << 32);
+    }
+
+    public static long longToLsn(Long lsn) {
+	if (lsn == null) {
+	    return NULL_LSN;
+	}
+
+	return lsn.longValue();
+    }
+
+    /**
+     * Return the file number for this DbLsn.
+     * @return the number for this DbLsn.
+     */
+    public static long getFileNumber(long lsn) {
+	return (lsn >> 32) & INT_MASK;
+    }
+
+    /**
+     * Return the file offset for this DbLsn.
+     * @return the offset for this DbLsn.
+     */
+    public static long getFileOffset(long lsn) {
+	return (lsn & INT_MASK);
+    }
+
+    private static int compareLong(long l1, long l2) {
+	if (l1 < l2) {
+	    return -1;
+	} else if (l1 > l2) {
+	    return 1;
+	} else {
+	    return 0;
+	}
+    }
+
+    public static int compareTo(long lsn1, long lsn2) {
+	if (lsn1 == NULL_LSN ||
+	    lsn2 == NULL_LSN) {
+	    throw new NullPointerException("lsn1=" + lsn1 +
+                                           " lsn2=" + lsn2);
+	}
+
+        long fileNumber1 = getFileNumber(lsn1);
+        long fileNumber2 = getFileNumber(lsn2);
+        if (fileNumber1 == fileNumber2) {
+            return compareLong(getFileOffset(lsn1), getFileOffset(lsn2));
+        } else {
+            return compareLong(fileNumber1, fileNumber2);
+        }
+    }
+
+    public static String toString(long lsn) {
+	return "<DbLsn val=\"0x" +
+            Long.toHexString(getFileNumber(lsn)) +
+            "/0x" +
+            Long.toHexString(getFileOffset(lsn)) +
+	    "\"/>";
+    }
+
+    public static String getNoFormatString(long lsn) {
+        return "0x" + Long.toHexString(getFileNumber(lsn)) + "/0x" +
+            Long.toHexString(getFileOffset(lsn));
+    }
+
+    public static String dumpString(long lsn, int nSpaces) {
+        StringBuffer sb = new StringBuffer();
+        sb.append(TreeUtils.indent(nSpaces));
+        sb.append(toString(lsn));
+        return sb.toString();
+    }
+
+    /**
+     * Return the log size in bytes between these two LSNs. This is an
+     * approximation; the logs might actually be a little more or less in
+     * size. This assumes that no log files have been cleaned.
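+     *
+     * For example, with a log file size of 1,000,000 bytes, the distance
+     * between LSNs 3/0x100 and 5/0x50 is (2 * 1,000,000) + 0x50 - 0x100.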
+     */
+    public static long getNoCleaningDistance(long thisLsn,
+					     long otherLsn,
+					     long logFileSize) {
+        long diff = 0;
+
+	assert thisLsn != NULL_LSN;
+        /* First figure out how many files lie between the two. */
+        long myFile = getFileNumber(thisLsn);
+	if (otherLsn == NULL_LSN) {
+	    otherLsn = 0;
+	}
+        long otherFile = getFileNumber(otherLsn);
+        if (myFile == otherFile) {
+            diff = Math.abs(getFileOffset(thisLsn) - getFileOffset(otherLsn));
+        } else if (myFile > otherFile) {
+            diff = calcDiff(myFile - otherFile,
+                            logFileSize, thisLsn, otherLsn);
+        } else {
+            diff = calcDiff(otherFile - myFile,
+                            logFileSize, otherLsn, thisLsn);
+        }
+        return diff;
+    }
+
+    /**
+     * Return the log size in bytes between these two LSNs. This is an
+     * approximation; the logs might actually be a little more or less in
+     * size. This accounts for log files that might have been cleaned.
+     */
+    public static long getWithCleaningDistance(long thisLsn,
+					       FileManager fileManager,
+					       long otherLsn,
+					       long logFileSize) {
+        long diff = 0;
+
+	assert thisLsn != NULL_LSN;
+        /* First figure out how many files lie between the two. */
+        long myFile = getFileNumber(thisLsn);
+	if (otherLsn == NULL_LSN) {
+	    otherLsn = 0;
+	}
+        long otherFile = getFileNumber(otherLsn);
+        if (myFile == otherFile) {
+            diff = Math.abs(getFileOffset(thisLsn) - getFileOffset(otherLsn));
+        } else {
+            /* Figure out how many files lie between. */
+            Long[] fileNums = fileManager.getAllFileNumbers();
+            int myFileIdx = Arrays.binarySearch(fileNums,
+                                                Long.valueOf(myFile));
+            int otherFileIdx =
+                Arrays.binarySearch(fileNums, Long.valueOf(otherFile));
+            if (myFileIdx > otherFileIdx) {
+                diff = calcDiff(myFileIdx - otherFileIdx,
+                                logFileSize, thisLsn, otherLsn);
+            } else {
+                diff = calcDiff(otherFileIdx - myFileIdx,
+                                logFileSize, otherLsn, thisLsn);
+            }
+        }
+        return diff;
+    }
+
+    private static long calcDiff(long fileDistance,
+				 long logFileSize,
+				 long laterLsn,
+				 long earlierLsn) {
+        long diff = fileDistance * logFileSize;
+        diff += getFileOffset(laterLsn);
+        diff -= getFileOffset(earlierLsn);
+        return diff;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/EventTrace.java b/src/com/sleepycat/je/utilint/EventTrace.java
new file mode 100644
index 0000000000000000000000000000000000000000..03d3346a48f3724a2827dc357fedc7f4fa8118e8
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/EventTrace.java
@@ -0,0 +1,332 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EventTrace.java,v 1.16.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.PrintStream;
+
+/**
+ * Internal class used for transient event tracing.  Subclass this with
+ * specific events.  Subclasses should have toString methods for display and
+ * events should be added by calling EventTrace.addEvent();
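+ *
+ * For example, within an assert so that tracing can be compiled out (the
+ * message text is illustrative):
+ *
+ * <pre>
+ *    assert EventTrace.addEvent("about to evict node");
+ * </pre>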
+ */
+public class EventTrace {
+    private static int MAX_EVENTS = 100;
+
+    public static final boolean TRACE_EVENTS = false;
+
+    static int currentEvent = 0;
+
+    static final EventTrace[] events = new EventTrace[MAX_EVENTS];
+    static final int[] threadIdHashes = new int[MAX_EVENTS];
+    static boolean disableEvents = false;
+
+    protected String comment;
+
+    public EventTrace(String comment) {
+	this.comment = comment;
+    }
+
+    public EventTrace() {
+	comment = null;
+    }
+
+    @Override
+    public String toString() {
+	return comment;
+    }
+
+    /**
+     * Always return true so this method can be used with asserts:
+     * i.e. assert addEvent(xxx);
+     */
+    public static boolean addEvent(EventTrace event) {
+	if (disableEvents) {
+	    return true;
+	}
+	int nextEventIdx = currentEvent++ % MAX_EVENTS;
+	events[nextEventIdx] = event;
+	threadIdHashes[nextEventIdx] =
+	    System.identityHashCode(Thread.currentThread());
+        return true;
+    }
+
+    /**
+     * Always return true so this method can be used with asserts:
+     * i.e. assert addEvent(xxx);
+     */
+    public static boolean addEvent(String comment) {
+	if (disableEvents) {
+	    return true;
+	}
+	return addEvent(new EventTrace(comment));
+    }
+
+    public static void dumpEvents() {
+	dumpEvents(System.out);
+    }
+
+    public static void dumpEvents(PrintStream out) {
+
+	if (disableEvents) {
+	    return;
+	}
+	out.println("----- Event Dump -----");
+	EventTrace[] oldEvents = events;
+	int[] oldThreadIdHashes = threadIdHashes;
+	disableEvents = true;
+
+	int j = 0;
+	for (int i = currentEvent; j < MAX_EVENTS; i++) {
+	    EventTrace ev = oldEvents[i % MAX_EVENTS];
+	    if (ev != null) {
+		int thisEventIdx = i % MAX_EVENTS;
+		out.print(oldThreadIdHashes[thisEventIdx] + " ");
+		out.println(j + "(" + thisEventIdx + "): " + ev);
+	    }
+	    j++;
+	}
+    }
+
+    public static class ExceptionEventTrace extends EventTrace {
+	private Exception event;
+
+	public ExceptionEventTrace() {
+            event = new Exception();
+	}
+
+        @Override
+	public String toString() {
+            return Tracer.getStackTrace(event);
+	}
+    }
+}
+
+    /*
+    public static class EvictEvent extends EventTrace {
+	long nodeId;
+	int addr;
+
+	public EvictEvent(String comment, long nodeId, int addr) {
+	    super(comment);
+	    this.nodeId = nodeId;
+	    this.addr = addr;
+	}
+
+	public static void addEvent(String comment, IN node) {
+	    long nodeId = node.getNodeId();
+	    int addr = System.identityHashCode(node);
+	    EventTrace.addEvent(new EvictEvent(comment, nodeId, addr));
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer(comment);
+	    sb.append(" IN: ").append(nodeId);
+	    sb.append(" sIH ").append(addr);
+	    return sb.toString();
+	}
+    }
+
+    public static class CursorTrace extends EventTrace {
+	long nodeId;
+	int index;
+
+	public CursorTrace(String comment, long nodeId, int index) {
+	    super(comment);
+	    this.nodeId = nodeId;
+	    this.index = index;
+	}
+
+	public static void addEvent(String comment, CursorImpl cursor) {
+	    long nodeId =
+		(cursor.getBIN() == null) ? -1 : cursor.getBIN().getNodeId();
+	    EventTrace.addEvent
+		(new CursorTrace(comment, nodeId, cursor.getIndex()));
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer(comment);
+	    sb.append(" BIN: ").append(nodeId);
+	    sb.append(" idx: ").append(index);
+	    return sb.toString();
+	}
+    }
+    */
+
+
+/*
+    class CursorEventTrace extends EventTrace {
+	private String comment;
+	private Node node1;
+	private Node node2;
+
+	CursorEventTrace(String comment, Node node1, Node node2) {
+	    this.comment = comment;
+	    this.node1 = node1;
+	    this.node2 = node2;
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer(comment);
+	    if (node1 != null) {
+		sb.append(" ");
+		sb.append(node1.getNodeId());
+	    }
+	    if (node2 != null) {
+		sb.append(" ");
+		sb.append(node2.getNodeId());
+	    }
+	    return sb.toString();
+	}
+    }
+
+*/
+/*
+
+    static class UndoEventTrace extends EventTrace {
+	private String comment;
+	private boolean success;
+	private Node node;
+	private DbLsn logLsn;
+	private Node parent;
+	private boolean found;
+	private boolean replaced;
+	private boolean inserted;
+	private DbLsn replacedLsn;
+	private DbLsn abortLsn;
+	private int index;
+
+	UndoEventTrace(String comment) {
+	    this.comment = comment;
+	}
+
+	UndoEventTrace(boolean success,
+		       Node node,
+		       DbLsn logLsn,
+		       Node parent,
+		       boolean found,
+		       boolean replaced,
+		       boolean inserted,
+		       DbLsn replacedLsn,
+		       DbLsn abortLsn,
+		       int index) {
+	    this.comment = null;
+	    this.success = success;
+	    this.node = node;
+	    this.logLsn = logLsn;
+	    this.parent = parent;
+	    this.found = found;
+	    this.replaced = replaced;
+	    this.inserted = inserted;
+	    this.replacedLsn = replacedLsn;
+	    this.abortLsn = abortLsn;
+	    this.index = index;
+	}
+
+	public String toString() {
+	    if (comment != null) {
+		return comment;
+	    }
+	    StringBuffer sb = new StringBuffer();
+            sb.append(" success=").append(success);
+            sb.append(" node=");
+            sb.append(node.getNodeId());
+            sb.append(" logLsn=");
+            sb.append(logLsn.getNoFormatString());
+            if (parent != null) {
+                sb.append(" parent=").append(parent.getNodeId());
+            }
+            sb.append(" found=");
+            sb.append(found);
+            sb.append(" replaced=");
+            sb.append(replaced);
+            sb.append(" inserted=");
+            sb.append(inserted);
+            if (replacedLsn != null) {
+                sb.append(" replacedLsn=");
+                sb.append(replacedLsn.getNoFormatString());
+            }
+            if (abortLsn != null) {
+                sb.append(" abortLsn=");
+                sb.append(abortLsn.getNoFormatString());
+            }
+            sb.append(" index=").append(index);
+	    return sb.toString();
+	}
+    }
+ */
+/*
+    class CursorAdjustEventTrace extends EventTrace {
+	private int insertIndex;
+	private int cursorIndex;
+	private long nodeid;
+
+	CursorAdjustEventTrace(int insertIndex, int cursorIndex) {
+	    this.insertIndex = insertIndex;
+	    this.cursorIndex = cursorIndex;
+	    this.nodeid = getNodeId();
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer("cursor adjust ");
+	    sb.append(insertIndex).append(" ");
+	    sb.append(cursorIndex).append(" ");
+	    sb.append(nodeid);
+	    return sb.toString();
+	}
+    }
+
+*/
+/*
+    class CompressEventTrace extends EventTrace {
+	private int entryIndex;
+	private long nodeid;
+
+	CompressEventTrace(int entryIndex) {
+	    this.entryIndex = entryIndex;
+	    this.nodeid = getNodeId();
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer("bin compress ");
+	    sb.append(entryIndex).append(" ");
+	    sb.append(nodeid);
+	    return sb.toString();
+	}
+    }
+
+*/
+/*
+    class TreeEventTrace extends EventTrace {
+	private String comment;
+	private Node node1;
+	private Node node2;
+
+	TreeEventTrace(String comment, Node node1, Node node2) {
+	    this.comment = comment;
+	    this.node1 = node1;
+	    this.node2 = node2;
+	}
+
+	public String toString() {
+	    StringBuffer sb = new StringBuffer(comment);
+	    if (node1 != null) {
+		sb.append(" ");
+		sb.append(node1.getNodeId());
+	    }
+	    if (node2 != null) {
+		sb.append(" ");
+		sb.append(node2.getNodeId());
+	    }
+	    return sb.toString();
+	}
+    }
+
+*/
+
+
diff --git a/src/com/sleepycat/je/utilint/FileMapper.java b/src/com/sleepycat/je/utilint/FileMapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2214961ccd2e5da244df7a5438458b28832f18a
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/FileMapper.java
@@ -0,0 +1,242 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileMapper.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * A FileMapper instance represents the VLSN->LSN mappings for a single log
+ * file. There are persistent FileMappers that are stored in the log, and
+ * temporary instances that are used for collecting mappings found in the
+ * log during recovery.
+ *
+ * Note that we only need to store the file offset portion of the LSN
+ * persistently on disk, because the file number is self-evident. We still
+ * need to use longs in memory to represent the offset, since the file offset
+ * is an unsigned int.
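+ *
+ * A minimal usage sketch (vlsnSeq, offset and entryType are illustrative):
+ *
+ * <pre>
+ *    FileMapper mapper = new FileMapper(3);
+ *    mapper.putLSN(vlsnSeq, DbLsn.makeLsn(3, offset), entryType);
+ *    long lsn = mapper.getLSN(vlsnSeq);    // 3/offset
+ * </pre>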
+ */
+public class FileMapper {
+
+    /* File number for target file. */
+    private long fileNumber;
+
+    /* 
+     * The last VLSN in this file that can be a sync matchpoint. Used at
+     * recovery time, to transfer information from the recovery scan of the log
+     * to initialize the VLSNIndex.
+     */
+    private VLSN lastSyncVLSN;
+
+    /*
+     * The last VLSN in this file which is a replicated commit record. Akin
+     * to lastSyncVLSN, but used specifically to determine if a syncup is
+     * rolling back past a committed txn, and therefore whether the syncup
+     * needs to be a hard recovery, or can just be a soft partial rollback.
+     */
+    private VLSN lastCommitVLSN;
+
+    /*
+     * The file offset is really an unsigned int on disk, but must be
+     * represented as a long in Java.
+     */
+    private Map<Long,Long> vlsnToFileOffsetMap;
+
+    /*
+     * True if there are changes to vlsnToFileOffsetMap that are not on
+     * disk.
+     */
+    private boolean dirty;
+
+    public FileMapper(long fileNumber) {
+        this.fileNumber = fileNumber;
+        this.vlsnToFileOffsetMap = new HashMap<Long,Long>();
+        lastSyncVLSN = VLSN.NULL_VLSN;
+        lastCommitVLSN = VLSN.NULL_VLSN;
+    }
+
+    /* For reading from disk */
+    private FileMapper() {
+    }
+
+    public void setFileNumber(long fileNumber) {
+        this.fileNumber = fileNumber;
+    }
+
+    public long getFileNumber() {
+        return fileNumber;
+    }
+
+    public VLSN getLastSyncVLSN() {
+        return lastSyncVLSN;
+    }
+
+    public VLSN getLastCommitVLSN() {
+        return lastCommitVLSN;
+    }
+
+    public void writeToDatabase(Database fileMapperDb)
+        throws DatabaseException {
+
+        if (dirty) {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            LongBinding.longToEntry(fileNumber, key);
+            FileMapperBinding mapperBinding =
+                new FileMapperBinding();
+            mapperBinding.objectToEntry(this, data);
+            OperationStatus status = fileMapperDb.put(null, key, data);
+            if (status != OperationStatus.SUCCESS) {
+                throw new DatabaseException
+                    ("Unable to write VLSN mapping" +
+                     " for file " + fileNumber +
+                     " status=" + status);
+            }
+            dirty = false;
+        }
+    }
+
+    /*
+     * Initialize this from the database. Assumes that there are no
+     * mappings currently stored.
+     */
+    public static FileMapper readFromDatabase(DatabaseEntry data) {
+        FileMapperBinding mapperBinding = new FileMapperBinding();
+        FileMapper mapper = (FileMapper)mapperBinding.entryToObject(data);
+        return mapper;
+    }
+
+    /** Record the LSN location for this VLSN. */
+    public void putLSN(long vlsn, 
+                       long lsn, 
+                       LogEntryType entryType) {
+
+        assert DbLsn.getFileNumber(lsn) == fileNumber:
+            "unexpected lsn file num=" +  DbLsn.getFileNumber(lsn) +
+            " while file mapper file number=" + fileNumber;
+
+        vlsnToFileOffsetMap.put(vlsn, DbLsn.getFileOffset(lsn));
+        if (entryType.isSyncPoint()) {
+            VLSN thisVLSN = new VLSN(vlsn);
+            if (lastSyncVLSN.compareTo(thisVLSN) < 0) {
+                lastSyncVLSN = thisVLSN;
+            }
+        }
+
+        if (LogEntryType.LOG_TXN_COMMIT.equals(entryType)) {
+            VLSN thisVLSN = new VLSN(vlsn);
+            if (lastCommitVLSN.compareTo(thisVLSN) < 0) {
+                lastCommitVLSN = thisVLSN;
+            }
+        }
+
+        dirty = true;
+    }
+
+    /**
+     * Put all the VLSN->LSN mappings in the file mapper parameter into this
+     * one.
+     */
+    public void putAll(FileMapper other) {
+        assert other.fileNumber == fileNumber : "bad file number = " +
+            other.fileNumber;
+        vlsnToFileOffsetMap.putAll(other.vlsnToFileOffsetMap);
+
+        if (lastSyncVLSN.compareTo(other.lastSyncVLSN) < 0) {
+            lastSyncVLSN = other.lastSyncVLSN;
+        }
+
+        if (lastCommitVLSN.compareTo(other.lastCommitVLSN) < 0) {
+            lastCommitVLSN = other.lastCommitVLSN;
+        }
+
+        dirty = true;
+    }
+
+    /* Retrieve the LSN location for this VLSN. */
+    public long getLSN(long vlsn) {
+        return DbLsn.makeLsn(fileNumber, vlsnToFileOffsetMap.get(vlsn));
+    }
+
+    /**
+     * Individual mappings are removed if this VLSN is written more than
+     * once to the log, as might happen on some kind of replay.
+     */
+    public void removeLSN(long vlsn) {
+        vlsnToFileOffsetMap.remove(vlsn);
+        dirty = true;
+    }
+
+    /**
+     * Return the set of VLSNs in this mapper.
+     */
+    public Set<Long> getVLSNs() {
+        return vlsnToFileOffsetMap.keySet();
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("<FileMapper fileNumber=");
+        sb.append(fileNumber);
+        sb.append(" lastSync=").append(lastSyncVLSN);
+        sb.append(" lastCommit=").append(lastCommitVLSN).append(" ");
+        sb.append(vlsnToFileOffsetMap);
+        sb.append("/>");
+        return sb.toString();
+    }
+
+    /**
+     * Marshals a FileMapper to a byte buffer to store in the database.
+     * Doesn't persist the file number, because that's the key of the database.
+     * TODO: use packed numbers for the map in HA release.
+     */
+    private static class FileMapperBinding extends TupleBinding<FileMapper> {
+
+        public FileMapper entryToObject(TupleInput ti) {
+            FileMapper mapper = new FileMapper();
+            mapper.lastSyncVLSN = new VLSN(ti.readPackedLong());
+            mapper.lastCommitVLSN = new VLSN(ti.readPackedLong());
+
+            mapper.vlsnToFileOffsetMap = new HashMap<Long,Long>();
+            int nEntries = ti.readInt();
+            for (int i = 0; i < nEntries; i++) {
+                long vlsnSeq = ti.readLong();
+                long fileOffset = ti.readUnsignedInt();
+                mapper.vlsnToFileOffsetMap.put(vlsnSeq, fileOffset);
+            }
+            return mapper;
+        }
+
+        public void objectToEntry(FileMapper mapper, TupleOutput to) {
+            to.writePackedLong(mapper.lastSyncVLSN.getSequence());
+            to.writePackedLong(mapper.lastCommitVLSN.getSequence());
+
+            int nEntries = mapper.vlsnToFileOffsetMap.size();
+            to.writeInt(nEntries);
+            for (Map.Entry<Long,Long> entry : 
+                     mapper.vlsnToFileOffsetMap.entrySet()) {
+                to.writeLong(entry.getKey());
+                to.writeUnsignedInt(entry.getValue());
+            }
+        }
+    }
+}
+
diff --git a/src/com/sleepycat/je/utilint/HexFormatter.java b/src/com/sleepycat/je/utilint/HexFormatter.java
new file mode 100644
index 0000000000000000000000000000000000000000..436dd88648f3b0e4c1e139a33a22e8771988e515
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/HexFormatter.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: HexFormatter.java,v 1.13.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
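+/**
+ * Formats a long as a fixed-width, zero-padded hex string.  For example,
+ * formatLong(255) returns "0x00000000000000ff".
+ */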
+public class HexFormatter {
+    static public String formatLong(long l) {
+	StringBuffer sb = new StringBuffer();
+	sb.append(Long.toHexString(l));
+	sb.insert(0, "0000000000000000".substring(0, 16 - sb.length()));
+	sb.insert(0, "0x");
+	return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/InternalException.java b/src/com/sleepycat/je/utilint/InternalException.java
new file mode 100644
index 0000000000000000000000000000000000000000..e86b0f847b70f06081379b0fe7e43bdb1fc8e5fd
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/InternalException.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: InternalException.java,v 1.19.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Some internal inconsistency exception.
+ */
+public class InternalException extends DatabaseException {
+
+    public InternalException() {
+	super();
+    }
+
+    public InternalException(String message) {
+	super(message);
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/JarMain.java b/src/com/sleepycat/je/utilint/JarMain.java
new file mode 100644
index 0000000000000000000000000000000000000000..afb36379e6bad87ddcf279a731a8939dc93f110e
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/JarMain.java
@@ -0,0 +1,53 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JarMain.java,v 1.7.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.lang.reflect.Method;
+
+/**
+ * Used as the main class for the manifest of the je.jar file, and so it is
+ * executed when running: java -jar je.jar.  The first argument must be the
+ * final part of the class name of a utility in the com.sleepycat.je.util
+ * package, e.g., DbDump.  All following parameters are passed to the main
+ * method of the utility and are processed as usual.
+ *
+ * Apart from the package, this class is agnostic about the name of the
+ * utility specified; the only requirement is that it must be a public
+ * class and must contain a public static main method.
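+ *
+ * For example, to run the DbVerify utility (a sketch; see the utility's own
+ * usage message for its options):
+ *
+ * <pre>
+ *    java -jar je.jar DbVerify -h &lt;environment home&gt;
+ * </pre>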
+ */
+public class JarMain {
+
+    private static final String USAGE = "usage: java <utility> [options...]";
+    private static final String PREFIX = "com.sleepycat.je.util.";
+
+    public static void main(String[] args) {
+        try {
+            if (args.length < 1) {
+                usage("Missing utility name");
+            }
+            Class<?> cls = Class.forName(PREFIX + args[0]);
+
+            Method mainMethod = cls.getMethod
+                ("main", new Class[] { String[].class });
+
+            String[] mainArgs = new String[args.length - 1];
+            System.arraycopy(args, 1, mainArgs, 0, mainArgs.length);
+
+            mainMethod.invoke(null, new Object[] { mainArgs });
+        } catch (Throwable e) {
+            usage(e.toString());
+        }
+    }
+
+    private static void usage(String msg) {
+        System.err.println(msg);
+	System.err.println(USAGE);
+	System.exit(-1);
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/PropUtil.java b/src/com/sleepycat/je/utilint/PropUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e990b4b80407f579afcdd11e8edc9c50fca0467
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/PropUtil.java
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PropUtil.java,v 1.26.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.Enumeration;
+import java.util.Properties;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Convenience methods for handling JE properties.
+ */
+public class PropUtil {
+
+    /**
+     * @return true if the property is set to "true".
+     */
+    public static boolean getBoolean(Properties props, String propName) {
+        String value = props.getProperty(propName);
+        if ((value != null) && (value.equalsIgnoreCase("true"))) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Validate properties in the property bag. If null was passed, return an
+     * empty property object, else return the original property object.
+     *
+     * @throws DatabaseException if the property bag contains
+     * a property not specified in the set of allowed properties.
+     */
+    @SuppressWarnings("unchecked")
+    public static Properties validateProps(Properties props,
+                                           Set allowedProps,
+                                           String apiMethod)
+        throws DatabaseException {
+
+        if (props == null) {
+            return new Properties();
+        } else {
+            if (props.size() > 0) {
+                Enumeration e = props.propertyNames();
+                while (e.hasMoreElements()) {
+                    String propName = (String) e.nextElement();
+                    validateProp(propName, allowedProps, apiMethod);
+                }
+            }
+            return props;
+        }
+    }
+
+    /**
+     * @throws DatabaseException if the property is not valid.
+     */
+    @SuppressWarnings("unchecked")
+    public static void validateProp(String propName,
+                                    Set allowedProps,
+                                    String apiMethod)
+        throws DatabaseException {
+
+        if (!allowedProps.contains(propName)) {
+            throw new DatabaseException
+		(propName + " is not a valid property for " + apiMethod);
+        }
+    }
+
+    /**
+     * Convert microseconds to milliseconds, ensuring that any microsecond
+     * value greater than zero converts to at least one millisecond.  This
+     * avoids a zero millisecond result, since Object.wait(0) waits forever.
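+     *
+     * For example, microsToMillis(0) returns 0, microsToMillis(1) returns 1,
+     * and microsToMillis(2500) returns 3.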
+     */
+    public static long microsToMillis(long micros) {
+        return (micros + 999) / 1000;
+    }
+}
+
diff --git a/src/com/sleepycat/je/utilint/TestHook.java b/src/com/sleepycat/je/utilint/TestHook.java
new file mode 100644
index 0000000000000000000000000000000000000000..146789fd07628cf3b8c95476d6ccef7098d564ee
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/TestHook.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestHook.java,v 1.11.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.IOException;
+
+/**
+ * TestHook is used to induce testing behavior that can't be provoked externally.
+ * For example, unit tests may use hooks to throw IOExceptions, or to cause
+ * waiting behavior.
+ *
+ * To use this, a unit test should extend TestHook with a class that overrides
+ * the desired method. The code under test provides a method that lets the
+ * unit test install a hook, and executes the hook if it is non-null.
+ * This should be done within an assert, like so:
+ *
+ *    assert TestHookExecute.doHookIfSet(myTestHook);
+ *
+ * See Tree.java for examples.
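+ *
+ * For example, a test might install a hook along these lines (a minimal
+ * sketch; the setWaitHook method and latch are hypothetical):
+ *
+ *    tree.setWaitHook(new TestHook() {
+ *        public void hookSetup() {}
+ *        public void doIOHook() throws IOException {}
+ *        public void doHook() { latch.countDown(); }
+ *        public Object getHookValue() { return null; }
+ *    });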
+ */
+public interface TestHook {
+
+    public void hookSetup();
+
+    public void doIOHook()
+        throws IOException;
+
+    public void doHook();
+
+    public Object getHookValue();
+}
diff --git a/src/com/sleepycat/je/utilint/TestHookExecute.java b/src/com/sleepycat/je/utilint/TestHookExecute.java
new file mode 100644
index 0000000000000000000000000000000000000000..fabb6ae32b3e2cf8f33b9345942fd1b729869252
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/TestHookExecute.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestHookExecute.java,v 1.7.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+/**
+ * Execute a test hook if set. This wrapper is used so that test hook execution
+ * can be packaged into a single statement that can be done within an assert
+ * statement.
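+ *
+ * For example:
+ *
+ *    assert TestHookExecute.doHookIfSet(myTestHook);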
+ */
+public class TestHookExecute {
+
+    public static boolean doHookSetupIfSet(TestHook testHook) {
+        if (testHook != null) {
+            testHook.hookSetup();
+        }
+        return true;
+    }
+
+    public static boolean doHookIfSet(TestHook testHook) {
+        if (testHook != null) {
+            testHook.doHook();
+        }
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/TinyHashSet.java b/src/com/sleepycat/je/utilint/TinyHashSet.java
new file mode 100644
index 0000000000000000000000000000000000000000..58c23f657065606b4669234d53085b9ec0e967d4
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/TinyHashSet.java
@@ -0,0 +1,132 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TinyHashSet.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+/**
+ * TinyHashSet is used to optimize (for speed, not space) the case where a
+ * HashSet generally holds a single element.  This saves the cost of creating
+ * the HashSet and related elements, as well as the cost of calling
+ * Object.hashCode().
+ *
+ * If single != null, it's the only element in the TinyHashSet.  If set != null
+ * then there are multiple elements in the TinyHashSet.  It should never be
+ * true that (single != null) && (set != null).
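+ *
+ * A typical use (sketch):
+ *
+ *    TinyHashSet<Long> ids = new TinyHashSet<Long>();
+ *    ids.add(1L);    // stored in the single field, no HashSet created
+ *    ids.add(2L);    // promoted to a real HashSet
+ *    Iterator<Long> iter = ids.iterator();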
+ */
+public class TinyHashSet<T> {
+
+    private Set<T> set;
+    private T single;
+
+    /*
+     * Will return a fuzzy value if not under synchronized control.
+     */
+    public int size() {
+        if (single != null) {
+            return 1;
+        } else if (set != null) {
+            return set.size();
+        } else {
+            return 0;
+        }
+    }
+
+    public boolean remove(T o) {
+        assert (single == null) || (set == null);
+        if (single != null) {
+            if (single == o ||
+                single.equals(o)) {
+                single = null;
+                return true;
+            } else {
+                return false;
+            }
+        } else if (set != null) {
+            return set.remove(o);
+        } else {
+            return false;
+        }
+    }
+
+    public boolean add(T o) {
+        assert (single == null) || (set == null);
+        if (set != null) {
+            return set.add(o);
+        } else if (single == null) {
+            single = o;
+            return true;
+        } else {
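+            /* Second element: promote from the single field to a HashSet. */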
+            set = new HashSet<T>();
+            set.add(single);
+            single = null;
+            return set.add(o);
+        }
+    }
+
+    public Set<T> copy() {
+        assert (single == null) || (set == null);
+        if (set != null) {
+            return new HashSet<T>(set);
+        } else {
+            Set<T> ret = new HashSet<T>();
+            if (single != null) {
+                ret.add(single);
+            }
+            return ret;
+        }
+    }
+
+    public Iterator<T> iterator() {
+        assert (single == null) || (set == null);
+        if (set != null) {
+            return set.iterator();
+        } else {
+            return new SingleElementIterator<T>(single, this);
+        }
+    }
+
+    /*
+     * Iterator that is used to just return one element.
+     */
+    public static class SingleElementIterator<T> implements Iterator<T> {
+        T theObject;
+        TinyHashSet<T> theSet;
+        boolean returnedTheObject = false;
+
+        SingleElementIterator(T o, TinyHashSet<T> theSet) {
+            theObject = o;
+            this.theSet = theSet;
+            returnedTheObject = (o == null);
+        }
+
+        public boolean hasNext() {
+            return !returnedTheObject;
+        }
+
+        public T next() {
+            if (returnedTheObject) {
+                throw new NoSuchElementException();
+            }
+
+            returnedTheObject = true;
+            return theObject;
+        }
+
+        public void remove() {
+            if (theObject == null ||
+                !returnedTheObject) {
+                throw new IllegalStateException();
+            }
+            theSet.remove(theObject);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/Tracer.java b/src/com/sleepycat/je/utilint/Tracer.java
new file mode 100644
index 0000000000000000000000000000000000000000..77920287bda05024745dfee210d16607d31a9606
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/Tracer.java
@@ -0,0 +1,254 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Tracer.java,v 1.53.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.logging.Level;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+
+/**
+ * The Tracer generates debug messages that are sent to the java.util.logging
+ * facility. There are three log handlers set up for logging -- the database
+ * log itself, an output file, and stdout (the "console").  By default, only
+ * the database log handler is enabled.
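+ *
+ * For example (a sketch; envImpl is an open EnvironmentImpl):
+ *
+ *    Tracer.trace(Level.INFO, envImpl, "checkpoint starting");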
+ */
+public class Tracer implements Loggable {
+
+    /*
+     * Name pattern for tracing output that's been directed into a log file by
+     * enabling the file handler.
+     */
+    public static final String INFO_FILES = "je.info";
+
+    /*
+     * Contents of a debug message.
+     */
+    private Timestamp time;
+    private String msg;
+
+    /**
+     * Create a new debug record.
+     */
+    public Tracer(String msg) {
+        this.time = getCurrentTimestamp();
+        this.msg = msg;
+    }
+
+    /**
+     * Create trace record that will be filled in from the log.
+     */
+    public Tracer() {
+    }
+
+    /*
+     * Static utility methods for submitting information for logging in the
+     * text log file, the database log, and stdout.
+     */
+
+    /**
+     * Logger method for recording a general message.
+     */
+    public static void trace(Level logLevel,
+                             EnvironmentImpl envImpl,
+                             String msg) {
+        envImpl.getLogger().log(logLevel, msg);
+    }
+
+    /**
+     * Logger method for recording an exception and stacktrace.
+     */
+    public static void trace(EnvironmentImpl envImpl,
+                             String sourceClass,
+                             String sourceMethod,
+                             String msg,
+                             Throwable t) {
+
+        /*
+         * Give it to the Logger, which will funnel it to stdout and/or the
+         * text file and/or the database log file
+         */
+        envImpl.getLogger().logp(Level.SEVERE,
+                                 sourceClass,
+                                 sourceMethod,
+                                 msg + "\n" + Tracer.getStackTrace(t));
+    }
+
+    /**
+     * Parse a logging level config parameter, and return a more explanatory
+     * error message if it doesn't parse.
+     */
+    public static Level parseLevel(EnvironmentImpl envImpl,
+                                   ConfigParam configParam)
+        throws DatabaseException {
+
+        Level level = null;
+        try {
+            String levelVal = envImpl.getConfigManager().get(configParam);
+            level = Level.parse(levelVal);
+        } catch (IllegalArgumentException e) {
+            throw new DatabaseException("Problem parsing parameter " +
+                                        configParam.getName() +
+                                        ": " + e.getMessage(), e);
+        }
+        return level;
+    }
+
+    /*
+     * Helpers
+     */
+    public String getMessage() {
+        return msg;
+    }
+
+    /**
+     * @return a timestamp for "now"
+     */
+    private Timestamp getCurrentTimestamp() {
+        Calendar cal = Calendar.getInstance();
+        return new Timestamp(cal.getTime().getTime());
+    }
+
+    /**
+     * @return the stacktrace for an exception
+     */
+    public static String getStackTrace(Throwable t) {
+        StringWriter s = new StringWriter();
+        t.printStackTrace(new PrintWriter(s));
+        String stackTrace = s.toString();
+        stackTrace = stackTrace.replaceAll("<", "&lt;");
+        stackTrace = stackTrace.replaceAll(">", "&gt;");
+        return stackTrace;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * Convenience method to create a log entry containing this trace msg.
+     */
+    public long log(LogManager logManager)
+        throws DatabaseException {
+
+        return logManager.log(new SingleItemEntry(LogEntryType.LOG_TRACE,
+                                                  this),
+                              ReplicationContext.NO_REPLICATE);
+    }
+
+    /**
+     * @see Loggable#getLogSize()
+     */
+    public int getLogSize() {
+        return (LogUtils.getTimestampLogSize(time) +
+                LogUtils.getStringLogSize(msg));
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        /* Write the timestamp and the message. */
+        LogUtils.writeTimestamp(logBuffer, time);
+        LogUtils.writeString(logBuffer, msg);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) {
+        /* Prior to version 6, entries use the unpacked format. */
+        boolean unpacked = (entryVersion < 6);
+        time = LogUtils.readTimestamp(itemBuffer, unpacked);
+        msg = LogUtils.readString(itemBuffer, unpacked);
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<Dbg time=\"");
+        sb.append(time);
+        sb.append("\">");
+        sb.append("<msg val=\"");
+        sb.append(msg);
+        sb.append("\"/>");
+        sb.append("</Dbg>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof Tracer))
+            return false;
+
+        return msg.equals(((Tracer) other).msg);
+    }
+
+    @Override
+    public String toString() {
+        return (time + "/" + msg);
+    }
+
+    /*
+     * For unit tests.
+     */
+
+    /**
+     *  Just in case it's ever used as a hash key.
+     */
+    @Override
+    public int hashCode() {
+        return toString().hashCode();
+    }
+
+    /**
+     * Override Object.equals
+     */
+    @Override
+    public boolean equals(Object obj) {
+        /* Same instance? */
+        if (this == obj) {
+            return true;
+        }
+
+        /* Is it another Tracer? */
+        if (!(obj instanceof Tracer)) {
+            return false;
+        }
+
+        /*
+         * We could compare all the fields individually, but since they're all
+         * placed in our toString() method, we can just compare the String
+         * version of each object.
+         */
+        return (toString().equals(obj.toString()));
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/TracerFormatter.java b/src/com/sleepycat/je/utilint/TracerFormatter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ffed0f39ac846d6e3e05a8ee577f6f730c84e3b
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/TracerFormatter.java
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TracerFormatter.java,v 1.5.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.logging.Formatter;
+import java.util.logging.LogRecord;
+
+/**
+ * Formatter for debug log file output.
+ */
+public class TracerFormatter extends Formatter {
+
+    private Date date = new Date();
+    private DateFormat formatter;
+
+    public TracerFormatter() {
+        date = new Date();
+        formatter = makeDateFormat();
+    }
+
+    /* The date and formatter are not thread safe. */
+    private synchronized String getDate(long millis) {
+        date.setTime(millis);
+        return formatter.format(date);
+    }
+
+    /**
+     * Format the log record in this form:
+     *   <short date> <short time> <message level> <message>
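+     * For example (illustrative values):
+     *   2010-01-04 15:30:38:123:GMT INFO some message
+     *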
+     * @param record the log record to be formatted.
+     * @return a formatted log record
+     */
+    public String format(LogRecord record) {
+        StringBuilder sb = new StringBuilder();
+
+        String dateVal = getDate(record.getMillis());
+        sb.append(dateVal);
+        sb.append(" ");
+        sb.append(record.getLevel().getLocalizedName());
+        sb.append(" ");
+        sb.append(formatMessage(record));
+        sb.append("\n");
+
+        if (record.getThrown() != null) {
+            try {
+                StringWriter sw = new StringWriter();
+                PrintWriter pw = new PrintWriter(sw);
+                record.getThrown().printStackTrace(pw);
+                pw.close();
+                sb.append(sw.toString());
+            } catch (Exception ex) {
+                /* Ignored. */
+            }
+        }
+        return sb.toString();
+    }
+
+    /* For unit test support */
+    public static DateFormat makeDateFormat() {
+        return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS:z");
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/VLSN.java b/src/com/sleepycat/je/utilint/VLSN.java
new file mode 100644
index 0000000000000000000000000000000000000000..c188c1992e8b2529fa85097dbe2e08123c938f7c
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/VLSN.java
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: VLSN.java,v 1.23.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.log.Loggable;
+
+public class VLSN implements Loggable, Comparable<VLSN> {
+
+    public static final int LOG_SIZE = 8;
+
+    public static final VLSN NULL_VLSN = new VLSN(-1);
+    public static final VLSN FIRST_VLSN = new VLSN(1);
+
+    /*
+     * A replicated log entry is identified by a sequence id. We may change the
+     * VLSN implementation so it's not a first-class object, in order to reduce
+     * its in-memory footprint. In that case, the VLSN value would be a long,
+     * and this class would provide static utility methods.
+     */
+    private long sequence;   // sequence number
+
+    public VLSN(long sequence) {
+        this.sequence = sequence;
+    }
+
+    /**
+     * Constructor for VLSNs that are read from disk.
+     */
+    public VLSN() {
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+
+        if (!(obj instanceof VLSN)) {
+            return false;
+        }
+
+        VLSN otherVLSN = (VLSN) obj;
+        return (otherVLSN.sequence == sequence);
+    }
+
+    @Override
+    public int hashCode() {
+        return Long.valueOf(sequence).hashCode();
+    }
+
+    public long getSequence() {
+        return sequence;
+    }
+
+    /**
+     * Return a VLSN which would follow this one.
+     */
+    public VLSN getNext() {
+        if (this.equals(NULL_VLSN)) {
+            return FIRST_VLSN;
+        } else {
+            return new VLSN(sequence + 1);
+        }
+    }
+
+    /**
+     * Return a VLSN which would precede this one.
+     */
+    public VLSN getPrev() {
+        if (this.equals(NULL_VLSN)) {
+            return NULL_VLSN;
+        } else {
+            return new VLSN(sequence - 1);
+        }
+    }
+
+    /**
+     * Return true if this VLSN's sequence directly follows the "other"
+     * VLSN. This handles the case where "other" is a NULL_VLSN.
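+     *
+     * For example, new VLSN(1).follows(NULL_VLSN) is true, and
+     * new VLSN(2).follows(new VLSN(1)) is true.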
+     */
+    public boolean follows(VLSN other) {
+        return (((other == NULL_VLSN) && sequence == 1) ||
+                ((other != NULL_VLSN) &&
+                 (other.getSequence() == (sequence - 1))));
+    }
+
+    /**
+     * Compares this VLSN's sequence with the specified VLSN's sequence for
+     * order. Returns a negative integer, zero, or a positive integer as this
+     * sequence is less than, equal to, or greater than the specified sequence.
+     */
+    public int compareTo(VLSN other) {
+
+        if (this == NULL_VLSN &&
+            other == NULL_VLSN) {
+            return 0;
+        }
+
+        if (this == NULL_VLSN) {
+            /* If "this" is null, the other VLSN is always greater. */
+            return -1;
+        }
+
+        if (other == NULL_VLSN) {
+            /* If the "other" is null, this VLSN is always greater. */
+            return 1;
+        } else {
+            long otherSequence = other.getSequence();
+            if ((sequence - otherSequence) > 0) {
+                return 1;
+            } else if (sequence == otherSequence) {
+                return 0;
+            } else {
+                return -1;
+            }
+        }
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return LOG_SIZE;
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer buffer) {
+        buffer.putLong(sequence);
+    }
+
+    /*
+     *  Reading from a byte buffer
+     */
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer buffer, byte entryVersion)
+        throws LogException {
+        sequence = buffer.getLong();
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuffer sb, boolean verbose) {
+        sb.append("<vlsn v=\"").append(this).append("\">");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof VLSN))
+            return false;
+
+        return sequence == ((VLSN) other).sequence;
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append(sequence);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/persist/BasicCursor.java b/src/com/sleepycat/persist/BasicCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..fb25c3849796aaefc4604471c148b9f683bb0af3
--- /dev/null
+++ b/src/com/sleepycat/persist/BasicCursor.java
@@ -0,0 +1,237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BasicCursor.java,v 1.12.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Iterator;
+
+/* <!-- begin JE only --> */
+import com.sleepycat.je.CacheMode;
+/* <!-- end JE only --> */
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+/**
+ * Implements EntityCursor and uses a ValueAdapter so that it can enumerate
+ * either keys or entities.
+ *
+ * @author Mark Hayes
+ */
+class BasicCursor<V> implements EntityCursor<V> {
+
+    RangeCursor cursor;
+    ValueAdapter<V> adapter;
+    boolean updateAllowed;
+    DatabaseEntry key;
+    DatabaseEntry pkey;
+    DatabaseEntry data;
+
+    BasicCursor(RangeCursor cursor,
+                ValueAdapter<V> adapter,
+                boolean updateAllowed) {
+        this.cursor = cursor;
+        this.adapter = adapter;
+        this.updateAllowed = updateAllowed;
+        key = adapter.initKey();
+        pkey = adapter.initPKey();
+        data = adapter.initData();
+    }
+
+    public V first()
+        throws DatabaseException {
+
+        return first(null);
+    }
+
+    public V first(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getFirst(key, pkey, data, lockMode));
+    }
+
+    public V last()
+        throws DatabaseException {
+
+        return last(null);
+    }
+
+    public V last(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getLast(key, pkey, data, lockMode));
+    }
+
+    public V next()
+        throws DatabaseException {
+
+        return next(null);
+    }
+
+    public V next(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getNext(key, pkey, data, lockMode));
+    }
+
+    public V nextDup()
+        throws DatabaseException {
+
+        return nextDup(null);
+    }
+
+    public V nextDup(LockMode lockMode)
+        throws DatabaseException {
+
+        checkInitialized();
+        return returnValue(cursor.getNextDup(key, pkey, data, lockMode));
+    }
+
+    public V nextNoDup()
+        throws DatabaseException {
+
+        return nextNoDup(null);
+    }
+
+    public V nextNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getNextNoDup(key, pkey, data, lockMode));
+    }
+
+    public V prev()
+        throws DatabaseException {
+
+        return prev(null);
+    }
+
+    public V prev(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getPrev(key, pkey, data, lockMode));
+    }
+
+    public V prevDup()
+        throws DatabaseException {
+
+        return prevDup(null);
+    }
+
+    public V prevDup(LockMode lockMode)
+        throws DatabaseException {
+
+        checkInitialized();
+        return returnValue(cursor.getPrevDup(key, pkey, data, lockMode));
+    }
+
+    public V prevNoDup()
+        throws DatabaseException {
+
+        return prevNoDup(null);
+    }
+
+    public V prevNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getPrevNoDup(key, pkey, data, lockMode));
+    }
+
+    public V current()
+        throws DatabaseException {
+
+        return current(null);
+    }
+
+    public V current(LockMode lockMode)
+        throws DatabaseException {
+
+        checkInitialized();
+        return returnValue(cursor.getCurrent(key, pkey, data, lockMode));
+    }
+
+    public int count()
+        throws DatabaseException {
+
+        checkInitialized();
+        return cursor.count();
+    }
+
+    public Iterator<V> iterator() {
+        return iterator(null);
+    }
+
+    public Iterator<V> iterator(LockMode lockMode) {
+        return new BasicIterator(this, lockMode);
+    }
+
+    public boolean update(V entity)
+        throws DatabaseException {
+
+        if (!updateAllowed) {
+            throw new UnsupportedOperationException
+                ("Update not allowed on a secondary index");
+        }
+        checkInitialized();
+        adapter.valueToData(entity, data);
+        return cursor.putCurrent(data) == OperationStatus.SUCCESS;
+    }
+
+    public boolean delete()
+        throws DatabaseException {
+
+        checkInitialized();
+        return cursor.delete() == OperationStatus.SUCCESS;
+    }
+
+    public EntityCursor<V> dup()
+        throws DatabaseException {
+
+        return new BasicCursor<V>(cursor.dup(true), adapter, updateAllowed);
+    }
+
+    public void close()
+        throws DatabaseException {
+
+        cursor.close();
+    }
+
+    /* <!-- begin JE only --> */
+    public void setCacheMode(CacheMode cacheMode) {
+        cursor.getCursor().setCacheMode(cacheMode);
+    }
+    /* <!-- end JE only --> */
+
+    /* <!-- begin JE only --> */
+    public CacheMode getCacheMode() {
+        return cursor.getCursor().getCacheMode();
+    }
+    /* <!-- end JE only --> */
+
+    void checkInitialized()
+        throws IllegalStateException {
+
+        if (!cursor.isInitialized()) {
+            throw new IllegalStateException
+                ("Cursor is not initialized at a valid position");
+        }
+    }
+
+    V returnValue(OperationStatus status) {
+        V value;
+        if (status == OperationStatus.SUCCESS) {
+            value = adapter.entryToValue(key, pkey, data);
+        } else {
+            value = null;
+        }
+        /* Clear entries to save memory. */
+        adapter.clearEntries(key, pkey, data);
+        return value;
+    }
+}
diff --git a/src/com/sleepycat/persist/BasicIndex.java b/src/com/sleepycat/persist/BasicIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..6e78fc412125cbba157a77da3f32ac8dca2b0fb6
--- /dev/null
+++ b/src/com/sleepycat/persist/BasicIndex.java
@@ -0,0 +1,270 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BasicIndex.java,v 1.14.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+/**
+ * Implements EntityIndex using a ValueAdapter.  This class is abstract and
+ * does not implement get()/map()/sortedMap() because it doesn't have access
+ * to the entity binding.
+ *
+ * @author Mark Hayes
+ */
+abstract class BasicIndex<K,E> implements EntityIndex<K,E> {
+
+    static final DatabaseEntry NO_RETURN_ENTRY;
+    static {
+        NO_RETURN_ENTRY = new DatabaseEntry();
+        NO_RETURN_ENTRY.setPartial(0, 0, true);
+    }
+
+    Database db;
+    boolean transactional;
+    boolean sortedDups;
+    boolean locking;
+    Class<K> keyClass;
+    EntryBinding keyBinding;
+    KeyRange emptyRange;
+    ValueAdapter<K> keyAdapter;
+    ValueAdapter<E> entityAdapter;
+
+    BasicIndex(Database db,
+               Class<K> keyClass,
+               EntryBinding keyBinding,
+               ValueAdapter<E> entityAdapter)
+        throws DatabaseException {
+
+        this.db = db;
+        DatabaseConfig config = db.getConfig();
+        transactional = config.getTransactional();
+        sortedDups = config.getSortedDuplicates();
+        locking =
+            DbCompat.getInitializeLocking(db.getEnvironment().getConfig());
+
+        this.keyClass = keyClass;
+        this.keyBinding = keyBinding;
+        this.entityAdapter = entityAdapter;
+
+        emptyRange = new KeyRange(config.getBtreeComparator());
+        keyAdapter = new KeyValueAdapter(keyClass, keyBinding);
+    }
+
+    /*
+     * Of the EntityIndex methods only get()/map()/sortedMap() are not
+     * implemented here and therefore must be implemented by subclasses.
+     */
+
+    public boolean contains(K key)
+        throws DatabaseException {
+
+        return contains(null, key, null);
+    }
+
+    public boolean contains(Transaction txn, K key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = NO_RETURN_ENTRY;
+        keyBinding.objectToEntry(key, keyEntry);
+
+        OperationStatus status = db.get(txn, keyEntry, dataEntry, lockMode);
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    public long count()
+        throws DatabaseException {
+
+        if (DbCompat.DATABASE_COUNT) {
+            return DbCompat.getDatabaseCount(db);
+        } else {
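+            /*
+             * No native record-count support: scan the database, using
+             * cursor.count() to total the duplicates for each distinct key.
+             */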
+            long count = 0;
+            DatabaseEntry key = NO_RETURN_ENTRY;
+            DatabaseEntry data = NO_RETURN_ENTRY;
+            CursorConfig cursorConfig = locking ?
+                CursorConfig.READ_UNCOMMITTED : null;
+            Cursor cursor = db.openCursor(null, cursorConfig);
+            try {
+                OperationStatus status = cursor.getFirst(key, data, null);
+                while (status == OperationStatus.SUCCESS) {
+                    if (sortedDups) {
+                        count += cursor.count();
+                    } else {
+                        count += 1;
+                    }
+                    status = cursor.getNextNoDup(key, data, null);
+                }
+            } finally {
+                cursor.close();
+            }
+            return count;
+        }
+    }
+
+    public boolean delete(K key)
+        throws DatabaseException {
+
+        return delete(null, key);
+    }
+
+    public boolean delete(Transaction txn, K key)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        keyBinding.objectToEntry(key, keyEntry);
+
+        OperationStatus status = db.delete(txn, keyEntry);
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    public EntityCursor<K> keys()
+        throws DatabaseException {
+
+        return keys(null, null);
+    }
+
+    public EntityCursor<K> keys(Transaction txn, CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, emptyRange, keyAdapter, config);
+    }
+
+    public EntityCursor<E> entities()
+        throws DatabaseException {
+
+        return cursor(null, emptyRange, entityAdapter, null);
+    }
+
+    public EntityCursor<E> entities(Transaction txn,
+                                    CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, emptyRange, entityAdapter, config);
+    }
+
+    public EntityCursor<K> keys(K fromKey, boolean fromInclusive,
+                                K toKey, boolean toInclusive)
+        throws DatabaseException {
+
+        return cursor(null, fromKey, fromInclusive, toKey, toInclusive,
+                      keyAdapter, null);
+    }
+
+    public EntityCursor<K> keys(Transaction txn,
+                                K fromKey,
+                                boolean fromInclusive,
+                                K toKey,
+                                boolean toInclusive,
+                                CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, fromKey, fromInclusive, toKey, toInclusive,
+                      keyAdapter, config);
+    }
+
+    public EntityCursor<E> entities(K fromKey, boolean fromInclusive,
+                                    K toKey, boolean toInclusive)
+        throws DatabaseException {
+
+        return cursor(null, fromKey, fromInclusive, toKey, toInclusive,
+                      entityAdapter, null);
+    }
+
+    public EntityCursor<E> entities(Transaction txn,
+                                    K fromKey,
+                                    boolean fromInclusive,
+                                    K toKey,
+                                    boolean toInclusive,
+                                    CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, fromKey, fromInclusive, toKey, toInclusive,
+                      entityAdapter, config);
+    }
+
+    /*
+    public ForwardCursor<K> unsortedKeys(KeySelector<K> selector)
+        throws DatabaseException {
+
+        return unsortedKeys(null, selector, null);
+    }
+
+    public ForwardCursor<K> unsortedKeys(Transaction txn,
+                                         KeySelector<K> selector,
+                                         CursorConfig config)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public ForwardCursor<E> unsortedEntities(KeySelector<K> selector)
+        throws DatabaseException {
+
+        return unsortedEntities(null, selector, null);
+    }
+
+    public ForwardCursor<E> unsortedEntities(Transaction txn,
+                                             KeySelector<K> selector,
+                                             CursorConfig config)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+    */
+
+    private <V> EntityCursor<V> cursor(Transaction txn,
+                                       K fromKey,
+                                       boolean fromInclusive,
+                                       K toKey,
+                                       boolean toInclusive,
+                                       ValueAdapter<V> adapter,
+                                       CursorConfig config)
+        throws DatabaseException {
+
+        DatabaseEntry fromEntry = null;
+        if (fromKey != null) {
+            fromEntry = new DatabaseEntry();
+            keyBinding.objectToEntry(fromKey, fromEntry);
+        }
+        DatabaseEntry toEntry = null;
+        if (toKey != null) {
+            toEntry = new DatabaseEntry();
+            keyBinding.objectToEntry(toKey, toEntry);
+        }
+        KeyRange range = emptyRange.subRange
+            (fromEntry, fromInclusive, toEntry, toInclusive);
+        return cursor(txn, range, adapter, config);
+    }
+
+    private <V> EntityCursor<V> cursor(Transaction txn,
+                                       KeyRange range,
+                                       ValueAdapter<V> adapter,
+                                       CursorConfig config)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, config);
+        RangeCursor rangeCursor =
+            new RangeCursor(range, null/*pkRange*/, sortedDups, cursor);
+        return new BasicCursor<V>(rangeCursor, adapter, isUpdateAllowed());
+    }
+
+    abstract boolean isUpdateAllowed();
+}
diff --git a/src/com/sleepycat/persist/BasicIterator.java b/src/com/sleepycat/persist/BasicIterator.java
new file mode 100644
index 0000000000000000000000000000000000000000..245102c77d5d57f1ca92eb7a7dcda3bc265b125b
--- /dev/null
+++ b/src/com/sleepycat/persist/BasicIterator.java
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BasicIterator.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * Implements Iterator for an arbitrary EntityCursor.
+ *
+ * @author Mark Hayes
+ */
+class BasicIterator<V> implements Iterator<V> {
+
+    private EntityCursor<V> entityCursor;
+    private ForwardCursor<V> forwardCursor;
+    private LockMode lockMode;
+    private V nextValue;
+
+    /**
+     * An EntityCursor is given and the remove() method is supported.
+     */
+    BasicIterator(EntityCursor<V> entityCursor, LockMode lockMode) {
+        this.entityCursor = entityCursor;
+        this.forwardCursor = entityCursor;
+        this.lockMode = lockMode;
+    }
+
+    /**
+     * A ForwardCursor is given and the remove() method is not supported.
+     */
+    BasicIterator(ForwardCursor<V> forwardCursor, LockMode lockMode) {
+        this.forwardCursor = forwardCursor;
+        this.lockMode = lockMode;
+    }
+
+    public boolean hasNext() {
+        if (nextValue == null) {
+            try {
+                nextValue = forwardCursor.next(lockMode);
+            } catch (DatabaseException e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+            return nextValue != null;
+        } else {
+            return true;
+        }
+    }
+
+    public V next() {
+        if (hasNext()) {
+            V v = nextValue;
+            nextValue = null;
+            return v;
+        } else {
+            throw new NoSuchElementException();
+        }
+    }
+
+    public void remove() {
+        if (entityCursor == null) {
+            throw new UnsupportedOperationException();
+        }
+        try {
+            if (!entityCursor.delete()) {
+                throw new IllegalStateException
+                    ("Record at cursor position is already deleted");
+            }
+        } catch (DatabaseException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/DataValueAdapter.java b/src/com/sleepycat/persist/DataValueAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..6a6c9c281a8855a0c34ff57b95b0c6cff5c2adce
--- /dev/null
+++ b/src/com/sleepycat/persist/DataValueAdapter.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DataValueAdapter.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A ValueAdapter where the "value" is the data, although the data in this case
+ * is the primary key in a KeysIndex.
+ *
+ * @author Mark Hayes
+ */
+class DataValueAdapter<V> implements ValueAdapter<V> {
+
+    private EntryBinding dataBinding;
+
+    DataValueAdapter(Class<V> keyClass, EntryBinding dataBinding) {
+        this.dataBinding = dataBinding;
+    }
+
+    public DatabaseEntry initKey() {
+        return new DatabaseEntry();
+    }
+
+    public DatabaseEntry initPKey() {
+        return null;
+    }
+
+    public DatabaseEntry initData() {
+        return new DatabaseEntry();
+    }
+
+    public void clearEntries(DatabaseEntry key,
+                             DatabaseEntry pkey,
+                             DatabaseEntry data) {
+        key.setData(null);
+        data.setData(null);
+    }
+
+    public V entryToValue(DatabaseEntry key,
+                          DatabaseEntry pkey,
+                          DatabaseEntry data) {
+        return (V) dataBinding.entryToObject(data);
+    }
+
+    public void valueToData(V value, DatabaseEntry data) {
+        throw new UnsupportedOperationException
+            ("Cannot change the data in a key-only index");
+    }
+}
diff --git a/src/com/sleepycat/persist/DatabaseNamer.java b/src/com/sleepycat/persist/DatabaseNamer.java
new file mode 100644
index 0000000000000000000000000000000000000000..73de9ac43087e8fe1df70fb62d5343844229095c
--- /dev/null
+++ b/src/com/sleepycat/persist/DatabaseNamer.java
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseNamer.java,v 1.4.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.je.Database; // for javadoc
+
+/**
+ * <!-- begin JE only -->
+ * @hidden
+ * <!-- end JE only -->
+ * Determines the file names to use for primary and secondary databases.
+ *
+ * <p>Each {@link PrimaryIndex} and {@link SecondaryIndex} is represented
+ * internally as a Berkeley DB {@link Database}.  The file names of primary and
+ * secondary indices must be unique within the environment, so that each index
+ * is stored in a separate database file.</p>
+ *
+ * <p>By default, the file names of primary and secondary databases are
+ * defined as follows.</p>
+ *
+ * <p>The syntax of a primary index database file name is:</p>
+ * <pre>   STORE_NAME-ENTITY_CLASS</pre>
+ * <p>Where STORE_NAME is the name parameter passed to {@link
+ * EntityStore#EntityStore EntityStore} and ENTITY_CLASS is name of the class
+ * passed to {@link EntityStore#getPrimaryIndex getPrimaryIndex}.</p>
+ *
+ * <p>The syntax of a secondary index database file name is:</p>
+ * <pre>   STORE_NAME-ENTITY_CLASS-KEY_NAME</pre>
+ * <p>Where KEY_NAME is the secondary key name passed to {@link
+ * EntityStore#getSecondaryIndex getSecondaryIndex}.</p>
+ *
+ * <p>The default naming described above is implemented by the built-in {@link
+ * DatabaseNamer#DEFAULT} object.  An application may supply a custom {@link
+ * DatabaseNamer} to override the default naming scheme.  For example, a
+ * custom namer could place all database files in a subdirectory with the name
+ * of the store.  A custom namer could also be used to name files according to
+ * specific file system restrictions.</p>
+ *
+ * <p>The custom namer object must be an instance of the {@code DatabaseNamer}
+ * interface and is configured using {@link StoreConfig#setDatabaseNamer
+ * setDatabaseNamer}.</p>
+ *
+ * <p>When copying or removing all databases in a store, there is one further
+ * consideration.  There are two internal databases that must be kept with the
+ * other databases in the store in order for the store to be used.  These
+ * contain the data formats and sequences for the store.  Their entity class
+ * names are:</p>
+ *
+ * <pre>   com.sleepycat.persist.formats</pre>
+ * <pre>   com.sleepycat.persist.sequences</pre>
+ *
+ * <p>With default database naming, databases with the following names will be
+ * present in each store.</p>
+ *
+ * <pre>   STORE_NAME-com.sleepycat.persist.formats</pre>
+ * <pre>   STORE_NAME-com.sleepycat.persist.sequences</pre>
+ *
+ * <p>These databases must normally be included with copies of other databases
+ * in the store.  They should not be modified by the application.</p>
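+ *
+ * <p>For example, a custom namer that groups a store's files under a
+ * subdirectory might look like this (a sketch only; creating the
+ * subdirectory and any platform-specific handling are omitted):</p>
+ *
+ * <pre class="code">
+ * DatabaseNamer namer = new DatabaseNamer() {
+ *     public String getFileName(String storeName,
+ *                               String entityClassName,
+ *                               String keyName) {
+ *         String name = DatabaseNamer.DEFAULT.getFileName
+ *             (storeName, entityClassName, keyName);
+ *         return storeName + java.io.File.separator + name;
+ *     }
+ * };</pre>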
+ */
+public interface DatabaseNamer {
+
+    /**
+     * Returns the name of the file to be used to store the database for the
+     * given store, entity class and key.  This method may not return null.
+     *
+     * @param storeName the name of the {@link EntityStore}.
+     *
+     * @param entityClassName the complete name of the entity class for a
+     * primary or secondary index.
+     *
+     * @param keyName the key name identifying a secondary index, or null for
+     * a primary index.
+     */
+    public String getFileName(String storeName,
+                              String entityClassName,
+                              String keyName);
+
+    /**
+     * The default database namer.
+     *
+     * <p>The {@link #getFileName getFileName} method of this namer returns the
+     * {@code storeName}, {@code entityClassName} and {@code keyName}
+     * parameters as follows:</p>
+     *
+     * <pre class="code">
+     * if (keyName != null) {
+     *     return storeName + '-' + entityClassName + '-' + keyName;
+     * } else {
+     *     return storeName + '-' + entityClassName;
+     * }</pre>
+     */
+    public static final DatabaseNamer DEFAULT = new DatabaseNamer() {
+
+        public String getFileName(String storeName,
+                                  String entityClassName,
+                                  String keyName) {
+            if (keyName != null) {
+                return storeName + '-' + entityClassName + '-' + keyName;
+            } else {
+                return storeName + '-' + entityClassName;
+            }
+        }
+    };
+}
diff --git a/src/com/sleepycat/persist/EntityCursor.java b/src/com/sleepycat/persist/EntityCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..30b3a2a5ed62255ca35f29cf51c14e17274bdf31
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityCursor.java
@@ -0,0 +1,637 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityCursor.java,v 1.12.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Iterator;
+
+/* <!-- begin JE only --> */
+import com.sleepycat.je.CacheMode;
+/* <!-- end JE only --> */
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.model.Relationship;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * Traverses entity values or key values and allows deleting or updating the
+ * entity at the current cursor position.  The value type (V) is either an
+ * entity class or a key class, depending on how the cursor was opened.
+ *
+ * <p>{@code EntityCursor} objects are <em>not</em> thread-safe.  Cursors
+ * should be opened, used and closed by a single thread.</p>
+ *
+ * <p>Cursors are opened using the {@link EntityIndex#keys} and {@link
+ * EntityIndex#entities} family of methods.  These methods are available for
+ * objects of any class that implements {@link EntityIndex}: {@link
+ * PrimaryIndex}, {@link SecondaryIndex}, and the indices returned by {@link
+ * SecondaryIndex#keysIndex} and {@link SecondaryIndex#subIndex}.  A {@link
+ * ForwardCursor}, which implements a subset of cursor operations, is also
+ * available via the {@link EntityJoin#keys} and {@link EntityJoin#entities}
+ * methods.</p>
+ *
+ * <p>Values are always returned by a cursor in key order, where the key is
+ * defined by the underlying {@link EntityIndex}.  For example, a cursor on a
+ * {@link SecondaryIndex} returns values ordered by secondary key, while an
+ * index on a {@link PrimaryIndex} or a {@link SecondaryIndex#subIndex} returns
+ * values ordered by primary key.</p>
+ *
+ * <p><em>WARNING:</em> Cursors must always be closed to prevent resource leaks
+ * which could lead to the index becoming unusable or cause an
+ * <code>OutOfMemoryError</code>.  To ensure that a cursor is closed in the
+ * face of exceptions, call {@link #close} in a finally block.  For example,
+ * the following code traverses all Employee entities and closes the cursor
+ * whether or not an exception occurs:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * EntityStore store = ...
+ *
+ * {@code PrimaryIndex<Long,Employee>} primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);
+ *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     for (Employee entity = cursor.first();
+ *                   entity != null;
+ *                   entity = cursor.next()) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <h3>Initializing the Cursor Position</h3>
+ *
+ * <p>When it is opened, a cursor is not initially positioned on any value; in
+ * other words, it is uninitialized.  Most methods in this interface initialize
+ * the cursor position but certain methods, for example, {@link #current} and
+ * {@link #delete}, throw {@link IllegalStateException} when called for an
+ * uninitialized cursor.</p>
+ *
+ * <p>Note that the {@link #next} and {@link #prev} methods return the first or
+ * last value respectively for an uninitialized cursor.  This allows the loop
+ * in the example above to be rewritten as follows:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     Employee entity;
+ *     while ((entity = cursor.next()) != null) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <h3>Cursors and Iterators</h3>
+ *
+ * <p>The {@link #iterator} method can be used to return a standard Java {@code
+ * Iterator} that returns the same values that the cursor returns.  For
+ * example:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     {@code Iterator<Employee>} i = cursor.iterator();
+ *     while (i.hasNext()) {
+ *         Employee entity = i.next();
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The {@link Iterable} interface is also extended by {@link EntityCursor}
+ * to allow using the cursor as the target of a Java "foreach" statement:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The iterator uses the cursor directly, so any changes to the cursor
+ * position impact the iterator and vice versa.  The iterator advances the
+ * cursor by calling {@link #next()} when {@link Iterator#hasNext} or {@link
+ * Iterator#next} is called.  Because of this interaction, to keep things
+ * simple it is best not to mix the use of an {@code EntityCursor}
+ * {@code Iterator} with the use of the {@code EntityCursor} traversal methods
+ * such as {@link #next()}, for a single {@code EntityCursor} object.</p>
+ *
+ * <h3>Key Ranges</h3>
+ *
+ * <p>A key range may be specified when opening the cursor, to restrict the
+ * key range of the cursor to a subset of the complete range of keys in the
+ * index.  A {@code fromKey} and/or {@code toKey} parameter may be specified
+ * when calling {@link EntityIndex#keys(Object,boolean,Object,boolean)} or
+ * {@link EntityIndex#entities(Object,boolean,Object,boolean)}.  The key
+ * arguments may be specified as inclusive or exclusive values.</p>
+ *
+ * <p>Whenever a cursor with a key range is moved, the key range bounds will be
+ * checked, and the cursor will never be positioned outside the range.  The
+ * {@link #first} cursor value is the first existing value in the range, and
+ * the {@link #last} cursor value is the last existing value in the range.  For
+ * example, the following code traverses Employee entities with keys from 100
+ * (inclusive) to 200 (exclusive):</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(100, true, 200, false);
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <h3>Duplicate Keys</h3>
+ *
+ * <p>When using a cursor for a {@link SecondaryIndex}, the keys in the index
+ * may be non-unique (duplicates) if {@link SecondaryKey#relate} is {@link
+ * Relationship#MANY_TO_ONE MANY_TO_ONE} or {@link Relationship#MANY_TO_MANY
+ * MANY_TO_MANY}.  For example, a {@code MANY_TO_ONE} {@code
+ * Employee.department} secondary key is non-unique because there are multiple
+ * Employee entities with the same department key value.  The {@link #nextDup},
+ * {@link #prevDup}, {@link #nextNoDup} and {@link #prevNoDup} methods may be
+ * used to control how non-unique keys are returned by the cursor.</p>
+ *
+ * <p>{@link #nextDup} and {@link #prevDup} return the next or previous value
+ * only if it has the same key as the current value, and null is returned when
+ * a different key is encountered.  For example, these methods can be used to
+ * return all employees in a given department.</p>
+ *
+ * <p>{@link #nextNoDup} and {@link #prevNoDup} return the next or previous
+ * value with a unique key, skipping over values that have the same key.  For
+ * example, these methods can be used to return the first employee in each
+ * department.</p>
+ *
+ * <p>For example, the following code will find the first employee in each
+ * department with {@link #nextNoDup} until it finds a department name that
+ * matches a particular regular expression.  For each matching department it
+ * will find all employees in that department using {@link #nextDup}.</p>
+ *
+ * <pre class="code">
+ * {@code SecondaryIndex<String,Long,Employee>} secondaryIndex =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");
+ *
+ * String regex = ...;
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
+ * try {
+ *     for (Employee entity = cursor.first();
+ *                   entity != null;
+ *                   entity = cursor.nextNoDup()) {
+ *         if (entity.department.matches(regex)) {
+ *             while (entity != null) {
+ *                 // Do something with the matching entities...
+ *                 entity = cursor.nextDup();
+ *             }
+ *         }
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <h3>Updating and Deleting Entities with a Cursor</h3>
+ *
+ * <p>The {@link #update} and {@link #delete} methods operate on the entity at
+ * the current cursor position.  Cursors on any type of index may be used to
+ * delete entities.  For example, the following code deletes all employees in
+ * departments which have names that match a particular regular expression:</p>
+ *
+ * <pre class="code">
+ * {@code SecondaryIndex<String,Long,Employee>} secondaryIndex =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");
+ *
+ * String regex = ...;
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
+ * try {
+ *     for (Employee entity = cursor.first();
+ *                   entity != null;
+ *                   entity = cursor.nextNoDup()) {
+ *         if (entity.department.matches(regex)) {
+ *             while (entity != null) {
+ *                 cursor.delete();
+ *                 entity = cursor.nextDup();
+ *             }
+ *         }
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>Note that the cursor can be moved to the next (or previous) value after
+ * deleting the entity at the current position.  This is an important property
+ * of cursors, since without it you would not be able to easily delete while
+ * processing multiple values with a cursor.  A cursor positioned on a deleted
+ * entity is in a special state.  In this state, {@link #current} will return
+ * null, {@link #delete} will return false, and {@link #update} will return
+ * false.</p>
+ *
+ * <p>The {@link #update} method is supported only if the value type is an
+ * entity class (not a key class) and the underlying index is a {@link
+ * PrimaryIndex}; in other words, for a cursor returned by one of the {@link
+ * PrimaryIndex#entities} methods.  For example, the following code changes all
+ * employee names to uppercase:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     for (Employee entity = cursor.first();
+ *                   entity != null;
+ *                   entity = cursor.next()) {
+ *         entity.name = entity.name.toUpperCase();
+ *         cursor.update(entity);
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * @author Mark Hayes
+ */
+public interface EntityCursor<V> extends ForwardCursor<V> {
+
+    /**
+     * Moves the cursor to the first value and returns it, or returns null if
+     * the cursor range is empty.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the first value, or null if the cursor range is empty.
+     */
+    V first()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the first value and returns it, or returns null if
+     * the cursor range is empty.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the first value, or null if the cursor range is empty.
+     */
+    V first(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the last value and returns it, or returns null if
+     * the cursor range is empty.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the last value, or null if the cursor range is empty.
+     */
+    V last()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the last value and returns it, or returns null if
+     * the cursor range is empty.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the last value, or null if the cursor range is empty.
+     */
+    V last(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value and returns it, or returns null
+     * if there are no more values in the cursor range.  If the cursor is
+     * uninitialized, this method is equivalent to {@link #first}.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the next value, or null if there are no more values in the
+     * cursor range.
+     */
+    V next()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value and returns it, or returns null
+     * if there are no more values in the cursor range.  If the cursor is
+     * uninitialized, this method is equivalent to {@link #first}.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the next value, or null if there are no more values in the
+     * cursor range.
+     */
+    V next(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value with the same key (duplicate) and
+     * returns it, or returns null if no more values are present for the key at
+     * the current position.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the next value with the same key, or null if no more values are
+     * present for the key at the current position.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V nextDup()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value with the same key (duplicate) and
+     * returns it, or returns null if no more values are present for the key at
+     * the current position.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the next value with the same key, or null if no more values are
+     * present for the key at the current position.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V nextDup(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value with a different key and returns it,
+     * or returns null if there are no more unique keys in the cursor range.
+     * If the cursor is uninitialized, this method is equivalent to {@link
+     * #first}.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the next value with a different key, or null if there are no
+     * more unique keys in the cursor range.
+     */
+    V nextNoDup()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value with a different key and returns it,
+     * or returns null if there are no more unique keys in the cursor range.
+     * If the cursor is uninitialized, this method is equivalent to {@link
+     * #first}.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the next value with a different key, or null if there are no
+     * more unique keys in the cursor range.
+     */
+    V nextNoDup(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the previous value and returns it, or returns null
+     * if there are no preceding values in the cursor range.  If the cursor is
+     * uninitialized, this method is equivalent to {@link #last}.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the previous value, or null if there are no preceding values in
+     * the cursor range.
+     */
+    V prev()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the previous value and returns it, or returns null
+     * if there are no preceding values in the cursor range.  If the cursor is
+     * uninitialized, this method is equivalent to {@link #last}.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the previous value, or null if there are no preceding values in
+     * the cursor range.
+     */
+    V prev(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the previous value with the same key (duplicate) and
+     * returns it, or returns null if no preceding values are present for the
+     * key at the current position.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the previous value with the same key, or null if no preceding
+     * values are present for the key at the current position.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V prevDup()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the previous value with the same key (duplicate) and
+     * returns it, or returns null if no preceding values are present for the
+     * key at the current position.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the previous value with the same key, or null if no preceding
+     * values are present for the key at the current position.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V prevDup(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the preceding value with a different key and returns
+     * it, or returns null if there are no preceding unique keys in the cursor
+     * range.  If the cursor is uninitialized, this method is equivalent to
+     * {@link #last}.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the previous value with a different key, or null if there are no
+     * preceding unique keys in the cursor range.
+     */
+    V prevNoDup()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the preceding value with a different key and returns
+     * it, or returns null if there are no preceding unique keys in the cursor
+     * range.  If the cursor is uninitialized, this method is equivalent to
+     * {@link #last}.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the previous value with a different key, or null if there are no
+     * preceding unique keys in the cursor range.
+     */
+    V prevNoDup(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Returns the value at the cursor position, or null if the value at the
+     * cursor position has been deleted.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the value at the cursor position, or null if it has been
+     * deleted.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V current()
+        throws DatabaseException;
+
+    /**
+     * Returns the value at the cursor position, or null if the value at the
+     * cursor position has been deleted.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the value at the cursor position, or null if it has been
+     * deleted.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    V current(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Returns the number of values (duplicates) for the key at the cursor
+     * position, or returns zero if all values for the key have been deleted.
+     * Returns one or zero if the underlying index has unique keys.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
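+     *
+     * <p>For example, the following minimal sketch (assuming a cursor on the
+     * department secondary index shown in the class description) prints the
+     * number of employees in each department:</p>
+     *
+     * <pre class="code">
+     * for (Employee e = cursor.first(); e != null; e = cursor.nextNoDup()) {
+     *     System.out.println(e.department + ": " + cursor.count());
+     * }</pre>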
+     *
+     * @return the number of duplicates, or zero if all values for the current
+     * key have been deleted.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     */
+    int count()
+        throws DatabaseException;
+
+    /**
+     * Returns an iterator over the key range, starting with the value
+     * following the current position or at the first value if the cursor is
+     * uninitialized.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the iterator.
+     */
+    Iterator<V> iterator();
+
+    /**
+     * Returns an iterator over the key range, starting with the value
+     * following the current position or at the first value if the cursor is
+     * uninitialized.
+     *
+     * @param lockMode the lock mode to use for all operations performed
+     * using the iterator, or null to use {@link LockMode#DEFAULT}.
+     *
+     * @return the iterator.
+     */
+    Iterator<V> iterator(LockMode lockMode);
+
+    /**
+     * Replaces the entity at the cursor position with the given entity.
+     *
+     * @param entity the entity to replace the entity at the current position.
+     *
+     * @return true if successful or false if the entity at the current
+     * position was previously deleted.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     *
+     * @throws UnsupportedOperationException if the index is read only or if
+     * the value type is not an entity type.
+     */
+    boolean update(V entity)
+        throws DatabaseException;
+
+    /**
+     * Deletes the entity at the cursor position.
+     *
+     * @return true if successful or false if the entity at the current
+     * position was previously deleted.
+     *
+     * @throws IllegalStateException if the cursor is uninitialized.
+     *
+     * @throws UnsupportedOperationException if the index is read only.
+     */
+    boolean delete()
+        throws DatabaseException;
+
+    /**
+     * Duplicates the cursor at the cursor position.  The returned cursor will
+     * initially be positioned at the same position as this cursor, and will
+     * inherit this cursor's {@link Transaction} and {@link CursorConfig}.
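+     *
+     * <p>For example, the following minimal sketch (assuming an {@code
+     * EntityCursor<Employee>} as in the class examples) uses a duplicate to
+     * remember a position while this cursor scans ahead:</p>
+     *
+     * <pre class="code">
+     * {@code EntityCursor<Employee>} mark = cursor.dup();
+     * try {
+     *     // Move this cursor forward; mark remains at the old position.
+     *     cursor.next();
+     * } finally {
+     *     mark.close();
+     * }</pre>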
+     *
+     * @return the duplicated cursor.
+     */
+    EntityCursor<V> dup()
+        throws DatabaseException;
+
+    /**
+     * Closes the cursor.
+     */
+    void close()
+        throws DatabaseException;
+
+    /* <!-- begin JE only --> */
+    /**
+     * Changes the {@code CacheMode} used for operations performed using this
+     * cursor.  For a newly opened cursor, the default is {@link
+     * CacheMode#DEFAULT}.
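+     *
+     * <p>For example, the following one-line sketch (assuming {@link
+     * CacheMode#UNCHANGED} is appropriate for the scan being performed)
+     * avoids perturbing the cache during a long scan:</p>
+     *
+     * <pre class="code">
+     * cursor.setCacheMode(CacheMode.UNCHANGED);</pre>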
+     *
+     * @param cacheMode is the {@code CacheMode} to use for subsequent
+     * operations using this cursor.
+     *
+     * @see CacheMode
+     */
+    void setCacheMode(CacheMode cacheMode);
+    /* <!-- end JE only --> */
+
+    /* <!-- begin JE only --> */
+    /**
+     * Returns the {@code CacheMode} used for operations performed using this
+     * cursor.  For a newly opened cursor, the default is {@link
+     * CacheMode#DEFAULT}.
+     *
+     * @return the CacheMode object used for operations performed with this
+     * cursor.
+     *
+     * @see CacheMode
+     */
+    CacheMode getCacheMode();
+    /* <!-- end JE only --> */
+}
diff --git a/src/com/sleepycat/persist/EntityIndex.java b/src/com/sleepycat/persist/EntityIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..81e7ded6d3c253bf88256298ef4ef7dfac079d64
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityIndex.java
@@ -0,0 +1,975 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityIndex.java,v 1.22.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException; // for javadoc
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.SecondaryDatabase; // for javadoc
+import com.sleepycat.je.Transaction;
+
+/**
+ * The interface for accessing keys and entities via a primary or secondary
+ * index.
+ *
+ * <p>{@code EntityIndex} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code EntityIndex} object.</p>
+ *
+ * <p>An index is conceptually a <em>map</em>. {key:value} mappings are
+ * stored in the index and accessed by key.  In fact, for interoperability with
+ * other libraries that use the standard Java {@link Map} or {@link SortedMap}
+ * interfaces, an {@code EntityIndex} may be accessed via these standard
+ * interfaces by calling the {@link #map} or {@link #sortedMap} methods.</p>
+ *
+ * <p>{@code EntityIndex} is an interface that is implemented by several
+ * classes in this package for different purposes.  Depending on the context,
+ * the key type (K) and value type (V) of the index take on different meanings.
+ * The different classes that implement {@code EntityIndex} are:</p>
+ * <ul>
+ * <li>{@link PrimaryIndex} maps primary keys to entities.</li>
+ * <li>{@link SecondaryIndex} maps secondary keys to entities.</li>
+ * <li>{@link SecondaryIndex#keysIndex} maps secondary keys to primary
+ * keys.</li>
+ * <li>{@link SecondaryIndex#subIndex} maps primary keys to entities, for the
+ * subset of entities having a specified secondary key.</li>
+ * </ul>
+ *
+ * <p>In all cases, the index key type (K) is a primary or secondary key class.
+ * The index value type (V) is an entity class in all cases except for a {@link
+ * SecondaryIndex#keysIndex}, when it is a primary key class.</p>
+ *
+ * <p>In the following example, an {@code Employee} entity with a {@code
+ * MANY_TO_ONE} secondary key is defined.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>Consider that we have stored the entities below:</p>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th colspan="3">Entities</th></tr>
+ *   <tr><th>ID</th><th>Department</th><th>Name</th></tr>
+ *   <tr><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ *   <tr><td>2</td><td>Sales</td><td>Joan Smith</td></tr>
+ *   <tr><td>3</td><td>Engineering</td><td>John Smith</td></tr>
+ *   <tr><td>4</td><td>Sales</td><td>Jim Smith</td></tr>
+ * </table></p>
+ *
+ * <p>{@link PrimaryIndex} maps primary keys to entities:</p>
+ *
+ * <pre class="code">
+ * {@code PrimaryIndex<Long,Employee>} primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);</pre>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th colspan="4">primaryIndex</th></tr>
+ *   <tr><th>Primary Key</th><th colspan="3">Entity</th></tr>
+ *   <tr><td>1</td><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ *   <tr><td>2</td><td>2</td><td>Sales</td><td>Joan Smith</td></tr>
+ *   <tr><td>3</td><td>3</td><td>Engineering</td><td>John Smith</td></tr>
+ *   <tr><td>4</td><td>4</td><td>Sales</td><td>Jim Smith</td></tr>
+ * </table></p>
+ *
+ * <p>{@link SecondaryIndex} maps secondary keys to entities:</p>
+ *
+ * <pre class="code">
+ * {@code SecondaryIndex<String,Long,Employee>} secondaryIndex =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");</pre>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th colspan="4">secondaryIndex</th></tr>
+ *   <tr><th>Secondary Key</th><th colspan="3">Entity</th></tr>
+ *   <tr><td>Engineering</td><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ *   <tr><td>Engineering</td><td>3</td><td>Engineering</td><td>John Smith</td></tr>
+ *   <tr><td>Sales</td><td>2</td><td>Sales</td><td>Joan Smith</td></tr>
+ *   <tr><td>Sales</td><td>4</td><td>Sales</td><td>Jim Smith</td></tr>
+ * </table></p>
+ *
+ * <p>{@link SecondaryIndex#keysIndex} maps secondary keys to primary
+ * keys:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<String,Long>} keysIndex = secondaryIndex.keysIndex();</pre>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th colspan="4">keysIndex</th></tr>
+ *   <tr><th>Secondary Key</th><th colspan="3">Primary Key</th></tr>
+ *   <tr><td>Engineering</td><td>1</td></tr>
+ *   <tr><td>Engineering</td><td>3</td></tr>
+ *   <tr><td>Sales</td><td>2</td></tr>
+ *   <tr><td>Sales</td><td>4</td></tr>
+ * </table></p>
+ *
+ * <p>{@link SecondaryIndex#subIndex} maps primary keys to entities, for the
+ * subset of entities having a specified secondary key:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long,Employee>} subIndex = secondaryIndex.subIndex("Engineering");</pre>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th colspan="4">subIndex</th></tr>
+ *   <tr><th>Primary Key</th><th colspan="3">Entity</th></tr>
+ *   <tr><td>1</td><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ *   <tr><td>3</td><td>3</td><td>Engineering</td><td>John Smith</td></tr>
+ * </table></p>
+ *
+ * <h3>Accessing the Index</h3>
+ *
+ * <p>An {@code EntityIndex} provides a variety of methods for retrieving
+ * entities from an index.  It also provides methods for deleting entities.
+ * However, it does not provide methods for inserting and updating.  To insert
+ * and update entities, use the {@link PrimaryIndex#put} family of methods in
+ * the {@link PrimaryIndex} class.</p>
+ *
+ * <p>An {@code EntityIndex} supports two mechanisms for retrieving
+ * entities:</p>
+ * <ol>
+ * <li>The {@link #get} method returns a single value for a given key.  If there
+ * are multiple values with the same secondary key (duplicates), it returns the
+ * first entity in the duplicate set.</li>
+ * <li>An {@link EntityCursor} can be obtained using the {@link #keys} and
+ * {@link #entities} family of methods.  A cursor can be used to return all
+ * values in the index, including duplicates.  A cursor can also be used to
+ * return values within a specified range of keys.</li>
+ * </ol>
+ *
+ * <p>Using the example entities above, calling {@link #get} on the primary
+ * index will always return the employee with the given ID, or null if no such
+ * ID exists.  But calling {@link #get} on the secondary index will retrieve
+ * the first employee in the given department, which may not be very
+ * useful:</p>
+ *
+ * <pre class="code">
+ * Employee emp = primaryIndex.get(1);      // Returns by unique ID
+ * emp = secondaryIndex.get("Engineering"); // Returns first in department</pre>
+ *
+ * <p>Using a cursor, you can iterate through all duplicates in the secondary
+ * index:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         if (entity.department.equals("Engineering")) {
+ *             // Do something with the entity...
+ *         }
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>But for a large database it is much more efficient to iterate over only
+ * those entities with the secondary key you're searching for.  This could be
+ * done by restricting a cursor to a range of keys:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor =
+ *     secondaryIndex.entities("Engineering", true, "Engineering", true);
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>However, when you are interested only in the entities with a particular
+ * secondary key value, it is more convenient to use a sub-index:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long,Employee>} subIndex = secondaryIndex.subIndex("Engineering");
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>In addition to being more convenient than a cursor range, a sub-index
+ * allows retrieving by primary key:</p>
+ *
+ * <pre class="code">
+ * Employee emp = subIndex.get(1);</pre>
+ *
+ * <p>When using a sub-index, all operations performed on the sub-index are
+ * restricted to the single key that was specified when the sub-index was
+ * created.  For example, the following returns null because employee 2 is not
+ * in the Engineering department and therefore is not part of the
+ * sub-index:</p>
+ *
+ * <pre class="code">
+ * Employee emp = subIndex.get(2);</pre>
+ *
+ * <p>For more information on using cursors and cursor ranges, see {@link
+ * EntityCursor}.</p>
+ *
+ * <p>Note that when using an index, keys and values are stored and retrieved
+ * by value not by reference.  In other words, if an entity object is stored
+ * and then retrieved, or retrieved twice, each object will be a separate
+ * instance.  For example, in the code below the assertion will always
+ * fail.</p>
+ * <pre class="code">
+ * MyKey key = ...;
+ * MyEntity entity1 = index.get(key);
+ * MyEntity entity2 = index.get(key);
+ * assert entity1 == entity2; // always fails!
+ * </pre>
+ *
+ * <h3>Deleting from the Index</h3>
+ *
+ * <p>Any type of index may be used to delete entities with a specified key by
+ * calling {@link #delete}.  The important thing to keep in mind is that
+ * <em>all entities</em> with the specified key are deleted.  In a primary index,
+ * at most a single entity is deleted:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(1); // Deletes a single employee by unique ID</pre>
+ *
+ * <p>But in a secondary index, multiple entities may be deleted:</p>
+ *
+ * <pre class="code">
+ * secondaryIndex.delete("Engineering"); // Deletes all Engineering employees</pre>
+ *
+ * <p>This raises the question: How can a single entity be deleted without
+ * knowing its primary key?  The answer is to use cursors.  After locating an
+ * entity using a cursor, the entity can be deleted by calling {@link
+ * EntityCursor#delete}.</p>
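+ *
+ * <p>For example, the following minimal sketch (reusing the sub-index shown
+ * above) deletes a single employee in the Engineering department by
+ * name:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         if ("Jane Smith".equals(entity.name)) {
+ *             cursor.delete();
+ *             break;
+ *         }
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>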
+ *
+ * <h3>Transactions</h3>
+ *
+ * <p>Transactions can be used to provide standard ACID (Atomicity,
+ * Consistency, Isolation and Durability) guarantees when retrieving, storing
+ * and deleting entities.  This section provides a brief overview of how to use
+ * transactions with the Direct Persistence Layer.  For more information on
+ * using transactions, see <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html">Writing
+ * Transactional Applications</a>.</p>
+ *
+ * <p>Transactions may be used only with a transactional {@link EntityStore},
+ * which is one for which {@link StoreConfig#setTransactional
+ * StoreConfig.setTransactional(true)} has been called.  Likewise, a
+ * transactional store may only be used with a transactional {@link
+ * Environment}, which is one for which {@link
+ * EnvironmentConfig#setTransactional EnvironmentConfig.setTransactional(true)}
+ * has been called.  For example:</p>
+ *
+ * <pre class="code">
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setTransactional(true);
+ * envConfig.setAllowCreate(true);
+ * Environment env = new Environment(new File("/my/data"), envConfig);
+ *
+ * StoreConfig storeConfig = new StoreConfig();
+ * storeConfig.setTransactional(true);
+ * storeConfig.setAllowCreate(true);
+ * EntityStore store = new EntityStore(env, "myStore", storeConfig);</pre>
+ *
+ * <p>Transactions are represented by {@link Transaction} objects, which are
+ * part of the {@link com.sleepycat.je Base API}.  Transactions are created
+ * using the {@link Environment#beginTransaction Environment.beginTransaction}
+ * method.</p>
+ *
+ * <p>A transaction will include all operations for which the transaction
+ * object is passed as a method argument.  All retrieval, storage and deletion
+ * methods have an optional {@link Transaction} parameter for this purpose.
+ * When a transaction is passed to a method that opens a cursor, all retrieval,
+ * storage and deletion operations performed using that cursor will be included
+ * in the transaction.</p>
+ *
+ * <p>A transaction may be committed by calling {@link Transaction#commit} or
+ * aborted by calling {@link Transaction#abort}.  For example, two employees
+ * may be deleted atomically with a transaction; in other words, either both are
+ * deleted or neither is deleted:</p>
+ *
+ * <pre class="code">
+ * Transaction txn = env.beginTransaction(null, null);
+ * try {
+ *     primaryIndex.delete(txn, 1);
+ *     primaryIndex.delete(txn, 2);
+ *     txn.commit();
+ *     txn = null;
+ * } finally {
+ *     if (txn != null) {
+ *         txn.abort();
+ *     }
+ * }</pre>
+ *
+ * <p><em>WARNING:</em> Transactions must always be committed or aborted to
+ * prevent resource leaks which could lead to the index becoming unusable or
+ * cause an <code>OutOfMemoryError</code>.  To ensure that a transaction is
+ * aborted in the face of exceptions, call {@link Transaction#abort} in a
+ * finally block.</p>
+ *
+ * <p>For a transactional store, storage and deletion operations are always
+ * transaction protected, whether or not a transaction is explicitly used.  A
+ * null transaction argument means to perform the operation using auto-commit,
+ * or the implied thread transaction if an XAEnvironment is being used.  A
+ * transaction is automatically started as part of the operation and is
+ * automatically committed if the operation completes successfully.  The
+ * transaction is automatically aborted if an exception occurs during the
+ * operation, and the exception is re-thrown to the caller.  For example, each
+ * employee below is deleted using an auto-commit transaction, but it is
+ * possible that employee 1 will be deleted and employee 2 will not be deleted,
+ * if an error or crash occurs while deleting employee 2:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(null, 1);
+ * primaryIndex.delete(null, 2);</pre>
+ *
+ * <p>When retrieving entities, a null transaction argument means to perform
+ * the operation non-transactionally.  The operation is performed outside the
+ * scope of any transaction, without providing transactional ACID guarantees.
+ * If an implied thread transaction is present (i.e. if an XAEnvironment is
+ * being used), that transaction is used.  When a non-transactional store is
+ * used, transactional ACID guarantees are also not provided.</p>
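+ *
+ * <p>For example, the following sketch reads an employee without transaction
+ * protection by passing a null transaction and a null lock mode
+ * explicitly:</p>
+ *
+ * <pre class="code">
+ * Employee emp = primaryIndex.get(null, 1L, null);</pre>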
+ *
+ * <p>For non-transactional and auto-commit usage, overloaded signatures for
+ * retrieval, storage and deletion methods are provided to avoid having to pass
+ * a null transaction argument.  For example, {@link #delete} may be called
+ * instead of {@link #delete(Transaction,Object)}.  The following
+ * code is equivalent to the code above where null was passed for the
+ * transaction:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(1);
+ * primaryIndex.delete(2);</pre>
+ *
+ * <p>For retrieval methods the overloaded signatures also include an optional
+ * {@link LockMode} parameter, and overloaded signatures for opening cursors
+ * include an optional {@link CursorConfig} parameter.  These parameters are
+ * described further below in the Locking and Lock Modes section.</p>
+ *
+ * <h3>Transactions and Cursors</h3>
+ *
+ * <p>There are two special considerations when using cursors with transactions.
+ * First, for a transactional store, a non-null transaction must be passed to
+ * methods that open a cursor if that cursor will be used to delete or update
+ * entities.  Cursors do not perform auto-commit when a null transaction is
+ * explicitly passed or implied by the method signature.  For example, the
+ * following code will throw {@link DatabaseException} when the {@link
+ * EntityCursor#delete} method is called:</p>
+ *
+ * <pre class="code">
+ * // <strong>Does not work with a transactional store!</strong>
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         cursor.delete(); // <strong>Will throw DatabaseException.</strong>
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>Instead, the {@link #entities(Transaction,CursorConfig)} signature must
+ * be used and a non-null transaction must be passed:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, null);
+ * try {
+ *     for (Employee entity : cursor) {
+ *         cursor.delete();
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The second consideration is that error handling is more complex when
+ * using both transactions and cursors, for the following reasons:</p>
+ * <ol>
+ * <li>When an exception occurs, the transaction should be aborted.</li>
+ * <li>Cursors must be closed whether or not an exception occurs.</li>
+ * <li>Cursors must be closed before committing or aborting the
+ * transaction.</li>
+ * </ol>
+ *
+ * <p>For example:</p>
+ *
+ * <pre class="code">
+ * Transaction txn = env.beginTransaction(null, null);
+ * {@code EntityCursor<Employee>} cursor = null;
+ * try {
+ *     cursor = primaryIndex.entities(txn, null);
+ *     for (Employee entity : cursor) {
+ *         cursor.delete();
+ *     }
+ *     cursor.close();
+ *     cursor = null;
+ *     txn.commit();
+ *     txn = null;
+ * } finally {
+ *     if (cursor != null) {
+ *         cursor.close();
+ *     }
+ *     if (txn != null) {
+ *         txn.abort();
+ *     }
+ * }</pre>
+ *
+ * <h3>Locking and Lock Modes</h3>
+ *
+ * <p>This section provides a brief overview of locking and describes how lock
+ * modes are used with the Direct Persistence Layer.  For more information on
+ * locking, see <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html">Writing
+ * Transactional Applications</a>.</p>
+ *
+ * <p>When using transactions, locks are normally acquired on each entity that
+ * is retrieved or stored.  The locks are used to isolate one transaction from
+ * another.  Locks are normally released only when the transaction is committed
+ * or aborted.</p>
+ *
+ * <p>When not using transactions, locks are also normally acquired on each
+ * entity that is retrieved or stored.  However, these locks are released when
+ * the operation is complete.  When using cursors, in order to provide
+ * <em>cursor stability</em>, locks are held until the cursor is moved to a
+ * different entity or closed.</p>
+ *
+ * <p>This default locking behavior provides full transactional ACID guarantees
+ * and cursor stability.  However, application performance can sometimes be
+ * improved by compromising these guarantees.  As described in <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html">Writing
+ * Transactional Applications</a>, the {@link LockMode} and {@link
+ * CursorConfig} parameters are two of the mechanisms that can be used to make
+ * compromises.</p>
+ *
+ * <p>For example, imagine that you need an approximate count of all entities
+ * matching certain criteria, and it is acceptable for entities to be changed
+ * by other threads or other transactions while performing this query.  {@link
+ * LockMode#READ_UNCOMMITTED} can be used to perform the retrievals without
+ * acquiring any locks.  This reduces memory consumption, requires less
+ * processing, and can improve concurrency.</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, null);
+ * try {
+ *     Employee entity;
+ *     while ((entity = cursor.next(LockMode.READ_UNCOMMITTED)) != null) {
+ *         // Examine the entity and accumulate totals...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The {@link LockMode} parameter specifies locking behavior on a
+ * per-operation basis.  If null or {@link LockMode#DEFAULT} is specified, the
+ * default lock mode is used.</p>
+ *
+ * <p>It is also possible to specify the default locking behavior for a cursor
+ * using {@link CursorConfig}.  The example below is equivalent to the example
+ * above:</p>
+ *
+ * <pre class="code">
+ * CursorConfig config = new CursorConfig();
+ * config.setReadUncommitted(true);
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, config);
+ * try {
+ *     Employee entity;
+ *     while ((entity = cursor.next()) != null) {
+ *         // Examine the entity and accumulate totals...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The use of other lock modes, cursor configuration, and transaction
+ * configuration are discussed in <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html">Writing
+ * Transactional Applications</a>.</p>
+ *
+ * <p>Deadlock handling is another important topic discussed in <a
+ * href="{@docRoot}/../TransactionGettingStarted/index.html">Writing
+ * Transactional Applications</a>.  To go along with that material, here we
+ * show a deadlock handling loop in the context of the Direct Persistence
+ * Layer.  The example below shows deleting all entities in a primary index in
+ * a single transaction.  If a deadlock occurs, the transaction is aborted and
+ * the operation is retried.</p>
+ *
+ * <pre class="code">
+ * int retryCount = 0;
+ * boolean retry = true;
+ * while (retry) {
+ *     Transaction txn = env.beginTransaction(null, null);
+ *     {@code EntityCursor<Employee>} cursor = null;
+ *     try {
+ *         cursor = primaryIndex.entities(txn, null);
+ *         for (Employee entity : cursor) {
+ *             cursor.delete();
+ *         }
+ *         cursor.close();
+ *         cursor = null;
+ *         txn.commit();
+ *         txn = null;
+ *         retry = false;
+ *     } catch (DeadlockException e) {
+ *         retryCount += 1;
+ *         if (retryCount &gt;= MAX_DEADLOCK_RETRIES) {
+ *             throw e;
+ *         }
+ *     } finally {
+ *         if (cursor != null) {
+ *             cursor.close();
+ *         }
+ *         if (txn != null) {
+ *             txn.abort();
+ *         }
+ *     }
+ * }</pre>
+ *
+ * <h3>Low Level Access</h3>
+ *
+ * <p>Each Direct Persistence Layer index is associated with an underlying
+ * {@link Database} or {@link SecondaryDatabase} defined in the {@link
+ * com.sleepycat.je Base API}.  At this level, an index is a Btree managed by
+ * the Berkeley DB Java Edition transactional storage engine.  Although you may
+ * never need to work at the {@code Base API} level, keep in mind that some
+ * types of performance tuning can be done by configuring the underlying
+ * databases.  See the {@link EntityStore} class for more information on
+ * database and sequence configuration.</p>
+ *
+ * <p>If you wish to access an index using the {@code Base API}, you may call
+ * the {@link PrimaryIndex#getDatabase} or {@link SecondaryIndex#getDatabase}
+ * method to get the underlying database.  To translate between entity or key
+ * objects and {@link DatabaseEntry} objects at this level, use the bindings
+ * returned by {@link PrimaryIndex#getEntityBinding}, {@link
+ * PrimaryIndex#getKeyBinding}, and {@link SecondaryIndex#getKeyBinding}.</p>
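+ *
+ * <p>For example, the following minimal sketch (assuming the {@code Employee}
+ * primary index above, and standard {@code Base API} calls) reads a raw
+ * record and converts it back to an entity using the index's bindings:</p>
+ *
+ * <pre class="code">
+ * Database db = primaryIndex.getDatabase();
+ * DatabaseEntry keyEntry = new DatabaseEntry();
+ * DatabaseEntry dataEntry = new DatabaseEntry();
+ * primaryIndex.getKeyBinding().objectToEntry(1L, keyEntry);
+ * if (db.get(null, keyEntry, dataEntry, null) == OperationStatus.SUCCESS) {
+ *     Employee emp =
+ *         primaryIndex.getEntityBinding().entryToObject(keyEntry, dataEntry);
+ * }</pre>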
+ *
+ * @author Mark Hayes
+ */
+public interface EntityIndex<K,V> {
+
+    /**
+     * Checks for existence of a key in this index.
+     *
+     * <p>The operation will not be transaction protected, and {@link
+     * LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @param key the key to search for.
+     *
+     * @return whether the key exists in the index.
+     */
+    boolean contains(K key)
+        throws DatabaseException;
+
+    /**
+     * Checks for existence of a key in this index.
+     *
+     * @param txn the transaction used to protect this operation, or null
+     * if the operation should not be transaction protected.
+     *
+     * @param key the key to search for.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return whether the key exists in the index.
+     */
+    boolean contains(Transaction txn, K key, LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Gets an entity via a key of this index.
+     *
+     * <p>The operation will not be transaction protected, and {@link
+     * LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @param key the key to search for.
+     *
+     * @return the value mapped to the given key, or null if the key is not
+     * present in the index.
+     */
+    V get(K key)
+        throws DatabaseException;
+
+    /**
+     * Gets an entity via a key of this index.
+     *
+     * @param txn the transaction used to protect this operation, or null
+     * if the operation should not be transaction protected.
+     *
+     * @param key the key to search for.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the value mapped to the given key, or null if the key is not
+     * present in the index.
+     */
+    V get(Transaction txn, K key, LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Returns a non-transactional count of the entities in this index.
+     *
+     * <p>This operation is faster than obtaining a count by scanning the index
+     * manually, and will not perturb the current contents of the cache.
+     * However, the count is not guaranteed to be accurate if there are
+     * concurrent updates.</p>
+     *
+     * @return the number of entities in this index.
+     */
+    long count()
+        throws DatabaseException;
+
+    /**
+     * Deletes all entities with a given index key.
+     *
+     * <p>Auto-commit is used implicitly if the store is transactional.</p>
+     *
+     * @param key the key to search for.
+     *
+     * @return whether any entities were deleted.
+     */
+    boolean delete(K key)
+        throws DatabaseException;
+
+    /**
+     * Deletes all entities with a given index key.
+     *
+     * @param txn the transaction used to protect this operation, or null to
+     * use auto-commit (for a transactional store) or no transaction
+     * protection (for a non-transactional store).
+     *
+     * @param key the key to search for.
+     *
+     * @return whether any entities were deleted.
+     */
+    boolean delete(Transaction txn, K key)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing all keys in this index.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.  If the
+     * store is transactional, the cursor may not be used to update or delete
+     * entities.</p>
+     *
+     * @return the cursor.
+     */
+    EntityCursor<K> keys()
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing all keys in this index.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.  If null is specified and the store is transactional, the
+     * cursor may not be used to update or delete entities.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<K> keys(Transaction txn, CursorConfig config)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing all entities in this index.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.  If the
+     * store is transactional, the cursor may not be used to update or delete
+     * entities.</p>
+     *
+     * @return the cursor.
+     */
+    EntityCursor<V> entities()
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing all entities in this index.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.  If null is specified and the store is transactional, the
+     * cursor may not be used to update or delete entities.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<V> entities(Transaction txn,
+                             CursorConfig config)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing keys in a key range.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.  If the
+     * store is transactional, the cursor may not be used to update or delete
+     * entities.</p>
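+     *
+     * <p>For example, the following minimal sketch (reusing the {@code
+     * keysIndex} from the class description) traverses all department keys
+     * from "Engineering" (inclusive) onward, with no upper bound:</p>
+     *
+     * <pre class="code">
+     * {@code EntityCursor<String>} cursor =
+     *     keysIndex.keys("Engineering", true, null, false);
+     * try {
+     *     for (String key : cursor) {
+     *         // Do something with the key...
+     *     }
+     * } finally {
+     *     cursor.close();
+     * }</pre>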
+     *
+     * @param fromKey is the lower bound of the key range, or null if the range
+     * has no lower bound.
+     *
+     * @param fromInclusive is true if keys greater than or equal to fromKey
+     * should be included in the key range, or false if only keys greater than
+     * fromKey should be included.
+     *
+     * @param toKey is the upper bound of the key range, or null if the range
+     * has no upper bound.
+     *
+     * @param toInclusive is true if keys less than or equal to toKey should be
+     * included in the key range, or false if only keys less than toKey should
+     * be included.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<K> keys(K fromKey,
+                         boolean fromInclusive,
+                         K toKey,
+                         boolean toInclusive)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing keys in a key range.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.  If null is specified and the store is transactional, the
+     * cursor may not be used to update or delete entities.
+     *
+     * @param fromKey is the lower bound of the key range, or null if the range
+     * has no lower bound.
+     *
+     * @param fromInclusive is true if keys greater than or equal to fromKey
+     * should be included in the key range, or false if only keys greater than
+     * fromKey should be included.
+     *
+     * @param toKey is the upper bound of the key range, or null if the range
+     * has no upper bound.
+     *
+     * @param toInclusive is true if keys less than or equal to toKey should be
+     * included in the key range, or false if only keys less than toKey should
+     * be included.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<K> keys(Transaction txn,
+                         K fromKey,
+                         boolean fromInclusive,
+                         K toKey,
+                         boolean toInclusive,
+                         CursorConfig config)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing entities in a key range.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.  If the
+     * store is transactional, the cursor may not be used to update or delete
+     * entities.</p>
+     *
+     * @param fromKey is the lower bound of the key range, or null if the range
+     * has no lower bound.
+     *
+     * @param fromInclusive is true if keys greater than or equal to fromKey
+     * should be included in the key range, or false if only keys greater than
+     * fromKey should be included.
+     *
+     * @param toKey is the upper bound of the key range, or null if the range
+     * has no upper bound.
+     *
+     * @param toInclusive is true if keys less than or equal to toKey should be
+     * included in the key range, or false if only keys less than toKey should
+     * be included.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<V> entities(K fromKey,
+                             boolean fromInclusive,
+                             K toKey,
+                             boolean toInclusive)
+        throws DatabaseException;
+
+    /**
+     * Opens a cursor for traversing entities in a key range.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.  If null is specified and the store is transactional, the
+     * cursor may not be used to update or delete entities.
+     *
+     * @param fromKey is the lower bound of the key range, or null if the range
+     * has no lower bound.
+     *
+     * @param fromInclusive is true if keys greater than or equal to fromKey
+     * should be included in the key range, or false if only keys greater than
+     * fromKey should be included.
+     *
+     * @param toKey is the upper bound of the key range, or null if the range
+     * has no upper bound.
+     *
+     * @param toInclusive is true if keys less than or equal to toKey should be
+     * included in the key range, or false if only keys less than toKey should
+     * be included.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     */
+    EntityCursor<V> entities(Transaction txn,
+                             K fromKey,
+                             boolean fromInclusive,
+                             K toKey,
+                             boolean toInclusive,
+                             CursorConfig config)
+        throws DatabaseException;
+
+    /*
+     * Opens a cursor for traversing all keys in this index in arbitrary order.
+     *
+     * <p>Normally entities and keys are returned in key order.  This method
+     * takes advantage of optimizations in the Berkeley DB engine to return
+     * entities in physical storage order, potentially decreasing the amount of
+     * physical I/O.</p>
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.</p>
+     *
+     * @param selector the filter for selecting keys to be returned, or null
+     * to select all keys.
+     *
+     * @return the cursor.
+     *
+    ForwardCursor<K> unsortedKeys(KeySelector<K> selector)
+        throws DatabaseException;
+     */
+
+    /*
+     * Opens a cursor for traversing all keys in this index in arbitrary order.
+     *
+     * <p>Normally entities and keys are returned in key order.  This method
+     * takes advantage of optimizations in the Berkeley DB engine to return
+     * entities in physical storage order, potentially decreasing the amount of
+     * physical I/O.</p>
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.
+     *
+     * @param selector the filter for selecting keys to be returned, or null
+     * to select all keys.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     *
+    ForwardCursor<K> unsortedKeys(Transaction txn,
+                                  KeySelector<K> selector,
+                                  CursorConfig config)
+        throws DatabaseException;
+     */
+
+    /*
+     * Opens a cursor for traversing all entities in this index in arbitrary
+     * order.
+     *
+     * <p>Normally entities and keys are returned in key order.  This method
+     * takes advantage of optimizations in the Berkeley DB engine to return
+     * entities in physical storage order, potentially decreasing the amount of
+     * physical I/O.</p>
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.</p>
+     *
+     * @param selector the filter for selecting keys to be returned, or null
+     * to select all keys.
+     *
+     * @return the cursor.
+     *
+    ForwardCursor<V> unsortedEntities(KeySelector<K> selector)
+        throws DatabaseException;
+     */
+
+    /*
+     * Opens a cursor for traversing all entities in this index in arbitrary
+     * order.
+     *
+     * <p>Normally entities and keys are returned in key order.  This method
+     * takes advantage of optimizations in the Berkeley DB engine to return
+     * entities in physical storage order, potentially decreasing the amount of
+     * physical I/O.</p>
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.
+     *
+     * @param selector the filter for selecting keys to be returned, or null
+     * to select all keys.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     *
+    ForwardCursor<V> unsortedEntities(Transaction txn,
+                                      KeySelector<K> selector,
+                                      CursorConfig config)
+        throws DatabaseException;
+     */
+
+    /**
+     * Returns a standard Java map based on this entity index.  The {@link
+     * StoredMap} returned is defined by the {@linkplain
+     * com.sleepycat.collections Collections API}.  Stored collections conform
+     * to the standard Java collections framework interface.
+     *
+     * @return the map.
+     */
+    Map<K,V> map();
+
+    /**
+     * Returns a standard Java sorted map based on this entity index.  The
+     * {@link StoredSortedMap} returned is defined by the {@linkplain
+     * com.sleepycat.collections Collections API}.  Stored collections conform
+     * to the standard Java collections framework interface.
+     *
+     * @return the map.
+     */
+    SortedMap<K,V> sortedMap();
+}
diff --git a/src/com/sleepycat/persist/EntityJoin.java b/src/com/sleepycat/persist/EntityJoin.java
new file mode 100644
index 0000000000000000000000000000000000000000..811e3f16d9750bba541b5fc837e1e4546850a9df
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityJoin.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityJoin.java,v 1.10.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.JoinCursor;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+/**
+ * Performs an equality join on two or more secondary keys.
+ *
+ * <p>{@code EntityJoin} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code EntityJoin} object.</p>
+ *
+ * <p>An equality join is a match on all entities in a given primary index that
+ * have two or more specific secondary key values.  Note that key ranges may
+ * not be matched by an equality join; only exact keys are matched.</p>
+ *
+ * <p>For example:</p>
+ * <pre class="code">
+ *  // Index declarations -- see the <a href="package-summary.html#example">package summary example</a>.
+ *  //
+ *  {@literal PrimaryIndex<String,Person> personBySsn;}
+ *  {@literal SecondaryIndex<String,String,Person> personByParentSsn;}
+ *  {@literal SecondaryIndex<Long,String,Person> personByEmployerIds;}
+ *  Employer employer = ...;
+ *
+ *  // Match on all Person objects having parentSsn "111-11-1111" and also
+ *  // containing an employerId of employer.id.  In other words, match on all
+ *  // of Bob's children that work for a given employer.
+ *  //
+ *  {@literal EntityJoin<String,Person> join = new EntityJoin<String,Person>(personBySsn);}
+ *  join.addCondition(personByParentSsn, "111-11-1111");
+ *  join.addCondition(personByEmployerIds, employer.id);
+ *
+ *  // Perform the join operation by traversing the results with a cursor.
+ *  //
+ *  {@literal ForwardCursor<Person> results = join.entities();}
+ *  try {
+ *      for (Person person : results) {
+ *          System.out.println(person.ssn + ' ' + person.name);
+ *      }
+ *  } finally {
+ *      results.close();
+ *  }</pre>
+ *
+ * @author Mark Hayes
+ */
+public class EntityJoin<PK,E> {
+
+    private PrimaryIndex<PK,E> primary;
+    private List<Condition> conditions;
+
+    /**
+     * Creates a join object for a given primary index.
+     *
+     * @param index the primary index on which the join will operate.
+     */
+    public EntityJoin(PrimaryIndex<PK,E> index) {
+        primary = index;
+        conditions = new ArrayList<Condition>();
+    }
+
+    /**
+     * Adds a secondary key condition to the equality join.  Only entities
+     * having the given key value in the given secondary index will be returned
+     * by the join operation.
+     *
+     * @param index the secondary index containing the given key value.
+     *
+     * @param key the key value to match during the join.
+     */
+    public <SK> void addCondition(SecondaryIndex<SK,PK,E> index, SK key) {
+
+        /* Make key entry. */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        index.getKeyBinding().objectToEntry(key, keyEntry);
+
+        /* Use keys database if available. */
+        Database db = index.getKeysDatabase();
+        if (db == null) {
+            db = index.getDatabase();
+        }
+
+        /* Add condition. */
+        conditions.add(new Condition(db, keyEntry));
+    }
+
+    /**
+     * Opens a cursor that returns the entities qualifying for the join.  The
+     * join operation is performed as the returned cursor is accessed.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.</p>
+     *
+     * @return the cursor.
+     *
+     * @throws IllegalStateException if fewer than two conditions were added.
+     */
+    public ForwardCursor<E> entities()
+        throws DatabaseException {
+
+        return entities(null, null);
+    }
+
+    /**
+     * Opens a cursor that returns the entities qualifying for the join.  The
+     * join operation is performed as the returned cursor is accessed.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     *
+     * @throws IllegalStateException if fewer than two conditions were added.
+     */
+    public ForwardCursor<E> entities(Transaction txn, CursorConfig config)
+        throws DatabaseException {
+
+        return new JoinForwardCursor<E>(txn, config, false);
+    }
+
+    /**
+     * Opens a cursor that returns the primary keys of entities qualifying for
+     * the join.  The join operation is performed as the returned cursor is
+     * accessed.
+     *
+     * <p>The operations performed with the cursor will not be transaction
+     * protected, and {@link CursorConfig#DEFAULT} is used implicitly.</p>
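+     *
+     * <p>For example, building on the example in the class description (an
+     * illustrative sketch; the {@code join} object is assumed to be
+     * configured with two conditions as shown there):</p>
+     * <pre class="code">
+     *  {@literal ForwardCursor<String> keys = join.keys();}
+     *  try {
+     *      for (String ssn : keys) {
+     *          System.out.println(ssn);
+     *      }
+     *  } finally {
+     *      keys.close();
+     *  }</pre>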
+     *
+     * @return the cursor.
+     *
+     * @throws IllegalStateException if fewer than two conditions were added.
+     */
+    public ForwardCursor<PK> keys()
+        throws DatabaseException {
+
+        return keys(null, null);
+    }
+
+    /**
+     * Opens a cursor that returns the primary keys of entities qualifying for
+     * the join.  The join operation is performed as the returned cursor is
+     * accessed.
+     *
+     * @param txn the transaction used to protect all operations performed with
+     * the cursor, or null if the operations should not be transaction
+     * protected.
+     *
+     * @param config the cursor configuration that determines the default lock
+     * mode used for all cursor operations, or null to implicitly use {@link
+     * CursorConfig#DEFAULT}.
+     *
+     * @return the cursor.
+     *
+     * @throws IllegalStateException if fewer than two conditions were added.
+     */
+    public ForwardCursor<PK> keys(Transaction txn, CursorConfig config)
+        throws DatabaseException {
+
+        return new JoinForwardCursor<PK>(txn, config, true);
+    }
+
+    private static class Condition {
+
+        private Database db;
+        private DatabaseEntry key;
+
+        Condition(Database db, DatabaseEntry key) {
+            this.db = db;
+            this.key = key;
+        }
+
+        Cursor openCursor(Transaction txn, CursorConfig config)
+            throws DatabaseException {
+
+            OperationStatus status;
+            Cursor cursor = db.openCursor(txn, config);
+            try {
+                DatabaseEntry data = BasicIndex.NO_RETURN_ENTRY;
+                status = cursor.getSearchKey(key, data, null);
+            } catch (DatabaseException e) {
+                try {
+                    cursor.close();
+                } catch (DatabaseException ignored) {}
+                throw e;
+            }
+            if (status == OperationStatus.SUCCESS) {
+                return cursor;
+            } else {
+                cursor.close();
+                return null;
+            }
+        }
+    }
+
+    private class JoinForwardCursor<V> implements ForwardCursor<V> {
+
+        private Cursor[] cursors;
+        private JoinCursor joinCursor;
+        private boolean doKeys;
+
+        JoinForwardCursor(Transaction txn, CursorConfig config, boolean doKeys)
+            throws DatabaseException {
+
+            this.doKeys = doKeys;
+            try {
+                cursors = new Cursor[conditions.size()];
+                for (int i = 0; i < cursors.length; i += 1) {
+                    Condition cond = conditions.get(i);
+                    Cursor cursor = cond.openCursor(txn, config);
+                    if (cursor == null) {
+                        /* Leave joinCursor null. */
+                        doClose(null);
+                        return;
+                    }
+                    cursors[i] = cursor;
+                }
+                joinCursor = primary.getDatabase().join(cursors, null);
+            } catch (DatabaseException e) {
+                /* doClose will throw e. */
+                doClose(e);
+            }
+        }
+
+        public V next()
+            throws DatabaseException {
+
+            return next(null);
+        }
+
+        public V next(LockMode lockMode)
+            throws DatabaseException {
+
+            if (joinCursor == null) {
+                return null;
+            }
+            if (doKeys) {
+                DatabaseEntry key = new DatabaseEntry();
+                OperationStatus status = joinCursor.getNext(key, lockMode);
+                if (status == OperationStatus.SUCCESS) {
+                    EntryBinding binding = primary.getKeyBinding();
+                    return (V) binding.entryToObject(key);
+                }
+            } else {
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                OperationStatus status =
+                    joinCursor.getNext(key, data, lockMode);
+                if (status == OperationStatus.SUCCESS) {
+                    EntityBinding binding = primary.getEntityBinding();
+                    return (V) binding.entryToObject(key, data);
+                }
+            }
+            return null;
+        }
+
+        public Iterator<V> iterator() {
+            return iterator(null);
+        }
+
+        public Iterator<V> iterator(LockMode lockMode) {
+            return new BasicIterator<V>(this, lockMode);
+        }
+
+        public void close()
+            throws DatabaseException {
+
+            doClose(null);
+        }
+
+        private void doClose(DatabaseException firstException)
+            throws DatabaseException {
+
+            if (joinCursor != null) {
+                try {
+                    joinCursor.close();
+                    joinCursor = null;
+                } catch (DatabaseException e) {
+                    if (firstException == null) {
+                        firstException = e;
+                    }
+                }
+            }
+            for (int i = 0; i < cursors.length; i += 1) {
+                Cursor cursor = cursors[i];
+                if (cursor != null) {
+                    try {
+                        cursor.close();
+                        cursors[i] = null;
+                    } catch (DatabaseException e) {
+                        if (firstException == null) {
+                            firstException = e;
+                        }
+                    }
+                }
+            }
+            if (firstException != null) {
+                throw firstException;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/EntityStore.java b/src/com/sleepycat/persist/EntityStore.java
new file mode 100644
index 0000000000000000000000000000000000000000..4a9c71bf314cd77516207d460fd3502fe706a3c5
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityStore.java
@@ -0,0 +1,698 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityStore.java,v 1.35.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Set;
+
+import com.sleepycat.je.Database; // for javadoc
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.Sequence;
+import com.sleepycat.je.SequenceConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.evolve.EvolveConfig;
+import com.sleepycat.persist.evolve.EvolveStats;
+import com.sleepycat.persist.evolve.IncompatibleClassException;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.impl.Store;
+import com.sleepycat.persist.model.DeleteAction;
+import com.sleepycat.persist.model.Entity; // for javadoc
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * A store for managing persistent entity objects.
+ *
+ * <p>{@code EntityStore} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code EntityStore} object.</p>
+ *
+ * <p>See the {@link <a href="package-summary.html#example">package
+ * summary example</a>} for an example of using an {@code EntityStore}.</p>
+ *
+ * <p>Before creating an <code>EntityStore</code> you must create an {@link
+ * Environment} object using the Berkeley DB engine API.  The environment may
+ * contain any number of entity stores and their associated databases, as well
+ * as other databases not associated with an entity store.</p>
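+ *
+ * <p>For example, a minimal environment and store might be opened as follows.
+ * This is only an illustrative sketch; the environment directory, store name
+ * and configuration properties shown are assumptions made for the
+ * example.</p>
+ * <pre class="code">
+ *  EnvironmentConfig envConfig = new EnvironmentConfig();
+ *  envConfig.setAllowCreate(true);
+ *  envConfig.setTransactional(true);
+ *  // "MyDbEnvPath" is an example directory name.
+ *  Environment env = new Environment(new File("MyDbEnvPath"), envConfig);
+ *
+ *  StoreConfig storeConfig = new StoreConfig();
+ *  storeConfig.setAllowCreate(true);
+ *  storeConfig.setTransactional(true);
+ *  // "MyStore" is an example store name.
+ *  EntityStore store = new EntityStore(env, "MyStore", storeConfig);</pre>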
+ *
+ * <p>An entity store is based on an {@link EntityModel}: a data model which
+ * defines persistent classes (<em>entity classes</em>), primary keys,
+ * secondary keys, and relationships between entities.  A primary index is
+ * created for each entity class.  An associated secondary index is created for
+ * each secondary key.  The {@link Entity}, {@link PrimaryKey} and {@link
+ * SecondaryKey} annotations may be used to define entities and keys.</p>
+ *
+ * <p>To use an <code>EntityStore</code>, first obtain {@link PrimaryIndex} and
+ * {@link SecondaryIndex} objects by calling {@link #getPrimaryIndex
+ * getPrimaryIndex} and {@link #getSecondaryIndex getSecondaryIndex}.  Then use
+ * these indices to store and access entity records by key.</p>
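+ *
+ * <p>For example (an illustrative sketch; the {@code Employee} class and its
+ * {@code long} primary key are assumed to be defined as in the {@link
+ * PrimaryIndex} class documentation):</p>
+ * <pre class="code">
+ *  {@code PrimaryIndex<Long,Employee>} employeeById =
+ *      store.getPrimaryIndex(Long.class, Employee.class);
+ *
+ *  employeeById.put(new Employee(1, "Jane Smith"));
+ *  Employee jane = employeeById.get(1L);</pre>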
+ *
+ * <p>Although not normally needed, you can also use the entity store along
+ * with the {@link com.sleepycat.je Base API}.  Methods in the {@link
+ * PrimaryIndex} and {@link SecondaryIndex} classes may be used to obtain
+ * databases and bindings.  The databases may be used directly for accessing
+ * entity records.  The bindings should be called explicitly to translate
+ * between {@link com.sleepycat.je.DatabaseEntry} objects and entity model
+ * objects.</p>
+ *
+ * <p>Each primary and secondary index is associated internally with a {@link
+ * Database}.  With any of the above mentioned use cases, methods are provided
+ * that may be used for database performance tuning.  The {@link
+ * #setPrimaryConfig setPrimaryConfig} and {@link #setSecondaryConfig
+ * setSecondaryConfig} methods may be called anytime before a database is
+ * opened via {@link #getPrimaryIndex getPrimaryIndex} or {@link
+ * #getSecondaryIndex getSecondaryIndex}.  The {@link #setSequenceConfig
+ * setSequenceConfig} method may be called anytime before {@link #getSequence
+ * getSequence} is called or {@link #getPrimaryIndex getPrimaryIndex} is called
+ * for a primary index associated with that sequence.</p>
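+ *
+ * <!-- begin JE only -->
+ * <p>For example, to configure deferred-write for one entity class before its
+ * primary index is opened (an illustrative sketch; the {@code Employee} class
+ * is an assumption made for the example):</p>
+ * <pre class="code">
+ *  DatabaseConfig dbConfig = store.getPrimaryConfig(Employee.class);
+ *  dbConfig.setDeferredWrite(true);
+ *  store.setPrimaryConfig(Employee.class, dbConfig);
+ *
+ *  // The modified configuration takes effect when the index is opened.
+ *  {@code PrimaryIndex<Long,Employee>} employeeById =
+ *      store.getPrimaryIndex(Long.class, Employee.class);</pre>
+ * <!-- end JE only -->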
+ *
+ * <!-- begin JE only -->
+ * <h3>Database Names</h3>
+ *
+ * <p>The database names of primary and secondary indices are designed to be
+ * unique within the environment and identifiable for debugging and use with
+ * tools such as {@link com.sleepycat.je.util.DbDump} and {@link
+ * com.sleepycat.je.util.DbLoad}.</p>
+ *
+ * <p>The syntax of a primary index database name is:</p>
+ * <pre>   persist#STORE_NAME#ENTITY_CLASS</pre>
+ * <p>Where STORE_NAME is the name parameter passed to {@link #EntityStore
+ * EntityStore} and ENTITY_CLASS is the name of the class passed to {@link
+ * #getPrimaryIndex getPrimaryIndex}.</p>
+ *
+ * <p>The syntax of a secondary index database name is:</p>
+ * <pre>   persist#STORE_NAME#ENTITY_CLASS#KEY_NAME</pre>
+ * <p>Where KEY_NAME is the secondary key name passed to {@link
+ * #getSecondaryIndex getSecondaryIndex}.</p>
+ *
+ * <p>Although you should never have to construct these names manually,
+ * understanding their syntax is useful for several reasons:</p>
+ * <ul>
+ * <li>Exception messages sometimes contain the database name, from which you
+ * can identify the entity class and secondary key.</li>
+ * <li>If you create other databases in the same environment that are not
+ * part of an <code>EntityStore</code>, to avoid naming conflicts the other
+ * database names should not begin with <code>"persist#"</code>.</li>
+ * <li>If you are using {@link com.sleepycat.je.util.DbDump} or {@link
+ * com.sleepycat.je.util.DbLoad} to perform a backup or copy databases between
+ * environments, knowing the database names can be useful.  Normally you will
+ * dump or load all database names starting with
+ * <code>"persist#STORE_NAME#"</code>.</li>
+ * </ul>
+ *
+ * <p>If you are copying all databases in a store as mentioned in the last
+ * point above, there is one further consideration.  There are two internal
+ * databases that must be kept with the other databases in the store in order
+ * for the store to be used.  These contain the data formats and sequences for
+ * the store:</p>
+ * <pre>   persist#STORE_NAME#com.sleepycat.persist.formats</pre>
+ * <pre>   persist#STORE_NAME#com.sleepycat.persist.sequences</pre>
+ * <p>These databases must normally be included with copies of other databases
+ * in the store.  They should not be modified by the application.</p>
+ *
+ * <p>For example, the following code snippet removes all databases for a given
+ * store in a single transaction.</p>
+ * <pre class="code">
+ *  Environment env = ...
+ *  EntityStore store = ...
+ *  Transaction txn = env.beginTransaction(null, null);
+ *  String prefix = "persist#" + store.getStoreName() + "#";
+ *  for (String dbName : env.getDatabaseNames()) {
+ *      if (dbName.startsWith(prefix)) {
+ *          env.removeDatabase(txn, dbName);
+ *      }
+ *  }
+ *  txn.commit();</pre>
+ *
+ * <!-- end JE only -->
+ *
+ * @author Mark Hayes
+ */
+public class EntityStore {
+
+    private Store store;
+
+    /**
+     * Opens an entity store in a given environment.
+     *
+     * @param env an open Berkeley DB Environment.
+     *
+     * @param storeName the name of the entity store within the given
+     * environment.  An empty string is allowed.  Named stores may be used to
+     * distinguish multiple sets of persistent entities for the same entity
+     * classes in a single environment.  Underlying database names are prefixed
+     * with the store name.
+     *
+     * @param config the entity store configuration, or null to use default
+     * configuration properties.
+     *
+     * @throws IncompatibleClassException if an incompatible class change has
+     * been made and mutations are not configured for handling the change.  See
+     * {@link com.sleepycat.persist.evolve Class Evolution} for more
+     * information.
+     */
+    public EntityStore(Environment env, String storeName, StoreConfig config)
+        throws DatabaseException, IncompatibleClassException  {
+
+        store = new Store(env, storeName, config, false /*rawAccess*/);
+    }
+
+    /**
+     * Returns the environment associated with this store.
+     *
+     * @return the environment.
+     */
+    public Environment getEnvironment() {
+        return store.getEnvironment();
+    }
+
+    /**
+     * Returns a copy of the entity store configuration.
+     *
+     * @return the config.
+     */
+    public StoreConfig getConfig() {
+        return store.getConfig();
+    }
+
+    /**
+     * Returns the name of this store.
+     *
+     * @return the name.
+     */
+    public String getStoreName() {
+        return store.getStoreName();
+    }
+
+    /* <!-- begin JE only --> */
+    /**
+     * Returns the names of all entity stores in the given environment.
+     *
+     * @return the store names.  An empty set is returned if no stores are
+     * present.
+     */
+    public static Set<String> getStoreNames(Environment env)
+        throws DatabaseException {
+
+        return Store.getStoreNames(env);
+    }
+    /* <!-- end JE only --> */
+
+    /**
+     * Returns the current entity model for this store.  The current model is
+     * derived from the configured entity model and the live entity class
+     * definitions.
+     *
+     * @return the model.
+     */
+    public EntityModel getModel() {
+        return store.getModel();
+    }
+
+    /**
+     * Returns the set of mutations that were configured when the store was
+     * opened, or if none were configured, the set of mutations that were
+     * configured and stored previously.
+     *
+     * @return the mutations.
+     */
+    public Mutations getMutations() {
+        return store.getMutations();
+    }
+
+    /**
+     * Returns the primary index for a given entity class, opening it if
+     * necessary.
+     *
+     * <p>If they are not already open, the primary and secondary databases for
+     * the entity class are created/opened together in a single internal
+     * transaction.  When the secondary indices are opened, that can cascade to
+     * open other related primary indices.</p>
+     *
+     * @param primaryKeyClass the class of the entity's primary key field, or
+     * the corresponding primitive wrapper class if the primary key field type
+     * is a primitive.
+     *
+     * @param entityClass the entity class for which to open the primary index.
+     *
+     * @return the primary index.
+     *
+     * @throws IllegalArgumentException if the entity class or classes
+     * referenced by it are not persistent, or the primary key class does not
+     * match the entity's primary key field, or if metadata for the entity or
+     * primary key is invalid.
+     */
+    public <PK,E> PrimaryIndex<PK,E> getPrimaryIndex(Class<PK> primaryKeyClass,
+                                                     Class<E> entityClass)
+        throws DatabaseException {
+
+        return store.getPrimaryIndex
+            (primaryKeyClass, primaryKeyClass.getName(),
+             entityClass, entityClass.getName());
+    }
+
+    /**
+     * Returns a secondary index for a given primary index and secondary key,
+     * opening it if necessary.
+     *
+     * <p><em>NOTE:</em> If the secondary key field is declared in a subclass
+     * of the entity class, use {@link #getSubclassIndex} instead.</p>
+     *
+     * <p>If a {@link SecondaryKey#relatedEntity} is used and the primary index
+     * for the related entity is not already open, it will be opened by this
+     * method.  That will, in turn, open its secondary indices, which can
+     * cascade to open other primary indices.</p>
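+     *
+     * <p>For example (an illustrative sketch; it assumes an {@code Employee}
+     * entity class whose {@code name} field is annotated with
+     * {@literal @SecondaryKey(relate=MANY_TO_ONE)}):</p>
+     * <pre class="code">
+     *  {@code PrimaryIndex<Long,Employee>} employeeById =
+     *      store.getPrimaryIndex(Long.class, Employee.class);
+     *  {@code SecondaryIndex<String,Long,Employee>} employeeByName =
+     *      store.getSecondaryIndex(employeeById, String.class, "name");</pre>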
+     *
+     * @param primaryIndex the primary index associated with the returned
+     * secondary index.  The entity class of the primary index, or one of its
+     * superclasses, must contain a secondary key with the given secondary key
+     * class and key name.
+     *
+     * @param keyClass the class of the secondary key field, or the
+     * corresponding primitive wrapper class if the secondary key field type is
+     * a primitive.
+     *
+     * @param keyName the name of the secondary key field, or the {@link
+     * SecondaryKey#name} if this name annotation property was specified.
+     *
+     * @return the secondary index.
+     *
+     * @throws IllegalArgumentException if the entity class or one of its
+     * superclasses does not contain a key field of the given key class and key
+     * name, or if the metadata for the secondary key is invalid.
+     */
+    public <SK,PK,E> SecondaryIndex<SK,PK,E>
+        getSecondaryIndex(PrimaryIndex<PK,E> primaryIndex,
+                          Class<SK> keyClass,
+                          String keyName)
+        throws DatabaseException {
+
+        return store.getSecondaryIndex
+            (primaryIndex, primaryIndex.getEntityClass(),
+             primaryIndex.getEntityClass().getName(),
+             keyClass, keyClass.getName(), keyName);
+    }
+
+    /**
+     * Returns a secondary index for a secondary key in an entity subclass,
+     * opening it if necessary.
+     *
+     * <p>If a {@link SecondaryKey#relatedEntity} is used and the primary index
+     * for the related entity is not already open, it will be opened by this
+     * method.  That will, in turn, open its secondary indices, which can
+     * cascade to open other primary indices.</p>
+     *
+     * @param primaryIndex the primary index associated with the returned
+     * secondary index.  The entity class of the primary index, or one of its
+     * superclasses, must contain a secondary key with the given secondary key
+     * class and key name.
+     *
+     * @param entitySubclass a subclass of the entity class for the primary
+     * index.  The entity subclass must contain a secondary key with the given
+     * secondary key class and key name.
+     *
+     * @param keyClass the class of the secondary key field, or the
+     * corresponding primitive wrapper class if the secondary key field type is
+     * a primitive.
+     *
+     * @param keyName the name of the secondary key field, or the {@link
+     * SecondaryKey#name} if this name annotation property was specified.
+     *
+     * @return the secondary index.
+     *
+     * @throws IllegalArgumentException if the given entity subclass does not
+     * contain a key field of the given key class and key name, or if the
+     * metadata for the secondary key is invalid.
+     */
+    public <SK,PK,E1,E2 extends E1> SecondaryIndex<SK,PK,E2>
+        getSubclassIndex(PrimaryIndex<PK,E1> primaryIndex,
+                         Class<E2> entitySubclass,
+                         Class<SK> keyClass,
+                         String keyName)
+        throws DatabaseException {
+
+        /* Make subclass metadata available before getting the index. */
+        getModel().getClassMetadata(entitySubclass.getName());
+
+        return store.getSecondaryIndex
+            (primaryIndex, entitySubclass,
+             primaryIndex.getEntityClass().getName(),
+             keyClass, keyClass.getName(), keyName);
+    }
+
+    /**
+     * Performs conversion of unevolved objects in order to reduce lazy
+     * conversion overhead.  Evolution may be performed concurrently with
+     * normal access to the store.
+     *
+     * <p>Conversion is performed one entity class at a time.  An entity class
+     * is converted only if it has {@link Mutations} associated with it via
+     * {@link StoreConfig#setMutations StoreConfig.setMutations}.</p>
+     *
+     * <p>Conversion of an entity class is performed by reading each entity,
+     * converting it if necessary, and updating it if conversion was performed.
+     * When all instances of an entity class are converted, references to the
+     * appropriate {@link Mutations} are deleted.  Therefore, if this method is
+     * called twice successfully without changing class definitions, the second
+     * call will do nothing.</p>
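+     *
+     * <p>For example (an illustrative sketch; the entity class name is an
+     * assumption made for the example):</p>
+     * <pre class="code">
+     *  EvolveConfig evolveConfig = new EvolveConfig();
+     *  // "com.example.Employee" is an example entity class name.
+     *  evolveConfig.addClassToEvolve("com.example.Employee");
+     *  EvolveStats stats = store.evolve(evolveConfig);
+     *  System.out.println(stats.getNRead() + " entities were read");</pre>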
+     *
+     * @see com.sleepycat.persist.evolve Class Evolution
+     */
+    public EvolveStats evolve(EvolveConfig config)
+        throws DatabaseException {
+
+        return store.evolve(config);
+    }
+
+    /**
+     * Deletes all instances of this entity class and its (non-entity)
+     * subclasses.
+     *
+     * <p>The primary database and all secondary databases for the given entity
+     * class will be truncated.  The primary and secondary databases associated
+     * with the entity class must not be open except by this store, since
+     * database truncation is only possible when the database is not open.  The
+     * databases to be truncated will be closed before performing this
+     * operation, if they were previously opened by this store.</p>
+     *
+     * <p>Auto-commit is used implicitly if the store is transactional.</p>
+     *
+     * @param entityClass the entity class whose instances are to be deleted.
+     */
+    public void truncateClass(Class entityClass)
+        throws DatabaseException {
+
+        store.truncateClass(null, entityClass);
+    }
+
+    /**
+     * Deletes all instances of this entity class and its (non-entity)
+     * subclasses.
+     *
+     * <p>The primary database and all secondary databases for the given entity
+     * class will be truncated.  The primary and secondary databases associated
+     * with the entity class must not be open except by this store, since
+     * database truncation is only possible when the database is not open.  The
+     * databases to be truncated will be closed before performing this
+     * operation, if they were previously opened by this store.</p>
+     *
+     * @param txn the transaction used to protect this operation, or null to
+     * use auto-commit (or no transaction if the store is non-transactional).
+     *
+     * @param entityClass the entity class whose instances are to be deleted.
+     */
+    public void truncateClass(Transaction txn, Class entityClass)
+        throws DatabaseException {
+
+        store.truncateClass(txn, entityClass);
+    }
+
+    /* <!-- begin JE only --> */
+    /**
+     * Flushes each modified index to disk that was opened in deferred-write
+     * mode.
+     *
+     * <p>All indexes are opened in deferred-write mode if true was passed to
+     * {@link StoreConfig#setDeferredWrite} for the store.</p>
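+     *
+     * <p>For example (an illustrative sketch; the store name and configuration
+     * shown are assumptions made for the example):</p>
+     * <pre class="code">
+     *  StoreConfig storeConfig = new StoreConfig();
+     *  storeConfig.setAllowCreate(true);
+     *  storeConfig.setDeferredWrite(true);
+     *  EntityStore store = new EntityStore(env, "MyStore", storeConfig);
+     *
+     *  // ... perform write operations via the store's indexes ...
+     *
+     *  store.sync();</pre>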
+     *
+     * <p>Alternatively, individual databases may be configured for deferred
+     * write using {@link DatabaseConfig#setDeferredWrite} along with {@link
+     * #getPrimaryConfig} and {@link #setPrimaryConfig}.  Caution should be
+     * used when configuring only some databases for deferred-write, since
+     * durability will be different for these databases than for other
+     * databases in the same store.</p>
+     *
+     * <p>This method is functionally equivalent to calling {@link
+     * Database#sync} for each deferred-write index Database that is open for
+     * this store.  However, while {@link Database#sync} flushes the log to
+     * disk each time it is called, this method flushes the log only once after
+     * syncing all databases; this method therefore causes less I/O than
+     * calling {@link Database#sync} multiple times.</p>
+     *
+     * <p>Instead of calling this method, {@link Environment#sync} may be used.
+     * The difference is that this method will only flush the databases for
+     * this store, while {@link Environment#sync} will sync all deferred-write
+     * databases currently open for the environment and will also perform a
+     * full checkpoint.  This method is therefore less expensive than a full
+     * sync of the environment.</p>
+     */
+    public void sync()
+        throws DatabaseException {
+
+        store.sync();
+    }
+    /* <!-- end JE only --> */
+
+    /**
+     * Closes the primary and secondary databases for the given entity class
+     * that were opened via this store.  The caller must ensure that the
+     * primary and secondary indices obtained from this store are no longer in
+     * use.
+     *
+     * @param entityClass the entity class whose databases are to be closed.
+     */
+    public void closeClass(Class entityClass)
+        throws DatabaseException {
+
+        store.closeClass(entityClass);
+    }
+
+    /**
+     * Closes all databases and sequences that were opened via this store.  The
+     * caller must ensure that no databases opened via this store are in use.
+     */
+    public void close()
+        throws DatabaseException {
+
+        store.close();
+    }
+
+    /**
+     * Returns a named sequence for direct use with the Berkeley DB engine
+     * API, opening it if necessary.
+     *
+     * @param name the sequence name, which is normally defined using the
+     * {@link PrimaryKey#sequence} annotation property.
+     *
+     * @return the open sequence for the given sequence name.
+     */
+    public Sequence getSequence(String name)
+        throws DatabaseException {
+
+        return store.getSequence(name);
+    }
+
+    /**
+     * Returns the default Berkeley DB engine API configuration for a named key
+     * sequence.
+     *
+     * <p>The returned configuration is as follows.  All other properties have
+     * default values.</p>
+     * <ul>
+     * <li>The {@link SequenceConfig#setInitialValue InitialValue} is one.</li>
+     * <li>The {@link SequenceConfig#setRange Range} minimum is one.</li>
+     * <li>The {@link SequenceConfig#setCacheSize CacheSize} is 100.</li>
+     * <li>{@link SequenceConfig#setAutoCommitNoSync AutoCommitNoSync} is
+     * true.</li>
+     * <li>{@link SequenceConfig#setAllowCreate AllowCreate} is set to true
+     * if the store is not {@link StoreConfig#setReadOnly ReadOnly}.</li>
+     * </ul>
+     *
+     * @param name the sequence name, which is normally defined using the
+     * {@link PrimaryKey#sequence} annotation property.
+     *
+     * @return the default configuration for the given sequence name.
+     */
+    public SequenceConfig getSequenceConfig(String name) {
+        return store.getSequenceConfig(name);
+    }
+
+    /**
+     * Configures a named key sequence using the Berkeley DB engine API.
+     *
+     * <p>To be compatible with the entity model and the Direct Persistence
+     * Layer, the configuration should be retrieved using {@link
+     * #getSequenceConfig getSequenceConfig}, modified, and then passed to this
+     * method.</p>
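+     *
+     * <p>For example (an illustrative sketch; the sequence name "ID" is
+     * assumed to match a {@literal @PrimaryKey(sequence="ID")} declaration on
+     * some entity class):</p>
+     * <pre class="code">
+     *  SequenceConfig seqConfig = store.getSequenceConfig("ID");
+     *  seqConfig.setCacheSize(1000);
+     *  store.setSequenceConfig("ID", seqConfig);</pre>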
+     *
+     * <p>If the range is changed to include the value zero, see {@link
+     * PrimaryKey} for restrictions.</p>
+     *
+     * @param name the sequence name, which is normally defined using the
+     * {@link PrimaryKey#sequence} annotation property.
+     *
+     * @param config the configuration to use for the given sequence name.
+     *
+     * @throws IllegalArgumentException if the configuration is incompatible
+     * with the entity model or the Direct Persistence Layer.
+     *
+     * @throws IllegalStateException if the sequence has already been opened.
+     */
+    public void setSequenceConfig(String name, SequenceConfig config) {
+        store.setSequenceConfig(name, config);
+    }
+
+    /**
+     * Returns the default primary database Berkeley DB engine API
+     * configuration for an entity class.
+     *
+     * <p>The returned configuration is as follows.  All other properties have
+     * default values.</p>
+     * <ul>
+     * <li>{@link DatabaseConfig#setTransactional Transactional} is set to
+     * match {@link StoreConfig#setTransactional StoreConfig}.</li>
+     * <li>{@link DatabaseConfig#setAllowCreate AllowCreate} is set to true
+     * if the store is not {@link StoreConfig#setReadOnly ReadOnly}.</li>
+     * <li>{@link DatabaseConfig#setReadOnly ReadOnly} is set to match
+     * {@link StoreConfig#setReadOnly StoreConfig}.</li>
+     * <!-- begin JE only --> *
+     * <li>{@link DatabaseConfig#setDeferredWrite DeferredWrite} is set to
+     * match {@link StoreConfig#setDeferredWrite StoreConfig}.</li>
+     * <li>{@link DatabaseConfig#setTemporary Temporary} is set to
+     * match {@link StoreConfig#setTemporary StoreConfig}.</li>
+     * <!-- end JE only --> *
+     * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator} is set to
+     * an internal class if a key comparator is used.</li>
+     * </ul>
+     *
+     * @param entityClass the entity class identifying the primary database.
+     *
+     * @return the default configuration for the given entity class.
+     */
+    public DatabaseConfig getPrimaryConfig(Class entityClass) {
+        return store.getPrimaryConfig(entityClass);
+    }
+
+    /**
+     * Configures the primary database for an entity class using the Berkeley
+     * DB engine API.
+     *
+     * <p>To be compatible with the entity model and the Direct Persistence
+     * Layer, the configuration should be retrieved using {@link
+     * #getPrimaryConfig getPrimaryConfig}, modified, and then passed to this
+     * method.  The following configuration properties may not be changed:</p>
+     * <ul>
+     * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates}</li>
+     * <!-- begin JE only --> *
+     * <li>{@link DatabaseConfig#setTemporary Temporary}</li>
+     * <!-- end JE only --> *
+     * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator}</li>
+     * </ul>
+     *
+     * @param entityClass the entity class identifying the primary database.
+     *
+     * @param config the configuration to use for the given entity class.
+     *
+     * @throws IllegalArgumentException if the configuration is incompatible
+     * with the entity model or the Direct Persistence Layer.
+     *
+     * @throws IllegalStateException if the database has already been opened.
+     */
+    public void setPrimaryConfig(Class entityClass, DatabaseConfig config) {
+        store.setPrimaryConfig(entityClass, config);
+    }
+
+    /**
+     * Returns the default secondary database Berkeley DB engine API
+     * configuration for an entity class and key name.
+     *
+     * <p>The returned configuration is as follows.  All other properties have
+     * default values.</p>
+     * <ul>
+     * <li>{@link DatabaseConfig#setTransactional Transactional} is set to
+     * match the primary database.</li>
+     * <li>{@link DatabaseConfig#setAllowCreate AllowCreate} is set to true
+     * if the primary database is not {@link DatabaseConfig#setReadOnly
+     * ReadOnly}.</li>
+     * <li>{@link DatabaseConfig#setReadOnly ReadOnly} is set to match
+     * the primary database.</li>
+     * <!-- begin JE only --> *
+     * <li>{@link DatabaseConfig#setDeferredWrite DeferredWrite} is set to
+     * match the primary database.</li>
+     * <li>{@link DatabaseConfig#setTemporary Temporary} is set to
+     * match {@link StoreConfig#setTemporary StoreConfig}.</li>
+     * <!-- end JE only --> *
+     * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator} is set to
+     * an internal class if a key comparator is used.</li>
+     * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates} is set
+     * according to {@link SecondaryKey#relate}.</li>
+     * <li>{@link SecondaryConfig#setAllowPopulate AllowPopulate} is set to
+     * true when a secondary key is added to an existing primary index.</li>
+     * <li>{@link SecondaryConfig#setKeyCreator KeyCreator} or {@link
+     * SecondaryConfig#setMultiKeyCreator MultiKeyCreator} is set to an
+     * internal instance.</li>
+     * <li>{@link SecondaryConfig#setForeignMultiKeyNullifier
+     * ForeignMultiKeyNullifier} is set to an internal instance if {@link
+     * SecondaryKey#onRelatedEntityDelete} is {@link DeleteAction#NULLIFY}.</li>
+     * </ul>
+     *
+     * @param entityClass the entity class containing the given secondary key
+     * name.
+     *
+     * @param keyName the name of the secondary key field, or the {@link
+     * SecondaryKey#name} if this name annotation property was specified.
+     *
+     * @return the default configuration for the given secondary key.
+     */
+    public SecondaryConfig getSecondaryConfig(Class entityClass,
+                                              String keyName) {
+        return store.getSecondaryConfig(entityClass, keyName);
+    }
+
+    /**
+     * Configures a secondary database for an entity class and key name using
+     * the Berkeley DB engine API.
+     *
+     * <p>To be compatible with the entity model and the Direct Persistence
+     * Layer, the configuration should be retrieved using {@link
+     * #getSecondaryConfig getSecondaryConfig}, modified, and then passed to
+     * this method.  The following configuration properties may not be
+     * changed:</p>
+     * <ul>
+     * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates}</li>
+     * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator}</li>
+     * <li>{@link DatabaseConfig#setDuplicateComparator
+     * DuplicateComparator}</li>
+     * <!-- begin JE only --> *
+     * <li>{@link DatabaseConfig#setTemporary Temporary}</li>
+     * <!-- end JE only --> *
+     * <li>{@link SecondaryConfig#setAllowPopulate AllowPopulate}</li>
+     * <li>{@link SecondaryConfig#setKeyCreator KeyCreator}</li>
+     * <li>{@link SecondaryConfig#setMultiKeyCreator MultiKeyCreator}</li>
+     * <li>{@link SecondaryConfig#setForeignKeyNullifier
+     * ForeignKeyNullifier}</li>
+     * <li>{@link SecondaryConfig#setForeignMultiKeyNullifier
+     * ForeignMultiKeyNullifier}</li>
+     * <li>{@link SecondaryConfig#setForeignKeyDeleteAction
+     * ForeignKeyDeleteAction}</li>
+     * <li>{@link SecondaryConfig#setForeignKeyDatabase
+     * ForeignKeyDatabase}</li>
+     * </ul>
+     *
+     * @param entityClass the entity class containing the given secondary key
+     * name.
+     *
+     * @param keyName the name of the secondary key field, or the {@link
+     * SecondaryKey#name} if this name annotation property was specified.
+     *
+     * @param config the configuration to use for the given secondary key.
+     *
+     * @throws IllegalArgumentException if the configuration is incompatible
+     * with the entity model or the Direct Persistence Layer.
+     *
+     * @throws IllegalStateException if the database has already been opened.
+     */
+    public void setSecondaryConfig(Class entityClass,
+                                   String keyName,
+                                   SecondaryConfig config) {
+        store.setSecondaryConfig(entityClass, keyName, config);
+    }
+}
diff --git a/src/com/sleepycat/persist/EntityValueAdapter.java b/src/com/sleepycat/persist/EntityValueAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..0bd285eda8019973506eaea1806d4fb98fb8dc96
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityValueAdapter.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityValueAdapter.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A ValueAdapter where the "value" is the entity.
+ *
+ * @author Mark Hayes
+ */
+class EntityValueAdapter<V> implements ValueAdapter<V> {
+
+    private EntityBinding entityBinding;
+    private boolean isSecondary;
+
+    EntityValueAdapter(Class<V> entityClass,
+                       EntityBinding entityBinding,
+                       boolean isSecondary) {
+        this.entityBinding = entityBinding;
+        this.isSecondary = isSecondary;
+    }
+
+    public DatabaseEntry initKey() {
+        return new DatabaseEntry();
+    }
+
+    public DatabaseEntry initPKey() {
+        return isSecondary ? (new DatabaseEntry()) : null;
+    }
+
+    public DatabaseEntry initData() {
+        return new DatabaseEntry();
+    }
+
+    public void clearEntries(DatabaseEntry key,
+                             DatabaseEntry pkey,
+                             DatabaseEntry data) {
+        key.setData(null);
+        if (isSecondary) {
+            pkey.setData(null);
+        }
+        data.setData(null);
+    }
+
+    public V entryToValue(DatabaseEntry key,
+                          DatabaseEntry pkey,
+                          DatabaseEntry data) {
+        return (V) entityBinding.entryToObject(isSecondary ? pkey : key, data);
+    }
+
+    public void valueToData(V value, DatabaseEntry data) {
+        entityBinding.objectToData(value, data);
+    }
+}
diff --git a/src/com/sleepycat/persist/ForwardCursor.java b/src/com/sleepycat/persist/ForwardCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..c6aa9a95aa19afd90cc201f76e8442c29942b68b
--- /dev/null
+++ b/src/com/sleepycat/persist/ForwardCursor.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForwardCursor.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Iterator;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+
+/**
+ * Cursor operations limited to traversing forward.  See {@link EntityCursor}
+ * for general information on cursors.
+ *
+ * <p>{@code ForwardCursor} objects are <em>not</em> thread-safe.  Cursors
+ * should be opened, used and closed by a single thread.</p>
+ *
+ * <p><em>WARNING:</em> Cursors must always be closed to prevent resource leaks
+ * which could lead to the index becoming unusable or cause an
+ * <code>OutOfMemoryError</code>.  To ensure that a cursor is closed in the
+ * face of exceptions, close it in a finally block.</p>
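+ *
+ * <p>For example (an illustrative sketch; {@code Person} is assumed to be an
+ * entity class and {@code cursor} any open {@code ForwardCursor} over it):</p>
+ * <pre class="code">
+ *  {@literal ForwardCursor<Person> cursor = ...;}
+ *  try {
+ *      Person person;
+ *      while ((person = cursor.next()) != null) {
+ *          // process person
+ *      }
+ *  } finally {
+ *      cursor.close();
+ *  }</pre>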
+ *
+ * @author Mark Hayes
+ */
+public interface ForwardCursor<V> extends Iterable<V> {
+
+    /**
+     * Moves the cursor to the next value and returns it, or returns null
+     * if there are no more values in the cursor range.  If the cursor is
+     * uninitialized, this method returns the first value.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the next value, or null if there are no more values in the
+     * cursor range.
+     */
+    V next()
+        throws DatabaseException;
+
+    /**
+     * Moves the cursor to the next value and returns it, or returns null
+     * if there are no more values in the cursor range.  If the cursor is
+     * uninitialized, this method returns the first value.
+     *
+     * @param lockMode the lock mode to use for this operation, or null to
+     * use {@link LockMode#DEFAULT}.
+     *
+     * @return the next value, or null if there are no more values in the
+     * cursor range.
+     */
+    V next(LockMode lockMode)
+        throws DatabaseException;
+
+    /**
+     * Returns an iterator over the key range, starting with the value
+     * following the current position or at the first value if the cursor is
+     * uninitialized.
+     *
+     * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+     *
+     * @return the iterator.
+     */
+    Iterator<V> iterator();
+
+    /**
+     * Returns an iterator over the key range, starting with the value
+     * following the current position or at the first value if the cursor is
+     * uninitialized.
+     *
+     * @param lockMode the lock mode to use for all operations performed
+     * using the iterator, or null to use {@link LockMode#DEFAULT}.
+     *
+     * @return the iterator.
+     */
+    Iterator<V> iterator(LockMode lockMode);
+
+    /**
+     * Closes the cursor.
+     */
+    void close()
+        throws DatabaseException;
+}
diff --git a/src/com/sleepycat/persist/KeySelector.java b/src/com/sleepycat/persist/KeySelector.java
new file mode 100644
index 0000000000000000000000000000000000000000..a9ac17330c8e46681ffb686047f929311bf5c8c7
--- /dev/null
+++ b/src/com/sleepycat/persist/KeySelector.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeySelector.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+/**
+ * This is package-private to hide it until unsorted access is implemented.
+ *
+ * Implemented to select keys to be returned by an unsorted {@code
+ * ForwardCursor}.
+ *
+ * <p>The reason for implementing a selector, rather than filtering the objects
+ * returned by the {@link ForwardCursor}, is to improve performance when not
+ * all keys are to be processed.  Keys are passed to this interface without
+ * retrieving record data or locking, so it is less expensive to return false
+ * from this method than to retrieve the object from the cursor.</p>
+ *
+ * see EntityIndex#unsortedKeys
+ * see EntityIndex#unsortedEntities
+ *
+ * @author Mark Hayes
+ */
+interface KeySelector<K> {
+
+    /**
+     * Returns whether a given key should be returned via the cursor.
+     *
+     * <p>This method should not assume that the given key is for a committed
+     * record, nor should it assume that the key will be returned via
+     * the cursor if this method returns true.  The record for this key will
+     * not be locked until this method returns.  If, when the record is locked,
+     * the record is found to be uncommitted or deleted, the key will not be
+     * returned via the cursor.</p>
+     */
+    boolean selectKey(K key);
+}
diff --git a/src/com/sleepycat/persist/KeyValueAdapter.java b/src/com/sleepycat/persist/KeyValueAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..1c2e2135e0b39bc7a623db5f3ec96c734d09450b
--- /dev/null
+++ b/src/com/sleepycat/persist/KeyValueAdapter.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyValueAdapter.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A ValueAdapter where the "value" is the key (the primary key in a primary
+ * index or the secondary key in a secondary index).
+ *
+ * @author Mark Hayes
+ */
+class KeyValueAdapter<V> implements ValueAdapter<V> {
+
+    private EntryBinding keyBinding;
+
+    KeyValueAdapter(Class<V> keyClass, EntryBinding keyBinding) {
+        this.keyBinding = keyBinding;
+    }
+
+    public DatabaseEntry initKey() {
+        return new DatabaseEntry();
+    }
+
+    public DatabaseEntry initPKey() {
+        return null;
+    }
+
+    public DatabaseEntry initData() {
+        return BasicIndex.NO_RETURN_ENTRY;
+    }
+
+    public void clearEntries(DatabaseEntry key,
+                             DatabaseEntry pkey,
+                             DatabaseEntry data) {
+        key.setData(null);
+    }
+
+    public V entryToValue(DatabaseEntry key,
+                          DatabaseEntry pkey,
+                          DatabaseEntry data) {
+        return (V) keyBinding.entryToObject(key);
+    }
+
+    public void valueToData(V value, DatabaseEntry data) {
+        throw new UnsupportedOperationException
+            ("Cannot change the data in a key-only index");
+    }
+}
diff --git a/src/com/sleepycat/persist/KeysIndex.java b/src/com/sleepycat/persist/KeysIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..55e6e625c41ee9df1de48f3c5d02557a625b2af0
--- /dev/null
+++ b/src/com/sleepycat/persist/KeysIndex.java
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeysIndex.java,v 1.10.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+/**
+ * The EntityIndex returned by SecondaryIndex.keysIndex().  This index maps
+ * secondary key to primary key.  In Berkeley DB internal terms, this is a
+ * secondary database that is opened without associating it with a primary.
+ *
+ * @author Mark Hayes
+ */
+class KeysIndex<SK,PK> extends BasicIndex<SK,PK> {
+
+    private EntryBinding pkeyBinding;
+    private SortedMap<SK,PK> map;
+
+    KeysIndex(Database db,
+              Class<SK> keyClass,
+              EntryBinding keyBinding,
+              Class<PK> pkeyClass,
+              EntryBinding pkeyBinding)
+        throws DatabaseException {
+
+        super(db, keyClass, keyBinding,
+              new DataValueAdapter<PK>(pkeyClass, pkeyBinding));
+        this.pkeyBinding = pkeyBinding;
+    }
+
+    /*
+     * Of the EntityIndex methods only get()/map()/sortedMap() are implemented
+     * here.  All other methods are implemented by BasicIndex.
+     */
+
+    public PK get(SK key)
+        throws DatabaseException {
+
+        return get(null, key, null);
+    }
+
+    public PK get(Transaction txn, SK key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry pkeyEntry = new DatabaseEntry();
+        keyBinding.objectToEntry(key, keyEntry);
+
+        OperationStatus status = db.get(txn, keyEntry, pkeyEntry, lockMode);
+
+        if (status == OperationStatus.SUCCESS) {
+            return (PK) pkeyBinding.entryToObject(pkeyEntry);
+        } else {
+            return null;
+        }
+    }
+
+    public Map<SK,PK> map() {
+        return sortedMap();
+    }
+
+    public synchronized SortedMap<SK,PK> sortedMap() {
+        if (map == null) {
+            map = new StoredSortedMap(db, keyBinding, pkeyBinding, true);
+        }
+        return map;
+    }
+
+    boolean isUpdateAllowed() {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/persist/PrimaryIndex.java b/src/com/sleepycat/persist/PrimaryIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..d09eb0226711e2d45423298010e846c059f8a800
--- /dev/null
+++ b/src/com/sleepycat/persist/PrimaryIndex.java
@@ -0,0 +1,538 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrimaryIndex.java,v 1.21.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.impl.PersistEntityBinding;
+import com.sleepycat.persist.impl.PersistKeyAssigner;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+/**
+ * The primary index for an entity class and its primary key.
+ *
+ * <p>{@code PrimaryIndex} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code PrimaryIndex} object.</p>
+ *
+ * <p>{@code PrimaryIndex} implements {@link EntityIndex} to map the primary
+ * key type (PK) to the entity type (E).</p>
+ *
+ * <p>The {@link Entity} annotation may be used to define an entity class and
+ * the {@link PrimaryKey} annotation may be used to define a primary key as
+ * shown in the following example.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     String name;
+ *
+ *     Employee(long id, String name) {
+ *         this.id = id;
+ *         this.name = name;
+ *     }
+ *
+ *     private Employee() {} // For bindings
+ * }</pre>
+ *
+ * <p>To obtain the {@code PrimaryIndex} for a given entity class, call {@link
+ * EntityStore#getPrimaryIndex EntityStore.getPrimaryIndex}, passing the
+ * primary key class and the entity class.  For example:</p>
+ *
+ * <pre class="code">
+ * EntityStore store = new EntityStore(...);
+ *
+ * {@code PrimaryIndex<Long,Employee>} primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);</pre>
+ *
+ * <p>Note that {@code Long.class} is passed as the primary key class, but the
+ * primary key field has the primitive type {@code long}.  When a primitive
+ * primary key field is used, the corresponding primitive wrapper class is used
+ * to access the primary index.  For more information on key field types, see
+ * {@link PrimaryKey}.</p>
+ *
+ * <p>The {@code PrimaryIndex} provides the primary storage and access methods
+ * for the instances of a particular entity class.  Entities are inserted and
+ * updated in the {@code PrimaryIndex} by calling a method in the family of
+ * {@link #put} methods.  The {@link #put} method will insert the entity if no
+ * entity with the same primary key already exists.  If an entity with the same
+ * primary key does exist, it will update the entity and return the existing
+ * (old) entity.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee oldEntity;
+ * oldEntity = primaryIndex.put(new Employee(1, "Jane Smith"));    // Inserts an entity
+ * assert oldEntity == null;
+ * oldEntity = primaryIndex.put(new Employee(2, "Joan Smith"));    // Inserts an entity
+ * assert oldEntity == null;
+ * oldEntity = primaryIndex.put(new Employee(2, "Joan M. Smith")); // Updates an entity
+ * assert oldEntity != null;</pre>
+ *
+ * <p>The {@link #putNoReturn} method can be used to avoid the overhead of
+ * returning the existing entity, when the existing entity is not important to
+ * the application.  The return type of {@link #putNoReturn} is void.  For
+ * example:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.putNoReturn(new Employee(1, "Jane Smith"));    // Inserts an entity
+ * primaryIndex.putNoReturn(new Employee(2, "Joan Smith"));    // Inserts an entity
+ * primaryIndex.putNoReturn(new Employee(2, "Joan M. Smith")); // Updates an entity</pre>
+ *
+ * <p>The {@link #putNoOverwrite} method can be used to ensure that an existing
+ * entity is not overwritten.  {@link #putNoOverwrite} returns true if the
+ * entity was inserted, or false if an existing entity exists and no action was
+ * taken.  For example:</p>
+ *
+ * <pre class="code">
+ * boolean inserted;
+ * inserted = primaryIndex.putNoOverwrite(new Employee(1, "Jane Smith"));    // Inserts an entity
+ * assert inserted;
+ * inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan Smith"));    // Inserts an entity
+ * assert inserted;
+ * inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan M. Smith")); // <strong>No action was taken!</strong>
+ * assert !inserted;</pre>
+ *
+ * <p>Primary key values must be unique; in other words, each instance of a
+ * given entity class must have a distinct primary key value.  Rather than
+ * assigning the unique primary key values yourself, a <em>sequence</em> can be
+ * used to assign sequential integer values automatically, starting with the
+ * value 1 (one).  A sequence is defined using the {@link PrimaryKey#sequence}
+ * annotation property.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey(sequence="ID")}
+ *     long id;
+ *
+ *     String name;
+ *
+ *     Employee(String name) {
+ *         this.name = name;
+ *     }
+ *
+ *     private Employee() {} // For bindings
+ * }</pre>
+ *
+ * <p>The name of the sequence used above is "ID".  Any name can be used.  If
+ * the same sequence name is used in more than one entity class, the sequence
+ * will be shared by those classes; in other words, a single sequence of
+ * integers will be used for all instances of those classes.  See {@link
+ * PrimaryKey#sequence} for more information.</p>
+ *
+ * <p>Any method in the family of {@link #put} methods may be used to insert
+ * entities where the primary key is assigned from a sequence.  When the {@link
+ * #put} method returns, the primary key field of the entity object will be set
+ * to the assigned key value.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee;
+ * employee = new Employee("Jane Smith");
+ * primaryIndex.putNoReturn(employee);    // Inserts an entity
+ * assert employee.id == 1;
+ * employee = new Employee("Joan Smith");
+ * primaryIndex.putNoReturn(employee);    // Inserts an entity
+ * assert employee.id == 2;</pre>
+ *
+ * <p>This raises the question:  How do you update an existing entity without
+ * assigning a new primary key?  The answer is that the {@link #put} methods
+ * will only assign a new key from the sequence if the primary key field is
+ * zero or null (for reference types).  If an entity with a non-zero and
+ * non-null key field is passed to a {@link #put} method, any existing entity
+ * with that primary key value will be updated.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee;
+ * employee = new Employee("Jane Smith");
+ * primaryIndex.putNoReturn(employee);    // Inserts an entity
+ * assert employee.id == 1;
+ * employee = new Employee("Joan Smith");
+ * primaryIndex.putNoReturn(employee);    // Inserts an entity
+ * assert employee.id == 2;
+ * employee.name = "Joan M. Smith";
+ * primaryIndex.putNoReturn(employee);    // Updates an existing entity
+ * assert employee.id == 2;</pre>
+ *
+ * <p>Since {@code PrimaryIndex} implements the {@link EntityIndex} interface,
+ * it shares the common index methods for retrieving and deleting entities,
+ * opening cursors and using transactions.  See {@link EntityIndex} for more
+ * information on these topics.</p>
+ *
+ * <p>Note that when using an index, keys and values are stored and retrieved
+ * by value not by reference.  In other words, if an entity object is stored
+ * and then retrieved, or retrieved twice, each object will be a separate
+ * instance.  For example, in the code below the assertion will always
+ * fail.</p>
+ * <pre class="code">
+ * MyKey key = ...;
+ * MyEntity entity1 = new MyEntity(key, ...);
+ * index.put(entity1);
+ * MyEntity entity2 = index.get(key);
+ * assert entity1 == entity2; // always fails!
+ * </pre>
+ *
+ * @author Mark Hayes
+ */
+public class PrimaryIndex<PK,E> extends BasicIndex<PK,E> {
+
+    private Class<E> entityClass;
+    private EntityBinding entityBinding;
+    private SortedMap<PK,E> map;
+    private PersistKeyAssigner keyAssigner;
+
+    /**
+     * Creates a primary index without using an <code>EntityStore</code>.
+     *
+     * <p>This constructor is not normally needed and is provided for
+     * applications that wish to use custom bindings along with the Direct
+     * Persistence Layer.  Normally, {@link EntityStore#getPrimaryIndex
+     * getPrimaryIndex} is used instead.</p>
+     *
+     * <p>Note that when this constructor is used directly, primary keys cannot
+     * be automatically assigned from a sequence.  The key assignment feature
+     * requires knowledge of the primary key field, which is only available if
+     * an <code>EntityStore</code> is used.  Of course, primary keys may be
+     * assigned from a sequence manually before calling the <code>put</code>
+     * methods in this class.</p>
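+     *
+     * <p>For illustration, a minimal sketch assuming a hand-written
+     * {@code MyEmployeeBinding} class (hypothetical) that implements
+     * {@code EntityBinding<Employee>}:</p>
+     *
+     * <pre class="code">
+     * {@code EntryBinding<Long>} keyBinding =
+     *     TupleBinding.getPrimitiveBinding(Long.class);
+     * {@code EntityBinding<Employee>} entityBinding = new MyEmployeeBinding();
+     * {@code PrimaryIndex<Long,Employee>} index =
+     *     new {@code PrimaryIndex<Long,Employee>}
+     *         (database, Long.class, keyBinding, Employee.class, entityBinding);</pre>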
+     *
+     * @param database the primary database.
+     *
+     * @param keyClass the class of the primary key.
+     *
+     * @param keyBinding the binding to be used for primary keys.
+     *
+     * @param entityClass the class of the entities stored in this index.
+     *
+     * @param entityBinding the binding to be used for entities.
+     */
+    public PrimaryIndex(Database database,
+                        Class<PK> keyClass,
+                        EntryBinding<PK> keyBinding,
+                        Class<E> entityClass,
+                        EntityBinding<E> entityBinding)
+        throws DatabaseException {
+
+        super(database, keyClass, keyBinding,
+              new EntityValueAdapter(entityClass, entityBinding, false));
+
+        this.entityClass = entityClass;
+        this.entityBinding = entityBinding;
+
+        if (entityBinding instanceof PersistEntityBinding) {
+            keyAssigner =
+                ((PersistEntityBinding) entityBinding).getKeyAssigner();
+        }
+    }
+
+    /**
+     * Returns the underlying database for this index.
+     *
+     * @return the database.
+     */
+    public Database getDatabase() {
+        return db;
+    }
+
+    /**
+     * Returns the primary key class for this index.
+     *
+     * @return the key class.
+     */
+    public Class<PK> getKeyClass() {
+        return keyClass;
+    }
+
+    /**
+     * Returns the primary key binding for this index.
+     *
+     * @return the key binding.
+     */
+    public EntryBinding<PK> getKeyBinding() {
+        return keyBinding;
+    }
+
+    /**
+     * Returns the entity class for this index.
+     *
+     * @return the entity class.
+     */
+    public Class<E> getEntityClass() {
+        return entityClass;
+    }
+
+    /**
+     * Returns the entity binding for this index.
+     *
+     * @return the entity binding.
+     */
+    public EntityBinding<E> getEntityBinding() {
+        return entityBinding;
+    }
+
+    /**
+     * Inserts an entity and returns null, or updates it if the primary key
+     * already exists and returns the existing entity.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * <p>Auto-commit is used implicitly if the store is transactional.</p>
+     *
+     * @param entity the entity to be inserted or updated.
+     *
+     * @return the existing entity that was updated, or null if the entity was
+     * inserted.
+     */
+    public E put(E entity)
+        throws DatabaseException {
+
+        return put(null, entity);
+    }
+
+    /**
+     * Inserts an entity and returns null, or updates it if the primary key
+     * already exists and returns the existing entity.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * @param txn the transaction used to protect this operation, null to use
+     * auto-commit, or null if the store is non-transactional.
+     *
+     * @param entity the entity to be inserted or updated.
+     *
+     * @return the existing entity that was updated, or null if the entity was
+     * inserted.
+     */
+    public E put(Transaction txn, E entity)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        assignKey(entity, keyEntry);
+
+        boolean autoCommit = false;
+        Environment env = db.getEnvironment();
+        if (transactional &&
+            txn == null &&
+            DbCompat.getThreadTransaction(env) == null) {
+            txn = env.beginTransaction(null, null);
+            autoCommit = true;
+        }
+
+        boolean failed = true;
+        Cursor cursor = db.openCursor(txn, null);
+        LockMode lockMode = locking ? LockMode.RMW : null;
+        try {
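+            /*
+             * Loop until either an existing entity is updated or a new one
+             * is inserted.  KEYEXIST from putNoOverwrite means another
+             * thread inserted this key after our search missed, so retry
+             * the search and update the now-existing entity.
+             */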
+            while (true) {
+                OperationStatus status =
+                    cursor.getSearchKey(keyEntry, dataEntry, lockMode);
+                if (status == OperationStatus.SUCCESS) {
+                    E existing =
+                        (E) entityBinding.entryToObject(keyEntry, dataEntry);
+                    entityBinding.objectToData(entity, dataEntry);
+                    cursor.put(keyEntry, dataEntry);
+                    failed = false;
+                    return existing;
+                } else {
+                    entityBinding.objectToData(entity, dataEntry);
+                    status = cursor.putNoOverwrite(keyEntry, dataEntry);
+                    if (status != OperationStatus.KEYEXIST) {
+                        failed = false;
+                        return null;
+                    }
+                }
+            }
+        } finally {
+            cursor.close();
+            if (autoCommit) {
+                if (failed) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+                }
+            }
+        }
+    }
+
+    /**
+     * Inserts an entity, or updates it if the primary key already exists (does
+     * not return the existing entity).  This method may be used instead of
+     * {@link #put(Object)} to save the overhead of returning the existing
+     * entity.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * <p>Auto-commit is used implicitly if the store is transactional.</p>
+     *
+     * @param entity the entity to be inserted or updated.
+     */
+    public void putNoReturn(E entity)
+        throws DatabaseException {
+
+        putNoReturn(null, entity);
+    }
+
+    /**
+     * Inserts an entity, or updates it if the primary key already exists (does
+     * not return the existing entity).  This method may be used instead of
+     * {@link #put(Transaction,Object)} to save the overhead of returning the
+     * existing entity.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * @param txn the transaction used to protect this operation, null to use
+     * auto-commit, or null if the store is non-transactional.
+     *
+     * @param entity the entity to be inserted or updated.
+     */
+    public void putNoReturn(Transaction txn, E entity)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        assignKey(entity, keyEntry);
+        entityBinding.objectToData(entity, dataEntry);
+
+        db.put(txn, keyEntry, dataEntry);
+    }
+
+    /**
+     * Inserts an entity and returns true, or returns false if the primary key
+     * already exists.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * <p>Auto-commit is used implicitly if the store is transactional.</p>
+     *
+     * @param entity the entity to be inserted.
+     *
+     * @return true if the entity was inserted, or false if an entity with the
+     * same primary key is already present.
+     */
+    public boolean putNoOverwrite(E entity)
+        throws DatabaseException {
+
+        return putNoOverwrite(null, entity);
+    }
+
+    /**
+     * Inserts an entity and returns true, or returns false if the primary key
+     * already exists.
+     *
+     * <p>If a {@link PrimaryKey#sequence} is used and the primary key field of
+     * the given entity is null or zero, this method will assign the next value
+     * from the sequence to the primary key field of the given entity.</p>
+     *
+     * @param txn the transaction used to protect this operation, null to use
+     * auto-commit, or null if the store is non-transactional.
+     *
+     * @param entity the entity to be inserted.
+     *
+     * @return true if the entity was inserted, or false if an entity with the
+     * same primary key is already present.
+     */
+    public boolean putNoOverwrite(Transaction txn, E entity)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        assignKey(entity, keyEntry);
+        entityBinding.objectToData(entity, dataEntry);
+
+        OperationStatus status = db.putNoOverwrite(txn, keyEntry, dataEntry);
+
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    /**
+     * If we are assigning primary keys from a sequence, assign the next key
+     * and set the primary key field.
+     */
+    private void assignKey(E entity, DatabaseEntry keyEntry)
+        throws DatabaseException {
+
+        if (keyAssigner != null) {
+            if (!keyAssigner.assignPrimaryKey(entity, keyEntry)) {
+                entityBinding.objectToKey(entity, keyEntry);
+            }
+        } else {
+            entityBinding.objectToKey(entity, keyEntry);
+        }
+    }
+
+    /*
+     * Of the EntityIndex methods only get()/map()/sortedMap() are implemented
+     * here.  All other methods are implemented by BasicIndex.
+     */
+
+    public E get(PK key)
+        throws DatabaseException {
+
+        return get(null, key, null);
+    }
+
+    public E get(Transaction txn, PK key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        keyBinding.objectToEntry(key, keyEntry);
+
+        OperationStatus status = db.get(txn, keyEntry, dataEntry, lockMode);
+
+        if (status == OperationStatus.SUCCESS) {
+            return (E) entityBinding.entryToObject(keyEntry, dataEntry);
+        } else {
+            return null;
+        }
+    }
+
+    public Map<PK,E> map() {
+        return sortedMap();
+    }
+
+    public synchronized SortedMap<PK,E> sortedMap() {
+        if (map == null) {
+            map = new StoredSortedMap(db, keyBinding, entityBinding, true);
+        }
+        return map;
+    }
+
+    boolean isUpdateAllowed() {
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java b/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..7b58a03ac357f33a94696f96679c952f1a43b4a8
--- /dev/null
+++ b/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrimaryKeyValueAdapter.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A ValueAdapter where the "value" is the primary key.
+ *
+ * @author Mark Hayes
+ */
+class PrimaryKeyValueAdapter<V> implements ValueAdapter<V> {
+
+    private EntryBinding keyBinding;
+
+    PrimaryKeyValueAdapter(Class<V> keyClass, EntryBinding keyBinding) {
+        this.keyBinding = keyBinding;
+    }
+
+    public DatabaseEntry initKey() {
+        return new DatabaseEntry();
+    }
+
+    public DatabaseEntry initPKey() {
+        return new DatabaseEntry();
+    }
+
+    public DatabaseEntry initData() {
+        return BasicIndex.NO_RETURN_ENTRY;
+    }
+
+    public void clearEntries(DatabaseEntry key,
+                             DatabaseEntry pkey,
+                             DatabaseEntry data) {
+        key.setData(null);
+        pkey.setData(null);
+    }
+
+    public V entryToValue(DatabaseEntry key,
+                          DatabaseEntry pkey,
+                          DatabaseEntry data) {
+        return (V) keyBinding.entryToObject(pkey);
+    }
+
+    public void valueToData(V value, DatabaseEntry data) {
+        throw new UnsupportedOperationException
+            ("Cannot change the data in a key-only index");
+    }
+}
diff --git a/src/com/sleepycat/persist/SecondaryIndex.java b/src/com/sleepycat/persist/SecondaryIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..6547172fd11e15ee6c8b932dc2f9411916b82441
--- /dev/null
+++ b/src/com/sleepycat/persist/SecondaryIndex.java
@@ -0,0 +1,976 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryIndex.java,v 1.19.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.io.FileNotFoundException;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.model.DeleteAction;
+import com.sleepycat.persist.model.Relationship;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * The secondary index for an entity class and a secondary key.
+ *
+ * <p>{@code SecondaryIndex} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code SecondaryIndex} object.</p>
+ *
+ * <p>{@code SecondaryIndex} implements {@link EntityIndex} to map the
+ * secondary key type (SK) to the entity type (E).  In other words, entities
+ * are accessed by secondary key values.</p>
+ *
+ * <p>The {@link SecondaryKey} annotation may be used to define a secondary key
+ * as shown in the following example.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>Before obtaining a {@code SecondaryIndex}, the {@link PrimaryIndex} must
+ * be obtained for the entity class.  To obtain the {@code SecondaryIndex} call
+ * {@link EntityStore#getSecondaryIndex EntityStore.getSecondaryIndex}, passing
+ * the primary index, the secondary key class and the secondary key name.  For
+ * example:</p>
+ *
+ * <pre class="code">
+ * EntityStore store = new EntityStore(...);
+ *
+ * {@code PrimaryIndex<Long,Employee>} primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);
+ *
+ * {@code SecondaryIndex<String,Long,Employee>} secondaryIndex =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");</pre>
+ *
+ * <p>Since {@code SecondaryIndex} implements the {@link EntityIndex}
+ * interface, it shares the common index methods for retrieving and deleting
+ * entities, opening cursors and using transactions.  See {@link EntityIndex}
+ * for more information on these topics.</p>
+ *
+ * <p>{@code SecondaryIndex} does <em>not</em> provide methods for inserting
+ * and updating entities.  That must be done using the {@link
+ * PrimaryIndex}.</p>
+ *
+ * <p>Note that a {@code SecondaryIndex} has three type parameters {@code
+ * <SK,PK,E>} or in the example {@code <String,Long,Employee>} while a {@link
+ * PrimaryIndex} has only two type parameters {@code <PK,E>} or {@code
+ * <Long,Employee>}.  This is because a {@code SecondaryIndex} has an extra
+ * level of mapping:  It maps from secondary key to primary key, and then from
+ * primary key to entity.  For example, consider this entity:</p>
+ *
+ * <p><table class="code" border="1">
+ *   <tr><th>ID</th><th>Department</th><th>Name</th></tr>
+ *   <tr><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ * </table></p>
+ *
+ * <p>The {@link PrimaryIndex} maps from id directly to the entity, or from
+ * primary key 1 to the "Jane Smith" entity in the example.  The {@code
+ * SecondaryIndex} maps from department to id, or from secondary key
+ * "Engineering" to primary key 1 in the example, and then uses the {@code
+ * PrimaryIndex} to map from the primary key to the entity.</p>
+ *
+ * <p>Because of this extra type parameter and extra level of mapping, a {@code
+ * SecondaryIndex} can provide more than one mapping, or view, of the entities
+ * in the primary index.  The main mapping of a {@code SecondaryIndex} is to
+ * map from secondary key (SK) to entity (E), or in the example, from the
+ * String department key to the Employee entity.  The {@code SecondaryIndex}
+ * itself, by implementing {@code EntityIndex<SK,E>}, provides this
+ * mapping.</p>
+ *
+ * <p>The second mapping provided by {@code SecondaryIndex} is from secondary
+ * key (SK) to primary key (PK), or in the example, from the String department
+ * key to the Long id key.  The {@link #keysIndex} method provides this
+ * mapping.  When accessing the keys index, the primary key is returned rather
+ * than the entity.  When only the primary key is needed and not the entire
+ * entity, using the keys index is less expensive than using the secondary
+ * index because the primary index does not have to be accessed.</p>
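+ *
+ * <p>For example, a minimal sketch using the keys index of the employee
+ * example above:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<String,Long>} keysIndex = secondaryIndex.keysIndex();
+ * Long employeeId = keysIndex.get("Engineering"); // Primary key, no entity read</pre>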
+ *
+ * <p>The third mapping provided by {@code SecondaryIndex} is from primary key
+ * (PK) to entity (E), for the subset of entities having a given secondary key
+ * (SK).  This mapping is provided by the {@link #subIndex} method.  A
+ * sub-index is convenient when you are interested in working with the subset
+ * of entities having a particular secondary key value, for example, all
+ * employees in a given department.</p>
+ *
+ * <p>All three mappings, along with the mapping provided by the {@link
+ * PrimaryIndex}, are shown using example data in the {@link EntityIndex}
+ * interface documentation.  See {@link EntityIndex} for more information.</p>
+ *
+ * <p>Note that when using an index, keys and values are stored and retrieved
+ * by value not by reference.  In other words, if an entity object is stored
+ * and then retrieved, or retrieved twice, each object will be a separate
+ * instance.  For example, in the code below the assertion will always
+ * fail.</p>
+ * <pre class="code">
+ * MyKey key = ...;
+ * MyEntity entity1 = index.get(key);
+ * MyEntity entity2 = index.get(key);
+ * assert entity1 == entity2; // always fails!
+ * </pre>
+ *
+ * <h3>One-to-One Relationships</h3>
+ *
+ * <p>A {@link Relationship#ONE_TO_ONE ONE_TO_ONE} relationship, although less
+ * common than other types of relationships, is the simplest type of
+ * relationship.  A single entity is related to a single secondary key value.
+ * For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+ *     String ssn;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@code SecondaryIndex<String,Long,Employee>} employeeBySsn =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "ssn");</pre>
+ *
+ * <p>With a {@link Relationship#ONE_TO_ONE ONE_TO_ONE} relationship, the
+ * secondary key must be unique; in other words, no two entities may have the
+ * same secondary key value.  If an attempt is made to store an entity having
+ * the same secondary key value as another existing entity, a {@link
+ * DatabaseException} will be thrown.</p>
+ *
+ * <p>Because the secondary key is unique, it is useful to lookup entities by
+ * secondary key using {@link EntityIndex#get}.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee = employeeBySsn.get(mySsn);</pre>
+ *
+ * <h3>Many-to-One Relationships</h3>
+ *
+ * <p>A {@link Relationship#MANY_TO_ONE MANY_TO_ONE} relationship is the most
+ * common type of relationship.  One or more entities are related to a single
+ * secondary key value.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@code SecondaryIndex<String,Long,Employee>} employeeByDepartment =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");</pre>
+ *
+ * <p>With a {@link Relationship#MANY_TO_ONE MANY_TO_ONE} relationship, the
+ * secondary key is not required to be unique; in other words, more than one
+ * entity may have the same secondary key value.  In this example, more than
+ * one employee may belong to the same department.</p>
+ *
+ * <p>The most convenient way to access the employees in a given department is
+ * by using a sub-index.  For example:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long,Employee>} subIndex = employeeByDepartment.subIndex(myDept);
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <h3>One-to-Many Relationships</h3>
+ *
+ * <p>In a {@link Relationship#ONE_TO_MANY ONE_TO_MANY} relationship, a single
+ * entity is related to one or more secondary key values.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+ *     {@literal Set<String> emailAddresses = new HashSet<String>();}
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@code SecondaryIndex<String,Long,Employee>} employeeByEmail =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "emailAddresses");</pre>
+ *
+ * <p>With a {@link Relationship#ONE_TO_MANY ONE_TO_MANY} relationship, the
+ * secondary key must be unique; in other words, no two entities may have the
+ * same secondary key value.  In this example, no two employees may have the
+ * same email address.  If an attempt is made to store an entity having the
+ * same secondary key value as another existing entity, a {@link
+ * DatabaseException} will be thrown.</p>
+ *
+ * <p>Because the secondary key is unique, it is useful to lookup entities by
+ * secondary key using {@link EntityIndex#get}.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee = employeeByEmail.get(myEmailAddress);</pre>
+ *
+ * <p>The secondary key field for a {@link Relationship#ONE_TO_MANY
+ * ONE_TO_MANY} relationship must be an array or collection type.  To access
+ * the email addresses of an employee, simply access the collection field
+ * directly.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee = primaryIndex.get(1L); // Get the entity by primary key
+ * employee.emailAddresses.add(myNewEmail);  // Add an email address
+ * primaryIndex.putNoReturn(employee);       // Update the entity</pre>
+ *
+ * <h3>Many-to-Many Relationships</h3>
+ *
+ * <p>In a {@link Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, one
+ * or more entities are related to one or more secondary key values.  For
+ * example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_MANY)}
+ *     {@literal Set<String> organizations = new HashSet<String>();}
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@code SecondaryIndex<String,Long,Employee>} employeeByOrganization =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "organizations");</pre>
+ *
+ * <p>With a {@link Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, the
+ * secondary key is not required to be unique; in other words, more than one
+ * entity may have the same secondary key value.  In this example, more than
+ * one employee may belong to the same organization.</p>
+ *
+ * <p>The most convenient way to access the employees in a given organization
+ * is by using a sub-index.  For example:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long,Employee>} subIndex = employeeByOrganization.subIndex(myOrg);
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The secondary key field for a {@link Relationship#MANY_TO_MANY
+ * MANY_TO_MANY} relationship must be an array or collection type.  To access
+ * the organizations of an employee, simply access the collection field
+ * directly.  For example:</p>
+ *
+ * <pre class="code">
+ * Employee employee = primaryIndex.get(1L); // Get the entity by primary key
+ * employee.organizations.remove(myOldOrg);  // Remove an organization
+ * primaryIndex.putNoReturn(employee);       // Update the entity</pre>
+ *
+ * <h3>Foreign Key Constraints for Related Entities</h3>
+ *
+ * <p>In all the examples above the secondary key is treated only as a simple
+ * value, such as a {@code String} department field.  In many cases, that is
+ * sufficient.  But in other cases, you may wish to constrain the secondary
+ * keys of one entity class to be valid primary keys of another entity
+ * class.  For example, a Department entity may also be defined:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Department {
+ *
+ *     {@literal @PrimaryKey}
+ *     String name;
+ *
+ *     String missionStatement;
+ *
+ *     private Department() {}
+ * }</pre>
+ *
+ * <p>You may wish to constrain the department field values of the Employee
+ * class in the examples above to be valid primary keys of the Department
+ * entity class.  In other words, you may wish to ensure that the department
+ * field of an Employee will always refer to a valid Department entity.</p>
+ *
+ * <p>You can implement this constraint yourself by validating the department
+ * field before you store an Employee.  For example:</p>
+ *
+ * <pre class="code">
+ * {@code PrimaryIndex<String,Department>} departmentIndex =
+ *     store.getPrimaryIndex(String.class, Department.class);
+ *
+ * void storeEmployee(Employee employee) throws DatabaseException {
+ *     if (departmentIndex.contains(employee.department)) {
+ *         primaryIndex.putNoReturn(employee);
+ *     } else {
+ *         throw new IllegalArgumentException("Department does not exist: " +
+ *                                            employee.department);
+ *     }
+ * }</pre>
+ *
+ * <p>Or, instead you could define the Employee department field as a foreign
+ * key, and this validation will be done for you when you attempt to store the
+ * Employee entity.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>The {@code relatedEntity=Department.class} above defines the department
+ * field as a foreign key that refers to a Department entity.  Whenever an
+ * Employee entity is stored, its department field value will be checked to
+ * ensure that a Department entity exists with that value as its primary key.
+ * If no such Department entity exists, then a {@link DatabaseException} is
+ * thrown, causing the transaction to be aborted (assuming that transactions
+ * are used).</p>
+ *
+ * <p>This raises the question:  What happens when a Department entity is deleted
+ * while one or more Employee entities have department fields that refer to
+ * the deleted department's primary key?  If the department were allowed to be
+ * deleted, the foreign key constraint for the Employee department field would
+ * be violated, because the Employee department field would refer to a
+ * department that does not exist.</p>
+ *
+ * <p>By default, when this situation arises the system does not allow the
+ * department to be deleted.  Instead, a {@link DatabaseException} is thrown,
+ * causing the transaction to be aborted.  In this case, in order to delete a
+ * department, the department field of all Employee entities must first be
+ * updated to refer to a different existing department, or set to null.  This
+ * is the responsibility of the application.</p>
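+ *
+ * <p>A minimal sketch of that application-side cleanup, assuming the indexes
+ * from the examples above and a department named "Sales" (hypothetical):</p>
+ *
+ * <pre class="code">
+ * // Collect the primary keys first, so the secondary index is not
+ * // modified while a cursor is open on it.
+ * {@literal List<Long> ids = new ArrayList<Long>();}
+ * {@code EntityCursor<Employee>} cursor =
+ *     employeeByDepartment.subIndex("Sales").entities();
+ * try {
+ *     for (Employee e : cursor) {
+ *         ids.add(e.id);
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }
+ * for (long id : ids) {
+ *     Employee e = primaryIndex.get(id);
+ *     e.department = null; // or assign another existing department
+ *     primaryIndex.putNoReturn(e);
+ * }
+ * departmentIndex.delete("Sales");</pre>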
+ *
+ * <p>There are two additional ways of handling deletion of a Department
+ * entity.  These alternatives are configured using the {@link
+ * SecondaryKey#onRelatedEntityDelete} annotation property.  Setting this
+ * property to {@link DeleteAction#NULLIFY} causes the Employee department
+ * field to be automatically set to null when the department they refer to is
+ * deleted.  This may or may not be desirable, depending on application
+ * policies.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@code @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class,
+ *                                       onRelatedEntityDelete=NULLIFY)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>The {@link DeleteAction#CASCADE} value, on the other hand, causes the
+ * Employee entities to be automatically deleted when the department they refer
+ * to is deleted.  This is probably not desirable in this particular example,
+ * but is useful for parent-child relationships.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Order {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     String description;
+ *
+ *     private Order() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class OrderItem {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@code @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Order.class,
+ *                                       onRelatedEntityDelete=CASCADE)}
+ *     long orderId;
+ *
+ *     String description;
+ *
+ *     private OrderItem() {}
+ * }</pre>
+ *
+ * <p>The OrderItem orderId field refers to its "parent" Order entity.  When an
+ * Order entity is deleted, it may be useful to automatically delete its
+ * "child" OrderItem entities.</p>
+ *
+ * <p>For more information, see {@link SecondaryKey#onRelatedEntityDelete}.</p>
+ *
+ * <h3>One-to-Many versus Many-to-One for Related Entities</h3>
+ *
+ * <p>When there is a conceptual Many-to-One relationship such as Employee to
+ * Department as illustrated in the examples above, the relationship may be
+ * implemented either as Many-to-One in the Employee class or as One-to-Many in
+ * the Department class.</p>
+ *
+ * <p>Here is the Many-to-One approach.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class Department {
+ *
+ *     {@literal @PrimaryKey}
+ *     String name;
+ *
+ *     String missionStatement;
+ *
+ *     private Department() {}
+ * }</pre>
+ *
+ * <p>And here is the One-to-Many approach.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class Department {
+ *
+ *     {@literal @PrimaryKey}
+ *     String name;
+ *
+ *     String missionStatement;
+ *
+ *     {@literal @SecondaryKey(relate=ONE_TO_MANY, relatedEntity=Employee.class)}
+ *     {@literal Set<Long> employees = new HashSet<Long>();}
+ *
+ *     private Department() {}
+ * }</pre>
+ *
+ * <p>Which approach is best?  The Many-to-One approach better handles large
+ * numbers of entities on the to-Many side of the relationship because it
+ * doesn't store a collection of keys as an entity field.  With Many-to-One a
+ * Btree is used to store the collection of keys and the Btree can easily
+ * handle very large numbers of keys.  With One-to-Many, each time a related
+ * key is added or removed the entity on the One side of the relationship,
+ * along with the complete collection of related keys, must be updated.
+ * Therefore, if large numbers of keys may be stored per relationship,
+ * Many-to-One is recommended.</p>
+ *
+ * <p>If the number of entities per relationship is not a concern, then you may
+ * wish to choose the approach that is most natural in your application data
+ * model.  For example, if you think of a Department as containing employees
+ * and you wish to modify the Department object each time an employee is added
+ * or removed, then you may wish to store a collection of Employee keys in the
+ * Department object (One-to-Many).</p>
+ *
+ * <p>Note that if you have a One-to-Many relationship and there is no related
+ * entity, then you don't have a choice -- you have to use One-to-Many because
+ * there is no entity on the to-Many side of the relationship where a
+ * Many-to-One key could be defined.  An example is the Employee to email
+ * addresses relationship discussed above:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+ *     {@literal Set<String> emailAddresses = new HashSet<String>();}
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>For the sake of argument, imagine that each employee has thousands of email
+ * addresses and employees frequently add and remove email addresses.  To
+ * avoid the potential performance problems associated with updating the
+ * Employee entity every time an email address is added or removed, you could
+ * create an EmployeeEmailAddress entity and use a Many-to-One relationship as
+ * shown below:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class EmployeeEmailAddress {
+ *
+ *     {@literal @PrimaryKey}
+ *     String emailAddress;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employee.class)}
+ *     long employeeId;
+ *
+ *     private EmployeeEmailAddress() {}
+ * }</pre>
+ *
+ * <h3>Key Placement with Many-to-Many for Related Entities</h3>
+ *
+ * <p>As discussed in the section above, one drawback of a to-Many relationship
+ * (One-to-Many was discussed above and Many-to-Many is discussed here) is that
+ * it requires storing a collection of keys in an entity.  Each time a key is
+ * added or removed, the containing entity must be updated.  This has potential
+ * performance problems when there are large numbers of entities on the to-Many
+ * side of the relationship, in other words, when there are large numbers of
+ * keys in each secondary key field collection.</p>
+ *
+ * <p>If you have a Many-to-Many relationship with a reasonably small number of
+ * entities on one side of the relationship and a large number of entities on
+ * the other side, you can avoid the potential performance problems by defining
+ * the secondary key field on the side with a small number of entities.</p>
+ *
+ * <p>For example, in an Employee-to-Organization relationship, the number of
+ * organizations per employee will normally be reasonably small but the number
+ * of employees per organization may be very large.  Therefore, to avoid
+ * potential performance problems, the secondary key field should be defined in
+ * the Employee class as shown below.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Organization.class)}
+ *     {@literal Set<String> organizations = new HashSet<String>();}
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class Organization {
+ *
+ *     {@literal @PrimaryKey}
+ *     String name;
+ *
+ *     String description;
+ * }</pre>
+ *
+ * <p>If instead a {@code Set<Long> members} key had been defined in the
+ * Organization class, this set could potentially have a large number of
+ * elements and performance problems could result.</p>
+ *
+ * <h3>Many-to-Many Versus a Relationship Entity</h3>
+ *
+ * <p>If you have a Many-to-Many relationship with a large number of entities
+ * on <em>both</em> sides of the relationship, you can avoid the potential
+ * performance problems by using a <em>relationship entity</em>.  A
+ * relationship entity defines the relationship between two other entities
+ * using two Many-to-One relationships.</p>
+ *
+ * <p>Imagine a relationship between cars and trucks indicating whenever a
+ * particular truck was passed on the road by a particular car.  A given car
+ * may pass a large number of trucks and a given truck may be passed by a large
+ * number of cars.  First look at a Many-to-Many relationship between these two
+ * entities:</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Car {
+ *
+ *     {@literal @PrimaryKey}
+ *     String licenseNumber;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Truck.class)}
+ *     {@literal Set<String> trucksPassed = new HashSet<String>();}
+ *
+ *     String color;
+ *
+ *     private Car() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class Truck {
+ *
+ *     {@literal @PrimaryKey}
+ *     String licenseNumber;
+ *
+ *     int tons;
+ *
+ *     private Truck() {}
+ * }</pre>
+ *
+ * <p>With the Many-to-Many approach above, the {@code trucksPassed} set could
+ * potentially have a large number of elements and performance problems could
+ * result.</p>
+ *
+ * <p>To apply the relationship entity approach we define a new entity class
+ * named CarPassedTruck representing a single truck passed by a single car.  We
+ * remove the secondary key from the Car class and use two secondary keys in
+ * the CarPassedTruck class instead.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Car {
+ *
+ *     {@literal @PrimaryKey}
+ *     String licenseNumber;
+ *
+ *     String color;
+ *
+ *     private Car() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class Truck {
+ *
+ *     {@literal @PrimaryKey}
+ *     String licenseNumber;
+ *
+ *     int tons;
+ *
+ *     private Truck() {}
+ * }
+ *
+ * {@literal @Entity}
+ * class CarPassedTruck {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)}
+ *     String carLicense;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)}
+ *     String truckLicense;
+ *
+ *     private CarPassedTruck() {}
+ * }</pre>
+ *
+ * <p>The CarPassedTruck entity can be used to access the relationship by car
+ * license or by truck license.</p>
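+ *
+ * <p>For example, a minimal sketch that finds the trucks passed by a given
+ * car, assuming a {@code carPassedTruckIndex} primary index (hypothetical):</p>
+ *
+ * <pre class="code">
+ * {@code SecondaryIndex<String,Long,CarPassedTruck>} byCarLicense =
+ *     store.getSecondaryIndex(carPassedTruckIndex, String.class, "carLicense");
+ * {@code EntityCursor<CarPassedTruck>} cursor =
+ *     byCarLicense.subIndex(myCarLicense).entities();
+ * try {
+ *     for (CarPassedTruck pass : cursor) {
+ *         // pass.truckLicense identifies one truck passed by the car
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>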
+ *
+ * <p>You may use the relationship entity approach because of the potential
+ * performance problems mentioned above.  Or, you may choose to use this
+ * approach in order to store other information about the relationship.  For
+ * example, if for each car that passes a truck you wish to record how much
+ * faster the car was going than the truck, then a relationship entity is the
+ * logical place to store that property.  In the example below the
+ * speedDifference property is added to the CarPassedTruck class.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class CarPassedTruck {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)}
+ *     String carLicense;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)}
+ *     String truckLicense;
+ *
+ *     int speedDifference;
+ *
+ *     private CarPassedTruck() {}
+ * }</pre>
+ *
+ * <p>Be aware that the relationship entity approach adds overhead compared to
+ * Many-to-Many.  There is one additional entity and one additional secondary
+ * key.  These factors should be weighed against its advantages and the
+ * relevant application access patterns should be considered.</p>
+ *
+ * @author Mark Hayes
+ */
+public class SecondaryIndex<SK,PK,E> extends BasicIndex<SK,E> {
+
+    private SecondaryDatabase secDb;
+    private Database keysDb;
+    private PrimaryIndex priIndex;
+    private EntityBinding entityBinding;
+    private EntityIndex<SK,PK> keysIndex;
+    private SortedMap<SK,E> map;
+
+    /**
+     * Creates a secondary index without using an <code>EntityStore</code>.
+     *
+     * <p>This constructor is not normally needed and is provided for
+     * applications that wish to use custom bindings along with the Direct
+     * Persistence Layer.  Normally, {@link EntityStore#getSecondaryIndex
+     * getSecondaryIndex} is used instead.</p>
+     *
+     * @param database the secondary database used for all access other than
+     * via a {@link #keysIndex}.
+     *
+     * @param keysDatabase another handle on the secondary database, opened
+     * without association to the primary, and used only for access via a
+     * {@link #keysIndex}.  If this argument is null and the {@link #keysIndex}
+     * method is called, then the keys database will be opened automatically;
+     * however, the user is then responsible for closing the keys database.  To
+     * get the keys database in order to close it, call {@link
+     * #getKeysDatabase}.
+     *
+     * @param primaryIndex the primary index associated with this secondary
+     * index.
+     *
+     * @param secondaryKeyClass the class of the secondary key.
+     *
+     * @param secondaryKeyBinding the binding to be used for secondary keys.
+     */
+    public SecondaryIndex(SecondaryDatabase database,
+                          Database keysDatabase,
+                          PrimaryIndex<PK,E> primaryIndex,
+                          Class<SK> secondaryKeyClass,
+                          EntryBinding<SK> secondaryKeyBinding)
+        throws DatabaseException {
+
+        super(database, secondaryKeyClass, secondaryKeyBinding,
+              new EntityValueAdapter(primaryIndex.getEntityClass(),
+                                     primaryIndex.getEntityBinding(),
+                                     true));
+        secDb = database;
+        keysDb = keysDatabase;
+        priIndex = primaryIndex;
+        entityBinding = primaryIndex.getEntityBinding();
+    }
+
+    /**
+     * Returns the underlying secondary database for this index.
+     *
+     * @return the secondary database.
+     */
+    public SecondaryDatabase getDatabase() {
+        return secDb;
+    }
+
+    /**
+     * Returns the underlying secondary database that is not associated with
+     * the primary database and is used for the {@link #keysIndex}.
+     *
+     * @return the keys database.
+     */
+    public Database getKeysDatabase() {
+        return keysDb;
+    }
+
+    /**
+     * Returns the primary index associated with this secondary index.
+     *
+     * @return the primary index.
+     */
+    public PrimaryIndex<PK,E> getPrimaryIndex() {
+        return priIndex;
+    }
+
+    /**
+     * Returns the secondary key class for this index.
+     *
+     * @return the class.
+     */
+    public Class<SK> getKeyClass() {
+        return keyClass;
+    }
+
+    /**
+     * Returns the secondary key binding for the index.
+     *
+     * @return the key binding.
+     */
+    public EntryBinding<SK> getKeyBinding() {
+        return keyBinding;
+    }
+
+    /**
+     * Returns a read-only keys index that maps secondary key to primary key.
+     * When accessing the keys index, the primary key is returned rather than
+     * the entity.  When only the primary key is needed and not the entire
+     * entity, using the keys index is less expensive than using the secondary
+     * index because the primary index does not have to be accessed.
+     *
+     * <p>Note the following in the unusual case that you are <em>not</em>
+     * using an <code>EntityStore</code>: This method will open the keys
+     * database, a second database handle for the secondary database, if it is
+     * not already open.  In that case you are responsible for closing the
+     * database returned by {@link #getKeysDatabase} before closing the
+     * environment.  If you <em>are</em> using an <code>EntityStore</code>, the
+     * keys database will be closed automatically by {@link
+     * EntityStore#close}.</p>
+     *
+     * @return the keys index.
+     */
+    public synchronized EntityIndex<SK,PK> keysIndex()
+        throws DatabaseException {
+
+        if (keysIndex == null) {
+            if (keysDb == null) {
+                DatabaseConfig config = secDb.getConfig();
+                config.setReadOnly(true);
+                config.setAllowCreate(false);
+                config.setExclusiveCreate(false);
+                try {
+                    keysDb = DbCompat.openDatabase
+                        (db.getEnvironment(), null/*txn*/,
+                         DbCompat.getDatabaseFile(secDb),
+                         secDb.getDatabaseName(),
+                         config);
+                } catch (FileNotFoundException e) {
+                    throw new DatabaseException(e);
+                }
+            }
+            keysIndex = new KeysIndex<SK,PK>
+                (keysDb, keyClass, keyBinding,
+                 priIndex.getKeyClass(), priIndex.getKeyBinding());
+        }
+        return keysIndex;
+    }
+
+    /**
+     * Returns an index that maps primary key to entity for the subset of
+     * entities having a given secondary key (duplicates).  A sub-index is
+     * convenient when you are interested in working with the subset of
+     * entities having a particular secondary key value.
+     *
+     * <p>When using a {@link Relationship#MANY_TO_ONE MANY_TO_ONE} or {@link
+     * Relationship#MANY_TO_MANY MANY_TO_MANY} secondary key, the sub-index
+     * represents the left (MANY) side of a relationship.</p>
+     *
+     * @param key the secondary key that identifies the entities in the
+     * sub-index.
+     *
+     * @return the sub-index.
+     */
+    public EntityIndex<PK,E> subIndex(SK key)
+        throws DatabaseException {
+
+        return new SubIndex(this, entityBinding, key);
+    }
+
+    /*
+     * Of the EntityIndex methods only get()/map()/sortedMap() are implemented
+     * here.  All other methods are implemented by BasicIndex.
+     */
+
+    public E get(SK key)
+        throws DatabaseException {
+
+        return get(null, key, null);
+    }
+
+    public E get(Transaction txn, SK key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry pkeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        keyBinding.objectToEntry(key, keyEntry);
+
+        OperationStatus status =
+            secDb.get(txn, keyEntry, pkeyEntry, dataEntry, lockMode);
+
+        if (status == OperationStatus.SUCCESS) {
+            return (E) entityBinding.entryToObject(pkeyEntry, dataEntry);
+        } else {
+            return null;
+        }
+    }
+
+    public Map<SK,E> map() {
+        return sortedMap();
+    }
+
+    public synchronized SortedMap<SK,E> sortedMap() {
+        if (map == null) {
+            map = new StoredSortedMap(db, keyBinding, entityBinding, true);
+        }
+        return map;
+    }
+
+    boolean isUpdateAllowed() {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/persist/StoreConfig.java b/src/com/sleepycat/persist/StoreConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..1979a2291fd697fb132f77d348e5d9a9ca2bab18
--- /dev/null
+++ b/src/com/sleepycat/persist/StoreConfig.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: StoreConfig.java,v 1.18.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment; // for javadoc
+import com.sleepycat.persist.evolve.IncompatibleClassException;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawStore; // for javadoc
+
+/**
+ * Configuration properties used with an {@link EntityStore} or {@link
+ * RawStore}.
+ *
+ * <p>{@code StoreConfig} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code StoreConfig} object.</p>
+ *
+ * <p>See the <a href="package-summary.html#example">package
+ * summary example</a> for an example of using a {@code StoreConfig}.</p>
+ *
+ * @author Mark Hayes
+ */
+public class StoreConfig implements Cloneable {
+
+    /**
+     * The default store configuration containing properties as if the
+     * configuration were constructed and not modified.
+     */
+    public static final StoreConfig DEFAULT = new StoreConfig();
+
+    private boolean allowCreate;
+    private boolean exclusiveCreate;
+    private boolean transactional;
+    private boolean readOnly;
+    /* <!-- begin JE only --> */
+    private boolean deferredWrite;
+    private boolean temporary;
+    /* <!-- end JE only --> */
+    private boolean secondaryBulkLoad;
+    private EntityModel model;
+    private Mutations mutations;
+    private DatabaseNamer databaseNamer = DatabaseNamer.DEFAULT;
+
+    /**
+     * Creates an entity store configuration object with default properties.
+     */
+    public StoreConfig() {
+    }
+
+    /**
+     * Returns a shallow copy of the configuration.
+     */
+    public StoreConfig cloneConfig() {
+        try {
+            return (StoreConfig) clone();
+        } catch (CloneNotSupportedException cannotHappen) {
+            return null;
+        }
+    }
+
+    /**
+     * Specifies whether creation of a new store is allowed.  By default this
+     * property is false.
+     *
+     * <p>If this property is false and the internal store metadata database
+     * does not exist, {@link DatabaseException} will be thrown when the store
+     * is opened.</p>
+     */
+    public void setAllowCreate(boolean allowCreate) {
+        this.allowCreate = allowCreate;
+    }
+
+    /**
+     * Returns whether creation of a new store is allowed.
+     */
+    public boolean getAllowCreate() {
+        return allowCreate;
+    }
+
+    /**
+     * Specifies whether opening an existing store is prohibited.  By default
+     * this property is false.
+     *
+     * <p>If this property is true and the internal store metadata database
+     * already exists, {@link DatabaseException} will be thrown when the store
+     * is opened.</p>
+     */
+    public void setExclusiveCreate(boolean exclusiveCreate) {
+        this.exclusiveCreate = exclusiveCreate;
+    }
+
+    /**
+     * Returns whether opening an existing store is prohibited.
+     */
+    public boolean getExclusiveCreate() {
+        return exclusiveCreate;
+    }
+
+    /**
+     * Sets the transactional configuration property.  By default this property
+     * is false.
+     *
+     * <p>This property is true to open all store indices for transactional
+     * access.  True may not be specified if the environment is not also
+     * transactional.</p>
+     */
+    public void setTransactional(boolean transactional) {
+        this.transactional = transactional;
+    }
+
+    /**
+     * Returns the transactional configuration property.
+     */
+    public boolean getTransactional() {
+        return transactional;
+    }
+
+    /**
+     * Sets the read-only configuration property.  By default this property is
+     * false.
+     *
+     * <p>This property is true to open all store indices for read-only access,
+     * or false to open them for read-write access.  False may not be specified
+     * if the environment is read-only.</p>
+     */
+    public void setReadOnly(boolean readOnly) {
+        this.readOnly = readOnly;
+    }
+
+    /**
+     * Returns the read-only configuration property.
+     */
+    public boolean getReadOnly() {
+        return readOnly;
+    }
+
+    /* <!-- begin JE only --> */
+    /**
+     * Sets the deferred-write configuration property.  By default this
+     * property is false.
+     *
+     * <p>This property is true to open all store index databases for
+     * deferred-write access.  True may not be specified if the store is
+     * transactional.</p>
+     *
+     * <p>Deferred write stores avoid disk I/O and are not guaranteed to be
+     * persistent until {@link EntityStore#sync} or {@link Environment#sync} is
+     * called or the store is closed normally. This mode is particularly geared
+     * toward stores that frequently modify and delete data records. See the
+     * Getting Started Guide, Database chapter for a full description of the
+     * mode.</p>
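+     *
+     * <p>For example, a minimal sketch assuming an open, non-transactional
+     * {@link Environment} named {@code env}:</p>
+     *
+     * <pre class="code">
+     * StoreConfig config = new StoreConfig();
+     * config.setAllowCreate(true);
+     * config.setDeferredWrite(true);
+     * EntityStore store = new EntityStore(env, "myStore", config);
+     * // ... insert or update entities ...
+     * store.sync(); // Flush deferred writes to stable storage</pre>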
+     *
+     * @see #setTransactional
+     */
+    public void setDeferredWrite(boolean deferredWrite) {
+        this.deferredWrite = deferredWrite;
+    }
+
+    /**
+     * Returns the deferred-write configuration property.
+     */
+    public boolean getDeferredWrite() {
+        return deferredWrite;
+    }
+
+    /**
+     * Sets the temporary configuration property.  By default this property is
+     * false.
+     *
+     * <p>This property is true to open all store databases as temporary
+     * databases.  True may not be specified if the store is transactional.</p>
+     *
+     * <p>Temporary stores avoid disk I/O and are not persistent -- they are
+     * deleted when the store is closed or after a crash. This mode is
+     * particularly geared toward in-memory stores. See the Getting Started
+     * Guide, Database chapter for a full description of the mode.</p>
+     *
+     * @see #setTransactional
+     */
+    public void setTemporary(boolean temporary) {
+        this.temporary = temporary;
+    }
+
+    /**
+     * Returns the temporary configuration property.
+     */
+    public boolean getTemporary() {
+        return temporary;
+    }
+    /* <!-- end JE only --> */
+
+    /**
+     * Sets the bulk-load-secondaries configuration property.  By default this
+     * property is false.
+     *
+     * <p>If this property is true, the initial creation of secondary indices
+     * is performed as a bulk load.  If this property is true and
+     * {@link EntityStore#getSecondaryIndex EntityStore.getSecondaryIndex} has
+     * never been called for a secondary index, that secondary index will not
+     * be created or written as records are written to the primary index.  In
+     * addition, if that secondary index defines a foreign key constraint, the
+     * constraint will not be enforced.</p>
+     *
+     * <p>The secondary index will be populated later when the {@code
+     * getSecondaryIndex} method is called for the first time for that index,
+     * or when the store is closed and re-opened with this property set to
+     * false and the primary index is obtained.  In either case, the secondary
+     * index is populated by reading through the entire primary index and
+     * adding records to the secondary index as needed.  While populating the
+     * secondary, foreign key constraints will be enforced and an exception
+     * will be thrown if a constraint is violated.</p>
+     *
+     * <p>When loading a primary index along with secondary indices from a
+     * large input data set, configuring a bulk load of the secondary indices
+     * is often faster than updating the secondary indices each time the
+     * primary index is updated.  The absence of foreign key constraints
+     * during the load also provides more flexibility.</p>
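+     *
+     * <p>A minimal sketch of a bulk load (the {@code Person} entity and its
+     * "name" secondary key are hypothetical):</p>
+     * <pre class="code">
+     *  StoreConfig config = new StoreConfig();
+     *  config.setAllowCreate(true);
+     *  config.setSecondaryBulkLoad(true);
+     *  EntityStore store = new EntityStore(env, "myStore", config);
+     *
+     *  // While loading, the secondary index is neither created nor updated.
+     *  {@literal PrimaryIndex<Long,Person> primary =}
+     *      store.getPrimaryIndex(Long.class, Person.class);
+     *  // ... load all records via the primary index ...
+     *
+     *  // The first getSecondaryIndex call populates the secondary index.
+     *  {@literal SecondaryIndex<String,Long,Person> secondary =}
+     *      store.getSecondaryIndex(primary, String.class, "name");</pre>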
+     */
+    public void setSecondaryBulkLoad(boolean secondaryBulkLoad) {
+        this.secondaryBulkLoad = secondaryBulkLoad;
+    }
+
+    /**
+     * Returns the bulk-load-secondaries configuration property.
+     */
+    public boolean getSecondaryBulkLoad() {
+        return secondaryBulkLoad;
+    }
+
+    /**
+     * Sets the entity model that defines entity classes and index keys.
+     *
+     * <p>If null is specified or this method is not called, an {@link
+     * AnnotationModel} instance is used by default.</p>
+     */
+    public void setModel(EntityModel model) {
+        this.model = model;
+    }
+
+    /**
+     * Returns the entity model that defines entity classes and index keys.
+     */
+    public EntityModel getModel() {
+        return model;
+    }
+
+    /**
+     * Configures mutations for performing lazy evolution of stored instances.
+     * Existing mutations for this store are not cleared, so the mutations
+     * required are only those changes that have been made since the store was
+     * last opened.  Some new mutations may override existing specifications,
+     * and some may be supplemental.
+     *
+     * <p>If null is specified and the store already exists, the previously
+     * specified mutations are used.  The mutations are stored persistently in
+     * serialized form.</p>
+     *
+     * <p>Mutations must be available to handle all changes to classes that are
+     * incompatible with the class definitions known to this store.  See {@link
+     * Mutations} and {@link com.sleepycat.persist.evolve Class Evolution} for
+     * more information.</p>
+     *
+     * <p>If an incompatible class change has been made and mutations are not
+     * available for handling the change, {@link IncompatibleClassException}
+     * will be thrown when creating an {@link EntityStore}.</p>
+     */
+    public void setMutations(Mutations mutations) {
+        this.mutations = mutations;
+    }
+
+    /**
+     * Returns the configured mutations for performing lazy evolution of stored
+     * instances.
+     */
+    public Mutations getMutations() {
+        return mutations;
+    }
+
+    /**
+     * <!-- begin JE only -->
+     * @hidden
+     * <!-- end JE only -->
+     * Specifies the object responsible for naming files and databases.
+     *
+     * By default this property is {@link DatabaseNamer#DEFAULT}.
+     *
+     * @throws NullPointerException if a null parameter value is passed.
+     */
+    public void setDatabaseNamer(DatabaseNamer databaseNamer) {
+        if (databaseNamer == null) {
+            throw new NullPointerException();
+        }
+        this.databaseNamer = databaseNamer;
+    }
+
+    /**
+     * <!-- begin JE only -->
+     * @hidden
+     * <!-- end JE only -->
+     * Returns the object responsible for naming files and databases.
+     */
+    public DatabaseNamer getDatabaseNamer() {
+        return databaseNamer;
+    }
+}
diff --git a/src/com/sleepycat/persist/SubIndex.java b/src/com/sleepycat/persist/SubIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..2b04662c7d9f51abc21adfc83447d2d9ef0a942f
--- /dev/null
+++ b/src/com/sleepycat/persist/SubIndex.java
@@ -0,0 +1,336 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SubIndex.java,v 1.16.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+/**
+ * The EntityIndex returned by SecondaryIndex.subIndex.  A SubIndex, in JE
+ * internal terms, is a duplicates btree for a single key in the main btree.
+ * From the user's viewpoint, the keys are primary keys.  This class implements
+ * that viewpoint.  In general, getSearchBoth and getSearchBothRange are used
+ * where in a normal index getSearchKey and getSearchRange would be used.  The
+ * main tree key is always implied, not passed as a parameter.
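+ *
+ * For example (a sketch; the Person entity and its "lastName" secondary key
+ * are hypothetical):
+ *
+ *   SecondaryIndex<String,Long,Person> byLastName = ...;
+ *   // All entities whose lastName secondary key is "Smith", by primary key.
+ *   EntityIndex<Long,Person> smiths = byLastName.subIndex("Smith");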
+ *
+ * @author Mark Hayes
+ */
+class SubIndex<PK,E> implements EntityIndex<PK,E> {
+
+    private SecondaryIndex<?,PK,E> secIndex;
+    private SecondaryDatabase db;
+    private boolean transactional;
+    private boolean sortedDups;
+    private boolean locking;
+    private DatabaseEntry keyEntry;
+    private Object keyObject;
+    private KeyRange singleKeyRange;
+    private EntryBinding pkeyBinding;
+    private KeyRange emptyPKeyRange;
+    private EntityBinding entityBinding;
+    private ValueAdapter<PK> keyAdapter;
+    private ValueAdapter<E> entityAdapter;
+    private SortedMap<PK,E> map;
+
+    <SK> SubIndex(SecondaryIndex<SK,PK,E> secIndex,
+                  EntityBinding entityBinding,
+                  SK key)
+        throws DatabaseException {
+
+        this.secIndex = secIndex;
+        db = secIndex.getDatabase();
+        transactional = secIndex.transactional;
+        sortedDups = secIndex.sortedDups;
+        locking =
+            DbCompat.getInitializeLocking(db.getEnvironment().getConfig());
+
+        keyObject = key;
+        keyEntry = new DatabaseEntry();
+        secIndex.keyBinding.objectToEntry(key, keyEntry);
+        singleKeyRange = secIndex.emptyRange.subRange(keyEntry);
+
+        PrimaryIndex<PK,E> priIndex = secIndex.getPrimaryIndex();
+        pkeyBinding = priIndex.keyBinding;
+        emptyPKeyRange = priIndex.emptyRange;
+        this.entityBinding = entityBinding;
+
+        keyAdapter = new PrimaryKeyValueAdapter<PK>
+            (priIndex.keyClass, priIndex.keyBinding);
+        entityAdapter = secIndex.entityAdapter;
+    }
+
+    public boolean contains(PK key)
+        throws DatabaseException {
+
+        return contains(null, key, null);
+    }
+
+    public boolean contains(Transaction txn, PK key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry pkeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = BasicIndex.NO_RETURN_ENTRY;
+        pkeyBinding.objectToEntry(key, pkeyEntry);
+
+        OperationStatus status =
+            db.getSearchBoth(txn, keyEntry, pkeyEntry, dataEntry, lockMode);
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    public E get(PK key)
+        throws DatabaseException {
+
+        return get(null, key, null);
+    }
+
+    public E get(Transaction txn, PK key, LockMode lockMode)
+        throws DatabaseException {
+
+        DatabaseEntry pkeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        pkeyBinding.objectToEntry(key, pkeyEntry);
+
+        OperationStatus status =
+            db.getSearchBoth(txn, keyEntry, pkeyEntry, dataEntry, lockMode);
+
+        if (status == OperationStatus.SUCCESS) {
+            return (E) entityBinding.entryToObject(pkeyEntry, dataEntry);
+        } else {
+            return null;
+        }
+    }
+
+    public long count()
+        throws DatabaseException {
+
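+        /*
+         * Use a read-uncommitted cursor (when locking is enabled) to avoid
+         * blocking on writers; position on the first duplicate and count
+         * the duplicate set.
+         */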
+        CursorConfig cursorConfig = locking ?
+            CursorConfig.READ_UNCOMMITTED : null;
+        EntityCursor<PK> cursor = keys(null, cursorConfig);
+        try {
+            if (cursor.next() != null) {
+                return cursor.count();
+            } else {
+                return 0;
+            }
+        } finally {
+            cursor.close();
+        }
+    }
+
+    public boolean delete(PK key)
+        throws DatabaseException {
+
+        return delete(null, key);
+    }
+
+    public boolean delete(Transaction txn, PK key)
+        throws DatabaseException {
+
+        DatabaseEntry pkeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = BasicIndex.NO_RETURN_ENTRY;
+        pkeyBinding.objectToEntry(key, pkeyEntry);
+
+        boolean autoCommit = false;
+        Environment env = db.getEnvironment();
+
+        /* Auto-commit if transactional and no explicit or thread txn. */
+        if (transactional &&
+            txn == null &&
+            DbCompat.getThreadTransaction(env) == null) {
+            txn = env.beginTransaction(null, null);
+            autoCommit = true;
+        }
+
+        boolean failed = true;
+        OperationStatus status;
+        SecondaryCursor cursor = db.openSecondaryCursor(txn, null);
+        try {
+            status = cursor.getSearchBoth
+                (keyEntry, pkeyEntry, dataEntry,
+                 locking ? LockMode.RMW : null);
+            if (status == OperationStatus.SUCCESS) {
+                status = cursor.delete();
+            }
+            failed = false;
+        } finally {
+            cursor.close();
+            if (autoCommit) {
+                if (failed) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+                }
+            }
+        }
+
+        return (status == OperationStatus.SUCCESS);
+    }
+
+    public EntityCursor<PK> keys()
+        throws DatabaseException {
+
+        return keys(null, null);
+    }
+
+    public EntityCursor<PK> keys(Transaction txn, CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, null, keyAdapter, config);
+    }
+
+    public EntityCursor<E> entities()
+        throws DatabaseException {
+
+        return cursor(null, null, entityAdapter, null);
+    }
+
+    public EntityCursor<E> entities(Transaction txn,
+                                    CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, null, entityAdapter, config);
+    }
+
+    public EntityCursor<PK> keys(PK fromKey,
+                                 boolean fromInclusive,
+                                 PK toKey,
+                                 boolean toInclusive)
+        throws DatabaseException {
+
+        return cursor(null, fromKey, fromInclusive, toKey, toInclusive,
+                      keyAdapter, null);
+    }
+
+    public EntityCursor<PK> keys(Transaction txn,
+                                 PK fromKey,
+                                 boolean fromInclusive,
+                                 PK toKey,
+                                 boolean toInclusive,
+                                 CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, fromKey, fromInclusive, toKey, toInclusive,
+                      keyAdapter, config);
+    }
+
+    public EntityCursor<E> entities(PK fromKey,
+                                    boolean fromInclusive,
+                                    PK toKey,
+                                    boolean toInclusive)
+        throws DatabaseException {
+
+        return cursor(null, fromKey, fromInclusive, toKey, toInclusive,
+                      entityAdapter, null);
+    }
+
+    public EntityCursor<E> entities(Transaction txn,
+                                    PK fromKey,
+                                    boolean fromInclusive,
+                                    PK toKey,
+                                    boolean toInclusive,
+                                    CursorConfig config)
+        throws DatabaseException {
+
+        return cursor(txn, fromKey, fromInclusive, toKey, toInclusive,
+                      entityAdapter, config);
+    }
+
+    /*
+    public ForwardCursor<PK> unsortedKeys(KeySelector<PK> selector)
+        throws DatabaseException {
+
+        return unsortedKeys(null, selector, null);
+    }
+
+    public ForwardCursor<PK> unsortedKeys(Transaction txn,
+                                          KeySelector<PK> selector,
+                                          CursorConfig config)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    public ForwardCursor<E> unsortedEntities(KeySelector<PK> selector)
+        throws DatabaseException {
+
+        return unsortedEntities(null, selector, null);
+    }
+
+    public ForwardCursor<E> unsortedEntities(Transaction txn,
+                                             KeySelector<PK> selector,
+                                             CursorConfig config)
+        throws DatabaseException {
+
+        throw new UnsupportedOperationException();
+    }
+    */
+
+    private <V> EntityCursor<V> cursor(Transaction txn,
+                                       PK fromKey,
+                                       boolean fromInclusive,
+                                       PK toKey,
+                                       boolean toInclusive,
+                                       ValueAdapter<V> adapter,
+                                       CursorConfig config)
+        throws DatabaseException {
+
+        DatabaseEntry fromEntry = null;
+        if (fromKey != null) {
+            fromEntry = new DatabaseEntry();
+            pkeyBinding.objectToEntry(fromKey, fromEntry);
+        }
+        DatabaseEntry toEntry = null;
+        if (toKey != null) {
+            toEntry = new DatabaseEntry();
+            pkeyBinding.objectToEntry(toKey, toEntry);
+        }
+        KeyRange pkeyRange = emptyPKeyRange.subRange
+            (fromEntry, fromInclusive, toEntry, toInclusive);
+        return cursor(txn, pkeyRange, adapter, config);
+    }
+
+    private <V> EntityCursor<V> cursor(Transaction txn,
+                                       KeyRange pkeyRange,
+                                       ValueAdapter<V> adapter,
+                                       CursorConfig config)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, config);
+        RangeCursor rangeCursor =
+            new RangeCursor(singleKeyRange, pkeyRange, sortedDups, cursor);
+        return new SubIndexCursor<V>(rangeCursor, adapter);
+    }
+
+    public Map<PK,E> map() {
+        return sortedMap();
+    }
+
+    public synchronized SortedMap<PK,E> sortedMap() {
+        if (map == null) {
+            map = (SortedMap) ((StoredSortedMap) secIndex.sortedMap()).
+                duplicatesMap(keyObject, pkeyBinding);
+        }
+        return map;
+    }
+}
diff --git a/src/com/sleepycat/persist/SubIndexCursor.java b/src/com/sleepycat/persist/SubIndexCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..4bb4fa89c3aa5d04011d55b5b4fa44b68b8df573
--- /dev/null
+++ b/src/com/sleepycat/persist/SubIndexCursor.java
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SubIndexCursor.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.util.keyrange.RangeCursor;
+
+/**
+ * The cursor for a SubIndex treats Dup and NoDup operations specially because
+ * the SubIndex never has duplicates -- the keys are primary keys.  So a
+ * next/prevDup operation always returns null, and a next/prevNoDup operation
+ * actually does next/prev.
+ *
+ * @author Mark Hayes
+ */
+class SubIndexCursor<V> extends BasicCursor<V> {
+
+    SubIndexCursor(RangeCursor cursor, ValueAdapter<V> adapter) {
+        super(cursor, adapter, false/*updateAllowed*/);
+    }
+
+    @Override
+    public EntityCursor<V> dup()
+        throws DatabaseException {
+
+        return new SubIndexCursor<V>(cursor.dup(true), adapter);
+    }
+
+    @Override
+    public V nextDup(LockMode lockMode)
+        throws DatabaseException {
+
+        checkInitialized();
+        return null;
+    }
+
+    @Override
+    public V nextNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getNext(key, pkey, data, lockMode));
+    }
+
+    @Override
+    public V prevDup(LockMode lockMode)
+        throws DatabaseException {
+
+        checkInitialized();
+        return null;
+    }
+
+    @Override
+    public V prevNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        return returnValue(cursor.getPrev(key, pkey, data, lockMode));
+    }
+}
diff --git a/src/com/sleepycat/persist/ValueAdapter.java b/src/com/sleepycat/persist/ValueAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..eda55ce713cc9dd8d32b3d356dd7cb715a2bc069
--- /dev/null
+++ b/src/com/sleepycat/persist/ValueAdapter.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ValueAdapter.java,v 1.7.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * An adapter that translates between database entries (key, primary key, data)
+ * and a "value", which may be either the key, primary key, or entity.  This
+ * interface is used to implement a generic index and cursor (BasicIndex and
+ * BasicCursor).  If we didn't use this approach, we would need separate index
+ * and cursor implementations for each type of value that can be returned.  In
+ * other words, this interface is used to reduce class explosion.
+ *
+ * @author Mark Hayes
+ */
+interface ValueAdapter<V> {
+
+    /**
+     * Creates a DatabaseEntry for the key or returns null if the key is not
+     * needed.
+     */
+    DatabaseEntry initKey();
+
+    /**
+     * Creates a DatabaseEntry for the primary key or returns null if the
+     * primary key is not needed.
+     */
+    DatabaseEntry initPKey();
+
+    /**
+     * Creates a DatabaseEntry for the data or returns null if the data is not
+     * needed.  BasicIndex.NO_RETURN_ENTRY may be returned if the data argument
+     * is required but we don't need it.
+     */
+    DatabaseEntry initData();
+
+    /**
+     * Sets the data array of the given entries to null, based on knowledge of
+     * which entries are non-null and are not NO_RETURN_ENTRY.
+     */
+    void clearEntries(DatabaseEntry key,
+                      DatabaseEntry pkey,
+                      DatabaseEntry data);
+
+    /**
+     * Returns the appropriate "value" (key, primary key, or entity) using the
+     * appropriate bindings for that purpose.
+     */
+    V entryToValue(DatabaseEntry key,
+                   DatabaseEntry pkey,
+                   DatabaseEntry data);
+
+    /**
+     * Converts an entity value to a data entry using an entity binding, or
+     * throws UnsupportedOperationException if this is not appropriate.  Called
+     * by BasicCursor.update.
+     */
+    void valueToData(V value, DatabaseEntry data);
+}
diff --git a/src/com/sleepycat/persist/evolve/Conversion.java b/src/com/sleepycat/persist/evolve/Conversion.java
new file mode 100644
index 0000000000000000000000000000000000000000..e57dbe2fd9e69085f47629b58674cbb907332ba8
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Conversion.java
@@ -0,0 +1,438 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Conversion.java,v 1.14.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.io.Serializable;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * Converts an old version of an object value to conform to the current class
+ * or field definition.
+ *
+ * <p>The {@code Conversion} interface is implemented by the user.  A
+ * {@code Conversion} instance is passed to the {@link Converter#Converter}
+ * constructor.</p>
+ *
+ * <p>The {@code Conversion} interface extends {@link Serializable} and the
+ * {@code Conversion} instance is serialized for storage using standard Java
+ * serialization.  Normally, the {@code Conversion} class should only have
+ * transient fields that are initialized in the {@link #initialize} method.
+ * While non-transient fields are allowed, care must be taken to only include
+ * fields that are serializable and will not pull in large amounts of data.</p>
+ *
+ * <p>When a class conversion is specified, two special considerations
+ * apply:</p>
+ * <ol>
+ * <li>A class conversion is only applied to instances of that class.  The
+ * conversion will not be applied when the class appears as a superclass of
+ * the instance's class.  In this case, a conversion for the instance's class
+ * must also be specified.</li>
+ * <li>Although field renaming (as well as all other changes) is handled by the
+ * conversion method, a field Renamer is still needed when a secondary key
+ * field is renamed, and a field Deleter is still needed when a secondary key
+ * field is deleted.  This is necessary for evolution of the metadata;
+ * specifically, if the key name changes, the secondary database must be
+ * renamed, and if the key field is deleted, the secondary database must be
+ * deleted.</li>
+ * </ol>
+ *
+ * <p>The {@code Conversion} class must implement the standard equals method.
+ * See {@link #equals} for more information.</p>
+ *
+ * <p>Conversions of simple types are generally simple.  For example, a {@code
+ * String} field that contains only integer values can be easily converted to
+ * an {@code int} field:</p>
+ * <pre class="code">
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Persistent}
+ *  class Address {
+ *      String zipCode;
+ *      ...
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Persistent(version=1)}
+ *  class Address {
+ *      int zipCode;
+ *      ...
+ *  }
+ *
+ *  // The conversion class.
+ *  //
+ *  class MyConversion1 implements Conversion {
+ *
+ *      public void initialize(EntityModel model) {
+ *          // No initialization needed.
+ *      }
+ *
+ *      public Object convert(Object fromValue) {
+ *          return Integer.valueOf((String) fromValue);
+ *      }
+ *
+ *      {@code @Override}
+ *      public boolean equals(Object o) {
+ *          return o instanceof MyConversion1;
+ *      }
+ *  }
+ *
+ *  // Create a field converter mutation.
+ *  //
+ *  Converter converter = new Converter(Address.class.getName(), 0,
+ *                                      "zipCode", new MyConversion1());
+ *
+ *  // Configure the converter as described {@link Mutations here}.</pre>
+ *
+ * <p>A conversion may perform arbitrary transformations on an object.  For
+ * example, a conversion may transform a single String address field into an
+ * Address object containing four fields for street, city, state and zip
+ * code.</p>
+ * <pre class="code">
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Person {
+ *      String address;
+ *      ...
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Entity(version=1)}
+ *  class Person {
+ *      Address address;
+ *      ...
+ *  }
+ *
+ *  // The new address class.
+ *  //
+ *  {@literal @Persistent}
+ *  class Address {
+ *      String street;
+ *      String city;
+ *      String state;
+ *      int zipCode;
+ *      ...
+ *  }
+ *
+ *  class MyConversion2 implements Conversion {
+ *      private transient RawType addressType;
+ *
+ *      public void initialize(EntityModel model) {
+ *          addressType = model.getRawType(Address.class.getName());
+ *      }
+ *
+ *      public Object convert(Object fromValue) {
+ *
+ *          // Parse the old address and populate the new address fields
+ *          //
+ *          String oldAddress = (String) fromValue;
+ *          {@literal Map<String,Object> addressValues = new HashMap<String,Object>();}
+ *          addressValues.put("street", parseStreet(oldAddress));
+ *          addressValues.put("city", parseCity(oldAddress));
+ *          addressValues.put("state", parseState(oldAddress));
+ *          addressValues.put("zipCode", parseZipCode(oldAddress));
+ *
+ *          // Return new raw Address object
+ *          //
+ *          return new RawObject(addressType, addressValues, null);
+ *      }
+ *
+ *      {@code @Override}
+ *      public boolean equals(Object o) {
+ *          return o instanceof MyConversion2;
+ *      }
+ *
+ *      private String parseStreet(String oldAddress) { ... }
+ *      private String parseCity(String oldAddress) { ... }
+ *      private String parseState(String oldAddress) { ... }
+ *      private Integer parseZipCode(String oldAddress) { ... }
+ *  }
+ *
+ *  // Create a field converter mutation.
+ *  //
+ *  Converter converter = new Converter(Person.class.getName(), 0,
+ *                                      "address", new MyConversion2());
+ *
+ *  // Configure the converter as described {@link Mutations here}.</pre>
+ *
+ * <p>Note that when a conversion returns a {@link RawObject}, it must return
+ * it with a {@link RawType} that is current as defined by the current class
+ * definitions.  The proper types can be obtained from the {@link EntityModel}
+ * in the conversion's {@link #initialize initialize} method.</p>
+ *
+ * <p>A variation on the example above is where several fields in a class
+ * (street, city, state and zipCode) are converted to a single field (address).
+ * In this case a class converter rather than a field converter is used.</p>
+ *
+ * <pre class="code">
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Person {
+ *      String street;
+ *      String city;
+ *      String state;
+ *      int zipCode;
+ *      ...
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Entity(version=1)}
+ *  class Person {
+ *      Address address;
+ *      ...
+ *  }
+ *
+ *  // The new address class.
+ *  //
+ *  {@literal @Persistent}
+ *  class Address {
+ *      String street;
+ *      String city;
+ *      String state;
+ *      int zipCode;
+ *      ...
+ *  }
+ *
+ *  class MyConversion3 implements Conversion {
+ *      private transient RawType newPersonType;
+ *      private transient RawType addressType;
+ *
+ *      public void initialize(EntityModel model) {
+ *          newPersonType = model.getRawType(Person.class.getName());
+ *          addressType = model.getRawType(Address.class.getName());
+ *      }
+ *
+ *      public Object convert(Object fromValue) {
+ *
+ *          // Get field value maps for old and new objects.
+ *          //
+ *          RawObject person = (RawObject) fromValue;
+ *          {@literal Map<String,Object> personValues = person.getValues();}
+ *          {@literal Map<String,Object> addressValues = new HashMap<String,Object>();}
+ *          RawObject address = new RawObject(addressType, addressValues, null);
+ *
+ *          // Remove the old address fields and insert the new one.
+ *          //
+ *          addressValues.put("street", personValues.remove("street"));
+ *          addressValues.put("city", personValues.remove("city"));
+ *          addressValues.put("state", personValues.remove("state"));
+ *          addressValues.put("zipCode", personValues.remove("zipCode"));
+ *          personValues.put("address", address);
+ *
+ *          return new RawObject(newPersonType, personValues, person.getSuper());
+ *      }
+ *
+ *      {@code @Override}
+ *      public boolean equals(Object o) {
+ *          return o instanceof MyConversion3;
+ *      }
+ *  }
+ *
+ *  // Create a class converter mutation.
+ *  //
+ *  Converter converter = new Converter(Person.class.getName(), 0,
+ *                                      new MyConversion3());
+ *
+ *  // Configure the converter as described {@link Mutations here}.</pre>
+ *
+ *
+ * <p>A conversion can also handle changes to class hierarchies.  For example,
+ * if a "name" field originally declared in class A is moved to its superclass
+ * B, a conversion can move the field value accordingly:</p>
+ *
+ * <pre class="code">
+ *  // The old classes.  Version 0 is implied.
+ *  //
+ *  {@literal @Persistent}
+ *  class A extends B {
+ *      String name;
+ *      ...
+ *  }
+ *  {@literal @Persistent}
+ *  abstract class B {
+ *      ...
+ *  }
+ *
+ *  // The new classes.  A new version number must be assigned.
+ *  //
+ *  {@literal @Persistent(version=1)}
+ *  class A extends B {
+ *      ...
+ *  }
+ *  {@literal @Persistent(version=1)}
+ *  abstract class B {
+ *      String name;
+ *      ...
+ *  }
+ *
+ *  class MyConversion4 implements Conversion {
+ *      private transient RawType newAType;
+ *      private transient RawType newBType;
+ *
+ *      public void initialize(EntityModel model) {
+ *          newAType = model.getRawType(A.class.getName());
+ *          newBType = model.getRawType(B.class.getName());
+ *      }
+ *
+ *      public Object convert(Object fromValue) {
+ *          RawObject oldA = (RawObject) fromValue;
+ *          RawObject oldB = oldA.getSuper();
+ *          {@literal Map<String,Object> aValues = oldA.getValues();}
+ *          {@literal Map<String,Object> bValues = oldB.getValues();}
+ *          bValues.put("name", aValues.remove("name"));
+ *          RawObject newB = new RawObject(newBType, bValues, oldB.getSuper());
+ *          RawObject newA = new RawObject(newAType, aValues, newB);
+ *          return newA;
+ *      }
+ *
+ *      {@code @Override}
+ *      public boolean equals(Object o) {
+ *          return o instanceof MyConversion4;
+ *      }
+ *  }
+ *
+ *  // Create a class converter mutation.
+ *  //
+ *  Converter converter = new Converter(A.class.getName(), 0,
+ *                                      new MyConversion4());
+ *
+ *  // Configure the converter as described {@link Mutations here}.</pre>
+ *
+ * <p>A conversion may return an instance of a different class entirely, as
+ * long as it conforms to current class definitions and is the type expected
+ * in the given context (a subtype of the old type, or a type compatible with
+ * the new field type).  For example, a field that is used to discriminate
+ * between two types of objects could be removed and replaced by two new
+ * subclasses:</p> <pre class="code">
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Persistent}
+ *  class Pet {
+ *      boolean isCatNotDog;
+ *      ...
+ *  }
+ *
+ *  // The new classes.  A new version number must be assigned to the Pet class.
+ *  //
+ *  {@literal @Persistent(version=1)}
+ *  class Pet {
+ *      ...
+ *  }
+ *  {@literal @Persistent}
+ *  class Cat extends Pet {
+ *      ...
+ *  }
+ *  {@literal @Persistent}
+ *  class Dog extends Pet {
+ *      ...
+ *  }
+ *
+ *  class MyConversion5 implements Conversion {
+ *      private transient RawType newPetType;
+ *      private transient RawType dogType;
+ *      private transient RawType catType;
+ *
+ *      public void initialize(EntityModel model) {
+ *          newPetType = model.getRawType(Pet.class.getName());
+ *          dogType = model.getRawType(Dog.class.getName());
+ *          catType = model.getRawType(Cat.class.getName());
+ *      }
+ *
+ *      public Object convert(Object fromValue) {
+ *          RawObject pet = (RawObject) fromValue;
+ *          {@literal Map<String,Object> petValues = pet.getValues();}
+ *          Boolean isCat = (Boolean) petValues.remove("isCatNotDog");
+ *          RawObject newPet = new RawObject(newPetType, petValues,
+ *                                           pet.getSuper());
+ *          RawType newSubType = isCat ? catType : dogType;
+ *          return new RawObject(newSubType, Collections.emptyMap(), newPet);
+ *      }
+ *
+ *      {@code @Override}
+ *      public boolean equals(Object o) {
+ *          return o instanceof MyConversion5;
+ *      }
+ *  }
+ *
+ *  // Create a class converter mutation.
+ *  //
+ *  Converter converter = new Converter(Pet.class.getName(), 0,
+ *                                      new MyConversion5());
+ *
+ *  // Configure the converter as described {@link Mutations here}.</pre>
+ *
+ * <p>The primary limitation of a conversion is that it may access at most a
+ * single entity instance at one time.  Conversions involving multiple entities
+ * at once may be made by performing a <a
+ * href="package-summary.html#storeConversion">store conversion</a>.</p>
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public interface Conversion extends Serializable {
+
+    /**
+     * Initializes the conversion, allowing it to obtain raw type information
+     * from the entity model.
+     */
+    void initialize(EntityModel model);
+
+    /**
+     * Converts an old version of an object value to conform to the current
+     * class or field definition.
+     *
+     * <p>If a {@link RuntimeException} is thrown by this method, it will be
+     * thrown to the original caller.  Similarly, an {@link
+     * IllegalArgumentException} will be thrown to the original caller if the
+     * object returned by this method does not conform to current class
+     * definitions.</p>
+     *
+     * <p>The class of the input and output object may be one of the simple
+     * types or {@link RawObject}.  For primitive types, the primitive wrapper
+     * class is used.</p>
+     *
+     * @param fromValue the object value being converted.  The type of this
+     * value is defined by the old class version that is being converted.
+     *
+     * @return the converted object.  The type of this value must conform to
+     * a current class definition.  If this is a class conversion, it must
+     * be the current version of the class.  If this is a field conversion, it
+     * must be of a type compatible with the current declared type of the
+     * field.
+     */
+    Object convert(Object fromValue);
+
+    /**
+     * The standard {@code equals} method that must be implemented by
+     * the conversion class.
+     *
+     * <p>When mutations are specified when opening a store, the specified and
+     * previously stored mutations are compared for equality.  If they are
+     * equal, there is no need to replace the existing mutations in the stored
+     * catalog.  To accurately determine equality, the conversion class must
+     * implement the {@code equals} method.</p>
+     *
+     * <p>If the {@code equals} method is not explicitly implemented by the
+     * conversion class or a superclass other than {@code Object}, {@code
+     * IllegalArgumentException} will be thrown when the store is opened.</p>
+     *
+     * <p>Normally whenever {@code equals} is implemented the {@code hashCode}
+     * method should also be implemented to support hash sets and maps.
+     * However, hash sets and maps containing {@code Conversion} objects
+     * are not used by the DPL and therefore the DPL does not require
+     * {@code hashCode} to be implemented.</p>
+     */
+    boolean equals(Object other);
+}
diff --git a/src/com/sleepycat/persist/evolve/Converter.java b/src/com/sleepycat/persist/evolve/Converter.java
new file mode 100644
index 0000000000000000000000000000000000000000..7eec257835a5db05888a784c3df005dba99ca0a5
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Converter.java
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Converter.java,v 1.11.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.lang.reflect.Method;
+
+/**
+ * A mutation for converting an old version of an object value to conform to
+ * the current class or field definition.  For example:
+ *
+ * <pre class="code">
+ *  package my.package;
+ *
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Person {
+ *      // ...
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Entity(version=1)}
+ *  class Person {
+ *      // Incompatible changes were made here...
+ *  }
+ *
+ *  // Add a converter mutation.
+ *  //
+ *  Mutations mutations = new Mutations();
+ *
+ *  mutations.addConverter(new Converter(Person.class.getName(), 0,
+ *                                       new MyConversion()));
+ *
+ *  // Configure the mutations as described {@link Mutations here}.</pre>
+ *
+ * <p>See {@link Conversion} for more information.</p>
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class Converter extends Mutation {
+
+    private static final long serialVersionUID = 4558176842096181863L;
+
+    private Conversion conversion;
+
+    /**
+     * Creates a mutation for converting all instances of the given class
+     * version to the current version of the class.
+     */
+    public Converter(String className,
+                     int classVersion,
+                     Conversion conversion) {
+        this(className, classVersion, null, conversion);
+    }
+
+    /**
+     * Creates a mutation for converting all values of the given field in the
+     * given class version to a type compatible with the current declared type
+     * of the field.
+     */
+    public Converter(String declaringClassName,
+                     int declaringClassVersion,
+                     String fieldName,
+                     Conversion conversion) {
+        super(declaringClassName, declaringClassVersion, fieldName);
+        this.conversion = conversion;
+
+        /* Require explicit implementation of the equals method. */
+        Class<?> cls = conversion.getClass();
+        try {
+            Method m = cls.getMethod("equals", Object.class);
+            if (m.getDeclaringClass() == Object.class) {
+                throw new IllegalArgumentException
+                    ("Conversion class does not implement the equals method " +
+                     "explicitly (Object.equals is not sufficient): " +
+                     cls.getName());
+            }
+        } catch (NoSuchMethodException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    /**
+     * Returns the converter instance specified to the constructor.
+     */
+    public Conversion getConversion() {
+        return conversion;
+    }
+
+    /**
+     * Returns true if the conversion objects are equal in this object and
+     * given object, and if the {@link Mutation#equals} superclass method
+     * returns true.
+     */
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Converter) {
+            Converter o = (Converter) other;
+            return conversion.equals(o.conversion) &&
+                   super.equals(other);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return conversion.hashCode() + super.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "[Converter " + super.toString() +
+               " Conversion: " + conversion + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/DeletedClassException.java b/src/com/sleepycat/persist/evolve/DeletedClassException.java
new file mode 100644
index 0000000000000000000000000000000000000000..297b6353705694284c13aaa70327244b6a4d13a3
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/DeletedClassException.java
@@ -0,0 +1,24 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeletedClassException.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+
+/**
+ * While reading from an index, an instance of a deleted class version was
+ * encountered.
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class DeletedClassException extends RuntimeException {
+
+    public DeletedClassException(String msg) {
+        super(msg);
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/Deleter.java b/src/com/sleepycat/persist/evolve/Deleter.java
new file mode 100644
index 0000000000000000000000000000000000000000..cee3259b505d7d47534e59a3685b4648af1ed1cd
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Deleter.java
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Deleter.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * A mutation for deleting an entity class or field.
+ *
+ * <p><strong>WARNING:</strong> The data for the deleted class or field will be
+ * destroyed and will be recoverable only by restoring from backup.  If you
+ * wish to convert the instance data to a different type or format, use a
+ * {@link Conversion} mutation instead.</p>
+ *
+ * <p>For example, to delete a field:</p>
+ *
+ * <pre class="code">
+ *  package my.package;
+ *
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Person {
+ *      String name;
+ *      String favoriteColors;
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Entity(version=1)}
+ *  class Person {
+ *      String name;
+ *  }
+ *
+ *  // Add the mutation for deleting a field.
+ *  //
+ *  Mutations mutations = new Mutations();
+ *
+ *  mutations.addDeleter(new Deleter(Person.class.getName(), 0,
+ *                                   "favoriteColors");
+ *
+ *  // Configure the mutations as described {@link Mutations here}.</pre>
+ *
+ * <p>To delete an entity class:</p>
+ *
+ * <pre class="code">
+ *  package my.package;
+ *
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Statistics {
+ *      ...
+ *  }
+ *
+ *  // Add the mutation for deleting a class.
+ *  //
+ *  Mutations mutations = new Mutations();
+ *
+ *  mutations.addDeleter(new Deleter("my.package.Statistics", 0));
+ *
+ *  // Configure the mutations as described {@link Mutations here}.</pre>
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class Deleter extends Mutation {
+
+    private static final long serialVersionUID = 446348511871654947L;
+
+    /**
+     * Creates a mutation for deleting an entity class.
+     */
+    public Deleter(String className, int classVersion) {
+        super(className, classVersion, null);
+    }
+
+    /**
+     * Creates a mutation for deleting the given field from all instances of
+     * the given class version.
+     */
+    public Deleter(String declaringClass, int declaringClassVersion,
+                   String fieldName) {
+        super(declaringClass, declaringClassVersion, fieldName);
+    }
+
+    @Override
+    public String toString() {
+        return "[Deleter " + super.toString() + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/EntityConverter.java b/src/com/sleepycat/persist/evolve/EntityConverter.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f5fffe6b1d19cb530118664178d90996faffa89
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EntityConverter.java
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityConverter.java,v 1.11.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * A subclass of Converter that allows specifying keys to be deleted.
+ *
+ * <p>When a Converter is used with an entity class, secondary keys cannot be
+ * automatically deleted based on field deletion, because field Deleter objects
+ * are not used in conjunction with a Converter mutation.  The EntityConverter
+ * can be used instead of a plain Converter to specify the key names to be
+ * deleted.</p>
+ *
+ * <p>It is not currently possible to rename or insert secondary keys when
+ * using a Converter mutation with an entity class.</p>
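+ *
+ * <p>For example, a sketch that deletes a secondary key named "oldKey" as
+ * part of an entity conversion (the class and conversion names are
+ * hypothetical):</p>
+ *
+ * <pre class="code">
+ *  {@literal Set<String> deletedKeys = new HashSet<String>();}
+ *  deletedKeys.add("oldKey");
+ *
+ *  mutations.addConverter(new EntityConverter(Person.class.getName(), 0,
+ *                                             new MyConversion(),
+ *                                             deletedKeys));</pre>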
+ *
+ * @see Converter
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class EntityConverter extends Converter {
+
+    private static final long serialVersionUID = -988428985370593743L;
+
+    private Set<String> deletedKeys;
+
+    /**
+     * Creates a mutation for converting all instances of the given entity
+     * class version to the current version of the class.
+     */
+    public EntityConverter(String entityClassName,
+                           int classVersion,
+                           Conversion conversion,
+                           Set<String> deletedKeys) {
+        super(entityClassName, classVersion, null, conversion);
+
+        /* Eclipse objects to assigning with a ternary operator. */
+        if (deletedKeys != null) {
+            this.deletedKeys = new HashSet<String>(deletedKeys);
+        } else {
+            this.deletedKeys = Collections.emptySet();
+        }
+    }
+
+    /**
+     * Returns the set of key names that are to be deleted.
+     */
+    public Set<String> getDeletedKeys() {
+        return Collections.unmodifiableSet(deletedKeys);
+    }
+
+    /**
+     * Returns true if the deleted and renamed keys are equal in this object
+     * and given object, and if the {@link Converter#equals} superclass method
+     * returns true.
+     */
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof EntityConverter) {
+            EntityConverter o = (EntityConverter) other;
+            return deletedKeys.equals(o.deletedKeys) &&
+                   super.equals(other);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return deletedKeys.hashCode() + super.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "[EntityConverter " + super.toString() +
+               " DeletedKeys: " + deletedKeys + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/EvolveConfig.java b/src/com/sleepycat/persist/evolve/EvolveConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..ff55502c1bcbd5ddbb15f7aab02d23dca3ae7a5d
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EvolveConfig.java
@@ -0,0 +1,76 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveConfig.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.persist.EntityStore;
+
+/**
+ * Configuration properties for eager conversion of unevolved objects.  This
+ * configuration is used with {@link EntityStore#evolve EntityStore.evolve}.
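+ *
+ * For example (a sketch; the entity class name is a placeholder):
+ *
+ * <pre class="code">
+ *  EvolveConfig config = new EvolveConfig();
+ *  config.addClassToEvolve("my.package.Person");
+ *  EvolveStats stats = store.evolve(config);</pre>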
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class EvolveConfig implements Cloneable {
+
+    private Set<String> classesToEvolve;
+    private EvolveListener listener;
+
+    /**
+     * Creates an evolve configuration with default properties.
+     */
+    public EvolveConfig() {
+        classesToEvolve = new HashSet<String>();
+    }
+
+    /**
+     * Returns a shallow copy of the configuration.
+     */
+    public EvolveConfig cloneConfig() {
+        try {
+            return (EvolveConfig) clone();
+        } catch (CloneNotSupportedException cannotHappen) {
+            return null;
+        }
+    }
+
+    /**
+     * Adds an entity class for a primary index to be converted.  If no classes
+     * are added, all indexes that require evolution will be converted.
+     */
+    public void addClassToEvolve(String entityClass) {
+        classesToEvolve.add(entityClass);
+    }
+
+    /**
+     * Returns an unmodifiable set of the entity classes to be evolved.
+     */
+    public Set<String> getClassesToEvolve() {
+        return Collections.unmodifiableSet(classesToEvolve);
+    }
+
+    /**
+     * Sets a progress listener that is notified each time an entity is read.
+     */
+    public void setEvolveListener(EvolveListener listener) {
+        this.listener = listener;
+    }
+
+    /**
+     * Returns the progress listener that is notified each time an entity is
+     * read.
+     */
+    public EvolveListener getEvolveListener() {
+        return listener;
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/EvolveEvent.java b/src/com/sleepycat/persist/evolve/EvolveEvent.java
new file mode 100644
index 0000000000000000000000000000000000000000..80734b7699b4d5da554c67bcd392b9fc3f597780
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EvolveEvent.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveEvent.java,v 1.5.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * The event passed to the EvolveListener interface during eager entity
+ * evolution.
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class EvolveEvent {
+
+    private EvolveStats stats;
+    private String entityClassName;
+
+    EvolveEvent() {
+        this.stats = new EvolveStats();
+    }
+
+    void update(String entityClassName) {
+        this.entityClassName = entityClassName;
+    }
+
+    /**
+     * The cumulative statistics gathered during eager evolution.
+     */
+    public EvolveStats getStats() {
+        return stats;
+    }
+
+    /**
+     * The class name of the entity class currently being converted.
+     */
+    public String getEntityClassName() {
+        return entityClassName;
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/EvolveInternal.java b/src/com/sleepycat/persist/evolve/EvolveInternal.java
new file mode 100644
index 0000000000000000000000000000000000000000..73eae3e82e0ca2116f3cbc22af0eb9e9c6198e1e
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EvolveInternal.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveInternal.java,v 1.6.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * <!-- begin JE only -->
+ * @hidden
+ * <!-- end JE only -->
+ * Internal access class that should not be used by applications.
+ *
+ * @author Mark Hayes
+ */
+public class EvolveInternal {
+
+    /**
+     * Internal access method that should not be used by applications.
+     */
+    public static EvolveEvent newEvent() {
+        return new EvolveEvent();
+    }
+
+    /**
+     * Internal access method that should not be used by applications.
+     */
+    public static void updateEvent(EvolveEvent event,
+                                   String entityClassName,
+                                   int nRead,
+                                   int nConverted) {
+        event.update(entityClassName);
+        event.getStats().add(nRead, nConverted);
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/EvolveListener.java b/src/com/sleepycat/persist/evolve/EvolveListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..e529f441aa4c9e334ebd541a764e13f0d5f505b5
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EvolveListener.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveListener.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * The listener interface called during eager entity evolution.
+ *
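+ * For example, a listener that prints progress and always continues (a
+ * sketch; the EvolveConfig instance is assumed):
+ *
+ * <pre class="code">
+ *  config.setEvolveListener(new EvolveListener() {
+ *      public boolean evolveProgress(EvolveEvent event) {
+ *          EvolveStats stats = event.getStats();
+ *          System.out.println("Read: " + stats.getNRead() +
+ *                             ", converted: " + stats.getNConverted());
+ *          return true; // return false to stop evolution
+ *      }
+ *  });</pre>
+ *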
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public interface EvolveListener {
+
+    /**
+     * The listener method called during eager entity evolution.
+     *
+     * @return true to continue evolution or false to stop.
+     */
+    boolean evolveProgress(EvolveEvent event);
+}
diff --git a/src/com/sleepycat/persist/evolve/EvolveStats.java b/src/com/sleepycat/persist/evolve/EvolveStats.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6c1043d6439aa82ea72cdaee1911ca2d096b2de
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/EvolveStats.java
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveStats.java,v 1.8.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * Statistics accumulated during eager entity evolution.
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class EvolveStats {
+
+    private int nRead;
+    private int nConverted;
+
+    EvolveStats() {
+    }
+
+    void add(int nRead, int nConverted) {
+        this.nRead += nRead;
+        this.nConverted += nConverted;
+    }
+
+    /**
+     * The total number of entities read during eager evolution.
+     */
+    public int getNRead() {
+        return nRead;
+    }
+
+    /**
+     * The total number of entities converted during eager evolution.
+     */
+    public int getNConverted() {
+        return nConverted;
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/IncompatibleClassException.java b/src/com/sleepycat/persist/evolve/IncompatibleClassException.java
new file mode 100644
index 0000000000000000000000000000000000000000..e29178b2fbce99e020a634fbf4959487fd3b480f
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/IncompatibleClassException.java
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IncompatibleClassException.java,v 1.7.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * A class has been changed incompatibly, and either no mutation has been
+ * configured to handle the change or a new class version number has not been
+ * assigned.
+ *
+ * @see com.sleepycat.persist.EntityStore#EntityStore EntityStore.EntityStore
+ * @see com.sleepycat.persist.model.Entity#version
+ * @see com.sleepycat.persist.model.Persistent#version
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class IncompatibleClassException extends RuntimeException {
+
+    public IncompatibleClassException(String msg) {
+        super(msg);
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/Mutation.java b/src/com/sleepycat/persist/evolve/Mutation.java
new file mode 100644
index 0000000000000000000000000000000000000000..ba9655dd398e89dba6fcf0cfe8aa4f7b420a701e
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Mutation.java
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Mutation.java,v 1.9.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.io.Serializable;
+
+/**
+ * The base class for all mutations.
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public abstract class Mutation implements Serializable {
+
+    private static final long serialVersionUID = -8094431582953129268L;
+
+    private String className;
+    private int classVersion;
+    private String fieldName;
+
+    Mutation(String className, int classVersion, String fieldName) {
+        this.className = className;
+        this.classVersion = classVersion;
+        this.fieldName = fieldName;
+    }
+
+    /**
+     * Returns the class to which this mutation applies.
+     */
+    public String getClassName() {
+        return className;
+    }
+
+    /**
+     * Returns the class version to which this mutation applies.
+     */
+    public int getClassVersion() {
+        return classVersion;
+    }
+
+    /**
+     * Returns the field name to which this mutation applies, or null if this
+     * mutation applies to the class itself.
+     */
+    public String getFieldName() {
+        return fieldName;
+    }
+
+    /**
+     * Returns true if the class name, class version and field name are equal
+     * in this object and the given object.
+     */
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Mutation) {
+            Mutation o = (Mutation) other;
+            return className.equals(o.className) &&
+                   classVersion == o.classVersion &&
+                   ((fieldName != null) ? fieldName.equals(o.fieldName)
+                                        : (o.fieldName == null));
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return className.hashCode() +
+               classVersion +
+               ((fieldName != null) ? fieldName.hashCode() : 0);
+    }
+
+    @Override
+    public String toString() {
+        return "Class: " + className + " Version: " + classVersion +
+               ((fieldName != null) ? (" Field: " + fieldName) : "");
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/Mutations.java b/src/com/sleepycat/persist/evolve/Mutations.java
new file mode 100644
index 0000000000000000000000000000000000000000..06826e5afa818bef1b0f200c12addf375de584fb
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Mutations.java
@@ -0,0 +1,201 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Mutations.java,v 1.15.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+/**
+ * A collection of mutations for configuring class evolution.
+ *
+ * <p>Mutations are configured when a store is opened via {@link
+ * StoreConfig#setMutations StoreConfig.setMutations}.  For example:</p>
+ *
+ * <pre class="code">
+ *  Mutations mutations = new Mutations();
+ *  // Add mutations...
+ *  StoreConfig config = new StoreConfig();
+ *  config.setMutations(mutations);
+ *  EntityStore store = new EntityStore(env, "myStore", config);</pre>
+ *
+ * <p>Mutations cause data conversion to occur lazily as instances are read
+ * from the store.  The {@link EntityStore#evolve EntityStore.evolve} method
+ * may also be used to perform eager conversion.</p>
+ *
+ * <p>Not all incompatible class changes can be handled via mutations.  For
+ * example, complex refactoring may require a transformation that manipulates
+ * multiple entity instances at once.  Such changes are not possible with
+ * mutations but can made by performing a <a
+ * href="package-summary.html#storeConversion">store conversion</a>.</p>
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class Mutations implements Serializable {
+
+    private static final long serialVersionUID = -1744401530444812916L;
+
+    private Map<Mutation,Renamer> renamers;
+    private Map<Mutation,Deleter> deleters;
+    private Map<Mutation,Converter> converters;
+
+    /**
+     * Creates an empty set of mutations.
+     */
+    public Mutations() {
+        renamers = new HashMap<Mutation,Renamer>();
+        deleters = new HashMap<Mutation,Deleter>();
+        converters = new HashMap<Mutation,Converter>();
+    }
+
+    /**
+     * Returns true if no mutations are present.
+     */
+    public boolean isEmpty() {
+        return renamers.isEmpty() &&
+               deleters.isEmpty() &&
+               converters.isEmpty();
+    }
+
+    /**
+     * Adds a renamer mutation.
+     */
+    public void addRenamer(Renamer renamer) {
+        renamers.put(new Key(renamer), renamer);
+    }
+
+    /**
+     * Returns the renamer mutation for the given class, version and field, or
+     * null if none exists.  A null field name should be specified to get a
+     * class renamer.
+     */
+    public Renamer getRenamer(String className,
+                              int classVersion,
+                              String fieldName) {
+        return renamers.get(new Key(className, classVersion, fieldName));
+    }
+
+    /**
+     * Returns an unmodifiable collection of all renamer mutations.
+     */
+    public Collection<Renamer> getRenamers() {
+        return renamers.values();
+    }
+
+    /**
+     * Adds a deleter mutation.
+     */
+    public void addDeleter(Deleter deleter) {
+        deleters.put(new Key(deleter), deleter);
+    }
+
+    /**
+     * Returns the deleter mutation for the given class, version and field, or
+     * null if none exists.  A null field name should be specified to get a
+     * class deleter.
+     */
+    public Deleter getDeleter(String className,
+                              int classVersion,
+                              String fieldName) {
+        return deleters.get(new Key(className, classVersion, fieldName));
+    }
+
+    /**
+     * Returns an unmodifiable collection of all deleter mutations.
+     */
+    public Collection<Deleter> getDeleters() {
+        return deleters.values();
+    }
+
+    /**
+     * Adds a converter mutation.
+     */
+    public void addConverter(Converter converter) {
+        converters.put(new Key(converter), converter);
+    }
+
+    /**
+     * Returns the converter mutation for the given class, version and field,
+     * or null if none exists.  A null field name should be specified to get a
+     * class converter.
+     */
+    public Converter getConverter(String className,
+                                  int classVersion,
+                                  String fieldName) {
+        return converters.get(new Key(className, classVersion, fieldName));
+    }
+
+    /**
+     * Returns an unmodifiable collection of all converter mutations.
+     */
+    public Collection<Converter> getConverters() {
+        return converters.values();
+    }
+
+    private static class Key extends Mutation {
+        static final long serialVersionUID = 2793516787097085621L;
+
+        Key(String className, int classVersion, String fieldName) {
+            super(className, classVersion, fieldName);
+        }
+
+        Key(Mutation mutation) {
+            super(mutation.getClassName(),
+                  mutation.getClassVersion(),
+                  mutation.getFieldName());
+        }
+    }
+
+    /**
+     * Returns true if this collection has the same set of mutations as the
+     * given collection and all mutations are equal.
+     */
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Mutations) {
+            Mutations o = (Mutations) other;
+            return renamers.equals(o.renamers) &&
+                   deleters.equals(o.deleters) &&
+                   converters.equals(o.converters);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return renamers.hashCode() +
+               deleters.hashCode() +
+               converters.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer();
+        if (renamers.size() > 0) {
+            buf.append(renamers.values());
+        }
+        if (deleters.size() > 0) {
+            buf.append(deleters.values());
+        }
+        if (converters.size() > 0) {
+            buf.append(converters.values());
+        }
+        if (buf.length() > 0) {
+            return buf.toString();
+        } else {
+            return "[Empty Mutations]";
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/Renamer.java b/src/com/sleepycat/persist/evolve/Renamer.java
new file mode 100644
index 0000000000000000000000000000000000000000..a0df83ae57fec69bc3aa774cc328d45bb40ed449
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/Renamer.java
@@ -0,0 +1,103 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Renamer.java,v 1.10.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.evolve;
+
+/**
+ * A mutation for renaming a class or field without changing the instance or
+ * field value.  For example:
+ * <pre class="code">
+ *  package my.package;
+ *
+ *  // The old class.  Version 0 is implied.
+ *  //
+ *  {@literal @Entity}
+ *  class Person {
+ *      String name;
+ *  }
+ *
+ *  // The new class.  A new version number must be assigned.
+ *  //
+ *  {@literal @Entity(version=1)}
+ *  class Human {
+ *      String fullName;
+ *  }
+ *
+ *  // Add the mutations.
+ *  //
+ *  Mutations mutations = new Mutations();
+ *
+ *  mutations.addRenamer(new Renamer("my.package.Person", 0,
+ *                                   Human.class.getName()));
+ *
+ *  mutations.addRenamer(new Renamer("my.package.Person", 0,
+ *                                   "name", "fullName"));
+ *
+ *  // Configure the mutations as described {@link Mutations here}.</pre>
+ *
+ * @see com.sleepycat.persist.evolve Class Evolution
+ * @author Mark Hayes
+ */
+public class Renamer extends Mutation {
+
+    private static final long serialVersionUID = 2238151684405810427L;
+
+    private String newName;
+
+    /**
+     * Creates a mutation for renaming the class of all instances of the given
+     * class version.
+     */
+    public Renamer(String fromClass, int fromVersion, String toClass) {
+        super(fromClass, fromVersion, null);
+        newName = toClass;
+    }
+
+    /**
+     * Creates a mutation for renaming the given field for all instances of the
+     * given class version.
+     */
+    public Renamer(String declaringClass, int declaringClassVersion,
+                   String fromField, String toField) {
+        super(declaringClass, declaringClassVersion, fromField);
+        newName = toField;
+    }
+
+    /**
+     * Returns the new class or field name specified in the constructor.
+     */
+    public String getNewName() {
+        return newName;
+    }
+
+    /**
+     * Returns true if the new name is equal in this object and the given
+     * object, and if the {@link Mutation#equals} method returns true.
+     */
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Renamer) {
+            Renamer o = (Renamer) other;
+            return newName.equals(o.newName) &&
+                   super.equals(other);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return newName.hashCode() + super.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "[Renamer " + super.toString() +
+               " NewName: " + newName + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/evolve/package.html b/src/com/sleepycat/persist/evolve/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..69fd90c600337c3606e2382707a4df46ea1289cd
--- /dev/null
+++ b/src/com/sleepycat/persist/evolve/package.html
@@ -0,0 +1,306 @@
+<!-- $Id: package.html,v 1.8 2008/06/06 19:23:03 mark Exp $ -->
+<html>
+<body>
+Utilities for managing class evolution of persistent objects.
+
+<h1>Class Evolution</h1>
+
+<p>For persistent data that is not short lived, changes to persistent classes
+are almost inevitable.  Some changes are compatible with existing types, and
+data conversion for these changes is performed automatically and transparently.
+Other changes are not compatible with existing types.  Mutations can be used to
+explicitly manage many types of incompatible changes.</p>
+
+<p>Not all incompatible class changes can be handled via mutations.  For
+example, complex refactoring may require a transformation that manipulates
+multiple entity instances at once.  Such changes are not possible with
+mutations but can be made by performing a <a href="#storeConversion">store
+conversion</a>.</p>
+
+<p>The different categories of type changes are described below.</p>
+
+<h2>Key Field Changes</h2>
+
+<p>Unlike entity data, key data is not versioned.  Therefore, the physical key
+format for an index is fixed once the index has been opened, and the changes
+allowed for key fields are very limited.  The only changes allowed for key
+fields are:</p>
+<ul>
+<li>The name of a key field may be changed, as long as this change is
+accompanied by a {@link com.sleepycat.persist.evolve.Renamer} mutation.</li>
+<li>A primitive type may be changed to its corresponding primitive wrapper
+type.  This is a compatible change.</li>
+<li>For primary key fields and fields of a composite key class, a primitive
+wrapper type may be changed to its corresponding primitive type.  This is
+allowed because these key fields with reference types may never have null
+values.  This is a compatible change.</li>
+</ul>
+
+<p>Any other changes to a key field are incompatible and may be made only by
+performing a <a href="#storeConversion">store conversion</a>.</p>
+
+<p>Key ordering, including the behavior of a custom {@link
+java.lang.Comparable}, is also fixed, since keys are stored in order in the
+index.  The specifications for key ordering may not be changed, and the
+developer is responsible for not changing the behavior of a {@code Comparable}
+key class.  <strong>WARNING:</strong> Changing the behavior of a {@code
+Comparable} key class is likely to make the index unusable.</p>
+
+<h2>Compatible Type Changes</h2>
+
+<p>Entity data, unlike key data, is versioned.  Therefore, some changes can be
+made compatibly and other changes can be handled via mutations.  Compatible
+changes are defined below.  To make a compatible class change, a mutation is
+not required; however, the class version must be assigned a new (greater)
+integer value.</p>
+
+<p>Changes to a class hierarchy are compatible in some cases.  A new class may
+be inserted in the hierarchy.  A class may be deleted from the hierarchy as
+long as one of the following is true: 1) it contains no persistent fields, 2)
+any persistent fields are deleted with field Deleter mutations, or 3) the class
+is deleted with a class Deleter mutation.  Classes in an existing hierarchy may
+not be reordered compatibly, and fields may not be moved from one class to another
+compatibly; for such changes a class Converter mutation is required.</p>
+
+<p>Changes to field types in entity class definitions are compatible when they
+conform to the Java Language Specification definitions for <a
+href="http://java.sun.com/docs/books/jls/third_edition/html/conversions.html#5.1.2">Widening
+Primitive Conversions</a> and <a
+href="http://java.sun.com/docs/books/jls/third_edition/html/conversions.html#5.1.5">Widening
+Reference Conversions</a>.  For example, a smaller integer
+type may be changed to a larger integer type, and a reference type may be
+changed to one of its supertypes.  Automatic widening conversions are performed
+as described in the Java Language Specification.</p>
+
+<p>Primitive types may also be compatibly changed to their corresponding
+primitive wrapper types, or to the wrapper type for a widened primitive type.
+However, changing from a primitive wrapper type to a primitive type is not a
+compatible change since existing null values could not be represented.</p>
+
+<p>Integer primitive types (byte, short, char, int, long) and their primitive
+wrapper types may be compatibly changed to the BigInteger type.</p>
+
+<p>In addition, adding fields to a class is a compatible change.  When a
+persistent instance of a class is read that does not contain the new field, the
+new field is initialized by the default constructor.</p>
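+
+<p>For illustration, a minimal sketch of a compatible change is shown below;
+the class and field names are hypothetical.  A primitive field is widened, a
+new field is added, and a new (greater) class version is assigned:</p>
+<pre class="code">
+    // Version 0 (original).
+    {@literal @Persistent}
+    class Address {
+        int zipCode;
+    }
+
+    // Version 1, containing only compatible changes.
+    {@literal @Persistent(version=1)}
+    class Address {
+        long zipCode;      // widened primitive type
+        String country;    // new field, initialized by the default constructor
+    }
+</pre>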
+
+<p>All other changes to instance fields are considered incompatible.
+Incompatible changes may be handled via mutations, as described next.</p>
+
+<p>Note that whenever a class is changed, either compatibly or incompatibly, a
+new (higher) class version number must be assigned.  See {@link
+com.sleepycat.persist.model.Entity#version} and {@link
+com.sleepycat.persist.model.Persistent#version} for information on assigning
+class version numbers.</p>
+
+<h2>Mutations</h2>
+
+<p>There are three types of mutations: {@link
+com.sleepycat.persist.evolve.Renamer}, {@link
+com.sleepycat.persist.evolve.Deleter} and {@link
+com.sleepycat.persist.evolve.Converter}.</p>
+
+<p>A class or field can be renamed using a {@link
+com.sleepycat.persist.evolve.Renamer}.  Renaming is not expensive, since it
+does not involve conversion of instance data.</p>
+
+<p>A class or field can be deleted using a {@link
+com.sleepycat.persist.evolve.Deleter}.</p>
+<ul>
+<li>Deleting an entity class causes removal of the primary and secondary
+indices for the store, in other words, removal of all store entities for that
+class and its subclasses.  Removal is performed when the store is opened.  A
+{@link com.sleepycat.persist.evolve.Deleter} should be used for an entity class
+in all of the following circumstances:
+  <ul>
+  <li>When removing the entity class itself.</li>
+  <li>When removing {@link com.sleepycat.persist.model.Entity} from the class
+  to make it non-persistent.</li>
+  <li>When removing {@link com.sleepycat.persist.model.Entity} from the class
+  and adding {@link com.sleepycat.persist.model.Persistent}, to use it as an
+  embedded persistent class but not an entity class.  The version of the class
+  must be incremented in this case.</li>
+  </ul>
+</li>
+
+<li>Deleting a non-entity class does not itself cause deletion of instance
+data, but is needed to inform the DPL that the deleted class will not be used.
+Instances of the deleted class must be handled (discarded or converted to
+another class) by {@link com.sleepycat.persist.evolve.Deleter} or {@link
+com.sleepycat.persist.evolve.Converter} mutations for the field or enclosing
+class that contain embedded instances of the deleted class.  A {@link
+com.sleepycat.persist.evolve.Deleter} should be used for a non-entity class in
+all of the following circumstances:
+  <ul>
+  <li>When removing the persistent class itself.</li>
+  <li>When removing {@link com.sleepycat.persist.model.Persistent} from the
+  class to make it non-persistent.</li>
+  <li>When removing {@link com.sleepycat.persist.model.Persistent} from the
+  class and adding {@link com.sleepycat.persist.model.Entity}, to use it as an
+  entity class but not an embedded persistent class.  The version of the class
+  must be incremented in this case.</li>
+  </ul>
+</li>
+
+<li>Deleting a field causes automatic conversion of the instances containing
+that field, in order to discard the field values.</li>
+</ul>
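+
+<p>For example, class and field deleters might be configured roughly as
+follows.  The class and field names are hypothetical, and the sketch assumes
+class-level and field-level {@link com.sleepycat.persist.evolve.Deleter}
+constructors corresponding to {@link
+com.sleepycat.persist.evolve.Mutations#getDeleter Mutations.getDeleter}:</p>
+<pre class="code">
+    Mutations mutations = new Mutations();
+
+    // Delete the entity class my.package.OldEntity, version 0, and with it
+    // all stored instances of that class and its subclasses.
+    mutations.addDeleter(new Deleter("my.package.OldEntity", 0));
+
+    // Delete the field "obsoleteField" from my.package.Person, version 0.
+    mutations.addDeleter(new Deleter("my.package.Person", 0, "obsoleteField"));
+</pre>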
+
+<p>Other incompatible changes are handled by creating a {@link
+com.sleepycat.persist.evolve.Converter} mutation and implementing a {@link
+com.sleepycat.persist.evolve.Conversion#convert Conversion.convert} method that
+manipulates the raw objects and/or simple values directly.  The {@code convert}
+method is passed an object of the old incompatible type and it returns an
+object of a current type.</p>
+
+<p>Conversions can be specified in two ways: for specific fields or for all
+instances of a class.  A different {@link
+com.sleepycat.persist.evolve.Converter} constructor is used in each case.
+Field-specific conversions are used instead of class conversions when both are
+applicable.</p>
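+
+<p>As an illustrative sketch only (the class, field and conversion names below
+are hypothetical), a field-specific conversion that changes an {@code int} zip
+code into a {@code String} might look like this:</p>
+<pre class="code">
+    class ZipCodeConversion implements Conversion {
+
+        public void initialize(EntityModel model) {
+            // No model metadata is needed by this simple conversion.
+        }
+
+        public Object convert(Object fromValue) {
+            // The old field value is passed in; return the new field value.
+            return String.valueOf(((Integer) fromValue).intValue());
+        }
+
+        {@literal @Override}
+        public boolean equals(Object o) {
+            return o instanceof ZipCodeConversion;
+        }
+    }
+
+    // Register the conversion for the "zipCode" field of version 0 of the
+    // my.package.Person class.
+    Mutations mutations = new Mutations();
+    mutations.addConverter(new Converter
+        ("my.package.Person", 0, "zipCode", new ZipCodeConversion()));
+</pre>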
+
+<p>Note that each mutation is applied to a specific class version number.  The
+class version must be explicitly specified in a mutation for two reasons:</p>
+<ol>
+<li>This provides safety in the face of multiple unconverted versions of a
+given type.  Without a version, a single conversion method would have to handle
+multiple input types, and would have to distinguish between them by examining
+the data or type information.</li>
+<li>This allows arbitrary changes to be made.  For example, a series of name
+changes may reuse a given name for more than one version.  To identify the
+specific type being converted or renamed, a version number is needed.</li>
+</ol>
+<p>See {@link com.sleepycat.persist.model.Entity#version} and {@link
+com.sleepycat.persist.model.Persistent#version} for information on assigning
+class version numbers.</p>
+
+<p>Mutations are therefore responsible for converting each existing
+incompatible class version to the current version as defined by a current class
+definition.  For example, consider that class-version A-1 is initially changed
+to A-2 and a mutation is added for converting A-1 to A-2.  If later changes in
+version A-3 occur before converting all A-1 instances to version A-2, the
+converter for A-1 will have to be changed.  Instead of converting from A-1 to
+A-2 it will need to convert from A-1 to A-3.  In addition, a mutation
+converting A-2 to A-3 will be needed.</p>
+
+<p>When a {@link com.sleepycat.persist.evolve.Converter} mutation applies to a
+given object, other mutations that may apply to that object are not
+automatically performed.  It is the responsibility of the {@link
+com.sleepycat.persist.evolve.Converter} to return an object that conforms to
+the current class definition, including renaming fields and classes.  If the
+input object has nested objects or superclasses that also need conversion, the
+converter must perform these nested conversions before returning the final
+converted object.  This rule avoids the complexity and potential errors that
+could result if a converter mutation were automatically combined with other
+mutations in an arbitrary manner.</p>
+
+<p>The {@link com.sleepycat.persist.EntityStore#evolve EntityStore.evolve}
+method may optionally be used to ensure that all instances of an old class
+version are converted to the current version.</p>
+
+<h2>Other Metadata Changes</h2>
+
+<p>When a class that happens to be an entity class is renamed, it remains an
+entity class.  When a field that happens to be a primary or
+secondary key field is renamed, its metadata remains intact as well.</p>
+
+<p>When the {@link com.sleepycat.persist.model.SecondaryKey} annotation is
+added to an <em>existing</em> field, a new index is created automatically.  The
+new index will be populated by reading the entire primary index when the
+primary index is opened.</p>
+
+<p>When the {@link com.sleepycat.persist.model.SecondaryKey} annotation is
+included with a <em>new</em> field, a new index is created automatically.  The
+new field is required to be a reference type (not a primitive) and must be
+initialized to null (the default behavior) in the default constructor.
+Entities will be indexed by the field when they are stored with a non-null key
+value.</p>
+
+<p>When a field with the {@link com.sleepycat.persist.model.SecondaryKey}
+annotation is deleted, or when the {@link
+com.sleepycat.persist.model.SecondaryKey} annotation is removed from a field
+without deleting it, the secondary index is removed (dropped).  Removal occurs
+when the store is opened.</p>
+
+<p>The {@link com.sleepycat.persist.model.SecondaryKey#relate
+SecondaryKey.relate} property may NOT be changed.  All other properties of a
+{@link com.sleepycat.persist.model.SecondaryKey} may be changed, although
+avoiding changes that cause foreign key integrity errors is the responsibility
+of the application developer.  For example, if the {@link
+com.sleepycat.persist.model.SecondaryKey#relatedEntity} property is added but
+not all existing secondary keys reference existing primary keys for the related
+entity, foreign key integrity errors may occur.</p>
+
+<p>The {@link com.sleepycat.persist.model.PrimaryKey} annotation may NOT be
+removed from a field in an entity class.</p>
+
+<p>The {@link com.sleepycat.persist.model.PrimaryKey#sequence} property may be
+added, removed, or changed to a different name.</p>
+
+<p>The {@link com.sleepycat.persist.model.Persistent#proxyFor} property may NOT
+be added, removed, or changed to a different class.</p>
+
+<h2>Warnings on Testing and Backups</h2>
+
+<p>The application developer is responsible for verifying that class evolution
+works properly before deploying with a changed set of persistent classes.  The
+DPL will report errors when old class definitions cannot be evolved, for
+example, when a mutation is missing.  To test that no such errors will occur,
+application test cases must include instances of all persistent classes.</p>
+
+<p>Converter mutations require special testing.  Since the application
+conversion method is allowed to return instances of any type, the DPL cannot
+check that the proper type is returned until the data is accessed.  To avoid
+data access errors, application test cases must cover converter mutations for
+all potential input and output types.</p>
+
+<p>When secondary keys are dropped or entity classes are deleted, the
+underlying databases are deleted and cannot be recovered from the store.  This
+takes place when the store is opened.  It is strongly recommended that a backup
+of the entire store be made before opening the store and causing class
+evolution to proceed.</p>
+
+<h2><a name="storeConversion">Store Conversion</a></h2>
+
+<p>When mutations are not sufficient for handling class changes, a full store
+conversion may be performed.  This is necessary for two particular types of
+class changes:</p>
+<ul>
+<li>A change to a physical key format, for example, a change from type
+{@code int} to type {@code long}.</li>
+<li>A conversion that involves multiple entities at once, for example,
+combining two separate entity classes into a new single entity class.</li>
+</ul>
+
+<p>To perform a full store conversion, a program is written that performs the
+following steps to copy the data from the old store to a new converted
+store:</p>
+<ol>
+<li>The old store is opened as a {@link com.sleepycat.persist.raw.RawStore} and
+the new store is opened as an {@link com.sleepycat.persist.EntityStore}.</li>
+<li>All entities are read from the old store.  Entities are read using a {@link
+com.sleepycat.persist.raw.RawStore} to allow access to entities for which no
+compatible class exists.</li>
+<li>The {@link com.sleepycat.persist.raw.RawObject} entities are then converted
+to the format desired.  Raw objects can be arbitrarily manipulated as needed.
+The updated raw objects must conform to the new evolved class definitions.</li>
+<li>The updated raw entities are converted to live objects by calling the
+{@link com.sleepycat.persist.model.EntityModel#convertRawObject
+EntityModel.convertRawObject} method of the new store.  This method converts
+raw objects obtained from a different store, as long as they conform to the new
+evolved class definitions.</li>
+<li>The new live objects are written to the new {@link
+com.sleepycat.persist.EntityStore} using a {@link
+com.sleepycat.persist.PrimaryIndex} as usual.</li>
+</ol>
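+
+<p>The steps above might be coded roughly as follows.  This is only a sketch:
+the entity class names, key type and variable names are hypothetical, and
+exception handling is omitted:</p>
+<pre class="code">
+    // Open the old store raw and the new store with the evolved classes.
+    RawStore rawStore = new RawStore(oldEnv, "myStore", null);
+    EntityStore newStore = new EntityStore(newEnv, "myStore", storeConfig);
+    EntityModel model = newStore.getModel();
+    PrimaryIndex&lt;Long,NewEntity&gt; newIndex =
+        newStore.getPrimaryIndex(Long.class, NewEntity.class);
+
+    // Read all old entities as raw objects.
+    PrimaryIndex&lt;Object,RawObject&gt; rawIndex =
+        rawStore.getPrimaryIndex("my.package.OldEntity");
+    EntityCursor&lt;RawObject&gt; entities = rawIndex.entities();
+    try {
+        for (RawObject raw : entities) {
+            // Manipulate the raw object as needed so that it conforms to the
+            // new class definitions, then convert and store it.
+            NewEntity entity = (NewEntity) model.convertRawObject(raw);
+            newIndex.put(entity);
+        }
+    } finally {
+        entities.close();
+    }
+    rawStore.close();
+    newStore.close();
+</pre>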
+
+<p>To perform such a conversion, two separate stores must be open at once.
+Both stores may be in the same {@link com.sleepycat.je.Environment}, if
+desired, by giving them different store names.  But since all data is being
+rewritten, there are performance advantages to creating the new store in a new
+fresh environment: the data will be compacted as it is written, and the old
+store can be removed very quickly by deleting the old environment directory
+after the conversion is complete.</p>
+
+</body>
+</html>
diff --git a/src/com/sleepycat/persist/impl/AbstractInput.java b/src/com/sleepycat/persist/impl/AbstractInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..37481023c7a55ae773b5f6c980733d6a9a79d3c5
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/AbstractInput.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: AbstractInput.java,v 1.5.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+/**
+ * Base class for EntityInput implementations.  RecordInput cannot use this
+ * base class because it extends TupleInput, so it repeats the code here.
+ *
+ * @author Mark Hayes
+ */
+abstract class AbstractInput implements EntityInput {
+
+    Catalog catalog;
+    boolean rawAccess;
+
+    AbstractInput(Catalog catalog, boolean rawAccess) {
+        this.catalog = catalog;
+        this.rawAccess = rawAccess;
+    }
+
+    public Catalog getCatalog() {
+        return catalog;
+    }
+
+    public boolean isRawAccess() {
+        return rawAccess;
+    }
+
+    public boolean setRawAccess(boolean rawAccessParam) {
+        boolean original = rawAccess;
+        rawAccess = rawAccessParam;
+        return original;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Accessor.java b/src/com/sleepycat/persist/impl/Accessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..6a0815b06b1f930c7ffb57c4fb1e7d08186473fa
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Accessor.java
@@ -0,0 +1,227 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Accessor.java,v 1.13.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+/**
+ * Field binding operations implemented via reflection (ReflectionAccessor) or
+ * bytecode enhancement (EnhancedAccessor).
+ *
+ * <p>Normally we read the set of all secondary key fields first and then the
+ * set of all non-key fields, reading each set in order of field name.  But
+ * when reading an old format record we must account for the following
+ * class evolution conversions:</p>
+ * <ul>
+ * <li>Convert a field: pass value thru converter</li>
+ * <li>Widen a field type: pass value thru widener</li>
+ * <li>Add a field: don't read the new field</li>
+ * <li>Delete a field: skip the deleted field</li>
+ * <li>Rename a field: read field in a different order</li>
+ * </ul>
+ * <p>To support these operations, the methods for reading fields allow reading
+ * specific ranges of fields as well as all fields.  For example, all fields
+ * up to a deleted field could be read, and then all fields from the following
+ * field onward.</p>
+ *
+ * @author Mark Hayes
+ */
+interface Accessor {
+
+    /**
+     * A large field value to use instead of Integer.MAX_VALUE, to work around
+     * a Java JIT compiler bug when doing an (X <= Integer.MAX_VALUE)
+     * comparison, as would be done in readXxxKeyFields methods.
+     */
+    final int MAX_FIELD_NUM = Integer.MAX_VALUE - 1;
+
+    /**
+     * Creates a new instance of the target class using its default
+     * constructor.
+     */
+    Object newInstance();
+
+    /**
+     * Creates a new one dimensional array of the given length, having the
+     * target class as its component type.
+     *
+     * <p>Using a special method for a one dimensional array, which can be
+     * implemented by bytecode generation, is a compromise.  We use reflection
+     * to create multidimensional arrays.  We could in the future generate code
+     * to create arrays as they are encountered, if there is a need to avoid
+     * reflection for multidimensional arrays.</p>
+     */
+    Object newArray(int len);
+
+    /**
+     * Returns whether the primary key field is null (for a reference type) or
+     * zero (for a primitive integer type).  Null and zero are used as an
+     * indication that the key should be assigned from a sequence.
+     */
+    boolean isPriKeyFieldNullOrZero(Object o);
+
+    /**
+     * Writes the primary key field value to the given EntityOutput.
+     *
+     * <p>To write a primary key with a reference type, this method must call
+     * EntityOutput.writeKeyObject.</p>
+     *
+     * @param o is the object whose primary key field is to be written.
+     *
+     * @param output the output data to write to.
+     */
+    void writePriKeyField(Object o, EntityOutput output);
+
+    /**
+     * Reads the primary key field value from the given EntityInput.
+     *
+     * <p>To read a primary key with a reference type, this method must call
+     * EntityInput.readKeyObject.</p>
+     *
+     * @param o is the object whose primary key field is to be read.
+     *
+     * @param input the input data to read from.
+     */
+    void readPriKeyField(Object o, EntityInput input);
+
+    /**
+     * Writes all secondary key field values to the given EntityOutput,
+     * writing fields in super classes first and in name order within class.
+     *
+     * <p>If the primary key has a reference type, this method must call
+     * EntityOutput.registerPriKeyObject before writing any other fields.</p>
+     *
+     * @param o is the object whose secondary key fields are to be written.
+     *
+     * @param output the output data to write to.
+     */
+    void writeSecKeyFields(Object o, EntityOutput output);
+
+    /**
+     * Reads a range of secondary key field values from the given EntityInput,
+     * reading fields in super classes first and in name order within class.
+     *
+     * <p>If the primary key has a reference type, this method must call
+     * EntityInput.registerPriKeyObject before reading any other fields.</p>
+     *
+     * <p>To read all fields, pass -1 for superLevel, zero for startField and
+     * MAX_FIELD_NUM for endField.  Fields from super classes are read
+     * first.</p>
+     *
+     * <p>To read a specific range of fields, pass a non-negative number for
+     * superLevel and the specific indices of the field range to be read in the
+     * class at that level.</p>
+     *
+     * @param o is the object whose secondary key fields are to be read.
+     *
+     * @param input the input data to read from.
+     *
+     * @param startField the starting field index in the range of fields to
+     * read.  To read all fields, the startField should be zero.
+     *
+     * @param endField the ending field index in the range of fields to read.
+     * To read all fields, the endField should be MAX_FIELD_NUM.
+     *
+     * @param superLevel is a non-negative number to read the fields of the
+     * class that is the Nth super instance; or a negative number to read
+     * fields in all classes.
+     */
+    void readSecKeyFields(Object o,
+                          EntityInput input,
+                          int startField,
+                          int endField,
+                          int superLevel);
+
+    /**
+     * Writes all non-key field values to the given EntityOutput, writing
+     * fields in super classes first and in name order within class.
+     *
+     * @param o is the object whose non-key fields are to be written.
+     *
+     * @param output the output data to write to.
+     */
+    void writeNonKeyFields(Object o, EntityOutput output);
+
+    /**
+     * Reads a range of non-key field values from the given EntityInput,
+     * reading fields in super classes first and in name order within class.
+     *
+     * <p>To read all fields, pass -1 for superLevel, zero for startField and
+     * MAX_FIELD_NUM for endField.  Fields from super classes are read
+     * first.</p>
+     *
+     * <p>To read a specific range of fields, pass a non-negative number for
+     * superLevel and the specific indices of the field range to be read in the
+     * class at that level.</p>
+     *
+     * @param o is the object whose non-key fields are to be read.
+     *
+     * @param input the input data to read from.
+     *
+     * @param startField the starting field index in the range of fields to
+     * read.  To read all fields, the startField should be zero.
+     *
+     * @param endField the ending field index in the range of fields to read.
+     * To read all fields, the endField should be MAX_FIELD_NUM.
+     *
+     * @param superLevel is a non-negative number to read the fields of the
+     * class that is the Nth super instance; or a negative number to read
+     * fields in all classes.
+     */
+    void readNonKeyFields(Object o,
+                          EntityInput input,
+                          int startField,
+                          int endField,
+                          int superLevel);
+
+    /**
+     * Returns the value of a given field, representing primitives as primitive
+     * wrapper objects.
+     *
+     * @param o is the object containing the key field.
+     *
+     * @param field is the field index.
+     *
+     * @param superLevel is a positive number to identify the field of the
+     * class that is the Nth super instance; or zero to identify the field in
+     * this class.
+     *
+     * @param isSecField is true for a secondary key field or false for a
+     * non-key field.
+     *
+     * @return the current field value, or null for a reference type field
+     * that is null.
+     */
+    Object getField(Object o,
+                    int field,
+                    int superLevel,
+                    boolean isSecField);
+
+    /**
+     * Changes the value of a given field, representing primitives as primitive
+     * wrapper objects.
+     *
+     * @param o is the object containing the key field.
+     *
+     * @param field is the field index.
+     *
+     * @param superLevel is a positive number to identify the field of the
+     * class that is the Nth super instance; or zero to identify the field in
+     * this class.
+     *
+     * @param isSecField is true for a secondary key field or false for a
+     * non-key field.
+     *
+     * @param value is the new value of the field, or null to set a reference
+     * type field to null.
+     */
+    void setField(Object o,
+                  int field,
+                  int superLevel,
+                  boolean isSecField,
+                  Object value);
+}
diff --git a/src/com/sleepycat/persist/impl/Catalog.java b/src/com/sleepycat/persist/impl/Catalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..2d78efcc4bc1c6942269b17849398adf4e6a5633
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Catalog.java
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Catalog.java,v 1.15.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+import java.util.Map;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Catalog operation interface used by format classes.
+ *
+ * @see PersistCatalog
+ * @see SimpleCatalog
+ * @see ReadOnlyCatalog
+ *
+ * @author Mark Hayes
+ */
+interface Catalog {
+
+    /*
+     * The catalog version is returned by getInitVersion and is the version of
+     * the serialized format classes loaded from the stored catalog.  When a
+     * field is added, for example, the version can be checked to determine how
+     * to initialize the field in Format.initialize.
+     *
+     * -1: The version is considered to be -1 when reading the beta version of
+     * the catalog data.  At this point no version field was stored, but we can
+     * distinguish the beta stored format.  See PersistCatalog.
+     *
+     * 0: The first released version of the catalog data, after beta.  At this
+     * point no version field was stored, but it is initialized to zero when
+     * the PersistCatalog.Data object is de-serialized.
+     *
+     * 1: Add the ComplexFormat.ConvertFieldReader.oldFieldNum field. [#15797]
+     */
+    static final int BETA_VERSION = -1;
+    static final int CURRENT_VERSION = 1;
+
+    /**
+     * See above.
+     */
+    int getInitVersion(Format format, boolean forReader);
+
+    /**
+     * Returns a format for a given ID, or throws an exception.  This method is
+     * used when reading an object from the byte array format.
+     *
+     * @throws IllegalStateException if the formatId does not correspond to a
+     * persistent class.  This is an internal consistency error.
+     */
+    Format getFormat(int formatId);
+
+    /**
+     * Returns a format for a given class, or throws an exception.  This method
+     * is used when writing an object that was passed in by the user.
+     *
+     * @param openEntitySubclassIndexes is true if we're expecting this format
+     * to be an entity subclass and therefore subclass secondary indexes should
+     * be opened.
+     *
+     * @throws IllegalArgumentException if the class is not persistent.  This
+     * is a user error.
+     */
+    Format getFormat(Class cls, boolean openEntitySubclassIndexes);
+
+    /**
+     * Returns a format by class name.  Unlike {@link #getFormat(Class,boolean)}, the
+     * format will not be created if it is not already known.
+     */
+    Format getFormat(String className);
+
+    /**
+     * @see PersistCatalog#createFormat
+     */
+    Format createFormat(String clsName, Map<String,Format> newFormats);
+
+    /**
+     * @see PersistCatalog#createFormat
+     */
+    Format createFormat(Class type, Map<String,Format> newFormats);
+
+    /**
+     * @see PersistCatalog#isRawAccess
+     */
+    boolean isRawAccess();
+
+    /**
+     * @see PersistCatalog#convertRawObject
+     */
+    Object convertRawObject(RawObject o, IdentityHashMap converted);
+}
diff --git a/src/com/sleepycat/persist/impl/CollectionProxy.java b/src/com/sleepycat/persist/impl/CollectionProxy.java
new file mode 100644
index 0000000000000000000000000000000000000000..6919856b1aab0eb9b68e3687ef10fbd6ed219de3
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/CollectionProxy.java
@@ -0,0 +1,164 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CollectionProxy.java,v 1.11.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Proxy for Collection types.
+ *
+ * @author Mark Hayes
+ */
+@Persistent
+abstract class CollectionProxy<E>
+    implements PersistentProxy<Collection<E>> {
+
+    private E[] elements;
+
+    protected CollectionProxy() {}
+
+    public final void initializeProxy(Collection<E> collection) {
+        elements = (E[]) new Object[collection.size()];
+        int i = 0;
+        for (E element : collection) {
+            elements[i] = element;
+            i += 1;
+        }
+    }
+
+    public final Collection<E> convertProxy() {
+        Collection<E> collection = newInstance(elements.length);
+        for (E element : elements) {
+            collection.add(element);
+        }
+        return collection;
+    }
+
+    protected abstract Collection<E> newInstance(int size);
+
+    @Persistent(proxyFor=ArrayList.class)
+    static class ArrayListProxy<E> extends CollectionProxy<E> {
+
+        protected ArrayListProxy() {}
+
+        protected Collection<E> newInstance(int size) {
+            return new ArrayList<E>(size);
+        }
+    }
+
+    @Persistent(proxyFor=LinkedList.class)
+    static class LinkedListProxy<E> extends CollectionProxy<E> {
+
+        protected LinkedListProxy() {}
+
+        protected Collection<E> newInstance(int size) {
+            return new LinkedList<E>();
+        }
+    }
+
+    @Persistent(proxyFor=HashSet.class)
+    static class HashSetProxy<E> extends CollectionProxy<E> {
+
+        protected HashSetProxy() {}
+
+        protected Collection<E> newInstance(int size) {
+            return new HashSet<E>(size);
+        }
+    }
+
+    @Persistent(proxyFor=TreeSet.class)
+    static class TreeSetProxy<E> extends CollectionProxy<E> {
+
+        protected TreeSetProxy() {}
+
+        protected Collection<E> newInstance(int size) {
+            return new TreeSet<E>();
+        }
+    }
+
+    static Object[] getElements(RawObject collection) {
+        Object value = null;
+        while (value == null && collection != null) {
+            Map<String,Object> values = collection.getValues();
+            if (values != null) {
+                value = values.get("elements");
+                if (value == null) {
+                    collection = collection.getSuper();
+                }
+            }
+        }
+        if (value == null || !(value instanceof RawObject)) {
+            throw new IllegalStateException
+                ("Collection proxy for a secondary key field must " +
+                 "contain a field named 'elements'");
+        }
+        RawObject rawObj = (RawObject) value;
+        Format format = (Format) rawObj.getType();
+        if (!format.isArray() ||
+            format.getComponentType().getId() != Format.ID_OBJECT) {
+            throw new IllegalStateException
+                ("Collection proxy 'elements' field must be an Object array");
+        }
+        return rawObj.getElements();
+    }
+
+    static void setElements(RawObject collection, Object[] elements) {
+        RawObject value = null;
+        while (value == null && collection != null) {
+            Map<String,Object> values = collection.getValues();
+            if (values != null) {
+                value = (RawObject) values.get("elements");
+                if (value != null) {
+                    values.put("elements",
+                               new RawObject(value.getType(), elements));
+                } else {
+                    collection = collection.getSuper();
+                }
+            }
+        }
+        if (value == null) {
+            throw new IllegalStateException();
+        }
+    }
+
+    static void copyElements(RecordInput input,
+                             Format format,
+                             Format keyFormat,
+                             Set results) {
+        /*
+         * This could be optimized by traversing the byte format of the
+         * collection's elements array.
+         */
+        RawObject collection = (RawObject) format.newInstance(input, true);
+        collection = (RawObject) format.readObject(collection, input, true);
+        Object[] elements = getElements(collection);
+        if (elements != null) {
+            for (Object elem : elements) {
+                RecordOutput output =
+                    new RecordOutput(input.getCatalog(), true);
+                output.writeKeyObject(elem, keyFormat);
+                DatabaseEntry entry = new DatabaseEntry();
+                TupleBase.outputToEntry(output, entry);
+                results.add(entry);
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ComplexFormat.java b/src/com/sleepycat/persist/impl/ComplexFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..53a91064c398c5f7320b5e05dae115a5adb0e326
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ComplexFormat.java
@@ -0,0 +1,2061 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ComplexFormat.java,v 1.44.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Deleter;
+import com.sleepycat.persist.evolve.EntityConverter;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.model.Relationship;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Format for persistent complex classes that are not composite key classes.
+ * This includes entity classes and subclasses.
+ *
+ * @author Mark Hayes
+ */
+public class ComplexFormat extends Format {
+
+    private static final long serialVersionUID = -2847843033590454917L;
+
+    private ClassMetadata clsMeta;
+    private EntityMetadata entityMeta;
+    private FieldInfo priKeyField;
+    private List<FieldInfo> secKeyFields;
+    private List<FieldInfo> nonKeyFields;
+    private FieldReader secKeyFieldReader;
+    private FieldReader nonKeyFieldReader;
+    private Map<String,String> oldToNewKeyMap;
+    private Map<String,String> newToOldFieldMap;
+    private boolean evolveNeeded;
+    private transient Accessor objAccessor;
+    private transient Accessor rawAccessor;
+    private transient ComplexFormat entityFormat;
+    private transient Map<String,FieldAddress> secKeyAddresses;
+    private transient volatile Map<String,RawField> rawFields;
+    private transient volatile FieldInfo[] rawInputFields;
+    private transient volatile int[] rawInputLevels;
+    private transient volatile int rawInputDepth;
+
+    ComplexFormat(Class cls,
+                  ClassMetadata clsMeta,
+                  EntityMetadata entityMeta) {
+        super(cls);
+        this.clsMeta = clsMeta;
+        this.entityMeta = entityMeta;
+        secKeyFields = new ArrayList<FieldInfo>();
+        nonKeyFields = FieldInfo.getInstanceFields(cls, clsMeta);
+
+        /*
+         * Validate primary key metadata and move primary key field from
+         * nonKeyFields to priKeyField.
+         */
+        if (clsMeta.getPrimaryKey() != null) {
+            String fieldName = clsMeta.getPrimaryKey().getName();
+            FieldInfo field = FieldInfo.getField(nonKeyFields, fieldName);
+            if (field == null) {
+                throw new IllegalArgumentException
+                    ("Primary key field does not exist: " +
+                     getClassName() + '.' + fieldName);
+            }
+            nonKeyFields.remove(field);
+            priKeyField = field;
+        }
+
+        /*
+         * Validate secondary key metadata and move secondary key fields from
+         * nonKeyFields to secKeyFields.
+         */
+        if (clsMeta.getSecondaryKeys() != null) {
+            for (SecondaryKeyMetadata secKeyMeta :
+                 clsMeta.getSecondaryKeys().values()) {
+                String fieldName = secKeyMeta.getName();
+                FieldInfo field = FieldInfo.getField(nonKeyFields, fieldName);
+                if (field == null) {
+                    throw new IllegalArgumentException
+                        ("Secondary key field does not exist: " +
+                         getClassName() + '.' + fieldName);
+                }
+                Class fieldCls = field.getFieldClass();
+                Relationship rel = secKeyMeta.getRelationship();
+                if (rel == Relationship.ONE_TO_MANY ||
+                    rel == Relationship.MANY_TO_MANY) {
+                    if (!PersistKeyCreator.isManyType(fieldCls)) {
+                        throw new IllegalArgumentException
+                            ("ONE_TO_MANY and MANY_TO_MANY keys must" +
+                             " have an array or Collection type: " +
+                             getClassName() + '.' + fieldName);
+                    }
+                } else {
+                    if (PersistKeyCreator.isManyType(fieldCls)) {
+                        throw new IllegalArgumentException
+                            ("ONE_TO_ONE and MANY_TO_ONE keys must not" +
+                             " have an array or Collection type: " +
+                             getClassName() + '.' + fieldName);
+                    }
+                }
+                nonKeyFields.remove(field);
+                secKeyFields.add(field);
+            }
+        }
+
+        /* Sort each group of fields by name. */
+        Collections.sort(secKeyFields);
+        Collections.sort(nonKeyFields);
+    }
+
+    @Override
+    void migrateFromBeta(Map<String,Format> formatMap) {
+        super.migrateFromBeta(formatMap);
+        if (priKeyField != null) {
+            priKeyField.migrateFromBeta(formatMap);
+        }
+        for (FieldInfo field : secKeyFields) {
+            field.migrateFromBeta(formatMap);
+        }
+        for (FieldInfo field : nonKeyFields) {
+            field.migrateFromBeta(formatMap);
+        }
+    }
+
+    /**
+     * Returns getSuperFormat cast to ComplexFormat.  It is guaranteed that all
+     * super formats of a ComplexFormat are a ComplexFormat.
+     */
+    private ComplexFormat getComplexSuper() {
+        return (ComplexFormat) getSuperFormat();
+    }
+
+    /**
+     * Returns getLatestVersion cast to ComplexFormat.  It is guaranteed that
+     * all versions of a ComplexFormat are a ComplexFormat.
+     */
+    private ComplexFormat getComplexLatest() {
+        return (ComplexFormat) getLatestVersion();
+    }
+
+    String getPriKeyField() {
+        if (clsMeta.getPrimaryKey() != null) {
+            return clsMeta.getPrimaryKey().getName();
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    boolean isEntity() {
+        return clsMeta.isEntityClass();
+    }
+
+    @Override
+    boolean isModelClass() {
+        return true;
+    }
+
+    @Override
+    public ClassMetadata getClassMetadata() {
+        return clsMeta;
+    }
+
+    @Override
+    public EntityMetadata getEntityMetadata() {
+        return entityMeta;
+    }
+
+    @Override
+    ComplexFormat getEntityFormat() {
+        if (isInitialized()) {
+            /* The transient entityFormat field is set by initialize(). */
+            return entityFormat;
+        } else {
+
+            /*
+             * If not initialized, the entity format can be found by traversing
+             * the super formats.  However, this is only possible for an
+             * existing format which has its superFormat field set.
+             */
+            if (isNew()) {
+                throw new IllegalStateException(toString());
+            }
+            for (ComplexFormat format = this;
+                 format != null;
+                 format = format.getComplexSuper()) {
+                if (format.isEntity()) {
+                    return format;
+                }
+            }
+            return null;
+        }
+    }
+
+    @Override
+    void setEvolveNeeded(boolean needed) {
+        evolveNeeded = needed;
+    }
+
+    @Override
+    boolean getEvolveNeeded() {
+        return evolveNeeded;
+    }
+
+    @Override
+    public Map<String,RawField> getFields() {
+
+        /*
+         * Synchronization is not required since rawFields is immutable.  If
+         * by chance we create two maps when two threads execute this block, no
+         * harm is done.  But be sure to assign the rawFields field only after
+         * the map is fully populated.
+         */
+        if (rawFields == null) {
+            Map<String,RawField> map = new HashMap<String,RawField>();
+            if (priKeyField != null) {
+                map.put(priKeyField.getName(), priKeyField);
+            }
+            for (RawField field : secKeyFields) {
+                map.put(field.getName(), field);
+            }
+            for (RawField field : nonKeyFields) {
+                map.put(field.getName(), field);
+            }
+            rawFields = map;
+        }
+        return rawFields;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+        Class cls = getType();
+        /* Collect field formats. */
+        if (priKeyField != null) {
+            priKeyField.collectRelatedFormats(catalog, newFormats);
+        }
+        for (FieldInfo field : secKeyFields) {
+            field.collectRelatedFormats(catalog, newFormats);
+        }
+        for (FieldInfo field : nonKeyFields) {
+            field.collectRelatedFormats(catalog, newFormats);
+        }
+        /* Collect TO_MANY secondary key field element class formats. */
+        if (entityMeta != null) {
+            for (SecondaryKeyMetadata secKeyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+                String elemClsName = secKeyMeta.getElementClassName();
+                if (elemClsName != null) {
+                    Class elemCls =
+                        SimpleCatalog.keyClassForName(elemClsName);
+                    catalog.createFormat(elemCls, newFormats);
+                }
+            }
+        }
+        /* Recursively collect superclass formats. */
+        Class superCls = cls.getSuperclass();
+        if (superCls != Object.class) {
+            Format superFormat = catalog.createFormat(superCls, newFormats);
+            if (!(superFormat instanceof ComplexFormat)) {
+                throw new IllegalArgumentException
+                    ("The superclass of a complex type must not be a" +
+                     " composite key class or a simple type class: " +
+                     superCls.getName());
+            }
+        }
+        /* Collect proxied format. */
+        String proxiedClsName = clsMeta.getProxiedClassName();
+        if (proxiedClsName != null) {
+            catalog.createFormat(proxiedClsName, newFormats);
+        }
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+        Class type = getType();
+        boolean useEnhanced = false;
+        if (type != null) {
+            useEnhanced = EnhancedAccessor.isEnhanced(type);
+        }
+        /* Initialize all fields. */
+        if (priKeyField != null) {
+            priKeyField.initialize(catalog, model, initVersion);
+        }
+        for (FieldInfo field : secKeyFields) {
+            field.initialize(catalog, model, initVersion);
+        }
+        for (FieldInfo field : nonKeyFields) {
+            field.initialize(catalog, model, initVersion);
+        }
+        /* Set the superclass format for a new (never initialized) format. */
+        ComplexFormat superFormat = getComplexSuper();
+        if (type != null && superFormat == null) {
+            Class superCls = type.getSuperclass();
+            if (superCls != Object.class) {
+                superFormat =
+                    (ComplexFormat) catalog.getFormat(superCls.getName());
+                setSuperFormat(superFormat);
+            }
+        }
+        /* Initialize the superclass format and validate the super accessor. */
+        if (superFormat != null) {
+            superFormat.initializeIfNeeded(catalog, model);
+            Accessor superAccessor = superFormat.objAccessor;
+            if (type != null && superAccessor != null) {
+                if (useEnhanced) {
+                    if (!(superAccessor instanceof EnhancedAccessor)) {
+                        throw new IllegalStateException
+                            ("The superclass of an enhanced class must also " +
+                             "be enhanced: " + getClassName() +
+                             " extends " + superFormat.getClassName());
+                    }
+                } else {
+                    if (!(superAccessor instanceof ReflectionAccessor)) {
+                        throw new IllegalStateException
+                            ("The superclass of an unenhanced class must " +
+                             "not be enhanced: " + getClassName() +
+                             " extends " + superFormat.getClassName());
+                    }
+                }
+            }
+        }
+        /* Find entity format, if any. */
+        for (ComplexFormat format = this;
+             format != null;
+             format = format.getComplexSuper()) {
+            if (format.isEntity()) {
+                entityFormat = format;
+                break;
+            }
+        }
+
+        /*
+         * Ensure that the current entity metadata is always referenced in
+         * order to return it to the user and to properly construct secondary
+         * key addresses.  Secondary key metadata can change in an entity
+         * subclass or be created when a new subclass is used, but this will
+         * not cause evolution of the entity class; instead, the metadata is
+         * updated here.  [#16467]
+         */
+        if (isEntity() && isCurrentVersion()) {
+            entityMeta = model.getEntityMetadata(getClassName());
+        }
+
+        /* Disallow proxy class that extends an entity class. [#15950] */
+        if (clsMeta.getProxiedClassName() != null && entityFormat != null) {
+            throw new IllegalArgumentException
+                ("A proxy may not be an entity: " + getClassName());
+        }
+        /* Disallow primary keys on entity subclasses.  [#15757] */
+        if (entityFormat != null &&
+            entityFormat != this &&
+            priKeyField != null) {
+            throw new IllegalArgumentException
+                ("A PrimaryKey may not appear on an Entity subclass: " +
+                 getClassName() + " field: " + priKeyField.getName());
+        }
+        /* Create the accessors. */
+        if (type != null) {
+            if (useEnhanced) {
+                objAccessor = new EnhancedAccessor(catalog, type, this);
+            } else {
+                Accessor superObjAccessor =
+                    (superFormat != null) ?  superFormat.objAccessor : null;
+                objAccessor = new ReflectionAccessor
+                    (catalog, type, superObjAccessor, priKeyField,
+                     secKeyFields, nonKeyFields);
+            }
+        }
+        Accessor superRawAccessor =
+            (superFormat != null) ? superFormat.rawAccessor : null;
+        rawAccessor = new RawAccessor
+            (this, superRawAccessor, priKeyField, secKeyFields, nonKeyFields);
+
+        /* Initialize secondary key field addresses. */
+        EntityMetadata latestEntityMeta = null;
+        if (entityFormat != null) {
+            latestEntityMeta =
+                entityFormat.getLatestVersion().getEntityMetadata();
+        }
+        if (latestEntityMeta != null) {
+            secKeyAddresses = new HashMap<String,FieldAddress>();
+            ComplexFormat thisLatest = getComplexLatest();
+            if (thisLatest != this) {
+                thisLatest.initializeIfNeeded(catalog, model);
+            }
+            nextKeyLoop:
+            for (SecondaryKeyMetadata secKeyMeta :
+                 latestEntityMeta.getSecondaryKeys().values()) {
+                String clsName = secKeyMeta.getDeclaringClassName();
+                String fieldName = secKeyMeta.getName();
+                int superLevel = 0;
+                for (ComplexFormat format = this;
+                     format != null;
+                     format = format.getComplexSuper()) {
+                    if (clsName.equals
+                            (format.getLatestVersion().getClassName())) {
+                        String useFieldName = null;
+                        if (format.newToOldFieldMap != null &&
+                            format.newToOldFieldMap.containsKey(fieldName)) {
+                            useFieldName =
+                                format.newToOldFieldMap.get(fieldName);
+                        } else {
+                            useFieldName = fieldName;
+                        }
+                        boolean isSecField;
+                        int fieldNum;
+                        FieldInfo info = FieldInfo.getField
+                            (format.secKeyFields, useFieldName);
+                        if (info != null) {
+                            isSecField = true;
+                            fieldNum = format.secKeyFields.indexOf(info);
+                        } else {
+                            isSecField = false;
+                            info = FieldInfo.getField
+                                (format.nonKeyFields, useFieldName);
+                            if (info == null) {
+                                /* Field not present in old format. */
+                                assert thisLatest != this;
+                                thisLatest.checkNewSecKeyInitializer
+                                    (secKeyMeta);
+                                continue nextKeyLoop;
+                            }
+                            fieldNum = format.nonKeyFields.indexOf(info);
+                        }
+                        FieldAddress addr = new FieldAddress
+                            (isSecField, fieldNum, superLevel, format,
+                             info.getType());
+                        secKeyAddresses.put(secKeyMeta.getKeyName(), addr);
+                    }
+                    superLevel += 1;
+                }
+            }
+        }
+    }
+
+    /**
+     * Checks that the type of a new secondary key is not a primitive and that
+     * the default constructor does not initialize it to a non-null value.
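+     *
+     * For example, a newly added key field declared as
+     * "private Integer deptId = 0" (hypothetical field) would be rejected,
+     * because the default constructor leaves the field non-null.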
+     */
+    private void checkNewSecKeyInitializer(SecondaryKeyMetadata secKeyMeta) {
+        if (objAccessor != null) {
+            FieldAddress addr = secKeyAddresses.get(secKeyMeta.getKeyName());
+            Object obj = objAccessor.newInstance();
+            Object val = objAccessor.getField
+                (obj, addr.fieldNum, addr.superLevel, addr.isSecField);
+            if (val != null) {
+                if (addr.keyFormat.isPrimitive()) {
+                    throw new IllegalArgumentException
+                        ("For a new secondary key field the field type must " +
+                         "not be a primitive -- class: " +
+                         secKeyMeta.getDeclaringClassName() + " field: " +
+                         secKeyMeta.getName());
+                } else {
+                    throw new IllegalArgumentException
+                        ("For a new secondary key field the default " +
+                         "constructor must not initialize the field to a " +
+                         "non-null value -- class: " +
+                         secKeyMeta.getDeclaringClassName() + " field: " +
+                         secKeyMeta.getName());
+                }
+            }
+        }
+    }
+
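+    /**
+     * Returns whether both objects are null or o1.equals(o2) is true.
+     */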
+    private boolean nullOrEqual(Object o1, Object o2) {
+        if (o1 == null) {
+            return o2 == null;
+        } else {
+            return o1.equals(o2);
+        }
+    }
+
+    @Override
+    Object newArray(int len) {
+        return objAccessor.newArray(len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        return accessor.newInstance();
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.readSecKeyFields(o, input, 0, Accessor.MAX_FIELD_NUM, -1);
+        accessor.readNonKeyFields(o, input, 0, Accessor.MAX_FIELD_NUM, -1);
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.writeSecKeyFields(o, output);
+        accessor.writeNonKeyFields(o, output);
+    }
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        /*
+         * Synchronization is not required since rawInputFields, rawInputLevels
+         * and rawInputDepth are immutable.  If by chance we create duplicate
+         * values when two threads execute this block, no harm is done.  But be
+         * sure to assign the fields only after the values are fully populated.
+         */
+        FieldInfo[] fields = rawInputFields;
+        int[] levels = rawInputLevels;
+        int depth = rawInputDepth;
+        if (fields == null || levels == null || depth == 0) {
+
+            /*
+             * The volatile fields are not yet set.  Prepare to process the
+             * class hierarchy, storing class formats in order from the highest
+             * superclass down to the current class.
+             */
+            depth = 0;
+            int nFields = 0;
+            for (ComplexFormat format = this;
+                 format != null;
+                 format = format.getComplexSuper()) {
+                nFields += format.getNFields();
+                depth += 1;
+            }
+            ComplexFormat[] hierarchy = new ComplexFormat[depth];
+            int level = depth;
+            for (ComplexFormat format = this;
+                 format != null;
+                 format = format.getComplexSuper()) {
+                level -= 1;
+                hierarchy[level] = format;
+            }
+            assert level == 0;
+
+            /* Populate levels and fields in parallel. */
+            levels = new int[nFields];
+            fields = new FieldInfo[nFields];
+            int index = 0;
+
+            /*
+             * The primary key is the first field read/written.  We use the
+             * first primary key field encountered going from this class upward
+             * in the class hierarchy.
+             */
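+
+            /*
+             * For example (hypothetical classes), for B extends A where A
+             * declares the primary key, the stored field order is: A.priKey,
+             * A.secKeyFields, B.secKeyFields, A.nonKeyFields, B.nonKeyFields.
+             */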
+            if (getEntityFormat() != null) {
+                for (level = depth - 1; level >= 0; level -= 1) {
+                    ComplexFormat format = hierarchy[level];
+                    if (format.priKeyField != null) {
+                        levels[index] = level;
+                        fields[index] = format.priKeyField;
+                        index += 1;
+                        break;
+                    }
+                }
+                assert index == 1;
+            }
+
+            /*
+             * Secondary key fields are read/written next, from the highest
+             * base class downward.
+             */
+            for (level = 0; level < depth; level += 1) {
+                ComplexFormat format = hierarchy[level];
+                for (FieldInfo field : format.secKeyFields) {
+                    levels[index] = level;
+                    fields[index] = field;
+                    index += 1;
+                }
+            }
+
+            /*
+             * Other fields are read/written last, from the highest base class
+             * downward.
+             */
+            for (level = 0; level < depth; level += 1) {
+                ComplexFormat format = hierarchy[level];
+                for (FieldInfo field : format.nonKeyFields) {
+                    levels[index] = level;
+                    fields[index] = field;
+                    index += 1;
+                }
+            }
+
+            /* We're finished -- update the volatile fields for next time. */
+            assert index == fields.length;
+            rawInputFields = fields;
+            rawInputLevels = levels;
+            rawInputDepth = depth;
+        }
+
+        /*
+         * Create an objects array that is parallel to the fields and levels
+         * arrays, but contains the RawObject for each slot from which the
+         * field value can be retrieved.  The predetermined level for each
+         * field determines which RawObject in the instance hierarchy to use.
+         */
+        RawObject[] objectsByLevel = new RawObject[depth];
+        int level = depth;
+        for (RawObject raw = rawObject; raw != null; raw = raw.getSuper()) {
+            if (level == 0) {
+                throw new IllegalArgumentException
+                    ("RawObject has too many superclasses: " +
+                     rawObject.getType().getClassName());
+            }
+            level -= 1;
+            objectsByLevel[level] = raw;
+        }
+        if (level > 0) {
+            throw new IllegalArgumentException
+                ("RawObject has too few superclasses: " +
+                 rawObject.getType().getClassName());
+        }
+        assert level == 0;
+        RawObject[] objects = new RawObject[fields.length];
+        for (int i = 0; i < objects.length; i += 1) {
+            objects[i] = objectsByLevel[levels[i]];
+        }
+
+        /* Create the persistent object and convert all RawObject fields. */
+        EntityInput in = new RawComplexInput
+            (catalog, rawAccess, converted, fields, objects);
+        Object o = newInstance(in, rawAccess);
+        converted.put(rawObject, o);
+        if (getEntityFormat() != null) {
+            readPriKey(o, in, rawAccess);
+        }
+        return readObject(o, in, rawAccess);
+    }
+
+    @Override
+    boolean isPriKeyNullOrZero(Object o, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        return accessor.isPriKeyFieldNullOrZero(o);
+    }
+
+    @Override
+    void writePriKey(Object o, EntityOutput output, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.writePriKeyField(o, output);
+    }
+
+    @Override
+    public void readPriKey(Object o, EntityInput input, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.readPriKeyField(o, input);
+    }
+
+    @Override
+    boolean nullifySecKey(Catalog catalog,
+                          Object entity,
+                          String keyName,
+                          Object keyElement) {
+        if (secKeyAddresses == null) {
+            throw new IllegalStateException();
+        }
+        FieldAddress addr = secKeyAddresses.get(keyName);
+        if (addr != null) {
+            Object oldVal = rawAccessor.getField
+                (entity, addr.fieldNum, addr.superLevel, addr.isSecField);
+            if (oldVal != null) {
+                if (keyElement != null) {
+                    RawObject container = (RawObject) oldVal;
+                    Object[] a1 = container.getElements();
+                    boolean isArray = (a1 != null);
+                    if (!isArray) {
+                        a1 = CollectionProxy.getElements(container);
+                    }
+                    if (a1 != null) {
+                        for (int i = 0; i < a1.length; i += 1) {
+                            if (keyElement.equals(a1[i])) {
+                                int len = a1.length - 1;
+                                Object[] a2 = new Object[len];
+                                System.arraycopy(a1, 0, a2, 0, i);
+                                System.arraycopy(a1, i + 1, a2, i, len - i);
+                                if (isArray) {
+                                    rawAccessor.setField
+                                        (entity, addr.fieldNum,
+                                         addr.superLevel, addr.isSecField,
+                                         new RawObject
+                                            (container.getType(), a2));
+                                } else {
+                                    CollectionProxy.setElements(container, a2);
+                                }
+                                return true;
+                            }
+                        }
+                    }
+                    return false;
+                } else {
+                    rawAccessor.setField
+                        (entity, addr.fieldNum, addr.superLevel,
+                         addr.isSecField, null);
+                    return true;
+                }
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        skipToSecKeyField(input, Accessor.MAX_FIELD_NUM);
+        skipToNonKeyField(input, Accessor.MAX_FIELD_NUM);
+    }
+
+    @Override
+    void copySecMultiKey(RecordInput input, Format keyFormat, Set results) {
+        CollectionProxy.copyElements(input, this, keyFormat, results);
+    }
+
+    @Override
+    Format skipToSecKey(RecordInput input, String keyName) {
+        if (secKeyAddresses == null) {
+            throw new IllegalStateException();
+        }
+        FieldAddress addr = secKeyAddresses.get(keyName);
+        if (addr != null) {
+            if (addr.isSecField) {
+                addr.clsFormat.skipToSecKeyField(input, addr.fieldNum);
+            } else {
+                skipToSecKeyField(input, Accessor.MAX_FIELD_NUM);
+                addr.clsFormat.skipToNonKeyField(input, addr.fieldNum);
+            }
+            return addr.keyFormat;
+        } else {
+            return null;
+        }
+    }
+
+    private int getNFields() {
+        return ((priKeyField != null) ? 1 : 0) +
+               secKeyFields.size() +
+               nonKeyFields.size();
+    }
+
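+    /**
+     * Skips this format's secondary key fields with index less than
+     * toFieldNum, after first skipping all secondary key fields of the
+     * superclass hierarchy.
+     */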
+    private void skipToSecKeyField(RecordInput input, int toFieldNum) {
+        ComplexFormat superFormat = getComplexSuper();
+        if (superFormat != null) {
+            superFormat.skipToSecKeyField(input, Accessor.MAX_FIELD_NUM);
+        }
+        int maxNum = Math.min(secKeyFields.size(), toFieldNum);
+        for (int i = 0; i < maxNum; i += 1) {
+            input.skipField(secKeyFields.get(i).getType());
+        }
+    }
+
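+    /**
+     * Skips this format's non-key fields with index less than toFieldNum,
+     * after first skipping all non-key fields of the superclass hierarchy.
+     */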
+    private void skipToNonKeyField(RecordInput input, int toFieldNum) {
+        ComplexFormat superFormat = getComplexSuper();
+        if (superFormat != null) {
+            superFormat.skipToNonKeyField(input, Accessor.MAX_FIELD_NUM);
+        }
+        int maxNum = Math.min(nonKeyFields.size(), toFieldNum);
+        for (int i = 0; i < maxNum; i += 1) {
+            input.skipField(nonKeyFields.get(i).getType());
+        }
+    }
+
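+    /**
+     * Identifies the position of a secondary key field within a class
+     * hierarchy: whether it is a secondary key or non-key field, its index
+     * in that field list, its superclass level relative to this format (zero
+     * for this class), the declaring class format, and the format of the key
+     * field itself.
+     */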
+    private static class FieldAddress {
+
+        boolean isSecField;
+        int fieldNum;
+        int superLevel;
+        ComplexFormat clsFormat;
+        Format keyFormat;
+
+        FieldAddress(boolean isSecField,
+                     int fieldNum,
+                     int superLevel,
+                     ComplexFormat clsFormat,
+                     Format keyFormat) {
+            this.isSecField = isSecField;
+            this.fieldNum = fieldNum;
+            this.superLevel = superLevel;
+            this.clsFormat = clsFormat;
+            this.keyFormat = keyFormat;
+        }
+    }
+
+    @Override
+    boolean evolve(Format newFormatParam, Evolver evolver) {
+
+        /* Disallow evolution to a non-complex format. */
+        if (!(newFormatParam instanceof ComplexFormat)) {
+            evolver.addMissingMutation
+                (this, newFormatParam,
+                 "Converter is required when a complex type is changed " +
+                 "to a simple type or enum type");
+            return false;
+        }
+        ComplexFormat newFormat = (ComplexFormat) newFormatParam;
+        Mutations mutations = evolver.getMutations();
+        boolean thisChanged = false;
+        boolean hierarchyChanged = false;
+        Map<String,String> allKeyNameMap = new HashMap<String,String>();
+
+        /* The Evolver has already ensured that entities evolve to entities. */
+        assert isEntity() == newFormat.isEntity();
+        assert isEntity() == (entityMeta != null);
+        assert newFormat.isEntity() == (newFormat.entityMeta != null);
+
+        /*
+         * Keep track of the old and new entity class names for use in deleting
+         * and renaming secondary keys below.  A non-null oldEntityClass also
+         * signifies that this format is an entity class or subclass.  Note that
+         * getEntityFormat cannot be called on a newly created format during
+         * evolution because its super format property is not yet initialized.
+         * [#16253]
+         */
+        String oldEntityClass;
+        String newEntityClass;
+        if (isEntity()) {
+            oldEntityClass = getClassName();
+            newEntityClass = newFormat.getClassName();
+        } else {
+            oldEntityClass = null;
+            newEntityClass = null;
+        }
+
+        /*
+         * Evolve all superclass formats, even when a deleted class appears in
+         * the hierarchy.  This ensures that the super format's
+         * getLatestVersion/getComplexLatest method can be used accurately
+         * below.
+         */
+        for (ComplexFormat oldSuper = getComplexSuper();
+             oldSuper != null;
+             oldSuper = oldSuper.getComplexSuper()) {
+            Converter converter = mutations.getConverter
+                (oldSuper.getClassName(), oldSuper.getVersion(), null);
+            if (converter != null) {
+                evolver.addMissingMutation
+                    (this, newFormatParam,
+                     "Converter is required for this subclass when a " +
+                     "Converter appears on its superclass: " + converter);
+                return false;
+            }
+            if (!evolver.evolveFormat(oldSuper)) {
+                return false;
+            }
+            if (!oldSuper.isCurrentVersion()) {
+                if (oldSuper.isDeleted()) {
+                    if (!oldSuper.evolveDeletedClass(evolver)) {
+                        return false;
+                    }
+                }
+                if (oldSuper.oldToNewKeyMap != null) {
+                    allKeyNameMap.putAll(oldSuper.oldToNewKeyMap);
+                }
+                hierarchyChanged = true;
+            }
+        }
+
+        /*
+         * Compare the old and new class hierarchies and decide whether each
+         * change is allowed or not:
+         * + Old deleted and removed superclass -- allowed
+         * + Old empty and removed superclass -- allowed
+         * + Old non-empty and removed superclass -- not allowed
+         * + Old superclass repositioned in the hierarchy -- not allowed
+         * + New inserted superclass -- allowed
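+         *
+         * For example (hypothetical classes), inserting C by changing
+         * "B extends A" to "B extends C" with "C extends A" is allowed,
+         * while removing a non-empty A is allowed only when A or all of its
+         * persistent fields have been deleted with a Deleter.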
+         */
+        Class newFormatCls = newFormat.getExistingType();
+        Class newSuper = newFormatCls;
+        List<Integer> newLevels = new ArrayList<Integer>();
+        int newLevel = 0;
+        newLevels.add(newLevel);
+
+        /*
+         * When this format has a new superclass, we treat it as a change to
+         * this format as well as to the superclass hierarchy.
+         */
+        if (getSuperFormat() == null) {
+            if (newFormatCls.getSuperclass() != Object.class) {
+                thisChanged = true;
+                hierarchyChanged = true;
+            }
+        } else {
+            if (!getSuperFormat().getLatestVersion().getClassName().equals
+                    (newFormatCls.getSuperclass().getName())) {
+                thisChanged = true;
+                hierarchyChanged = true;
+            }
+        }
+
+        for (ComplexFormat oldSuper = getComplexSuper();
+             oldSuper != null;
+             oldSuper = oldSuper.getComplexSuper()) {
+
+            /* Find the matching superclass in the new hierarchy. */
+            String oldSuperName = oldSuper.getLatestVersion().getClassName();
+            Class foundNewSuper = null;
+            int tryNewLevel = newLevel;
+            for (Class newSuper2 = newSuper.getSuperclass();
+                 newSuper2 != Object.class;
+                 newSuper2 = newSuper2.getSuperclass()) {
+                tryNewLevel += 1;
+                if (oldSuperName.equals(newSuper2.getName())) {
+                    foundNewSuper = newSuper2;
+                    newLevel = tryNewLevel;
+                    if (oldSuper.isEntity()) {
+                        assert oldEntityClass == null;
+                        assert newEntityClass == null;
+                        oldEntityClass = oldSuper.getClassName();
+                        newEntityClass = foundNewSuper.getName();
+                    }
+                    break;
+                }
+            }
+
+            if (foundNewSuper != null) {
+
+                /*
+                 * We found the old superclass in the new hierarchy.  Traverse
+                 * through the superclass formats that were skipped over above
+                 * when finding it.
+                 */
+                for (Class newSuper2 = newSuper.getSuperclass();
+                     newSuper2 != foundNewSuper;
+                     newSuper2 = newSuper2.getSuperclass()) {
+
+                    /*
+                     * The class hierarchy changed -- a new class was inserted.
+                     */
+                    hierarchyChanged = true;
+
+                    /*
+                     * Check that the new formats skipped over above are not at
+                     * a different position in the old hierarchy.
+                     */
+                    for (ComplexFormat oldSuper2 = oldSuper.getComplexSuper();
+                         oldSuper2 != null;
+                         oldSuper2 = oldSuper2.getComplexSuper()) {
+                        String oldSuper2Name =
+                            oldSuper2.getLatestVersion().getClassName();
+                        if (oldSuper2Name.equals(newSuper2.getName())) {
+                            evolver.addMissingMutation
+                                (this, newFormatParam,
+                                 "Class Converter is required when a " +
+                                 "superclass is moved in the class " +
+                                 "hierarchy: " + newSuper2.getName());
+                            return false;
+                        }
+                    }
+                }
+                newSuper = foundNewSuper;
+                newLevels.add(newLevel);
+            } else {
+
+                /*
+                 * We did not find the old superclass in the new hierarchy.
+                 * The class hierarchy changed, since an old class no longer
+                 * appears.
+                 */
+                hierarchyChanged = true;
+
+                /* Check that the old class can be safely removed. */
+                if (!oldSuper.isDeleted()) {
+                    ComplexFormat oldSuperLatest =
+                        oldSuper.getComplexLatest();
+                    if (oldSuperLatest.getNFields() != 0) {
+                        evolver.addMissingMutation
+                            (this, newFormatParam,
+                             "When a superclass is removed from the class " +
+                             "hierarchy, the superclass or all of its " +
+                             "persistent fields must be deleted with a " +
+                             "Deleter: " +
+                             oldSuperLatest.getClassName());
+                        return false;
+                    }
+                }
+
+                if (oldEntityClass != null && isCurrentVersion()) {
+                    Map<String,SecondaryKeyMetadata> secKeys =
+                        oldSuper.clsMeta.getSecondaryKeys();
+                    for (FieldInfo field : oldSuper.secKeyFields) {
+                        SecondaryKeyMetadata meta =
+                            secKeys.get(field.getName());
+                        assert meta != null;
+                        allKeyNameMap.put(meta.getKeyName(), null);
+                    }
+                }
+
+                /*
+                 * Add the DO_NOT_READ_ACCESSOR level to prevent an empty class
+                 * (no persistent fields) from being read via the Accessor.
+                 */
+                newLevels.add(EvolveReader.DO_NOT_READ_ACCESSOR);
+            }
+        }
+
+        /* Make FieldReaders for this format if needed. */
+        int result = evolveAllFields(newFormat, evolver);
+        if (result == Evolver.EVOLVE_FAILURE) {
+            return false;
+        }
+        if (result == Evolver.EVOLVE_NEEDED) {
+            thisChanged = true;
+        }
+        if (oldToNewKeyMap != null) {
+            allKeyNameMap.putAll(oldToNewKeyMap);
+        }
+
+        /* Require new version number if this class was changed. */
+        if (thisChanged &&
+            !evolver.checkUpdatedVersion
+                ("Changes to the fields or superclass were detected", this,
+                 newFormat)) {
+            return false;
+        }
+
+        /* Rename and delete the secondary databases. */
+        if (allKeyNameMap.size() > 0 &&
+            oldEntityClass != null &&
+            newEntityClass != null &&
+            isCurrentVersion()) {
+            for (Map.Entry<String,String> entry : allKeyNameMap.entrySet()) {
+                String oldKeyName = entry.getKey();
+                String newKeyName = entry.getValue();
+                if (newKeyName != null) {
+                    evolver.renameSecondaryDatabase
+                        (oldEntityClass, newEntityClass,
+                         oldKeyName, newKeyName);
+                } else {
+                    evolver.deleteSecondaryDatabase
+                        (oldEntityClass, oldKeyName);
+                }
+            }
+        }
+
+        /* Use an EvolveReader if needed. */
+        if (hierarchyChanged || thisChanged) {
+            Reader reader = new EvolveReader(newLevels);
+            evolver.useEvolvedFormat(this, reader, newFormat);
+        } else {
+            evolver.useOldFormat(this, newFormat);
+        }
+        return true;
+    }
+
+    @Override
+    boolean evolveMetadata(Format newFormatParam,
+                           Converter converter,
+                           Evolver evolver) {
+        assert !isDeleted();
+        assert isEntity();
+        assert newFormatParam.isEntity();
+        ComplexFormat newFormat = (ComplexFormat) newFormatParam;
+
+        if (!checkKeyTypeChange
+                (newFormat, entityMeta.getPrimaryKey(),
+                 newFormat.entityMeta.getPrimaryKey(), "primary key",
+                 evolver)) {
+            return false;
+        }
+
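+        /*
+         * An EntityConverter declares the secondary keys dropped by the
+         * conversion, e.g. (hypothetical names):
+         *
+         *   new EntityConverter("my.package.Person", 1,
+         *                       new PersonConversion(),
+         *                       Collections.singleton("oldKeyName"));
+         */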
+        Set<String> deletedKeys;
+        if (converter instanceof EntityConverter) {
+            EntityConverter entityConverter = (EntityConverter) converter;
+            deletedKeys = entityConverter.getDeletedKeys();
+        } else {
+            deletedKeys = Collections.emptySet();
+        }
+
+        Map<String,SecondaryKeyMetadata> oldSecondaryKeys =
+            entityMeta.getSecondaryKeys();
+        Map<String,SecondaryKeyMetadata> newSecondaryKeys =
+            newFormat.entityMeta.getSecondaryKeys();
+        Set<String> insertedKeys =
+            new HashSet<String>(newSecondaryKeys.keySet());
+
+        for (SecondaryKeyMetadata oldMeta : oldSecondaryKeys.values()) {
+            String keyName = oldMeta.getKeyName();
+            if (deletedKeys.contains(keyName)) {
+                if (isCurrentVersion()) {
+                    evolver.deleteSecondaryDatabase(getClassName(), keyName);
+                }
+            } else {
+                SecondaryKeyMetadata newMeta = newSecondaryKeys.get(keyName);
+                if (newMeta == null) {
+                    evolver.addInvalidMutation
+                        (this, newFormat, converter,
+                         "Existing key not found in new entity metadata: " +
+                          keyName);
+                    return false;
+                }
+                insertedKeys.remove(keyName);
+                String keyLabel = "secondary key: " + keyName;
+                if (!checkKeyTypeChange
+                        (newFormat, oldMeta, newMeta, keyLabel, evolver)) {
+                    return false;
+                }
+                if (!checkSecKeyMetadata
+                        (newFormat, oldMeta, newMeta, evolver)) {
+                    return false;
+                }
+            }
+        }
+
+        if (!insertedKeys.isEmpty()) {
+            evolver.addEvolveError
+                (this, newFormat, "Error",
+                 "New keys " + insertedKeys +
+                 " not allowed when using a Converter with an entity class");
+        }
+
+        return true;
+    }
+
+    /**
+     * Checks that changes to secondary key metadata are legal.
+     */
+    private boolean checkSecKeyMetadata(Format newFormat,
+                                        SecondaryKeyMetadata oldMeta,
+                                        SecondaryKeyMetadata newMeta,
+                                        Evolver evolver) {
+        if (oldMeta.getRelationship() != newMeta.getRelationship()) {
+            evolver.addEvolveError
+                (this, newFormat,
+                 "Change detected in the relate attribute (Relationship) " +
+                 "of a secondary key",
+                 "Old key: " + oldMeta.getKeyName() +
+                 " relate: " + oldMeta.getRelationship() +
+                 " new key: " + newMeta.getKeyName() +
+                 " relate: " + newMeta.getRelationship());
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Checks that the type of a key field did not change, as known from
+     * metadata when a class conversion is used.
+     */
+    private boolean checkKeyTypeChange(Format newFormat,
+                                       FieldMetadata oldMeta,
+                                       FieldMetadata newMeta,
+                                       String keyLabel,
+                                       Evolver evolver) {
+        String oldClass = oldMeta.getClassName();
+        String newClass = newMeta.getClassName();
+        if (!oldClass.equals(newClass)) {
+            SimpleCatalog catalog = SimpleCatalog.getInstance();
+            Format oldType = catalog.getFormat(oldClass);
+            Format newType = catalog.getFormat(newClass);
+            if (oldType == null || newType == null ||
+                ((oldType.getWrapperFormat() == null ||
+                  oldType.getWrapperFormat().getId() !=
+                  newType.getId()) &&
+                 (newType.getWrapperFormat() == null ||
+                  newType.getWrapperFormat().getId() !=
+                  oldType.getId()))) {
+                evolver.addEvolveError
+                    (this, newFormat,
+                     "Type change detected for " + keyLabel,
+                     "Old field type: " + oldClass +
+                     " is not compatible with the new type: " +
+                     newClass +
+                     " old field: " + oldMeta.getName() +
+                     " new field: " + newMeta.getName());
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Special case for creating FieldReaders for a deleted class when it
+     * appears in the class hierarchy of its non-deleted subclass.
+     */
+    private boolean evolveDeletedClass(Evolver evolver) {
+        assert isDeleted();
+        if (secKeyFieldReader == null || nonKeyFieldReader == null) {
+            if (priKeyField != null &&
+                getEntityFormat() != null &&
+                !getEntityFormat().isDeleted()) {
+                evolver.addEvolveError
+                    (this, this,
+                     "Class containing primary key field was deleted ",
+                     "Primary key is needed in an entity class hierarchy: " +
+                     priKeyField.getName());
+                return false;
+            } else {
+                secKeyFieldReader = new SkipFieldReader(0, secKeyFields);
+                nonKeyFieldReader = new SkipFieldReader(0, nonKeyFields);
+                return true;
+            }
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Creates a FieldReader for secondary key fields and non-key fields if
+     * necessary.  Checks the primary key field if necessary.  Does not evolve
+     * superclass format fields.
+     */
+    private int evolveAllFields(ComplexFormat newFormat, Evolver evolver) {
+
+        assert !isDeleted();
+        secKeyFieldReader = null;
+        nonKeyFieldReader = null;
+        oldToNewKeyMap = null;
+
+        /* Evolve primary key field. */
+        boolean evolveFailure = false;
+        boolean localEvolveNeeded = false;
+        if (priKeyField != null) {
+            int result = evolver.evolveRequiredKeyField
+                (this, newFormat, priKeyField, newFormat.priKeyField);
+            if (result == Evolver.EVOLVE_FAILURE) {
+                evolveFailure = true;
+            } else if (result == Evolver.EVOLVE_NEEDED) {
+                localEvolveNeeded = true;
+            }
+        }
+
+        /* Evolve secondary key fields. */
+        FieldReader reader = evolveFieldList
+            (secKeyFields, newFormat.secKeyFields, true,
+             newFormat.nonKeyFields, newFormat, evolver);
+        if (reader == FieldReader.EVOLVE_FAILURE) {
+            evolveFailure = true;
+        } else if (reader != null) {
+            localEvolveNeeded = true;
+        }
+        if (reader != FieldReader.EVOLVE_NEEDED) {
+            secKeyFieldReader = reader;
+        }
+
+        /* Evolve non-key fields. */
+        reader = evolveFieldList
+            (nonKeyFields, newFormat.nonKeyFields, false,
+             newFormat.secKeyFields, newFormat, evolver);
+        if (reader == FieldReader.EVOLVE_FAILURE) {
+            evolveFailure = true;
+        } else if (reader != null) {
+            localEvolveNeeded = true;
+        }
+        if (reader != FieldReader.EVOLVE_NEEDED) {
+            nonKeyFieldReader = reader;
+        }
+
+        /* Return result. */
+        if (evolveFailure) {
+            return Evolver.EVOLVE_FAILURE;
+        } else if (localEvolveNeeded) {
+            return Evolver.EVOLVE_NEEDED;
+        } else {
+            return Evolver.EVOLVE_NONE;
+        }
+    }
+
+    /**
+     * Returns a FieldReader that reads no fields.
+     *
+     * Instead of adding a DoNothingFieldReader class, we use a
+     * MultiFieldReader with an empty field list.  We do not add a new
+     * FieldReader class to avoid changing the catalog format.  [#15524]
+     */
+    private FieldReader getDoNothingFieldReader() {
+        List<FieldReader> emptyList = Collections.emptyList();
+        return new MultiFieldReader(emptyList);
+    }
+
+    /**
+     * Evolves a list of fields, either secondary key or non-key fields, for a
+     * single class format.
+     *
+     * @return a FieldReader if the stored fields must be read with a reader,
+     * FieldReader.EVOLVE_NEEDED if evolution is needed but no reader is
+     * required, null if no evolution is needed, or
+     * FieldReader.EVOLVE_FAILURE if an evolution error occurs.
+     */
+    private FieldReader evolveFieldList(List<FieldInfo> oldFields,
+                                        List<FieldInfo> newFields,
+                                        boolean isOldSecKeyField,
+                                        List<FieldInfo> otherNewFields,
+                                        ComplexFormat newFormat,
+                                        Evolver evolver) {
+        Mutations mutations = evolver.getMutations();
+        boolean evolveFailure = false;
+        boolean localEvolveNeeded = false;
+        boolean readerNeeded = false;
+        List<FieldReader> fieldReaders = new ArrayList<FieldReader>();
+        FieldReader currentReader = null;
+        int newFieldsMatched = 0;
+
+        /*
+         * Add FieldReaders to the list in old field storage order, since that
+         * is the order in which field values must be read.
+         */
+        fieldLoop:
+        for (int oldFieldIndex = 0;
+             oldFieldIndex < oldFields.size();
+             oldFieldIndex += 1) {
+
+            FieldInfo oldField = oldFields.get(oldFieldIndex);
+            String oldName = oldField.getName();
+            SecondaryKeyMetadata oldMeta = null;
+            if (isOldSecKeyField) {
+                oldMeta = clsMeta.getSecondaryKeys().get(oldName);
+                assert oldMeta != null;
+            }
+
+            /* Get field mutations. */
+            Renamer renamer = mutations.getRenamer
+                (getClassName(), getVersion(), oldName);
+            Deleter deleter = mutations.getDeleter
+                (getClassName(), getVersion(), oldName);
+            Converter converter = mutations.getConverter
+                (getClassName(), getVersion(), oldName);
+            if (deleter != null && (converter != null || renamer != null)) {
+                evolver.addInvalidMutation
+                    (this, newFormat, deleter,
+                     "Field Deleter is not allowed along with a Renamer or " +
+                     "Converter for the same field: " + oldName);
+                evolveFailure = true;
+                continue fieldLoop;
+            }
+
+            /*
+             * Match old and new field by name, taking into account the Renamer
+             * mutation.  If the @SecondaryKey annotation was added or removed,
+             * the field will have moved from one of the two field lists to the
+             * other.
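+             *
+             * A field Renamer is declared against the old class version,
+             * e.g. (hypothetical names):
+             *
+             *   mutations.addRenamer
+             *       (new Renamer("my.package.Person", 1, "ssn", "taxId"));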
+             */
+            String newName = (renamer != null) ?
+                renamer.getNewName() : oldName;
+            if (!oldName.equals(newName)) {
+                if (newToOldFieldMap == null) {
+                    newToOldFieldMap = new HashMap<String,String>();
+                }
+                newToOldFieldMap.put(newName, oldName);
+            }
+            int newFieldIndex = FieldInfo.getFieldIndex(newFields, newName);
+            FieldInfo newField = null;
+            boolean isNewSecKeyField = isOldSecKeyField;
+            if (newFieldIndex >= 0) {
+                newField = newFields.get(newFieldIndex);
+            } else {
+                newFieldIndex = FieldInfo.getFieldIndex
+                    (otherNewFields, newName);
+                if (newFieldIndex >= 0) {
+                    newField = otherNewFields.get(newFieldIndex);
+                    isNewSecKeyField = !isOldSecKeyField;
+                }
+                localEvolveNeeded = true;
+                readerNeeded = true;
+            }
+
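+            /*
+             * A field Deleter is declared against the old class version,
+             * e.g. (hypothetical names):
+             *
+             *   mutations.addDeleter
+             *       (new Deleter("my.package.Person", 1, "obsoleteField"));
+             */
+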
+            /* Apply field Deleter and continue. */
+            if (deleter != null) {
+                if (newField != null) {
+                    evolver.addInvalidMutation
+                        (this, newFormat, deleter,
+                         "Field Deleter is not allowed when the persistent " +
+                         "field is still present: " + oldName);
+                    evolveFailure = true;
+                }
+                /* A SkipFieldReader can read multiple sequential fields. */
+                if (currentReader instanceof SkipFieldReader &&
+                    currentReader.acceptField
+                        (oldFieldIndex, newFieldIndex, isNewSecKeyField)) {
+                    currentReader.addField(oldField);
+                } else {
+                    currentReader = new SkipFieldReader
+                        (oldFieldIndex, oldField);
+                    fieldReaders.add(currentReader);
+                    readerNeeded = true;
+                    localEvolveNeeded = true;
+                }
+                if (isOldSecKeyField) {
+                    if (oldToNewKeyMap == null) {
+                        oldToNewKeyMap = new HashMap<String,String>();
+                    }
+                    oldToNewKeyMap.put(oldMeta.getKeyName(), null);
+                }
+                continue fieldLoop;
+            } else {
+                if (newField == null) {
+                    evolver.addMissingMutation
+                        (this, newFormat,
+                         "Field is not present or not persistent: " +
+                         oldName);
+                    evolveFailure = true;
+                    continue fieldLoop;
+                }
+            }
+
+            /*
+             * The old field corresponds to a known new field, and no Deleter
+             * mutation applies.
+             */
+            newFieldsMatched += 1;
+
+            /* Get and process secondary key metadata changes. */
+            SecondaryKeyMetadata newMeta = null;
+            if (isOldSecKeyField && isNewSecKeyField) {
+                newMeta = newFormat.clsMeta.getSecondaryKeys().get(newName);
+                assert newMeta != null;
+
+                /* Validate metadata changes. */
+                if (!checkSecKeyMetadata
+                        (newFormat, oldMeta, newMeta, evolver)) {
+                    evolveFailure = true;
+                    continue fieldLoop;
+                }
+
+                /*
+                 * Check for a renamed key and save the old-to-new mapping for
+                 * use in renaming the secondary database and for key
+                 * extraction.
+                 */
+                String oldKeyName = oldMeta.getKeyName();
+                String newKeyName = newMeta.getKeyName();
+                if (!oldKeyName.equals(newKeyName)) {
+                    if (oldToNewKeyMap == null) {
+                        oldToNewKeyMap = new HashMap<String,String>();
+                    }
+                    /* Map key names, consistent with the other entries. */
+                    oldToNewKeyMap.put(oldKeyName, newKeyName);
+                    localEvolveNeeded = true;
+                }
+            } else if (isOldSecKeyField && !isNewSecKeyField) {
+                if (oldToNewKeyMap == null) {
+                    oldToNewKeyMap = new HashMap<String,String>();
+                }
+                oldToNewKeyMap.put(oldMeta.getKeyName(), null);
+            }
+
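+            /*
+             * A field Converter pairs a Conversion with the old class
+             * version, e.g. (hypothetical names):
+             *
+             *   mutations.addConverter(new Converter
+             *       ("my.package.Person", 1, "height",
+             *        new HeightConversion()));
+             */
+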
+            /* Apply field Converter and continue. */
+            if (converter != null) {
+                if (isOldSecKeyField) {
+                    evolver.addInvalidMutation
+                        (this, newFormat, converter,
+                         "Field Converter is not allowed for secondary key " +
+                         "fields: " + oldName);
+                    evolveFailure = true;
+                } else {
+                    currentReader = new ConvertFieldReader
+                        (converter, oldFieldIndex, newFieldIndex,
+                         isNewSecKeyField);
+                    fieldReaders.add(currentReader);
+                    readerNeeded = true;
+                    localEvolveNeeded = true;
+                }
+                continue fieldLoop;
+            }
+
+            /*
+             * Evolve the declared version of the field format and all versions
+             * more recent, and the formats for all of their subclasses.  While
+             * we're at it, check to see if all possible classes are converted.
+             */
+            boolean allClassesConverted = true;
+            Format oldFieldFormat = oldField.getType();
+            for (Format formatVersion = oldFieldFormat.getLatestVersion();
+                 true;
+                 formatVersion = formatVersion.getPreviousVersion()) {
+                assert formatVersion != null;
+                if (!evolver.evolveFormat(formatVersion)) {
+                    evolveFailure = true;
+                    continue fieldLoop;
+                }
+                if (!formatVersion.isNew() &&
+                    !evolver.isClassConverted(formatVersion)) {
+                    allClassesConverted = false;
+                }
+                Set<Format> subclassFormats =
+                    evolver.getSubclassFormats(formatVersion);
+                if (subclassFormats != null) {
+                    for (Format format2 : subclassFormats) {
+                        if (!evolver.evolveFormat(format2)) {
+                            evolveFailure = true;
+                            continue fieldLoop;
+                        }
+                        if (!format2.isNew() &&
+                            !evolver.isClassConverted(format2)) {
+                            allClassesConverted = false;
+                        }
+                    }
+                }
+                if (formatVersion == oldFieldFormat) {
+                    break;
+                }
+            }
+
+            /*
+             * Check for compatible field types and apply a field widener if
+             * needed.  If no widener is needed, fall through and apply a
+             * PlainFieldReader.
+             */
+            Format oldLatestFormat = oldFieldFormat.getLatestVersion();
+            Format newFieldFormat = newField.getType();
+            if (oldLatestFormat.getClassName().equals
+                    (newFieldFormat.getClassName()) &&
+                !oldLatestFormat.isDeleted()) {
+                /* Formats are identical.  Fall through. */
+            } else if (allClassesConverted) {
+                /* All old classes will be converted.  Fall through. */
+                localEvolveNeeded = true;
+            } else if (WidenerInput.isWideningSupported
+                        (oldLatestFormat, newFieldFormat, isOldSecKeyField)) {
+                /* Apply field widener and continue. */
+                currentReader = new WidenFieldReader
+                    (oldLatestFormat, newFieldFormat, newFieldIndex,
+                     isNewSecKeyField);
+                fieldReaders.add(currentReader);
+                readerNeeded = true;
+                localEvolveNeeded = true;
+                continue fieldLoop;
+            } else {
+                boolean refWidened = false;
+                if (!newFieldFormat.isPrimitive() &&
+                    !oldLatestFormat.isPrimitive() &&
+                    !oldLatestFormat.isDeleted() &&
+                    !evolver.isClassConverted(oldLatestFormat)) {
+                    Class oldCls = oldLatestFormat.getExistingType();
+                    Class newCls = newFieldFormat.getExistingType();
+                    if (newCls.isAssignableFrom(oldCls)) {
+                        refWidened = true;
+                    }
+                }
+                if (refWidened) {
+                    /* A reference type has been widened.  Fall through. */
+                    localEvolveNeeded = true;
+                } else {
+                    /* Types are not compatible. */
+                    evolver.addMissingMutation
+                        (this, newFormat,
+                         "Old field type: " + oldLatestFormat.getClassName() +
+                         " is not compatible with the new type: " +
+                         newFieldFormat.getClassName() +
+                         " for field: " + oldName);
+                    evolveFailure = true;
+                    continue fieldLoop;
+                }
+            }
+
+            /*
+             * Old to new field conversion is not needed or is automatic.  Read
+             * fields as if no evolution is needed.  A PlainFieldReader can
+             * read multiple sequential fields.
+             */
+            if (currentReader instanceof PlainFieldReader &&
+                currentReader.acceptField
+                    (oldFieldIndex, newFieldIndex, isNewSecKeyField)) {
+                currentReader.addField(oldField);
+            } else {
+                currentReader = new PlainFieldReader
+                    (oldFieldIndex, newFieldIndex, isNewSecKeyField);
+                fieldReaders.add(currentReader);
+            }
+        }
+
+        /*
+         * If there are new fields, then the old fields must be read using a
+         * reader, even if the old field list is empty.  Using the accessor
+         * directly will read fields in the wrong order and will read fields
+         * that were moved between lists (when adding and dropping
+         * @SecondaryKey).  [#15524]
+         */
+        if (newFieldsMatched < newFields.size()) {
+            localEvolveNeeded = true;
+            readerNeeded = true;
+        }
+
+        if (evolveFailure) {
+            return FieldReader.EVOLVE_FAILURE;
+        } else if (readerNeeded) {
+            if (fieldReaders.size() == 0) {
+                return getDoNothingFieldReader();
+            } else if (fieldReaders.size() == 1) {
+                return fieldReaders.get(0);
+            } else {
+                return new MultiFieldReader(fieldReaders);
+            }
+        } else if (localEvolveNeeded) {
+            return FieldReader.EVOLVE_NEEDED;
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Base class for all FieldReader subclasses.  A FieldReader reads one or
+     * more fields in the old format data, and may call the new format Accessor
+     * to set the field values.
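+     *
+     * <p>The concrete subclasses below cover the evolution cases:
+     * PlainFieldReader for unchanged fields, SkipFieldReader for deleted
+     * fields, ConvertFieldReader for fields with a Converter mutation,
+     * WidenFieldReader for widened fields, and MultiFieldReader for calling
+     * several of the above in sequence.</p>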
+     */
+    private static abstract class FieldReader implements Serializable {
+
+        static final FieldReader EVOLVE_NEEDED =
+            new PlainFieldReader(0, 0, false);
+        static final FieldReader EVOLVE_FAILURE =
+            new PlainFieldReader(0, 0, false);
+
+        private static final long serialVersionUID = 866041475399255164L;
+
+        FieldReader() {
+        }
+
+        void initialize(Catalog catalog,
+                        int initVersion,
+                        ComplexFormat oldParentFormat,
+                        ComplexFormat newParentFormat,
+                        boolean isOldSecKey) {
+        }
+
+        boolean acceptField(int oldFieldIndex,
+                            int newFieldIndex,
+                            boolean isNewSecKeyField) {
+            return false;
+        }
+
+        void addField(FieldInfo oldField) {
+            throw new UnsupportedOperationException();
+        }
+
+        abstract void readFields(Object o,
+                                 EntityInput input,
+                                 Accessor accessor,
+                                 int superLevel);
+    }
+
+    /**
+     * Reads a contiguous block of fields that have the same format in the old
+     * and new formats.
+     */
+    private static class PlainFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = 1795593463439931402L;
+
+        private int startField;
+        private int endField;
+        private boolean secKeyField;
+        private transient int endOldField;
+
+        PlainFieldReader(int oldFieldIndex,
+                         int newFieldIndex,
+                         boolean isNewSecKeyField) {
+            endOldField = oldFieldIndex;
+            startField = newFieldIndex;
+            endField = newFieldIndex;
+            secKeyField = isNewSecKeyField;
+        }
+
+        @Override
+        boolean acceptField(int oldFieldIndex,
+                            int newFieldIndex,
+                            boolean isNewSecKeyField) {
+            return oldFieldIndex == endOldField + 1 &&
+                   newFieldIndex == endField + 1 &&
+                   secKeyField == isNewSecKeyField;
+        }
+
+        @Override
+        void addField(FieldInfo oldField) {
+            endField += 1;
+            endOldField += 1;
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel) {
+            if (secKeyField) {
+                accessor.readSecKeyFields
+                    (o, input, startField, endField, superLevel);
+            } else {
+                accessor.readNonKeyFields
+                    (o, input, startField, endField, superLevel);
+            }
+        }
+    }
+
+    /**
+     * Skips a contiguous block of fields that exist in the old format but not
+     * in the new format.
+     */
+    private static class SkipFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = -3060281692155253098L;
+
+        private List<Format> fieldFormats;
+        private transient int endField;
+
+        SkipFieldReader(int startField, List<FieldInfo> fields) {
+            endField = startField + fields.size() - 1;
+            fieldFormats = new ArrayList<Format>(fields.size());
+            for (FieldInfo field : fields) {
+                fieldFormats.add(field.getType());
+            }
+        }
+
+        SkipFieldReader(int startField, FieldInfo oldField) {
+            endField = startField;
+            fieldFormats = new ArrayList<Format>();
+            fieldFormats.add(oldField.getType());
+        }
+
+        @Override
+        boolean acceptField(int oldFieldIndex,
+                            int newFieldIndex,
+                            boolean isNewSecKeyField) {
+            return oldFieldIndex == endField + 1;
+        }
+
+        @Override
+        void addField(FieldInfo oldField) {
+            endField += 1;
+            fieldFormats.add(oldField.getType());
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel) {
+            for (Format format : fieldFormats) {
+                input.skipField(format);
+            }
+        }
+    }
+
+    /**
+     * Converts a single field using a field Converter.
+     */
+    private static class ConvertFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = 8736410481633998710L;
+
+        private Converter converter;
+        private int oldFieldNum;
+        private int fieldNum;
+        private boolean secKeyField;
+        private transient Format oldFormat;
+        private transient Format newFormat;
+
+        ConvertFieldReader(Converter converter,
+                           int oldFieldIndex,
+                           int newFieldIndex,
+                           boolean isNewSecKeyField) {
+            this.converter = converter;
+            oldFieldNum = oldFieldIndex;
+            fieldNum = newFieldIndex;
+            secKeyField = isNewSecKeyField;
+        }
+
+        @Override
+        void initialize(Catalog catalog,
+                        int initVersion,
+                        ComplexFormat oldParentFormat,
+                        ComplexFormat newParentFormat,
+                        boolean isOldSecKey) {
+
+            /*
+             * The oldFieldNum field was added as part of a bug fix.  If not
+             * present in this version of the catalog, we assume it is equal to
+             * the new field index.  The code prior to the bug fix assumes the
+             * old and new fields have the same index. [#15797]
+             */
+            if (initVersion < 1) {
+                oldFieldNum = fieldNum;
+            }
+
+            if (isOldSecKey) {
+                oldFormat =
+                    oldParentFormat.secKeyFields.get(oldFieldNum).getType();
+            } else {
+                oldFormat =
+                    oldParentFormat.nonKeyFields.get(oldFieldNum).getType();
+            }
+            if (secKeyField) {
+                newFormat =
+                    newParentFormat.secKeyFields.get(fieldNum).getType();
+            } else {
+                newFormat =
+                    newParentFormat.nonKeyFields.get(fieldNum).getType();
+            }
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel) {
+
+            /* Create and read the old format instance in raw mode. */
+            boolean currentRawMode = input.setRawAccess(true);
+            Object value;
+            try {
+                if (oldFormat.isPrimitive()) {
+                    value = input.readKeyObject(oldFormat);
+                } else {
+                    value = input.readObject();
+                }
+            } finally {
+                input.setRawAccess(currentRawMode);
+            }
+
+            /* Convert the raw instance to the current format. */
+            Catalog catalog = input.getCatalog();
+            value = converter.getConversion().convert(value);
+
+            /* Use a RawSingleInput to convert and type-check the value. */
+            EntityInput rawInput = new RawSingleInput
+                (catalog, currentRawMode, null, value, newFormat);
+
+            if (secKeyField) {
+                accessor.readSecKeyFields
+                    (o, rawInput, fieldNum, fieldNum, superLevel);
+            } else {
+                accessor.readNonKeyFields
+                    (o, rawInput, fieldNum, fieldNum, superLevel);
+            }
+        }
+    }
+
+    /**
+     * Widens a single field using a field Converter.
+     */
+    private static class WidenFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = -2054520670170407282L;
+
+        private int fromFormatId;
+        private int toFormatId;
+        private int fieldNum;
+        private boolean secKeyField;
+
+        WidenFieldReader(Format oldFormat,
+                         Format newFormat,
+                         int newFieldIndex,
+                         boolean isNewSecKeyField) {
+            fromFormatId = oldFormat.getId();
+            toFormatId = newFormat.getId();
+            fieldNum = newFieldIndex;
+            secKeyField = isNewSecKeyField;
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel) {
+
+            /* The Accessor reads the field value from a WidenerInput. */
+            EntityInput widenerInput = new WidenerInput
+                (input, fromFormatId, toFormatId);
+
+            if (secKeyField) {
+                accessor.readSecKeyFields
+                    (o, widenerInput, fieldNum, fieldNum, superLevel);
+            } else {
+                accessor.readNonKeyFields
+                    (o, widenerInput, fieldNum, fieldNum, superLevel);
+            }
+        }
+    }
+
+    /**
+     * A FieldReader composed of other FieldReaders, which are called in
+     * sequence.  Used when more than one FieldReader is needed for a list of
+     * fields.
+     */
+    private static class MultiFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = -6035976787562441473L;
+
+        private List<FieldReader> subReaders;
+
+        MultiFieldReader(List<FieldReader> subReaders) {
+            this.subReaders = subReaders;
+        }
+
+        @Override
+        void initialize(Catalog catalog,
+                        int initVersion,
+                        ComplexFormat oldParentFormat,
+                        ComplexFormat newParentFormat,
+                        boolean isOldSecKey) {
+            for (FieldReader reader : subReaders) {
+                reader.initialize
+                    (catalog, initVersion, oldParentFormat, newParentFormat,
+                     isOldSecKey);
+            }
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel) {
+            for (FieldReader reader : subReaders) {
+                reader.readFields(o, input, accessor, superLevel);
+            }
+        }
+    }
+
+    /**
+     * The Reader for evolving ComplexFormat instances.  Reads the old format
+     * data one class (one level in the class hierarchy) at a time.  If an
+     * Accessor is used at a given level, the Accessor is used for the
+     * corresponding level in the new class hierarchy (classes may be
+     * inserted/deleted during evolution).  At each level, a FieldReader is
+     * called to evolve the secondary key and non-key lists of fields.
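+     *
+     * <p>For example (an illustrative sketch; class names are hypothetical):
+     * if the old hierarchy is C -> B -> A and class B is deleted, the new
+     * hierarchy is C -> A.  newHierarchyLevels then maps old C to new level
+     * 0, old B to DO_NOT_READ_ACCESSOR, and old A to new level 1, so that
+     * B's fields are skipped by a FieldReader rather than read via the
+     * Accessor.</p>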
+     */
+    private static class EvolveReader implements Reader {
+
+        static final int DO_NOT_READ_ACCESSOR = Integer.MAX_VALUE;
+
+        private static final long serialVersionUID = -1016140948306913283L;
+
+        private transient ComplexFormat newFormat;
+
+        /**
+         * oldHierarchy contains the formats of the old class hierarchy in most
+         * to least derived class order.
+         */
+        private transient ComplexFormat[] oldHierarchy;
+
+        /**
+         * newHierarchyLevels contains the corresponding level in the new
+         * hierarchy for each format in oldHierarchy. newHierarchyLevels is
+         * indexed by the oldHierarchy index.
+         */
+        private int[] newHierarchyLevels;
+
+        EvolveReader(List<Integer> newHierarchyLevelsList) {
+            int oldDepth = newHierarchyLevelsList.size();
+            newHierarchyLevels = new int[oldDepth];
+            for (int i = 0; i < oldDepth; i += 1) {
+                newHierarchyLevels[i] = newHierarchyLevelsList.get(i);
+            }
+        }
+
+        public void initializeReader(Catalog catalog,
+                                     EntityModel model,
+                                     int initVersion,
+                                     Format oldFormatParam) {
+
+            ComplexFormat oldFormat = (ComplexFormat) oldFormatParam;
+            newFormat = oldFormat.getComplexLatest();
+            newFormat.initializeIfNeeded(catalog, model);
+
+            /* Create newHierarchy array. */
+            int newDepth = 0;
+            for (Format format = newFormat;
+                 format != null;
+                 format = format.getSuperFormat()) {
+                newDepth += 1;
+            }
+            ComplexFormat[] newHierarchy = new ComplexFormat[newDepth];
+            int level = 0;
+            for (ComplexFormat format = newFormat;
+                 format != null;
+                 format = format.getComplexSuper()) {
+                newHierarchy[level] = format;
+                level += 1;
+            }
+            assert level == newDepth;
+
+            /* Create oldHierarchy array and initialize FieldReaders. */
+            int oldDepth = newHierarchyLevels.length;
+            oldHierarchy = new ComplexFormat[oldDepth];
+            level = 0;
+            for (ComplexFormat oldFormat2 = oldFormat;
+                 oldFormat2 != null;
+                 oldFormat2 = oldFormat2.getComplexSuper()) {
+                oldHierarchy[level] = oldFormat2;
+                int level2 = newHierarchyLevels[level];
+                ComplexFormat newFormat2 = (level2 != DO_NOT_READ_ACCESSOR) ?
+                    newHierarchy[level2] : null;
+                level += 1;
+                if (oldFormat2.secKeyFieldReader != null) {
+                    oldFormat2.secKeyFieldReader.initialize
+                        (catalog, initVersion, oldFormat2, newFormat2, true);
+                }
+                if (oldFormat2.nonKeyFieldReader != null) {
+                    oldFormat2.nonKeyFieldReader.initialize
+                        (catalog, initVersion, oldFormat2, newFormat2, false);
+                }
+            }
+            assert level == oldDepth;
+        }
+
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return newFormat.newInstance(input, rawAccess);
+        }
+
+        public void readPriKey(Object o,
+                               EntityInput input,
+                               boolean rawAccess) {
+            /* No conversion necessary for primary keys. */
+            newFormat.readPriKey(o, input, rawAccess);
+        }
+
+        public Object readObject(Object o,
+                                 EntityInput input,
+                                 boolean rawAccess) {
+
+            /* Use the Accessor for the new format. */
+            Accessor accessor = rawAccess ? newFormat.rawAccessor
+                                          : newFormat.objAccessor;
+
+            /* Read old format fields from the top-most class downward. */
+            int maxMinusOne = oldHierarchy.length - 1;
+
+            /* Read secondary key fields with the adjusted superclass level. */
+            for (int i = maxMinusOne; i >= 0; i -= 1) {
+                FieldReader reader = oldHierarchy[i].secKeyFieldReader;
+                int newLevel = newHierarchyLevels[i];
+                if (reader != null) {
+                    reader.readFields(o, input, accessor, newLevel);
+                } else if (newLevel != DO_NOT_READ_ACCESSOR) {
+                    accessor.readSecKeyFields
+                        (o, input, 0, Accessor.MAX_FIELD_NUM, newLevel);
+                }
+            }
+
+            /* Read non-key fields with the adjusted superclass level. */
+            for (int i = maxMinusOne; i >= 0; i -= 1) {
+                FieldReader reader = oldHierarchy[i].nonKeyFieldReader;
+                int newLevel = newHierarchyLevels[i];
+                if (reader != null) {
+                    reader.readFields(o, input, accessor, newLevel);
+                } else if (newLevel != DO_NOT_READ_ACCESSOR) {
+                    accessor.readNonKeyFields
+                        (o, input, 0, Accessor.MAX_FIELD_NUM, newLevel);
+                }
+            }
+            return o;
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/CompositeKeyFormat.java b/src/com/sleepycat/persist/impl/CompositeKeyFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..fcf839d3f77ef478c3d54f138fab6ee6fa97a3fe
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/CompositeKeyFormat.java
@@ -0,0 +1,332 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CompositeKeyFormat.java,v 1.27.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Format for a composite key class.
+ *
+ * This class is similar to ComplexFormat in that a composite key class and
+ * other complex classes have fields, and the Accessor interface is used to
+ * access those fields.  Composite key classes are different in the following
+ * ways:
+ *
+ * - The superclass must be Object.  No inheritance is allowed.
+ *
+ * - All instance fields must be annotated with @KeyField, which determines
+ *   their order in the data bytes.
+ *
+ * - Although fields may be reference types (primitive wrappers or other simple
+ *   reference types), they are stored as if they were primitives.  No object
+ *   format ID is stored, and the class of the object must be the declared
+ *   class of the field; i.e., no polymorphism is allowed for key fields.
+ *   In other words, a composite key is stored as an ordinary tuple as defined
+ *   in the com.sleepycat.bind.tuple package.  This keeps the key small and
+ *   gives it a well-defined sort order.
+ *
+ * - If the key class implements Comparable, it is called by the Database
+ *   btree comparator.  It must therefore be available during JE recovery,
+ *   before the store and catalog have been opened.  To support this, this
+ *   format can be constructed during recovery.  A SimpleCatalog singleton
+ *   instance is used to provide a catalog of simple types that is used by
+ *   the composite key format.
+ *
+ * - When interacting with the Accessor, the composite key format treats the
+ *   Accessor's non-key fields as its key fields.  The Accessor's key fields
+ *   are secondary keys, while the composite format's key fields are the
+ *   component parts of a single key.
+ *
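+ * An example composite key class (a hedged sketch; the names are
+ * hypothetical):
+ *
+ * <pre>
+ *  {@literal @}Persistent
+ *  class NameKey {
+ *      {@literal @}KeyField(1) String lastName;
+ *      {@literal @}KeyField(2) String firstName;
+ *      private NameKey() {} // for bindings
+ *  }
+ * </pre>
+ *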
+ * @author Mark Hayes
+ */
+public class CompositeKeyFormat extends Format {
+
+    private static final long serialVersionUID = 306843428409314630L;
+
+    private ClassMetadata metadata;
+    private List<FieldInfo> fields;
+    private transient Accessor objAccessor;
+    private transient Accessor rawAccessor;
+    private transient volatile Map<String,RawField> rawFields;
+    private transient volatile FieldInfo[] rawInputFields;
+
+    static String[] getFieldNameArray(List<FieldMetadata> list) {
+        int index = 0;
+        String[] a = new String[list.size()];
+        for (FieldMetadata f : list) {
+            a[index] = f.getName();
+            index += 1;
+        }
+        return a;
+    }
+
+    CompositeKeyFormat(Class cls,
+                       ClassMetadata metadata,
+                       List<FieldMetadata> fieldNames) {
+        this(cls, metadata, getFieldNameArray(fieldNames));
+    }
+
+    CompositeKeyFormat(Class cls,
+                       ClassMetadata metadata,
+                       String[] fieldNames) {
+        super(cls);
+        this.metadata = metadata;
+
+        /* Check that the superclass is Object. */
+        Class superCls = cls.getSuperclass();
+        if (superCls != Object.class) {
+            throw new IllegalArgumentException
+                ("Composite key class must be derived from Object: " +
+                 cls.getName());
+        }
+
+        /* Populate fields list in fieldNames order. */
+        List<FieldInfo> instanceFields =
+            FieldInfo.getInstanceFields(cls, metadata);
+        fields = new ArrayList<FieldInfo>(instanceFields.size());
+        for (String fieldName : fieldNames) {
+            FieldInfo field = null;
+            for (FieldInfo tryField : instanceFields) {
+                if (fieldName.equals(tryField.getName())) {
+                    field = tryField;
+                    break;
+                }
+            }
+            if (field == null) {
+                throw new IllegalArgumentException
+                    ("Composite key field is not an instance field:" +
+                     getClassName() + '.' + fieldName);
+            }
+            fields.add(field);
+            instanceFields.remove(field);
+            if (!SimpleCatalog.isSimpleType(field.getFieldClass())) {
+                throw new IllegalArgumentException
+                    ("Composite key field is not a simple type: " +
+                     getClassName() + '.' + fieldName);
+            }
+        }
+        if (instanceFields.size() > 0) {
+            throw new IllegalArgumentException
+                ("All composite key instance fields must be key fields: " +
+                 getClassName() + '.' + instanceFields.get(0).getName());
+        }
+    }
+
+    @Override
+    void migrateFromBeta(Map<String,Format> formatMap) {
+        super.migrateFromBeta(formatMap);
+        for (FieldInfo field : fields) {
+            field.migrateFromBeta(formatMap);
+        }
+    }
+
+    @Override
+    boolean isModelClass() {
+        return true;
+    }
+
+    @Override
+    public ClassMetadata getClassMetadata() {
+        if (metadata == null) {
+            throw new IllegalStateException(getClassName());
+        }
+        return metadata;
+    }
+
+    @Override
+    public Map<String,RawField> getFields() {
+
+        /*
+         * Lazily create the raw type information.  Synchronization is not
+         * required since this object is immutable.  If by chance we create two
+         * maps when two threads execute this block, no harm is done.  But be
+         * sure to assign the rawFields field only after the map is fully
+         * populated.
+         */
+        if (rawFields == null) {
+            Map<String,RawField> map = new HashMap<String,RawField>();
+            for (RawField field : fields) {
+                map.put(field.getName(), field);
+            }
+            rawFields = map;
+        }
+        return rawFields;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+        /* Collect field formats. */
+        for (FieldInfo field : fields) {
+            field.collectRelatedFormats(catalog, newFormats);
+        }
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+        /* Initialize all fields. */
+        for (FieldInfo field : fields) {
+            field.initialize(catalog, model, initVersion);
+        }
+        /* Create the accessor. */
+        Class type = getType();
+        if (type != null) {
+            if (EnhancedAccessor.isEnhanced(type)) {
+                objAccessor = new EnhancedAccessor(type);
+            } else {
+                objAccessor = new ReflectionAccessor(catalog, type, fields);
+            }
+        }
+        rawAccessor = new RawAccessor(this, fields);
+    }
+
+    @Override
+    Object newArray(int len) {
+        return objAccessor.newArray(len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        return accessor.newInstance();
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.readNonKeyFields(o, input, 0, Accessor.MAX_FIELD_NUM, -1);
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        Accessor accessor = rawAccess ? rawAccessor : objAccessor;
+        accessor.writeNonKeyFields(o, output);
+    }
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+
+        /*
+         * Synchronization is not required since rawInputFields is immutable.
+         * If by chance we create duplicate values when two threads execute
+         * this block, no harm is done.  But be sure to assign the field only
+         * after the values are fully populated.
+         */
+        FieldInfo[] myFields = rawInputFields;
+        if (myFields == null) {
+            myFields = new FieldInfo[fields.size()];
+            fields.toArray(myFields);
+            rawInputFields = myFields;
+        }
+        if (rawObject.getSuper() != null) {
+            throw new IllegalArgumentException
+                ("RawObject has too many superclasses: " +
+                 rawObject.getType().getClassName());
+        }
+        RawObject[] objects = new RawObject[myFields.length];
+        Arrays.fill(objects, rawObject);
+        EntityInput in = new RawComplexInput
+            (catalog, rawAccess, converted, myFields, objects);
+        Object o = newInstance(in, rawAccess);
+        converted.put(rawObject, o);
+        return readObject(o, in, rawAccess);
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        int maxNum = fields.size();
+        for (int i = 0; i < maxNum; i += 1) {
+            fields.get(i).getType().skipContents(input);
+        }
+    }
+
+    @Override
+    void copySecKey(RecordInput input, RecordOutput output) {
+        int maxNum = fields.size();
+        for (int i = 0; i < maxNum; i += 1) {
+            fields.get(i).getType().copySecKey(input, output);
+        }
+    }
+
+    @Override
+    Format getSequenceKeyFormat() {
+        if (fields.size() != 1) {
+            throw new IllegalArgumentException
+                ("A composite key class used with a sequence may contain " +
+                 "only a single integer key field: " + getClassName());
+        }
+        return fields.get(0).getType().getSequenceKeyFormat();
+    }
+
+    @Override
+    boolean evolve(Format newFormatParam, Evolver evolver) {
+
+        /* Disallow evolution to a non-composite format. */
+        if (!(newFormatParam instanceof CompositeKeyFormat)) {
+            evolver.addEvolveError
+                (this, newFormatParam, null,
+                 "A composite key class may not be changed to a different " +
+                 "type");
+            return false;
+        }
+        CompositeKeyFormat newFormat = (CompositeKeyFormat) newFormatParam;
+
+        /* Check for added or removed key fields. */
+        if (fields.size() != newFormat.fields.size()) {
+            evolver.addEvolveError
+                (this, newFormat,
+                 "Composite key class fields were added or removed ",
+                 "Old fields: " + fields +
+                 " new fields: " + newFormat.fields);
+            return false;
+        }
+
+        /* Check for modified key fields. */
+        boolean newVersion = false;
+        for (int i = 0; i < fields.size(); i += 1) {
+            int result = evolver.evolveRequiredKeyField
+                (this, newFormat, fields.get(i),
+                 newFormat.fields.get(i));
+            if (result == Evolver.EVOLVE_FAILURE) {
+                return false;
+            }
+            if (result == Evolver.EVOLVE_NEEDED) {
+                newVersion = true;
+            }
+        }
+
+        /*
+         * We never need to use a custom reader because the physical key field
+         * formats never change.  But we do create a new evolved format when
+         * a type changes (primitive <-> primitive wrapper) so that the new
+         * type information is correct.
+         */
+        if (newVersion) {
+            evolver.useEvolvedFormat(this, newFormat, newFormat);
+        } else {
+            evolver.useOldFormat(this, newFormat);
+        }
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ConverterReader.java b/src/com/sleepycat/persist/impl/ConverterReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..36aee1c776335ff3a4210764707c643082a278c7
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ConverterReader.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ConverterReader.java,v 1.10.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Reader for invoking a class Converter mutation.
+ *
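+ * <p>A minimal sketch of registering such a mutation when opening a store
+ * (the class name and PersonConversion are hypothetical, not from this
+ * codebase):</p>
+ *
+ * <pre>
+ *  Mutations mutations = new Mutations();
+ *  // PersonConversion implements com.sleepycat.persist.evolve.Conversion.
+ *  mutations.addConverter(new Converter("my.package.Person", 0,
+ *                                       new PersonConversion()));
+ *  storeConfig.setMutations(mutations);
+ * </pre>
+ *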
+ * @author Mark Hayes
+ */
+public class ConverterReader implements Reader {
+
+    private static final long serialVersionUID = -305788321064984348L;
+
+    private Converter converter;
+    private transient Format oldFormat;
+
+    ConverterReader(Converter converter) {
+        this.converter = converter;
+    }
+
+    public void initializeReader(Catalog catalog,
+                                 EntityModel model,
+                                 int initVersion,
+                                 Format oldFormat) {
+        this.oldFormat = oldFormat;
+    }
+
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        /* Create the old format RawObject. */
+        return oldFormat.newInstance(input, true);
+    }
+
+    public void readPriKey(Object o, EntityInput input, boolean rawAccess) {
+        /* Read the old format RawObject's primary key. */
+        oldFormat.readPriKey(o, input, true);
+    }
+
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        Catalog catalog = input.getCatalog();
+
+        /* Read the old format RawObject and convert it. */
+        boolean currentRawMode = input.setRawAccess(true);
+        try {
+            o = oldFormat.readObject(o, input, true);
+        } finally {
+            input.setRawAccess(currentRawMode);
+        }
+        o = converter.getConversion().convert(o);
+
+        /* Convert the current format RawObject to a live Object. */
+        if (!rawAccess && o instanceof RawObject) {
+            o = catalog.convertRawObject((RawObject) o, null);
+        }
+        return o;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Enhanced.java b/src/com/sleepycat/persist/impl/Enhanced.java
new file mode 100644
index 0000000000000000000000000000000000000000..ca3142824a125106d4adbe2c9d328375ce5a1b00
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Enhanced.java
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Enhanced.java,v 1.12.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+/**
+ * Interface implemented by a persistent class via bytecode enhancement.
+ *
+ * <p>See {@link Accessor} for method documentation.  {@link EnhancedAccessor}
+ * implements Accessor and forwards all calls to methods in the Enhanced
+ * class.</p>
+ *
+ * <p>Each class that implements this interface (including its subclasses and
+ * superclasses except for Object) must also contain a static initializer
+ * block that registers a prototype instance by calling
+ * EnhancedAccessor.registerClass.  Other instances are created from the
+ * prototype instance using {@link #bdbNewInstance}.</p>
+ *
+ * <pre>static { EnhancedAccessor.registerClass("Xxx", new Xxx()); }</pre>
+ *
+ * <p>An example of the generated code for reading and writing fields is shown
+ * below.</p>
+ *
+ * <pre>
+ *  private int f1;
+ *  private String f2;
+ *  private MyClass f3;
+ *
+ *  public void bdbWriteNonKeyFields(EntityOutput output) {
+ *
+ *      super.bdbWriteNonKeyFields(output);
+ *
+ *      output.writeInt(f1);
+ *      output.writeObject(f2, null);
+ *      output.writeObject(f3, null);
+ *  }
+ *
+ *  public void bdbReadNonKeyFields(EntityInput input,
+ *                                  int startField,
+ *                                  int endField,
+ *                                  int superLevel) {
+ *
+ *      if (superLevel != 0) {
+ *          super.bdbReadNonKeyFields(input, startField, endField,
+ *                                    superLevel - 1);
+ *      }
+ *      if (superLevel &lt;= 0) {
+ *          switch (startField) {
+ *          case 0:
+ *              f1 = input.readInt();
+ *              if (endField == 0) break;
+ *          case 1:
+ *              f2 = (String) input.readObject();
+ *              if (endField == 1) break;
+ *          case 2:
+ *              f3 = (MyClass) input.readObject();
+ *          }
+ *      }
+ *  }
+ * </pre>
+ *
+ * @author Mark Hayes
+ */
+public interface Enhanced {
+
+    /**
+     * @see Accessor#newInstance
+     */
+    Object bdbNewInstance();
+
+    /**
+     * @see Accessor#newArray
+     */
+    Object bdbNewArray(int len);
+
+    /**
+     * Calls the superclass method if this class does not contain the primary
+     * key field.
+     *
+     * @see Accessor#isPriKeyFieldNullOrZero
+     */
+    boolean bdbIsPriKeyFieldNullOrZero();
+
+    /**
+     * Calls the superclass method if this class does not contain the primary
+     * key field.
+     *
+     * @see Accessor#writePriKeyField
+     */
+    void bdbWritePriKeyField(EntityOutput output, Format format);
+
+    /**
+     * Calls the superclass method if this class does not contain the primary
+     * key field.
+     *
+     * @see Accessor#readPriKeyField
+     */
+    void bdbReadPriKeyField(EntityInput input, Format format);
+
+    /**
+     * @see Accessor#writeSecKeyFields
+     */
+    void bdbWriteSecKeyFields(EntityOutput output);
+
+    /**
+     * @see Accessor#readSecKeyFields
+     */
+    void bdbReadSecKeyFields(EntityInput input,
+                             int startField,
+                             int endField,
+                             int superLevel);
+
+    /**
+     * @see Accessor#writeNonKeyFields
+     */
+    void bdbWriteNonKeyFields(EntityOutput output);
+
+    /**
+     * @see Accessor#readNonKeyFields
+     */
+    void bdbReadNonKeyFields(EntityInput input,
+                             int startField,
+                             int endField,
+                             int superLevel);
+
+    /**
+     * @see Accessor#getField
+     */
+    Object bdbGetField(Object o,
+                       int field,
+                       int superLevel,
+                       boolean isSecField);
+
+    /**
+     * @see Accessor#setField
+     */
+    void bdbSetField(Object o,
+                     int field,
+                     int superLevel,
+                     boolean isSecField,
+                     Object value);
+}
diff --git a/src/com/sleepycat/persist/impl/EnhancedAccessor.java b/src/com/sleepycat/persist/impl/EnhancedAccessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..c781c3e1df4eabf823ced6c40f60110c692d8c2f
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/EnhancedAccessor.java
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnhancedAccessor.java,v 1.15.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Modifier;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Implements Accessor for a complex persistent class.
+ *
+ * @author Mark Hayes
+ */
+public class EnhancedAccessor implements Accessor {
+
+    private static final Map<String,Enhanced> classRegistry =
+        Collections.synchronizedMap(new HashMap<String,Enhanced>());
+
+    /* Is public for unit tests. */
+    public static final boolean EXPECT_ENHANCED =
+        "true".equals(System.getProperty("expectEnhanced"));
+
+    private Enhanced prototype;
+    private Format priKeyFormat;
+    private Class type;
+
+    /**
+     * Registers a prototype instance, and should be called during
+     * initialization of the prototype class.  The prototype may be null for
+     * an abstract class.
+     */
+    public static void registerClass(String className, Enhanced prototype) {
+        classRegistry.put(className, prototype);
+    }
+
+    /**
+     * Returns whether a given class is a (registered) enhanced class.
+     */
+    static boolean isEnhanced(Class type) {
+        boolean enhanced = classRegistry.containsKey(type.getName());
+        if (!enhanced && EXPECT_ENHANCED) {
+            throw new IllegalStateException
+                ("Test was run with expectEnhanced=true but class " +
+                 type.getName() + " is not enhanced");
+        }
+        return enhanced;
+    }
+
+    /**
+     * Creates an accessor.
+     */
+    EnhancedAccessor(Class type) {
+        this.type = type;
+        prototype = classRegistry.get(type.getName());
+        assert prototype != null || Modifier.isAbstract(type.getModifiers());
+    }
+
+    /**
+     * Creates an accessor for a complex type.
+     */
+    EnhancedAccessor(Catalog catalog, Class type, ComplexFormat format) {
+        this(type);
+
+        /*
+         * Find the primary key format for this format or one of its superclass
+         * formats.
+         */
+        ComplexFormat declaringFormat = format;
+        while (declaringFormat != null) {
+            String priKeyField = declaringFormat.getPriKeyField();
+            if (priKeyField != null) {
+                Class declaringType = declaringFormat.getType();
+                Class fieldType;
+                try {
+                    fieldType =
+                        declaringType.getDeclaredField(priKeyField).getType();
+                } catch (NoSuchFieldException e) {
+                    throw new IllegalStateException(e);
+                }
+                priKeyFormat = catalog.getFormat
+                    (fieldType, false /*openEntitySubclassIndexes*/);
+                break;
+            } else {
+                Format superFormat = declaringFormat.getSuperFormat();
+                declaringFormat = (ComplexFormat) superFormat;
+            }
+        }
+    }
+
+    public Object newInstance() {
+        if (prototype == null) {
+            /* Abstract class -- internal error. */
+            throw new IllegalStateException();
+        }
+        return prototype.bdbNewInstance();
+    }
+
+    public Object newArray(int len) {
+        if (prototype == null) {
+            /* Abstract class -- use reflection for now. */
+            return Array.newInstance(type, len);
+        }
+        return prototype.bdbNewArray(len);
+    }
+
+    public boolean isPriKeyFieldNullOrZero(Object o) {
+        if (priKeyFormat == null) {
+            throw new IllegalStateException
+                ("No primary key: " + o.getClass().getName());
+        }
+        return ((Enhanced) o).bdbIsPriKeyFieldNullOrZero();
+    }
+
+    public void writePriKeyField(Object o, EntityOutput output) {
+        if (priKeyFormat == null) {
+            throw new IllegalStateException
+                ("No primary key: " + o.getClass().getName());
+        }
+        ((Enhanced) o).bdbWritePriKeyField(output, priKeyFormat);
+    }
+
+    public void readPriKeyField(Object o, EntityInput input) {
+        if (priKeyFormat == null) {
+            throw new IllegalStateException
+                ("No primary key: " + o.getClass().getName());
+        }
+        ((Enhanced) o).bdbReadPriKeyField(input, priKeyFormat);
+    }
+
+    public void writeSecKeyFields(Object o, EntityOutput output) {
+        ((Enhanced) o).bdbWriteSecKeyFields(output);
+    }
+
+    public void readSecKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        ((Enhanced) o).bdbReadSecKeyFields
+            (input, startField, endField, superLevel);
+    }
+
+    public void writeNonKeyFields(Object o, EntityOutput output) {
+        ((Enhanced) o).bdbWriteNonKeyFields(output);
+    }
+
+    public void readNonKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        ((Enhanced) o).bdbReadNonKeyFields
+            (input, startField, endField, superLevel);
+    }
+
+    public Object getField(Object o,
+                           int field,
+                           int superLevel,
+                           boolean isSecField) {
+        return ((Enhanced) o).bdbGetField(o, field, superLevel, isSecField);
+    }
+
+    public void setField(Object o,
+                         int field,
+                         int superLevel,
+                         boolean isSecField,
+                         Object value) {
+        ((Enhanced) o).bdbSetField(o, field, superLevel, isSecField, value);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/EntityInput.java b/src/com/sleepycat/persist/impl/EntityInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..51a640c984608c83bf2cdbeb0ccd9307abbdfdb8
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/EntityInput.java
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityInput.java,v 1.19.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.math.BigInteger;
+
+/**
+ * Used for reading object fields.
+ *
+ * <p>Unlike TupleInput, Strings are returned by {@link #readObject} when using
+ * this class.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface EntityInput {
+
+    /**
+     * Returns the Catalog associated with this input.
+     */
+    Catalog getCatalog();
+
+    /**
+     * Returns whether this input is in raw mode, i.e., whether it is returning
+     * raw instances.
+     */
+    boolean isRawAccess();
+
+    /**
+     * Changes raw mode and returns the original mode, which is normally
+     * restored later.  For temporarily changing the mode during a conversion.
+     */
+    boolean setRawAccess(boolean rawAccessParam);
+
+    /**
+     * Called via Accessor to read all fields with reference types, except for
+     * the primary key field and composite key fields (see readKeyObject
+     * below).
+     */
+    Object readObject();
+
+    /**
+     * Called for a primary key field or a composite key field with a reference
+     * type.
+     *
+     * <p>For such key fields, no formatId is present nor can the object
+     * already be present in the visited object set.</p>
+     */
+    Object readKeyObject(Format format);
+
+    /**
+     * Called via Accessor.readSecKeyFields for a primary key field with a
+     * reference type.  This method must be called before reading any other
+     * fields.
+     */
+    void registerPriKeyObject(Object o);
+
+    /**
+     * Called by ObjectArrayFormat and PrimitiveArrayFormat to read the array
+     * length.
+     */
+    int readArrayLength();
+
+    /**
+     * Called by EnumFormat to read and return index of the enum constant.
+     */
+    int readEnumConstant(String[] names);
+
+    /**
+     * Called via PersistKeyCreator to skip fields prior to the secondary key
+     * field.  Also called during class evolution to skip deleted fields.
+     */
+    void skipField(Format declaredFormat);
+
+    /* The following methods are a subset of the methods in TupleInput. */
+
+    String readString();
+    char readChar();
+    boolean readBoolean();
+    byte readByte();
+    short readShort();
+    int readInt();
+    long readLong();
+    float readSortedFloat();
+    double readSortedDouble();
+    BigInteger readBigInteger();
+}
diff --git a/src/com/sleepycat/persist/impl/EntityOutput.java b/src/com/sleepycat/persist/impl/EntityOutput.java
new file mode 100644
index 0000000000000000000000000000000000000000..c680bb4bfce9a5749361868edd5e7ade247977e6
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/EntityOutput.java
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityOutput.java,v 1.16.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.math.BigInteger;
+
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * Used for writing object fields.
+ *
+ * <p>Unlike TupleOutput, Strings should be passed to {@link #writeObject} when
+ * using this class.</p>
+ *
+ * <p>Note that currently there is only one implementation of EntityOutput:
+ * RecordOutput.  There is no RawObjectOutput implementation because we currently
+ * have no need to convert from persistent objects to RawObject instances.
+ * The EntityOutput interface is only for symmetry with EntityInput and in case
+ * we need RawObjectOutput in the future.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface EntityOutput {
+
+    /**
+     * Called via Accessor to write all fields with reference types, except for
+     * the primary key field and composite key fields (see writeKeyObject
+     * below).
+     */
+    void writeObject(Object o, Format fieldFormat);
+
+    /**
+     * Called for a primary key field or composite key field with a reference
+     * type.
+     */
+    void writeKeyObject(Object o, Format fieldFormat);
+
+    /**
+     * Called via Accessor.writeSecKeyFields for a primary key field with a
+     * reference type.  This method must be called before writing any other
+     * fields.
+     */
+    void registerPriKeyObject(Object o);
+
+    /**
+     * Called by ObjectArrayFormat and PrimitiveArrayFormat to write the array
+     * length.
+     */
+    void writeArrayLength(int length);
+
+    /**
+     * Called by EnumFormat to write the given index of the enum constant.
+     */
+    void writeEnumConstant(String[] names, int index);
+
+    /* The following methods are a subset of the methods in TupleOutput. */
+
+    TupleOutput writeString(String val);
+    TupleOutput writeChar(int val);
+    TupleOutput writeBoolean(boolean val);
+    TupleOutput writeByte(int val);
+    TupleOutput writeShort(int val);
+    TupleOutput writeInt(int val);
+    TupleOutput writeLong(long val);
+    TupleOutput writeSortedFloat(float val);
+    TupleOutput writeSortedDouble(double val);
+    TupleOutput writeBigInteger(BigInteger val);
+}
diff --git a/src/com/sleepycat/persist/impl/EnumFormat.java b/src/com/sleepycat/persist/impl/EnumFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..bab074c4de30d0bd625cdee8c8ec37ac071a7ece
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/EnumFormat.java
@@ -0,0 +1,180 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnumFormat.java,v 1.22.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Format for all enum types.
+ *
+ * In this class we resort to using reflection to allocate arrays of enums.
+ * If there is a need for it, reflection could be avoided in the future by
+ * generating code as new enum array types are encountered.
+ *
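+ * During evolution (see evolve below) constants may be added to an enum, but
+ * removing a constant is an error: evolving {A, B} to {A, B, C} is allowed,
+ * while evolving {A, B} to {A} fails without a mutation (and currently fails
+ * even with one, as noted in the "For future" comments).
+ *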
+ * @author Mark Hayes
+ */
+public class EnumFormat extends Format {
+
+    private static final long serialVersionUID = 1069833955604373538L;
+
+    private String[] names;
+    private transient Object[] values;
+
+    EnumFormat(Class type) {
+        super(type);
+        values = type.getEnumConstants();
+        names = new String[values.length];
+        for (int i = 0; i < names.length; i += 1) {
+            names[i] = ((Enum) values[i]).name();
+        }
+    }
+
+    @Override
+    public boolean isEnum() {
+        return true;
+    }
+
+    @Override
+    public List<String> getEnumConstants() {
+        return Arrays.asList(names);
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+        if (values == null) {
+            Class cls = getType();
+            if (cls != null) {
+                values = new Object[names.length];
+                for (int i = 0; i < names.length; i += 1) {
+                    values[i] = Enum.valueOf(cls, names[i]);
+                }
+            }
+        }
+    }
+
+    @Override
+    Object newArray(int len) {
+        return Array.newInstance(getType(), len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        int index = input.readEnumConstant(names);
+        if (rawAccess) {
+            return new RawObject(this, names[index]);
+        } else {
+            return values[index];
+        }
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        /* newInstance reads the value -- do nothing here. */
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        if (rawAccess) {
+            String name = ((RawObject) o).getEnum();
+            for (int i = 0; i < names.length; i += 1) {
+                if (names[i].equals(name)) {
+                    output.writeEnumConstant(names, i);
+                    return;
+                }
+            }
+        } else {
+            for (int i = 0; i < values.length; i += 1) {
+                if (o == values[i]) {
+                    output.writeEnumConstant(names, i);
+                    return;
+                }
+            }
+        }
+        throw new IllegalStateException("Bad enum: " + o);
+    }
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        String name = rawObject.getEnum();
+        for (int i = 0; i < names.length; i += 1) {
+            if (names[i].equals(name)) {
+                Object o = values[i];
+                converted.put(rawObject, o);
+                return o;
+            }
+        }
+        throw new IllegalArgumentException
+            ("Enum constant is not defined: " + name);
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        input.skipFast(input.getPackedIntByteLength());
+    }
+
+    @Override
+    boolean evolve(Format newFormatParam, Evolver evolver) {
+        if (!(newFormatParam instanceof EnumFormat)) {
+            evolver.addEvolveError
+                (this, newFormatParam,
+                 "Incompatible enum type changed detected",
+                 "An enum class may not be changed to a non-enum type");
+            /* For future:
+            evolver.addMissingMutation
+                (this, newFormatParam,
+                 "Converter is required when an enum class is changed to " +
+                 "a non-enum type");
+            */
+            return false;
+        }
+        EnumFormat newFormat = (EnumFormat) newFormatParam;
+        if (Arrays.equals(names, newFormat.names)) {
+            evolver.useOldFormat(this, newFormat);
+            return true;
+        } else {
+            Set<String> oldNames = new HashSet<String>(Arrays.asList(names));
+            List<String> newNames = Arrays.asList(newFormat.names);
+            if (newNames.containsAll(oldNames)) {
+                evolver.useEvolvedFormat(this, newFormat, newFormat);
+                return true;
+            } else {
+                oldNames.removeAll(newNames);
+                evolver.addEvolveError
+                    (this, newFormat,
+                     "Incompatible enum type changed detected",
+                     "Enum values may not be removed: " + oldNames);
+                /* For future:
+                evolver.addMissingMutation
+                    (this, newFormatParam,
+                     "Converter is required when a value is removed from an " +
+                     "enum: " + oldNames);
+                */
+                return false;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Evolver.java b/src/com/sleepycat/persist/impl/Evolver.java
new file mode 100644
index 0000000000000000000000000000000000000000..ae47288e4a813a45e8f39735a90aeed9506632f7
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Evolver.java
@@ -0,0 +1,754 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Evolver.java,v 1.15.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseException;
+/* <!-- begin JE only --> */
+import com.sleepycat.je.DatabaseNotFoundException;
+/* <!-- end JE only --> */
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Deleter;
+import com.sleepycat.persist.evolve.Mutation;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+
+/**
+ * Evolves, as needed, each old format that is still relevant, using Mutations
+ * to configure deleters, renamers, and converters.
+ *
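+ * <p>A minimal sketch of supplying mutations when opening a store (class and
+ * field names are hypothetical):</p>
+ *
+ * <pre>
+ *  Mutations mutations = new Mutations();
+ *  // Rename a class that was moved to a new package.
+ *  mutations.addRenamer(new Renamer("old.package.Person", 0,
+ *                                   "my.package.Person"));
+ *  // Drop a field that was removed in version 1 of the class.
+ *  mutations.addDeleter(new Deleter("my.package.Person", 0,
+ *                                   "obsoleteField"));
+ *  storeConfig.setMutations(mutations);
+ * </pre>
+ *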
+ * @author Mark Hayes
+ */
+class Evolver {
+
+    static final int EVOLVE_NONE = 0;
+    static final int EVOLVE_NEEDED = 1;
+    static final int EVOLVE_FAILURE = 2;
+
+    private PersistCatalog catalog;
+    private String storePrefix;
+    private Mutations mutations;
+    private Map<String,Format> newFormats;
+    private boolean forceEvolution;
+    private boolean disallowClassChanges;
+    private boolean nestedFormatsChanged;
+    private Map<Format,Format> changedFormats;
+    private StringBuilder errors;
+    private Set<String> deleteDbs;
+    private Map<String,String> renameDbs;
+    private Map<Format,Format> renameFormats;
+    private Map<Integer,Boolean> evolvedFormats;
+    private List<Format> unprocessedFormats;
+    private Map<Format,Set<Format>> subclassMap;
+
+    Evolver(PersistCatalog catalog,
+            String storePrefix,
+            Mutations mutations,
+            Map<String,Format> newFormats,
+            boolean forceEvolution,
+            boolean disallowClassChanges) {
+        this.catalog = catalog;
+        this.storePrefix = storePrefix;
+        this.mutations = mutations;
+        this.newFormats = newFormats;
+        this.forceEvolution = forceEvolution;
+        this.disallowClassChanges = disallowClassChanges;
+        changedFormats = new IdentityHashMap<Format,Format>();
+        errors = new StringBuilder();
+        deleteDbs = new HashSet<String>();
+        renameDbs = new HashMap<String,String>();
+        renameFormats = new IdentityHashMap<Format,Format>();
+        evolvedFormats = new HashMap<Integer,Boolean>();
+        unprocessedFormats = new ArrayList<Format>();
+        subclassMap = catalog.getSubclassMap();
+    }
+
+    final Mutations getMutations() {
+        return mutations;
+    }
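+
+    /*
+     * Illustrative sketch (hypothetical class names, not part of this
+     * class): the Mutations consumed by the Evolver are configured by the
+     * application before opening the store.  For example:
+     *
+     *   Mutations mutations = new Mutations();
+     *   mutations.addRenamer(new Renamer("old.pkg.Person", 2,
+     *                                    "new.pkg.Person"));
+     *   mutations.addDeleter(new Deleter("old.pkg.Obsolete", 1));
+     *   StoreConfig config = new StoreConfig();
+     *   config.setMutations(mutations);
+     */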
+
+    /**
+     * Returns whether any formats were changed during evolution, and therefore
+     * need to be stored in the catalog.
+     */
+    boolean areFormatsChanged() {
+        return !changedFormats.isEmpty();
+    }
+
+    /**
+     * Returns whether the given format was changed during evolution.
+     */
+    boolean isFormatChanged(Format format) {
+        return changedFormats.containsKey(format);
+    }
+
+    private void setFormatsChanged(Format oldFormat) {
+        checkClassChangesAllowed(oldFormat);
+        changedFormats.put(oldFormat, oldFormat);
+        nestedFormatsChanged = true;
+        /* PersistCatalog.expectNoClassChanges is true in unit tests only. */
+        if (PersistCatalog.expectNoClassChanges) {
+            throw new IllegalStateException("expectNoClassChanges");
+        }
+    }
+
+    private void checkClassChangesAllowed(Format oldFormat) {
+        if (disallowClassChanges) {
+            throw new IllegalStateException
+                ("When performing an upgrade, class changes are not allowed " +
+                 "but were made to: " + oldFormat.getClassName());
+        }
+    }
+
+    /**
+     * Returns the set of formats for a specific superclass format, or null if
+     * the superclass is not a complex type or has no subclasses.
+     */
+    Set<Format> getSubclassFormats(Format superFormat) {
+        return subclassMap.get(superFormat);
+    }
+
+    /**
+     * Returns an error string if any mutations are invalid or missing, or
+     * returns null otherwise.  If non-null is returned, the store may not be
+     * opened.
+     */
+    String getErrors() {
+        if (errors.length() > 0) {
+            return errors.toString();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Adds a newline and the given error.
+     */
+    private void addError(String error) {
+        if (errors.length() > 0) {
+            errors.append("\n---\n");
+        }
+        errors.append(error);
+    }
+
+    private String getClassVersionLabel(Format format, String prefix) {
+        if (format != null) {
+            return prefix +
+                   " class: " + format.getClassName() +
+                   " version: " + format.getVersion();
+        } else {
+            return "";
+        }
+    }
+
+    /**
+     * Adds a specified error when no specific mutation is involved.
+     */
+    void addEvolveError(Format oldFormat,
+                        Format newFormat,
+                        String scenario,
+                        String error) {
+        checkClassChangesAllowed(oldFormat);
+        if (scenario == null) {
+            scenario = "Error";
+        }
+        addError(scenario + " when evolving" +
+                 getClassVersionLabel(oldFormat, "") +
+                 getClassVersionLabel(newFormat, " to") +
+                 " Error: " + error);
+    }
+
+    /**
+     * Adds an error for an invalid mutation.
+     */
+    void addInvalidMutation(Format oldFormat,
+                            Format newFormat,
+                            Mutation mutation,
+                            String error) {
+        checkClassChangesAllowed(oldFormat);
+        addError("Invalid mutation: " + mutation +
+                 getClassVersionLabel(oldFormat, " For") +
+                 getClassVersionLabel(newFormat, " New") +
+                 " Error: " + error);
+    }
+
+    /**
+     * Adds an error for a missing mutation.
+     */
+    void addMissingMutation(Format oldFormat,
+                            Format newFormat,
+                            String error) {
+        checkClassChangesAllowed(oldFormat);
+        addError("Mutation is missing to evolve" +
+                 getClassVersionLabel(oldFormat, "") +
+                 getClassVersionLabel(newFormat, " to") +
+                 " Error: " + error);
+    }
+
+    /**
+     * Called by PersistCatalog for all non-entity formats.
+     */
+    void addNonEntityFormat(Format oldFormat) {
+        unprocessedFormats.add(oldFormat);
+    }
+
+    /**
+     * Called by PersistCatalog after calling evolveFormat or
+     * addNonEntityFormat for all old formats.
+     *
+     * We do not require deletion of an unreferenced class for two
+     * reasons: 1) built-in proxy classes may not be referenced, 2) the
+     * user may wish to declare persistent classes that are not yet used.
+     */
+    void finishEvolution() {
+        /* Process unreferenced classes. */
+        for (Format oldFormat : unprocessedFormats) {
+            oldFormat.setUnused(true);
+            evolveFormat(oldFormat);
+        }
+    }
+
+    /**
+     * Called by PersistCatalog for all entity formats, and by Format.evolve
+     * methods for all potentially referenced non-entity formats.
+     */
+    boolean evolveFormat(Format oldFormat) {
+        if (oldFormat.isNew()) {
+            return true;
+        }
+        boolean result;
+        Format oldEntityFormat = oldFormat.getEntityFormat();
+        boolean trackEntityChanges = oldEntityFormat != null;
+        boolean saveNestedFormatsChanged = nestedFormatsChanged;
+        if (trackEntityChanges) {
+            nestedFormatsChanged = false;
+        }
+        Integer oldFormatId = oldFormat.getId();
+        if (evolvedFormats.containsKey(oldFormatId)) {
+            result = evolvedFormats.get(oldFormatId);
+        } else {
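+            /* Pre-mark as evolved to terminate recursion on format cycles. */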
+            evolvedFormats.put(oldFormatId, true);
+            result = evolveFormatInternal(oldFormat);
+            evolvedFormats.put(oldFormatId, result);
+        }
+        if (oldFormat.getLatestVersion().isNew()) {
+            nestedFormatsChanged = true;
+        }
+        if (trackEntityChanges) {
+            if (nestedFormatsChanged) {
+                Format latest = oldEntityFormat.getLatestVersion();
+                if (latest != null) {
+                    latest.setEvolveNeeded(true);
+                }
+            }
+            nestedFormatsChanged = saveNestedFormatsChanged;
+        }
+        return result;
+    }
+
+    /**
+     * Tries to evolve a given existing format to the current version of the
+     * class and returns false if an invalid mutation is encountered or the
+     * configured mutations are not sufficient.
+     */
+    private boolean evolveFormatInternal(Format oldFormat) {
+
+        /* Predefined formats and deleted classes never need evolving. */
+        if (Format.isPredefined(oldFormat) || oldFormat.isDeleted()) {
+            return true;
+        }
+
+        /* Get class mutations. */
+        String oldName = oldFormat.getClassName();
+        int oldVersion = oldFormat.getVersion();
+        Renamer renamer = mutations.getRenamer(oldName, oldVersion, null);
+        Deleter deleter = mutations.getDeleter(oldName, oldVersion, null);
+        Converter converter =
+            mutations.getConverter(oldName, oldVersion, null);
+        if (deleter != null && (converter != null || renamer != null)) {
+            addInvalidMutation
+                (oldFormat, null, deleter,
+                 "Class Deleter not allowed along with a Renamer or " +
+                 "Converter for the same class");
+            return false;
+        }
+
+        /*
+         * For determining the new name, arrays get special treatment.  The
+         * component format is evolved in the process, and we disallow mutations
+         * for arrays.
+         */
+        String newName;
+        if (oldFormat.isArray()) {
+            if (deleter != null || converter != null || renamer != null) {
+                Mutation mutation = (deleter != null) ? deleter :
+                    ((converter != null) ? converter : renamer);
+                addInvalidMutation
+                    (oldFormat, null, mutation,
+                     "Mutations not allowed for an array");
+                return false;
+            }
+            Format compFormat = oldFormat.getComponentType();
+            if (!evolveFormat(compFormat)) {
+                return false;
+            }
+            Format latest = compFormat.getLatestVersion();
+            if (latest != compFormat) {
+                newName = (latest.isArray() ? "[" : "[L") +
+                           latest.getClassName() + ';';
+            } else {
+                newName = oldName;
+            }
+        } else {
+            newName = (renamer != null) ? renamer.getNewName() : oldName;
+        }
+
+        /* Try to get the new class format.  Save exception for later. */
+        Format newFormat;
+        String newFormatException;
+        try {
+            Class newClass = SimpleCatalog.classForName(newName);
+            try {
+                newFormat = catalog.createFormat(newClass, newFormats);
+                assert newFormat != oldFormat : newFormat.getClassName();
+                newFormatException = null;
+            } catch (Exception e) {
+                newFormat = null;
+                newFormatException = e.toString();
+            }
+        } catch (ClassNotFoundException e) {
+            newFormat = null;
+            newFormatException = e.toString();
+        }
+
+        if (newFormat != null) {
+
+            /*
+             * If the old format is not the existing latest version and the new
+             * format is not an existing format, then we must evolve the latest
+             * old version to the new format first.  We cannot evolve the old
+             * format to a new format that may be discarded because it is equal
+             * to the latest existing format (which will remain the current
+             * version).
+             */
+            if (oldFormat != oldFormat.getLatestVersion() &&
+                newFormat.getPreviousVersion() == null) {
+                assert newFormats.containsValue(newFormat);
+                Format oldLatestFormat = oldFormat.getLatestVersion();
+                if (!evolveFormat(oldLatestFormat)) {
+                    return false;
+                }
+                if (oldLatestFormat == oldLatestFormat.getLatestVersion()) {
+                    assert !newFormats.containsValue(newFormat) : newFormat;
+                    /* newFormat equals oldLatestFormat and was discarded. */
+                    newFormat = oldLatestFormat;
+                }
+            }
+
+            /*
+             * If the old format was previously evolved to the new format
+             * (which means the new format is actually an existing format),
+             * then there is nothing to do.  This is the case where no class
+             * changes were made.
+             *
+             * However, if mutations were specified when opening the catalog
+             * that are different than the mutations last used, then we must
+             * force the re-evolution of all old formats.
+             */
+            if (!forceEvolution &&
+                newFormat == oldFormat.getLatestVersion()) {
+                return true;
+            }
+        }
+
+        /* Apply class Renamer and continue if successful. */
+        if (renamer != null) {
+            if (!applyClassRenamer(renamer, oldFormat, newFormat)) {
+                return false;
+            }
+        }
+
+        /* Apply class Converter and return. */
+        if (converter != null) {
+            if (oldFormat.isEntity()) {
+                if (newFormat == null || !newFormat.isEntity()) {
+                    addInvalidMutation
+                        (oldFormat, newFormat, converter,
+                         "Class converter not allowed for an entity class " +
+                         "that is no longer present or no longer has an " +
+                         "@Entity annotation");
+                    return false;
+                }
+                if (!oldFormat.evolveMetadata(newFormat, converter, this)) {
+                    return false;
+                }
+            }
+            return applyConverter(converter, oldFormat, newFormat);
+        }
+
+        /* Apply class Deleter and return. */
+        boolean needDeleter =
+            (newFormat == null) ||
+            (newFormat.isEntity() != oldFormat.isEntity());
+        if (deleter != null) {
+            if (!needDeleter) {
+                addInvalidMutation
+                    (oldFormat, newFormat, deleter,
+                     "Class deleter not allowed when the class and its " +
+                     "@Entity or @Persistent annotation is still present");
+                return false;
+            }
+            return applyClassDeleter(deleter, oldFormat, newFormat);
+        } else {
+            if (needDeleter) {
+                if (newFormat == null) {
+                    assert newFormatException != null;
+                    /* FindBugs newFormat known to be null excluded. */
+                    addMissingMutation
+                        (oldFormat, newFormat, newFormatException);
+                } else {
+                    addMissingMutation
+                        (oldFormat, newFormat,
+                         "@Entity switched to/from @Persistent");
+                }
+                return false;
+            }
+        }
+
+        /*
+         * Class-level mutations have been applied.  Now apply field mutations
+         * (for complex classes) or special conversions (enum conversions, for
+         * example) by calling the old format's evolve method.
+         */
+        return oldFormat.evolve(newFormat, this);
+    }
+
+    /**
+     * Use the old format and discard the new format.  Called by
+     * Format.evolve when the old and new formats are identical.
+     */
+    void useOldFormat(Format oldFormat, Format newFormat) {
+        Format renamedFormat = renameFormats.get(oldFormat);
+        if (renamedFormat != null) {
+
+            /*
+             * The format was renamed but, because this method is called, we
+             * know that no other class changes were made.  Use the new/renamed
+             * format as the reader.
+             */
+            assert renamedFormat == newFormat;
+            useEvolvedFormat(oldFormat, renamedFormat, renamedFormat);
+        } else if (newFormat != null &&
+                   (oldFormat.getVersion() != newFormat.getVersion() ||
+                    !oldFormat.isCurrentVersion())) {
+
+            /*
+             * If the user wants a new version number, but there are no other
+             * changes, we will oblige.  Or, if an attempt is being made to
+             * use an old version, then the following events happened and we
+             * must evolve the old format:
+             * 1) The (previously) latest version of the format was evolved
+             * because it is not equal to the live class version.  Note that
+             * evolveFormatInternal always evolves the latest version first.
+             * 2) We are now attempting to evolve an older version of the same
+             * format, and it happens to be equal to the live class version.
+             * However, we're already committed to the new format, and we must
+             * evolve all versions.
+             * [#16467]
+             */
+            useEvolvedFormat(oldFormat, newFormat, newFormat);
+        } else {
+            /* The new format is discarded. */
+            catalog.useExistingFormat(oldFormat);
+            if (newFormat != null) {
+                newFormats.remove(newFormat.getClassName());
+            }
+        }
+    }
+
+    /**
+     * Install an evolver Reader in the old format.  Called by Format.evolve
+     * when the old and new formats are not identical.
+     */
+    void useEvolvedFormat(Format oldFormat,
+                          Reader evolveReader,
+                          Format newFormat) {
+        oldFormat.setReader(evolveReader);
+        if (newFormat != null) {
+            oldFormat.setLatestVersion(newFormat);
+        }
+        setFormatsChanged(oldFormat);
+    }
+
+    private boolean applyClassRenamer(Renamer renamer,
+                                      Format oldFormat,
+                                      Format newFormat) {
+        if (!checkUpdatedVersion(renamer, oldFormat, newFormat)) {
+            return false;
+        }
+        if (oldFormat.isEntity() && oldFormat.isCurrentVersion()) {
+            String newClassName = newFormat.getClassName();
+            String oldClassName = oldFormat.getClassName();
+            /* Queue the renaming of the primary and secondary databases. */
+            renameDbs.put
+                (Store.makePriDbName(storePrefix, oldClassName),
+                 Store.makePriDbName(storePrefix, newClassName));
+            for (SecondaryKeyMetadata keyMeta :
+                 oldFormat.getEntityMetadata().getSecondaryKeys().values()) {
+                String keyName = keyMeta.getKeyName();
+                renameDbs.put
+                    (Store.makeSecDbName(storePrefix, oldClassName, keyName),
+                     Store.makeSecDbName(storePrefix, newClassName, keyName));
+            }
+        }
+
+        /*
+         * Link the old format to the renamed format so that we can detect the
+         * rename in useOldFormat.
+         */
+        renameFormats.put(oldFormat, newFormat);
+
+        setFormatsChanged(oldFormat);
+        return true;
+    }
+
+    /**
+     * Called by ComplexFormat when a secondary key name is changed.
+     */
+    void renameSecondaryDatabase(String oldEntityClass,
+                                 String newEntityClass,
+                                 String oldKeyName,
+                                 String newKeyName) {
+        renameDbs.put
+            (Store.makeSecDbName(storePrefix, oldEntityClass, oldKeyName),
+             Store.makeSecDbName(storePrefix, newEntityClass, newKeyName));
+    }
+
+    private boolean applyClassDeleter(Deleter deleter,
+                                      Format oldFormat,
+                                      Format newFormat) {
+        if (!checkUpdatedVersion(deleter, oldFormat, newFormat)) {
+            return false;
+        }
+        if (oldFormat.isEntity() && oldFormat.isCurrentVersion()) {
+            /* Queue the deletion of the primary and secondary databases. */
+            String className = oldFormat.getClassName();
+            deleteDbs.add(Store.makePriDbName(storePrefix, className));
+            for (SecondaryKeyMetadata keyMeta :
+                 oldFormat.getEntityMetadata().getSecondaryKeys().values()) {
+                deleteDbs.add(Store.makeSecDbName
+                    (storePrefix, className, keyMeta.getKeyName()));
+            }
+        }
+
+        /*
+         * Set the format to deleted last, so that the above test using
+         * isCurrentVersion works properly.
+         */
+        oldFormat.setDeleted(true);
+        if (newFormat != null) {
+            oldFormat.setLatestVersion(newFormat);
+        }
+
+        setFormatsChanged(oldFormat);
+        return true;
+    }
+
+    /**
+     * Called by ComplexFormat when a secondary key is dropped.
+     */
+    void deleteSecondaryDatabase(String oldEntityClass, String keyName) {
+        deleteDbs.add(Store.makeSecDbName
+            (storePrefix, oldEntityClass, keyName));
+    }
+
+    private boolean applyConverter(Converter converter,
+                                   Format oldFormat,
+                                   Format newFormat) {
+        if (!checkUpdatedVersion(converter, oldFormat, newFormat)) {
+            return false;
+        }
+        Reader reader = new ConverterReader(converter);
+        useEvolvedFormat(oldFormat, reader, newFormat);
+        return true;
+    }
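+
+    /*
+     * Illustrative sketch (hypothetical names): a class Converter handled by
+     * applyConverter above is registered by the application as:
+     *
+     *   mutations.addConverter(new Converter("old.pkg.Temperature", 0,
+     *                                        new FahrenheitToCelsius()));
+     *
+     * where FahrenheitToCelsius is an application class implementing
+     * com.sleepycat.persist.evolve.Conversion.
+     */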
+
+    boolean isClassConverted(Format format) {
+        return format.getReader() instanceof ConverterReader;
+    }
+
+    private boolean checkUpdatedVersion(Mutation mutation,
+                                        Format oldFormat,
+                                        Format newFormat) {
+        if (newFormat != null &&
+            !oldFormat.isEnum() &&
+            newFormat.getVersion() <= oldFormat.getVersion()) {
+            addInvalidMutation
+                (oldFormat, newFormat, mutation,
+                 "A new higher version number must be assigned");
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    boolean checkUpdatedVersion(String scenario,
+                                Format oldFormat,
+                                Format newFormat) {
+        if (newFormat != null &&
+            !oldFormat.isEnum() &&
+            newFormat.getVersion() <= oldFormat.getVersion()) {
+            addEvolveError
+                (oldFormat, newFormat, scenario,
+                 "A new higher version number must be assigned");
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    void renameAndRemoveDatabases(Store store, Transaction txn)
+        throws DatabaseException {
+
+        for (String dbName : deleteDbs) {
+            try {
+                String[] fileAndDbNames = store.parseDbName(dbName);
+                DbCompat.removeDatabase
+                    (store.getEnvironment(), txn,
+                     fileAndDbNames[0], fileAndDbNames[1]);
+            /* <!-- begin JE only --> */
+            } catch (DatabaseNotFoundException ignored) {
+            /* <!-- end JE only --> */
+            } catch (FileNotFoundException ignored) {
+            }
+        }
+        for (Map.Entry<String,String> entry : renameDbs.entrySet()) {
+            String oldName = entry.getKey();
+            String newName = entry.getValue();
+            try {
+                String[] oldFileAndDbNames = store.parseDbName(oldName);
+                String[] newFileAndDbNames = store.parseDbName(newName);
+                DbCompat.renameDatabase
+                    (store.getEnvironment(), txn,
+                     oldFileAndDbNames[0], oldFileAndDbNames[1],
+                     newFileAndDbNames[0], newFileAndDbNames[1]);
+            /* <!-- begin JE only --> */
+            } catch (DatabaseNotFoundException ignored) {
+            /* <!-- end JE only --> */
+            } catch (FileNotFoundException ignored) {
+            }
+        }
+    }
+
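+    /*
+     * Illustrative sketch (hypothetical names): a field Renamer that
+     * evolveRequiredKeyField below accepts when a key field is renamed:
+     *
+     *   mutations.addRenamer(new Renamer("pkg.PersonKey", 1,
+     *                                    "ssn", "socialSecurityNumber"));
+     */
+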
+    /**
+     * Evolves a primary key field or composite key field.
+     */
+    int evolveRequiredKeyField(Format oldParent,
+                               Format newParent,
+                               FieldInfo oldField,
+                               FieldInfo newField) {
+        int result = EVOLVE_NONE;
+        String oldName = oldField.getName();
+        final String FIELD_KIND =
+            "primary key field or composite key class field";
+        final String FIELD_LABEL =
+            FIELD_KIND + ": " + oldName;
+
+        if (newField == null) {
+            addMissingMutation
+                (oldParent, newParent,
+                 "Field is missing and deletion is not allowed for a " +
+                 FIELD_LABEL);
+            return EVOLVE_FAILURE;
+        }
+
+        /* Check field mutations.  Only a Renamer is allowed. */
+        Deleter deleter = mutations.getDeleter
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        if (deleter != null) {
+            addInvalidMutation
+                (oldParent, newParent, deleter,
+                 "Deleter is not allowed for a " + FIELD_LABEL);
+            return EVOLVE_FAILURE;
+        }
+        Converter converter = mutations.getConverter
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        if (converter != null) {
+            addInvalidMutation
+                (oldParent, newParent, converter,
+                 "Converter is not allowed for a " + FIELD_LABEL);
+            return EVOLVE_FAILURE;
+        }
+        Renamer renamer = mutations.getRenamer
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        String newName = newField.getName();
+        if (renamer != null) {
+            if (!renamer.getNewName().equals(newName)) {
+                addInvalidMutation
+                    (oldParent, newParent, renamer,
+                     "Renamer new name: " + renamer.getNewName() +
+                     " must match the field name in the new class: " +
+                     newName + " for a " + FIELD_LABEL);
+                return EVOLVE_FAILURE;
+            }
+            result = EVOLVE_NEEDED;
+        } else {
+            if (!oldName.equals(newName)) {
+                addMissingMutation
+                    (oldParent, newParent,
+                     "Renamer is required when field name is changed from: " +
+                     oldName + " to: " + newName);
+                return EVOLVE_FAILURE;
+            }
+        }
+
+        /*
+         * Evolve the declared version of the field format.
+         */
+        Format oldFieldFormat = oldField.getType();
+        if (!evolveFormat(oldFieldFormat)) {
+            return EVOLVE_FAILURE;
+        }
+        Format oldLatestFormat = oldFieldFormat.getLatestVersion();
+        Format newFieldFormat = newField.getType();
+
+        if (oldLatestFormat.getClassName().equals
+                (newFieldFormat.getClassName())) {
+            /* Formats are identical. */
+            return result;
+        } else if ((oldLatestFormat.getWrapperFormat() != null &&
+                    oldLatestFormat.getWrapperFormat().getId() ==
+                    newFieldFormat.getId()) ||
+                   (newFieldFormat.getWrapperFormat() != null &&
+                    newFieldFormat.getWrapperFormat().getId() ==
+                    oldLatestFormat.getId())) {
+            /* Primitive <-> primitive wrapper type change. */
+            return EVOLVE_NEEDED;
+        } else {
+            /* Type was changed incompatibly. */
+            addEvolveError
+                (oldParent, newParent,
+                 "Type may not be changed for a " + FIELD_KIND,
+                 "Old field type: " + oldLatestFormat.getClassName() +
+                 " is not compatible with the new type: " +
+                 newFieldFormat.getClassName() +
+                 " for field: " + oldName);
+            return EVOLVE_FAILURE;
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/FieldInfo.java b/src/com/sleepycat/persist/impl/FieldInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..d842d2700a67929a440f8819dcfdba2064402365
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/FieldInfo.java
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FieldInfo.java,v 1.25.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.model.ClassMetadata;
+
+/**
+ * A field definition used by ComplexFormat and CompositeKeyFormat.
+ *
+ * <p>Note that the equals(), compareTo() and hashCode() methods only use the
+ * name field in this class.  Comparing two FieldInfo objects is only done when
+ * both are declared in the same class, so comparing the field name is
+ * sufficient.</p>
+ *
+ * @author Mark Hayes
+ */
+class FieldInfo implements RawField, Serializable, Comparable<FieldInfo> {
+
+    private static final long serialVersionUID = 2062721100372306296L;
+
+    /**
+     * Returns a list of all non-transient non-static fields that are declared
+     * in the given class.
+     */
+    static List<FieldInfo> getInstanceFields(Class cls,
+                                             ClassMetadata clsMeta) {
+        List<FieldInfo> fields = null;
+        if (clsMeta != null) {
+            Collection<FieldMetadata> persistentFields =
+                clsMeta.getPersistentFields();
+            if (persistentFields != null) {
+                fields = new ArrayList<FieldInfo>(persistentFields.size());
+                String clsName = cls.getName();
+                for (FieldMetadata fieldMeta : persistentFields) {
+                    if (!clsName.equals(fieldMeta.getDeclaringClassName())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " must be declared in " + clsName);
+                    }
+                    Field field;
+                    try {
+                        field = cls.getDeclaredField(fieldMeta.getName());
+                    } catch (NoSuchFieldException e) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " is not declared in this class");
+                    }
+                    if (!field.getType().getName().equals
+                        (fieldMeta.getClassName())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " must be of type " + field.getType().getName());
+                    }
+                    if (Modifier.isStatic(field.getModifiers())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " may not be static");
+                    }
+                    fields.add(new FieldInfo(field));
+                }
+            }
+        }
+        if (fields == null) {
+            Field[] declaredFields = cls.getDeclaredFields();
+            fields = new ArrayList<FieldInfo>(declaredFields.length);
+            for (Field field : declaredFields) {
+                int mods = field.getModifiers();
+                if (!Modifier.isTransient(mods) && !Modifier.isStatic(mods)) {
+                    fields.add(new FieldInfo(field));
+                }
+            }
+        }
+        return fields;
+    }
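+
+    /*
+     * Illustrative sketch (hypothetical class): for
+     *
+     *   class T { int a; transient int b; static int c; }
+     *
+     * getInstanceFields returns a FieldInfo for 'a' only, since transient
+     * and static fields are never persistent.
+     */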
+
+    static FieldInfo getField(List<FieldInfo> fields, String fieldName) {
+        int i = getFieldIndex(fields, fieldName);
+        if (i >= 0) {
+            return fields.get(i);
+        } else {
+            return null;
+        }
+    }
+
+    static int getFieldIndex(List<FieldInfo> fields, String fieldName) {
+        for (int i = 0; i < fields.size(); i += 1) {
+            FieldInfo field = fields.get(i);
+            if (fieldName.equals(field.getName())) {
+                return i;
+            }
+        }
+        return -1;
+    }
+
+    private String name;
+    private String className;
+    private Format format;
+    private transient Class cls;
+
+    private FieldInfo(Field field) {
+        name = field.getName();
+        cls = field.getType();
+        className = cls.getName();
+    }
+
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+
+        /*
+         * Prior to initialization we save the newly created format in the
+         * format field so that it can be used by class evolution.  But note
+         * that it may be replaced by the initialize method.  [#16233]
+         */
+        format = catalog.createFormat(cls, newFormats);
+    }
+
+    void migrateFromBeta(Map<String,Format> formatMap) {
+        if (format == null) {
+            format = formatMap.get(className);
+            if (format == null) {
+                throw new IllegalStateException(className);
+            }
+        }
+    }
+
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+
+        /*
+         * Reset the format if it was never initialized, which can occur when a
+         * new format instance is created during class evolution and discarded
+         * because nothing changed. [#16233]
+         *
+         * Note that the format field may be null when this FieldInfo belongs
+         * to a composite key format that is used as a key comparator (via
+         * PersistComparator).  In that case (null format), we must not
+         * attempt to reset the format.
+         */
+        if (format != null && format.isNew()) {
+            format = catalog.getFormat(className);
+        }
+    }
+
+    Class getFieldClass() {
+        if (cls == null) {
+            try {
+                cls = SimpleCatalog.classForName(className);
+            } catch (ClassNotFoundException e) {
+                throw new IllegalStateException(e);
+            }
+        }
+        return cls;
+    }
+
+    String getClassName() {
+        return className;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public Format getType() {
+        return format;
+    }
+
+    public int compareTo(FieldInfo o) {
+        return name.compareTo(o.name);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof FieldInfo) {
+            FieldInfo o = (FieldInfo) other;
+            return name.equals(o.name);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return name.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "[Field name: " + name + " class: " + className + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Format.java b/src/com/sleepycat/persist/impl/Format.java
new file mode 100644
index 0000000000000000000000000000000000000000..4cc18220275b1991e796cec3edc94ae6de87cad7
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Format.java
@@ -0,0 +1,1103 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Format.java,v 1.40.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.model.PrimaryKeyMetadata;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * The base class for all object formats.  Formats are used to define the
+ * stored layout for all persistent classes, including simple types.
+ *
+ * The design documentation below describes the storage format for entities and
+ * its relationship to information stored per format in the catalog.
+ *
+ * Requirements
+ * ------------
+ * + Provides EntityBinding for objects and EntryBinding for keys.
+ * + Provides SecondaryKeyCreator, SecondaryMultiKeyCreator and
+ *   SecondaryMultiKeyNullifier (SecondaryKeyNullifier is redundant).
+ * + Works with reflection and bytecode enhancement.
+ * + For reflection only, works with any entity model not just annotations.
+ * + Bindings are usable independently of the persist API.
+ * + Performance is almost equivalent to hand coded tuple bindings.
+ * + Small performance penalty for compatible class changes (new fields,
+ *   widening).
+ * + Secondary key create/nullify do not have to deserialize the entire record;
+ *   in other words, store secondary keys at the start of the data.
+ *
+ * Class Format
+ * ------------
+ * Every distinct class format is given a unique format ID.  Format IDs are not
+ * equivalent to class version numbers (as in the version property of @Entity
+ * and @Persistent) because the format can change when the version number does
+ * not.  Changes that cause a unique format ID to be assigned are:
+ *
+ * + Add field.
+ * + Widen field type.
+ * + Change primitive type to primitive wrapper class.
+ * + Add or drop secondary key.
+ * + Any incompatible class change.
+ *
+ * The last item, incompatible class changes, also corresponds to a class
+ * version change.
+ *
+ * For each distinct class format the following information is conceptually
+ * stored in the catalog, keyed by format ID.
+ *
+ * - Class name
+ * - Class version number
+ * - Superclass format
+ * - Kind: simple, enum, complex, array
+ * - For kind == simple:
+ *     - Primitive class
+ * - For kind == enum:
+ *     - Array of constant names, sorted by name.
+ * - For kind == complex:
+ *     - Primary key fieldInfo, or null if no primary key is declared
+ *     - Array of secondary key fieldInfo, sorted by field name
+ *     - Array of other fieldInfo, sorted by field name
+ * - For kind == array:
+ *     - Component class format
+ *     - Number of array dimensions
+ * - Other metadata for RawType
+ *
+ * Where fieldInfo is:
+ *     - Field name
+ *     - Field class
+ *     - Other metadata for RawField
+ *
+ * Data Layout
+ * -----------
+ * For each entity instance the data layout is as follows:
+ *
+ *   instanceData: formatId keyFields... nonKeyFields...
+ *   keyFields:    fieldValue...
+ *   nonKeyFields: fieldValue...
+ *
+ * The formatId is the (positive non-zero) ID of a class format, defined above.
+ * This is the ID of the most derived class of the instance.  It is stored as a
+ * packed integer.
+ *
+ * Following the format ID, zero or more sets of secondary key field values
+ * appear, followed by zero or more sets of other class field values.
+ *
+ * The keyFields are the sets of secondary key fields for each class in order
+ * of the highest superclass first.  Within a class, fields are ordered by
+ * field name.
+ *
+ * The nonKeyFields are the sets of other non-key fields for each class in
+ * order of the highest superclass first.  Within a class, fields are ordered
+ * by field name.
+ *
+ * A field value is:
+ *
+ *   fieldValue:   primitiveValue
+ *               | nullId
+ *               | instanceRef
+ *               | instanceData
+ *               | simpleValue
+ *               | enumValue
+ *               | arrayValue
+ *
+ * For a primitive type, a primitive value is used as defined for tuple
+ * bindings.  For float and double, sorted float and sorted double tuple values
+ * are used.
+ *
+ * For a non-primitive type with a null value, a nullId is used that has a zero
+ * (illegal formatId) value.  This includes String and other simple reference
+ * types.  The formatId is stored as a packed integer, meaning that it is
+ * stored as a single zero byte.
+ *
+ * For a non-primitive type, an instanceRef is used for a non-null instance
+ * that appears earlier in the data byte array.  An instanceRef is the negation
+ * of the byte offset of the instanceData that appears earlier.  It is stored
+ * as a packed integer.
+ *
+ * The remaining rules apply only to reference types with non-null values that
+ * do not appear earlier in the data array.
+ *
+ * For an array type, an array formatId is used that identifies the component
+ * type and the number of array dimensions.  This is followed by an array
+ * length (stored as a packed integer) and zero or more fieldValue elements.
+ * For an array with N+1 dimensions where N is greater than zero, the leftmost
+ * dimension is enumerated such that each fieldValue element is itself an array
+ * of N dimensions or null.
+ *
+ *   arrayValue:  formatId length fieldValue...
+ *
+ * For an enum type, an enumValue is used, consisting of a formatId that
+ * identifies the enum class and an enumIndex (stored as a packed integer) that
+ * identifies the constant name in the enum constant array of the enum class
+ * format:
+ *
+ *   enumValue:   formatId enumIndex
+ *
+ * For a simple type, a simpleValue is used.  This consists of the formatId
+ * that identifies the class followed by the simple type value.  For a
+ * primitive wrapper type the simple type value is the corresponding primitive,
+ * for a Date it is the milliseconds as a long primitive, and for BigInteger or
+ * BigDecimal it is a byte array as defined for tuple bindings of these types.
+ *
+ *   simpleValue: formatId value
+ *
+ * For all other complex types, an instanceData is used, which is defined
+ * above.
+ *
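+ * Layout example (hypothetical entity, not from this codebase): an entity
+ * class with format ID 31, one secondary key field "ssn" (String) and one
+ * other field "name" (String), stores each instance roughly as:
+ *
+ *   31 ssnValue nameValue
+ *
+ * where 31 is the packed-integer format ID of the entity class, and each
+ * field value follows the rules above: a non-null String is written as a
+ * simpleValue, and a null field as the single zero nullId byte.
+ *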
+ * Secondary Keys
+ * --------------
+ * For secondary key support we must account for writing and nullifying
+ * specific keys.  Rather than instantiating the entity and then performing
+ * the secondary key operation, we strive to perform the secondary key
+ * operation directly on the byte format.
+ *
+ * To create a secondary key we skip over other fields and then copy the bytes
+ * of the embedded key.  This approach is very efficient because a) the entity
+ * is not instantiated, and b) the secondary keys are stored at the beginning
+ * of the byte format and can be quickly read.
+ *
+ * To nullify we currently instantiate the raw entity, set the key field to null
+ * (or remove it from the array/collection), and convert the raw entity back to
+ * bytes.  Although the performance of this approach is not ideal because it
+ * requires serialization, it avoids the complexity of modifying the packed
+ * serialized format directly, adjusting references to key objects, etc.  Plus,
+ * when we nullify a key we are going to write the record, so the serialization
+ * overhead may not be significant.  For the record, I tried implementing
+ * nullification of the bytes directly and found it was much too complex.
+ *
+ * Lifecycle
+ * ---------
+ * Formats are managed by a Catalog class.  Simple formats are managed by
+ * SimpleCatalog, and are copied from the SimpleCatalog by PersistCatalog.
+ * Other formats are managed by PersistCatalog.  The lifecycle of a format
+ * instance is:
+ *
+ * - Constructed by the catalog when a format is requested for a Class
+ *   that currently has no associated format.
+ *
+ * - The catalog calls setId() and adds the format to its format list
+ *   (indexed by format id) and map (keyed by class name).
+ *
+ * - The catalog calls collectRelatedFormats(), where a format can create
+ *   additional formats that it needs, or that should also be persistent.
+ *
+ * - The catalog calls initializeIfNeeded(), which calls the initialize()
+ *   method of the format class.
+ *
+ * - initialize() should initialize any transient fields in the format.
+ *   initialize() can assume that all related formats are available in the
+ *   catalog.  It may call initializeIfNeeded() for those related formats, if
+ *   it needs to interact with an initialized related format; this does not
+ *   cause a cycle, because initializeIfNeeded() does nothing for an already
+ *   initialized format.
+ *
+ * - The catalog creates a group of related formats at one time, and then
+ *   writes its entire list of formats to the catalog DB as a single record.
+ *   This grouping reduces the number of writes.
+ *
+ * - When a catalog is opened, the list of existing formats is read.  After
+ *   a format is deserialized, its initializeIfNeeded() method is called.
+ *   setId() and collectRelatedFormats() are not called, since the ID and
+ *   related formats are stored in serialized fields.
+ *
+ * - There are two modes for opening an existing catalog: raw mode and normal
+ *   mode.  In raw mode, the old format is used regardless of whether it
+ *   matches the current class definition; in fact the class is not accessed
+ *   and does not need to be present.
+ *
+ * - In normal mode, for each existing format that is initialized, a new format
+ *   is also created based on the current class and metadata definition.  If
+ *   the two formats are equal, the new format is discarded.  If they are
+ *   unequal, the new format becomes the current format and the old format's
+ *   evolve() method is called.  evolve() is responsible for adjusting the
+ *   old format for class evolution.  Any number of non-current formats may
+ *   exist for a given class, and each is set up to evolve to the single
+ *   current format for the class.
+ *
+ * @author Mark Hayes
+ */
+public abstract class Format implements Reader, RawType, Serializable {
+
+    private static final long serialVersionUID = 545633644568489850L;
+
+    /** Null reference. */
+    static final int ID_NULL     = 0;
+    /** Object */
+    static final int ID_OBJECT   = 1;
+    /** Boolean */
+    static final int ID_BOOL     = 2;
+    static final int ID_BOOL_W   = 3;
+    /** Byte */
+    static final int ID_BYTE     = 4;
+    static final int ID_BYTE_W   = 5;
+    /** Short */
+    static final int ID_SHORT    = 6;
+    static final int ID_SHORT_W  = 7;
+    /** Integer */
+    static final int ID_INT      = 8;
+    static final int ID_INT_W    = 9;
+    /** Long */
+    static final int ID_LONG     = 10;
+    static final int ID_LONG_W   = 11;
+    /** Float */
+    static final int ID_FLOAT    = 12;
+    static final int ID_FLOAT_W  = 13;
+    /** Double */
+    static final int ID_DOUBLE   = 14;
+    static final int ID_DOUBLE_W = 15;
+    /** Character */
+    static final int ID_CHAR     = 16;
+    static final int ID_CHAR_W   = 17;
+    /** String */
+    static final int ID_STRING   = 18;
+    /** BigInteger */
+    static final int ID_BIGINT   = 19;
+    /** BigDecimal */
+    static final int ID_BIGDEC   = 20;
+    /** Date */
+    static final int ID_DATE     = 21;
+    /** Number */
+    static final int ID_NUMBER   = 22;
+
+    /** First simple type. */
+    static final int ID_SIMPLE_MIN  = 2;
+    /** Last simple type. */
+    static final int ID_SIMPLE_MAX  = 21;
+    /** Last predefined ID, after which dynamic IDs are assigned. */
+    static final int ID_PREDEFINED  = 30;
+
+    static boolean isPredefined(Format format) {
+        return format.getId() <= ID_PREDEFINED;
+    }
+
+    private int id;
+    private String className;
+    private Reader reader;
+    private Format superFormat;
+    private Format latestFormat;
+    private Format previousFormat;
+    private Set<String> supertypes;
+    private boolean deleted;
+    private boolean unused;
+    private transient Catalog catalog;
+    private transient Class type;
+    private transient Format proxiedFormat;
+    private transient boolean initialized;
+
+    /**
+     * Creates a new format for a given class.
+     */
+    Format(Class type) {
+        this(type.getName());
+        this.type = type;
+        addSupertypes();
+    }
+
+    /**
+     * Creates a format for class evolution when no class may be present.
+     */
+    Format(String className) {
+        this.className = className;
+        latestFormat = this;
+        supertypes = new HashSet<String>();
+    }
+
+    /**
+     * Special handling for JE 3.0.12 beta formats.
+     */
+    void migrateFromBeta(Map<String,Format> formatMap) {
+        if (latestFormat == null) {
+            latestFormat = this;
+        }
+    }
+
+    final boolean isNew() {
+        return id == 0;
+    }
+
+    final Catalog getCatalog() {
+        return catalog;
+    }
+
+    /**
+     * Returns the format ID.
+     */
+    public final int getId() {
+        return id;
+    }
+
+    /**
+     * Called by the Catalog to set the format ID when a new format is added to
+     * the format list, before calling initializeIfNeeded().
+     */
+    final void setId(int id) {
+        this.id = id;
+    }
+
+    /**
+     * Returns the class that this format represents.  This method will return
+     * null in rawAccess mode, or for an unevolved format.
+     */
+    final Class getType() {
+        return type;
+    }
+
+    /**
+     * Called to get the type when it is known to exist for an uninitialized
+     * format.
+     */
+    final Class getExistingType() {
+        if (type == null) {
+            try {
+                type = SimpleCatalog.classForName(className);
+            } catch (ClassNotFoundException e) {
+                throw new IllegalStateException(e);
+            }
+        }
+        return type;
+    }
+
+    /**
+     * Returns the object for reading objects of the latest format.  For the
+     * latest version format, 'this' is returned.  For prior version formats, a
+     * reader that converts this version to the latest version is returned.
+     */
+    final Reader getReader() {
+
+        /*
+         * For unit testing, record whether any un-evolved formats are
+         * encountered.
+         */
+        if (this != reader) {
+            PersistCatalog.unevolvedFormatsEncountered = true;
+        }
+
+        return reader;
+    }
+
+    /**
+     * Changes the reader during format evolution.
+     */
+    final void setReader(Reader reader) {
+        this.reader = reader;
+    }
+
+    /**
+     * Returns the format of the superclass.
+     */
+    final Format getSuperFormat() {
+        return superFormat;
+    }
+
+    /**
+     * Called to set the format of the superclass during initialize().
+     */
+    final void setSuperFormat(Format superFormat) {
+        this.superFormat = superFormat;
+    }
+
+    /**
+     * Returns the format that is proxied by this format.  If non-null is
+     * returned, then this format is a PersistentProxy.
+     */
+    final Format getProxiedFormat() {
+        return proxiedFormat;
+    }
+
+    /**
+     * Called by ProxiedFormat to set the proxied format.
+     */
+    final void setProxiedFormat(Format proxiedFormat) {
+        this.proxiedFormat = proxiedFormat;
+    }
+
+    /**
+     * If this is the latest/evolved format, returns this; otherwise, returns
+     * the current version of this format.  Note that this WILL return a
+     * format for a deleted class if the latest format happens to be deleted.
+     */
+    final Format getLatestVersion() {
+        return latestFormat;
+    }
+
+    /**
+     * Returns the previous version of this format in the linked list of
+     * versions, or null if this is the only version.
+     */
+    public final Format getPreviousVersion() {
+        return previousFormat;
+    }
+
+    /**
+     * Called by Evolver to set the latest format when this old format is
+     * evolved.
+     */
+    final void setLatestVersion(Format newFormat) {
+
+        /*
+         * If this old format is the former latest version, link it to the new
+         * latest version.  This creates a singly linked list of versions
+         * starting with the latest.
+         */
+        if (latestFormat == this) {
+            newFormat.previousFormat = this;
+        }
+
+        latestFormat = newFormat;
+    }
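+
+    /*
+     * For illustration (hypothetical formats): if versions F1, F2 and F3 of
+     * one class are evolved in turn, every format ends with
+     * getLatestVersion() == F3, while getPreviousVersion() forms the chain
+     * F3 -> F2 -> F1, a singly linked list starting with the latest.
+     */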
+
+    /**
+     * Returns whether the class for this format was deleted.
+     */
+    final boolean isDeleted() {
+        return deleted;
+    }
+
+    /**
+     * Called by the Evolver when applying a Deleter mutation.
+     */
+    final void setDeleted(boolean deleted) {
+        this.deleted = deleted;
+    }
+
+    /**
+     * Called by the Evolver for a format that is never referenced.
+     */
+    final void setUnused(boolean unused) {
+        this.unused = unused;
+    }
+
+    /**
+     * Called by the Evolver with true when an entity format or any of its
+     * nested formats were changed.  Called by Store.evolve when an entity has
+     * been fully converted.  Overridden by ComplexFormat.
+     */
+    void setEvolveNeeded(boolean needed) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Overridden by ComplexFormat.
+     */
+    boolean getEvolveNeeded() {
+        throw new UnsupportedOperationException();
+    }
+
+    final boolean isInitialized() {
+        return initialized;
+    }
+
+    /**
+     * Called by the Catalog to initialize a format, and may also be called
+     * during initialize() for a related format to ensure that the related
+     * format is initialized.  This latter case is allowed to support
+     * bidirectional dependencies.  This method will do nothing if the format
+     * is already initialized.
+     */
+    final void initializeIfNeeded(Catalog catalog, EntityModel model) {
+        if (!initialized) {
+            initialized = true;
+            this.catalog = catalog;
+
+            /* Initialize objects serialized by an older Format class. */
+            if (latestFormat == null) {
+                latestFormat = this;
+            }
+            if (reader == null) {
+                reader = this;
+            }
+
+            /*
+             * The class is only guaranteed to be available in live (not raw)
+             * mode, for the current version of the format.
+             */
+            if (type == null &&
+                isCurrentVersion() &&
+                (isSimple() || !catalog.isRawAccess())) {
+                getExistingType();
+            }
+
+            /* Perform subclass-specific initialization. */
+            initialize(catalog, model,
+                       catalog.getInitVersion(this, false /*forReader*/));
+            reader.initializeReader
+                (catalog, model,
+                 catalog.getInitVersion(this, true /*forReader*/),
+                 this);
+        }
+    }
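+
+    /*
+     * For illustration: the catalog drives the lifecycle described in the
+     * class comment roughly as follows for a newly created format:
+     *
+     *   format.setId(nextFormatId);
+     *   format.collectRelatedFormats(catalog, newFormats);
+     *   format.initializeIfNeeded(catalog, model);
+     */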
+
+    /**
+     * Called to initialize a separate Reader implementation.  This method is
+     * called when no separate Reader exists, and does nothing.
+     */
+    public void initializeReader(Catalog catalog,
+                                 EntityModel model,
+                                 int initVersion,
+                                 Format oldFormat) {
+    }
+
+    /**
+     * Adds all interfaces and superclasses to the supertypes set.
+     */
+    private void addSupertypes() {
+        addInterfaces(type);
+        Class stype = type.getSuperclass();
+        while (stype != null && stype != Object.class) {
+            supertypes.add(stype.getName());
+            addInterfaces(stype);
+            stype = stype.getSuperclass();
+        }
+    }
+
+    /**
+     * Recursively adds interfaces to the supertypes set.
+     */
+    private void addInterfaces(Class cls) {
+        Class[] interfaces = cls.getInterfaces();
+        for (Class iface : interfaces) {
+            if (iface != Enhanced.class) {
+                supertypes.add(iface.getName());
+                addInterfaces(iface);
+            }
+        }
+    }
+
+    /**
+     * Certain formats (ProxiedFormat for example) prohibit nested fields that
+     * reference the parent object. [#15815]
+     */
+    boolean areNestedRefsProhibited() {
+        return false;
+    }
+
+    /* -- Start of RawType interface methods. -- */
+
+    public String getClassName() {
+        return className;
+    }
+
+    public int getVersion() {
+        ClassMetadata meta = getClassMetadata();
+        if (meta != null) {
+            return meta.getVersion();
+        } else {
+            return 0;
+        }
+    }
+
+    public Format getSuperType() {
+        return superFormat;
+    }
+
+    /* -- RawType methods that are overridden as needed in subclasses. -- */
+
+    public boolean isSimple() {
+        return false;
+    }
+
+    public boolean isPrimitive() {
+        return false;
+    }
+
+    public boolean isEnum() {
+        return false;
+    }
+
+    public List<String> getEnumConstants() {
+        return null;
+    }
+
+    public boolean isArray() {
+        return false;
+    }
+
+    public int getDimensions() {
+        return 0;
+    }
+
+    public Format getComponentType() {
+        return null;
+    }
+
+    public Map<String,RawField> getFields() {
+        return null;
+    }
+
+    public ClassMetadata getClassMetadata() {
+        return null;
+    }
+
+    public EntityMetadata getEntityMetadata() {
+        return null;
+    }
+
+    /* -- End of RawType methods. -- */
+
+    /* -- Methods that may optionally be overridden by subclasses. -- */
+
+    /**
+     * Called by EntityOutput in rawAccess mode to determine whether an object
+     * type is allowed to be assigned to a given field type.
+     */
+    boolean isAssignableTo(Format format) {
+        if (proxiedFormat != null) {
+            return proxiedFormat.isAssignableTo(format);
+        } else {
+            return format == this ||
+                   format.id == ID_OBJECT ||
+                   supertypes.contains(format.className);
+        }
+    }
+
+    /**
+     * For primitive types only, returns their associated wrapper type.
+     */
+    Format getWrapperFormat() {
+        return null;
+    }
+
+    /**
+     * Returns whether this format class is an entity class.
+     */
+    boolean isEntity() {
+        return false;
+    }
+
+    /**
+     * Returns whether this class is present in the EntityModel.  Returns false
+     * for a simple type, array type, or enum type.
+     */
+    boolean isModelClass() {
+        return false;
+    }
+
+    /**
+     * For an entity class or subclass, returns the base entity class; returns
+     * null in other cases.
+     */
+    ComplexFormat getEntityFormat() {
+        return null;
+    }
+
+    /**
+     * Called for an existing format that may not equal the current format for
+     * the same class.
+     *
+     * <p>If this method returns true, then it must have determined one of two
+     * things:
+     *  - that the old and new formats are equal, and it must have called
+     *  Evolver.useOldFormat; or
+     *  - that the old format can be evolved to the new format, and it must
+     *  have called Evolver.useEvolvedFormat.</p>
+     *
+     * <p>If this method returns false, then it must have determined that the
+     * old format could not be evolved to the new format, and it must have
+     * called Evolver.addInvalidMutation, addMissingMutation or
+     * addEvolveError.</p>
+     */
+    abstract boolean evolve(Format newFormat, Evolver evolver);
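+
+    /*
+     * A minimal sketch of a conforming evolve() implementation, assuming
+     * hypothetical helpers fieldsAreIdentical and canConvertTo; the Evolver
+     * calls use the signatures seen elsewhere in this patch, except for the
+     * error-reporting call, whose arguments are elided:
+     *
+     *   boolean evolve(Format newFormat, Evolver evolver) {
+     *       if (fieldsAreIdentical(newFormat)) {
+     *           evolver.useOldFormat(this, newFormat);   // formats are equal
+     *           return true;
+     *       }
+     *       if (canConvertTo(newFormat)) {
+     *           evolver.useEvolvedFormat(this, newFormat, newFormat);
+     *           return true;
+     *       }
+     *       evolver.addInvalidMutation(...);  // or addMissingMutation, etc.
+     *       return false;
+     *   }
+     */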
+
+    /**
+     * Called when a Converter handles evolution of a class, but we may still
+     * need to evolve the metadata.
+     */
+    boolean evolveMetadata(Format newFormat,
+                           Converter converter,
+                           Evolver evolver) {
+        return true;
+    }
+
+    /**
+     * Returns whether this format is the current format for its class.  If
+     * false is returned, this format is set up to evolve to the current format.
+     */
+    final boolean isCurrentVersion() {
+        return latestFormat == this && !deleted;
+    }
+
+    /**
+     * Returns whether this format has the same class as the given format,
+     * irrespective of version changes and renaming.
+     */
+    final boolean isSameClass(Format other) {
+        return latestFormat == other.latestFormat;
+    }
+
+    /* -- Abstract methods that must be implemented by subclasses. -- */
+
+    /**
+     * Initializes an uninitialized format, initializing its related formats
+     * (superclass formats and array component formats) first.
+     */
+    abstract void initialize(Catalog catalog,
+                             EntityModel model,
+                             int initVersion);
+
+    /**
+     * Calls catalog.createFormat for formats that this format depends on, or
+     * that should also be persistent.
+     */
+    abstract void collectRelatedFormats(Catalog catalog,
+                                        Map<String,Format> newFormats);
+
+    /*
+     * The remaining methods are used to read objects from data bytes via
+     * EntityInput, and to write objects as data bytes via EntityOutput.
+     * Ultimately these methods call methods in the Accessor interface to
+     * get/set fields in the object.  Most methods have a rawAccess parameter
+     * that determines whether the object is a raw object or a real persistent
+     * object.
+     *
+     * The first group of methods are abstract and must be implemented by
+     * format classes.  The second group have default implementations that
+     * throw UnsupportedOperationException and may optionally be overridden.
+     */
+
+    /**
+     * Creates an array of the format's class of the given length, as if
+     * Array.newInstance(getType(), len) were called.  Formats implement this
+     * method for specific classes, or call the accessor, to avoid the
+     * reflection overhead of Array.newInstance.
+     */
+    abstract Object newArray(int len);
+
+    /**
+     * Creates a new instance of the target class using its default
+     * constructor.  Normally this creates an empty object, and readObject() is
+     * called next to fill in the contents.  This is done in two steps to allow
+     * the instance to be registered by EntityInput before reading the
+     * contents.  This allows the fields in an object or a nested object to
+     * refer to the parent object in a graph.
+     *
+     * Alternatively, this method may read all or the first portion of the
+     * data, rather than that being done by readObject().  This is required for
+     * simple types and enums, where the object cannot be created without
+     * reading the data.  In these cases, there is no possibility that the
+     * parent object will be referenced by the child object in the graph.  It
+     * should not be done in other cases, or the graph references may not be
+     * maintained faithfully.
+     *
+     * Is public only in order to implement the Reader interface.  Note that
+     * this method should only be called directly in raw conversion mode or
+     * during conversion of an old format.  Normally it should be called via
+     * the getReader method and the Reader interface.
+     */
+    public abstract Object newInstance(EntityInput input, boolean rawAccess);
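+
+    /*
+     * The two-step read sequence described above, as a sketch (a hypothetical
+     * caller; registration of the instance is performed by EntityInput):
+     *
+     *   Object o = format.newInstance(input, rawAccess); // empty instance
+     *   // ... EntityInput registers o so nested fields may refer to it ...
+     *   o = format.readObject(o, input, rawAccess);      // fill in contents
+     */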
+
+    /**
+     * Called after newInstance() to read the rest of the data bytes and fill
+     * in the object contents.  If the object was read completely by
+     * newInstance(), this method does nothing.
+     *
+     * Is public only in order to implement the Reader interface.  Note that
+     * this method should only be called directly in raw conversion mode or
+     * during conversion of an old format.  Normally it should be called via
+     * the getReader method and the Reader interface.
+     */
+    public abstract Object readObject(Object o,
+                                      EntityInput input,
+                                      boolean rawAccess);
+
+    /**
+     * Writes a given instance of the target class to the output data bytes.
+     * This is the complement of the newInstance()/readObject() pair.
+     */
+    abstract void writeObject(Object o, EntityOutput output, boolean rawAccess);
+
+    /**
+     * Skips over the object's contents, as if readObject() were called, but
+     * without returning an object.  Used for extracting secondary key bytes
+     * without having to instantiate the object.  For reference types, the
+     * format ID is read just before calling this method, so this method is
+     * responsible for skipping everything following the format ID.
+     */
+    abstract void skipContents(RecordInput input);
+
+    /* -- More methods that may optionally be overridden by subclasses. -- */
+
+    /**
+     * When extracting a secondary key, called to skip over all fields up to
+     * the given secondary key field.  Returns the format of the key field
+     * found, or null if the field is not present (nullified) in the object.
+     */
+    Format skipToSecKey(RecordInput input, String keyName) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Called after skipToSecKey() to copy the data bytes of a singular
+     * (XXX_TO_ONE) key field.
+     */
+    void copySecKey(RecordInput input, RecordOutput output) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Called after skipToSecKey() to copy the data bytes of an array or
+     * collection (XXX_TO_MANY) key field.
+     */
+    void copySecMultiKey(RecordInput input, Format keyFormat, Set results) {
+        throw new UnsupportedOperationException(toString());
+    }
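+
+    /*
+     * Illustrative calling sequence for secondary key extraction, assuming a
+     * hypothetical caller (the skip/copy division follows the Javadoc above):
+     *
+     *   Format fieldFormat = entityFormat.skipToSecKey(input, keyName);
+     *   if (fieldFormat != null) {
+     *       // Singular (XXX_TO_ONE) key:
+     *       fieldFormat.copySecKey(input, output);
+     *       // Or, for an array/collection (XXX_TO_MANY) key:
+     *       // fieldFormat.copySecMultiKey(input, fieldFormat, results);
+     *   }
+     */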
+
+    /**
+     * Nullifies the given key field in the given RawObject -- rawAccess mode
+     * is implied.
+     */
+    boolean nullifySecKey(Catalog catalog,
+                          Object entity,
+                          String keyName,
+                          Object keyElement) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Returns whether the entity's primary key field is null or zero, as
+     * defined for primary keys that are assigned from a sequence.
+     */
+    boolean isPriKeyNullOrZero(Object o, boolean rawAccess) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Gets the primary key field from the given object and writes it to the
+     * given output data bytes.  This is a separate operation because the
+     * primary key data bytes are stored separately from the rest of the
+     * record.
+     */
+    void writePriKey(Object o, EntityOutput output, boolean rawAccess) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Reads the primary key from the given input bytes and sets the primary
+     * key field in the given object.  This is the complement of writePriKey().
+     *
+     * Is public only in order to implement the Reader interface.  Note that
+     * this method should only be called directly in raw conversion mode or
+     * during conversion of an old format.  Normally it should be called via
+     * the getReader method and the Reader interface.
+     */
+    public void readPriKey(Object o, EntityInput input, boolean rawAccess) {
+        throw new UnsupportedOperationException(toString());
+    }
+
+    /**
+     * Validates and returns the simple integer key format for a sequence key
+     * associated with this format.
+     *
+     * For a composite key type, the format of the one and only field is
+     * returned.  For a simple integer type, this format is returned.
+     * Otherwise (the default implementation), an IllegalArgumentException is
+     * thrown.
+     */
+    Format getSequenceKeyFormat() {
+        throw new IllegalArgumentException
+            ("Type not allowed for sequence: " + getClassName());
+    }
+
+    /**
+     * Converts a RawObject to a current class object and adds the converted
+     * pair to the converted map.
+     */
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        throw new UnsupportedOperationException(toString());
+    }
+
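+    /*
+     * Prints the format as XML-like text for debugging.  For example, a
+     * simple type might produce (values are illustrative only):
+     *
+     *   <SimpleType id="1" class="java.lang.Boolean" version="0"
+     *               primitive="false"/>
+     */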
+    @Override
+    public String toString() {
+        final String INDENT = "  ";
+        final String INDENT2 = INDENT + "  ";
+        StringBuffer buf = new StringBuffer(500);
+        if (isSimple()) {
+            addTypeHeader(buf, "SimpleType");
+            buf.append(" primitive=\"");
+            buf.append(isPrimitive());
+            buf.append("\"/>\n");
+        } else if (isEnum()) {
+            addTypeHeader(buf, "EnumType");
+            buf.append(">\n");
+            for (String constant : getEnumConstants()) {
+                buf.append(INDENT);
+                buf.append("<Constant>");
+                buf.append(constant);
+                buf.append("</Constant>\n");
+            }
+            buf.append("</EnumType>\n");
+        } else if (isArray()) {
+            addTypeHeader(buf, "ArrayType");
+            buf.append(" componentId=\"");
+            buf.append(getComponentType().getId());
+            buf.append("\" componentClass=\"");
+            buf.append(getComponentType().getClassName());
+            buf.append("\" dimensions=\"");
+            buf.append(getDimensions());
+            buf.append("\"/>\n");
+        } else {
+            addTypeHeader(buf, "ComplexType");
+            Format superType = getSuperType();
+            if (superType != null) {
+                buf.append(" superTypeId=\"");
+                buf.append(superType.getId());
+                buf.append("\" superTypeClass=\"");
+                buf.append(superType.getClassName());
+                buf.append('"');
+            }
+            Format proxiedFormat = getProxiedFormat();
+            if (proxiedFormat != null) {
+                buf.append(" proxiedTypeId=\"");
+                buf.append(proxiedFormat.getId());
+                buf.append("\" proxiedTypeClass=\"");
+                buf.append(proxiedFormat.getClassName());
+                buf.append('"');
+            }
+            PrimaryKeyMetadata priMeta = null;
+            Map<String,SecondaryKeyMetadata> secondaryKeys = null;
+            List<FieldMetadata> compositeKeyFields = null;
+            ClassMetadata clsMeta = getClassMetadata();
+            if (clsMeta != null) {
+                compositeKeyFields = clsMeta.getCompositeKeyFields();
+                priMeta = clsMeta.getPrimaryKey();
+                secondaryKeys = clsMeta.getSecondaryKeys();
+            }
+            buf.append(" kind=\"");
+            buf.append(isEntity() ? "entity" :
+                       ((compositeKeyFields != null) ? "compositeKey" :
+                        "persistent"));
+            buf.append("\">\n");
+            Map<String, RawField> fields = getFields();
+            if (fields != null) {
+                for (RawField field : fields.values()) {
+                    String name = field.getName();
+                    RawType type = field.getType();
+                    buf.append(INDENT);
+                    buf.append("<Field");
+                    buf.append(" name=\"");
+                    buf.append(name);
+                    buf.append("\" typeId=\"");
+                    buf.append(type.getId());
+                    buf.append("\" typeClass=\"");
+                    buf.append(type.getClassName());
+                    buf.append('"');
+                    if (priMeta != null &&
+                        priMeta.getName().equals(name)) {
+                        buf.append(" primaryKey=\"true\"");
+                        if (priMeta.getSequenceName() != null) {
+                            buf.append(" sequence=\"");
+                            buf.append(priMeta.getSequenceName());
+                            buf.append('"');
+                        }
+                    }
+                    if (secondaryKeys != null) {
+                        SecondaryKeyMetadata secMeta = secondaryKeys.get(name);
+                        if (secMeta != null) {
+                            buf.append(" secondaryKey=\"true\" keyName=\"");
+                            buf.append(secMeta.getKeyName());
+                            buf.append("\" relate=\"");
+                            buf.append(secMeta.getRelationship());
+                            buf.append('"');
+                            String related = secMeta.getRelatedEntity();
+                            if (related != null) {
+                                buf.append(" relatedEntity=\"");
+                                buf.append(related);
+                                buf.append("\" onRelatedEntityDelete=\"");
+                                buf.append(secMeta.getDeleteAction());
+                                buf.append('"');
+                            }
+                        }
+                    }
+                    if (compositeKeyFields != null) {
+                        int nFields = compositeKeyFields.size();
+                        for (int i = 0; i < nFields; i += 1) {
+                            FieldMetadata fldMeta = compositeKeyFields.get(i);
+                            if (fldMeta.getName().equals(name)) {
+                                buf.append(" compositeKeyField=\"");
+                                buf.append(i + 1);
+                                buf.append('"');
+                            }
+                        }
+                    }
+                    buf.append("/>\n");
+                }
+                EntityMetadata entMeta = getEntityMetadata();
+                if (entMeta != null) {
+                    buf.append(INDENT);
+                    buf.append("<EntityKeys>\n");
+                    priMeta = entMeta.getPrimaryKey();
+                    if (priMeta != null) {
+                        buf.append(INDENT2);
+                        buf.append("<Primary class=\"");
+                        buf.append(priMeta.getDeclaringClassName());
+                        buf.append("\" field=\"");
+                        buf.append(priMeta.getName());
+                        buf.append("\"/>\n");
+                    }
+                    secondaryKeys = entMeta.getSecondaryKeys();
+                    if (secondaryKeys != null) {
+                        for (SecondaryKeyMetadata secMeta :
+                             secondaryKeys.values()) {
+                            buf.append(INDENT2);
+                            buf.append("<Secondary class=\"");
+                            buf.append(secMeta.getDeclaringClassName());
+                            buf.append("\" field=\"");
+                            buf.append(secMeta.getName());
+                            buf.append("\"/>\n");
+                        }
+                    }
+                    buf.append("</EntityKeys>\n");
+                }
+            }
+            buf.append("</ComplexType>\n");
+        }
+        return buf.toString();
+    }
+
+    private void addTypeHeader(StringBuffer buf, String elemName) {
+        buf.append('<');
+        buf.append(elemName);
+        buf.append(" id=\"");
+        buf.append(getId());
+        buf.append("\" class=\"");
+        buf.append(getClassName());
+        buf.append("\" version=\"");
+        buf.append(getVersion());
+        buf.append('"');
+        Format currVersion = getLatestVersion();
+        if (currVersion != null) {
+            buf.append(" currentVersionId=\"");
+            buf.append(currVersion.getId());
+            buf.append('"');
+        }
+        Format prevVersion = getPreviousVersion();
+        if (prevVersion != null) {
+            buf.append(" previousVersionId=\"");
+            buf.append(prevVersion.getId());
+            buf.append('"');
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/KeyLocation.java b/src/com/sleepycat/persist/impl/KeyLocation.java
new file mode 100644
index 0000000000000000000000000000000000000000..d3b46bef72570dc4a570a144aa5eca97ad19c18a
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/KeyLocation.java
@@ -0,0 +1,26 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyLocation.java,v 1.6.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+/**
+ * Holder for the input and format of a key.  Used when copying secondary keys.
+ * Returned by RecordInput.getKeyLocation().
+ *
+ * @author Mark Hayes
+ */
+class KeyLocation {
+
+    RecordInput input;
+    Format format;
+
+    KeyLocation(RecordInput input, Format format) {
+        this.input = input;
+        this.format = format;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/MapProxy.java b/src/com/sleepycat/persist/impl/MapProxy.java
new file mode 100644
index 0000000000000000000000000000000000000000..e3a26df0c574b12b3519d2f5874cfaae1fe33159
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/MapProxy.java
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MapProxy.java,v 1.6.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+
+/**
+ * Proxy for a Map.
+ *
+ * @author Mark Hayes
+ */
+@Persistent
+abstract class MapProxy<K,V> implements PersistentProxy<Map<K,V>> {
+
+    private K[] keys;
+    private V[] values;
+
+    protected MapProxy() {}
+
+    public final void initializeProxy(Map<K,V> map) {
+        int size = map.size();
+        keys = (K[]) new Object[size];
+        values = (V[]) new Object[size];
+        int i = 0;
+        for (Map.Entry<K,V> entry : map.entrySet()) {
+            keys[i] = entry.getKey();
+            values[i] = entry.getValue();
+            i += 1;
+        }
+    }
+
+    public final Map<K,V> convertProxy() {
+        int size = values.length;
+        Map<K,V> map = newInstance(size);
+        for (int i = 0; i < size; i += 1) {
+            map.put(keys[i], values[i]);
+        }
+        return map;
+    }
+
+    protected abstract Map<K,V> newInstance(int size);
+
+    @Persistent(proxyFor=HashMap.class)
+    static class HashMapProxy<K,V> extends MapProxy<K,V> {
+
+        protected HashMapProxy() {}
+
+        protected Map<K,V> newInstance(int size) {
+            return new HashMap<K,V>(size);
+        }
+    }
+
+    @Persistent(proxyFor=TreeMap.class)
+    static class TreeMapProxy<K,V> extends MapProxy<K,V> {
+
+        protected TreeMapProxy() {}
+
+        protected Map<K,V> newInstance(int size) {
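+            /* TreeMap has no capacity constructor, so size is unused. */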
+            return new TreeMap<K,V>();
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/NonPersistentFormat.java b/src/com/sleepycat/persist/impl/NonPersistentFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..f5ff63fd53c9ae5a74df04d7fdcc27fdb4f0f75f
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/NonPersistentFormat.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NonPersistentFormat.java,v 1.14.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.util.Map;
+
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * Format for a non-persistent class that is only used for declared field
+ * types and arrays.  Currently used only for Object and interface types.
+ *
+ * @author Mark Hayes
+ */
+class NonPersistentFormat extends Format {
+
+    private static final long serialVersionUID = -7488355830875148784L;
+
+    NonPersistentFormat(Class type) {
+        super(type);
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+    }
+
+    @Override
+    Object newArray(int len) {
+        return Array.newInstance(getType(), len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        throw new UnsupportedOperationException
+            ("Cannot instantiate non-persistent class: " + getClassName());
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    boolean evolve(Format newFormat, Evolver evolver) {
+        evolver.useOldFormat(this, newFormat);
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ObjectArrayFormat.java b/src/com/sleepycat/persist/impl/ObjectArrayFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..e8f34a596ea920fdd4091eb123be1555d8aaf5ab
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ObjectArrayFormat.java
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ObjectArrayFormat.java,v 1.27.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * An array of objects having a specified number of dimensions.  All
+ * multidimensional arrays are handled by this class, since even a primitive
+ * array of more than one dimension is an array of objects, where the component
+ * objects may be primitive arrays.  The {@link PrimitiveArrayFormat} class
+ * handles primitive arrays of one dimension only.
+ *
+ * In this class, and {@link PrimitiveArrayFormat}, we resort to using
+ * reflection to allocate multidimensional arrays.  If there is a need for it,
+ * reflection could be avoided in the future by generating code as new array
+ * formats are encountered.
+ *
+ * @author Mark Hayes
+ */
+public class ObjectArrayFormat extends Format {
+
+    private static final long serialVersionUID = 4317004346690441892L;
+
+    private Format componentFormat;
+    private int nDimensions;
+    private transient Format useComponentFormat;
+
+    ObjectArrayFormat(Class type) {
+        super(type);
+        String name = getClassName();
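+        /*
+         * Count the leading '[' characters in the JVM class name; for
+         * example, a name such as "[[Ljava.lang.String;" has two dimensions.
+         */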
+        for (nDimensions = 0;
+             name.charAt(nDimensions) == '[';
+             nDimensions += 1) {
+        }
+    }
+
+    @Override
+    public boolean isArray() {
+        return true;
+    }
+
+    @Override
+    public int getDimensions() {
+        return nDimensions;
+    }
+
+    @Override
+    public Format getComponentType() {
+        return (useComponentFormat != null) ?
+            useComponentFormat : componentFormat;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+        Class cls = getType().getComponentType();
+        catalog.createFormat(cls, newFormats);
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+        /* Set the component format for a new (never initialized) format. */
+        if (componentFormat == null) {
+            Class cls = getType().getComponentType();
+            componentFormat = catalog.getFormat(cls.getName());
+        }
+        useComponentFormat = componentFormat.getLatestVersion();
+    }
+
+    @Override
+    boolean isAssignableTo(Format format) {
+        if (super.isAssignableTo(format)) {
+            return true;
+        }
+        if (format instanceof ObjectArrayFormat) {
+            ObjectArrayFormat other = (ObjectArrayFormat) format;
+            if (useComponentFormat.isAssignableTo(other.useComponentFormat)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    Object newArray(int len) {
+        return Array.newInstance(getType(), len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        int len = input.readArrayLength();
+        if (rawAccess) {
+            return new RawObject(this, new Object[len]);
+        } else {
+            return useComponentFormat.newArray(len);
+        }
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        Object[] a;
+        if (rawAccess) {
+            a = ((RawObject) o).getElements();
+        } else {
+            a = (Object[]) o;
+        }
+        for (int i = 0; i < a.length; i += 1) {
+            a[i] = input.readObject();
+        }
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        Object[] a;
+        if (rawAccess) {
+            a = ((RawObject) o).getElements();
+        } else {
+            a = (Object[]) o;
+        }
+        output.writeArrayLength(a.length);
+        for (int i = 0; i < a.length; i += 1) {
+            output.writeObject(a[i], useComponentFormat);
+        }
+    }
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        RawArrayInput input = new RawArrayInput
+            (catalog, rawAccess, converted, rawObject, useComponentFormat);
+        Object a = newInstance(input, rawAccess);
+        converted.put(rawObject, a);
+        return readObject(a, input, rawAccess);
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        int len = input.readPackedInt();
+        for (int i = 0; i < len; i += 1) {
+            input.skipField(useComponentFormat);
+        }
+    }
+
+    @Override
+    void copySecMultiKey(RecordInput input, Format keyFormat, Set results) {
+        int len = input.readPackedInt();
+        for (int i = 0; i < len; i += 1) {
+            KeyLocation loc = input.getKeyLocation(useComponentFormat);
+            if (loc == null) {
+                throw new IllegalArgumentException
+                    ("Secondary key values in array may not be null");
+            }
+            if (loc.format != useComponentFormat) {
+                throw new IllegalStateException
+                    (useComponentFormat.getClassName());
+            }
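+            /*
+             * Copy the raw key bytes by recording the offset, skipping the
+             * field, and wrapping the skipped span in a DatabaseEntry.
+             */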
+            int off1 = loc.input.getBufferOffset();
+            useComponentFormat.skipContents(loc.input);
+            int off2 = loc.input.getBufferOffset();
+            DatabaseEntry entry = new DatabaseEntry
+                (loc.input.getBufferBytes(), off1, off2 - off1);
+            results.add(entry);
+        }
+    }
+
+    @Override
+    boolean evolve(Format newFormat, Evolver evolver) {
+
+        /*
+         * When the class name of the component changes, we need a new format
+         * that references it.  Otherwise, don't propagate changes from
+         * components upward to their arrays.
+         */
+        Format latest = componentFormat.getLatestVersion();
+        if (latest != componentFormat &&
+            !latest.getClassName().equals(componentFormat.getClassName())) {
+            evolver.useEvolvedFormat(this, newFormat, newFormat);
+        } else {
+            evolver.useOldFormat(this, newFormat);
+        }
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/PersistCatalog.java b/src/com/sleepycat/persist/impl/PersistCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..cf4b5ebe1241cbb985a9c8059d49cb87dfeffec1
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistCatalog.java
@@ -0,0 +1,966 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistCatalog.java,v 1.47.2.3 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.DatabaseNamer;
+import com.sleepycat.persist.evolve.DeletedClassException;
+import com.sleepycat.persist.evolve.IncompatibleClassException;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * The catalog of class formats for a store, along with its associated model
+ * and mutations.
+ *
+ * @author Mark Hayes
+ */
+public class PersistCatalog implements Catalog {
+
+    /**
+     * Key to Data record in the catalog database.  In the JE 3.0.12 beta
+     * version the formatList record is stored under this key and is converted
+     * to a Data object when it is read.
+     */
+    private static final byte[] DATA_KEY = getIntBytes(-1);
+
+    /**
+     * Key to a JE 3.0.12 beta version mutations record in the catalog
+     * database.  This record is no longer used, since mutations are now
+     * stored in the Data record; it is deleted when the beta version is
+     * detected.
+     */
+    private static final byte[] BETA_MUTATIONS_KEY = getIntBytes(-2);
+
+    private static byte[] getIntBytes(int val) {
+        DatabaseEntry entry = new DatabaseEntry();
+        IntegerBinding.intToEntry(val, entry);
+        assert entry.getSize() == 4 && entry.getData().length == 4;
+        return entry.getData();
+    }
+
+    /**
+     * Used by unit tests.
+     */
+    public static boolean expectNoClassChanges;
+    public static boolean unevolvedFormatsEncountered;
+
+    /**
+     * The object stored under DATA_KEY in the catalog database.
+     */
+    private static class Data implements Serializable {
+
+        static final long serialVersionUID = 7515058069137413261L;
+
+        List<Format> formatList;
+        Mutations mutations;
+        int version;
+    }
+
+    /**
+     * A list of all formats indexed by formatId.  Element zero is unused and
+     * null, since IDs start at one; this avoids adjusting the ID to index the
+     * list.  Some elements are null to account for predefined IDs that are not
+     * used.
+     *
+     * <p>This field, like formatMap, is volatile because it is reassigned
+     * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
+     */
+    private volatile List<Format> formatList;
+
+    /**
+     * A map of the current/live formats in formatList, indexed by class name.
+     *
+     * <p>This field, like formatList, is volatile because it is reassigned
+     * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
+     */
+    private volatile Map<String,Format> formatMap;
+
+    /**
+     * A map of the latest formats (includes deleted formats) in formatList,
+     * indexed by class name.
+     *
+     * <p>This field, like formatMap, is volatile because it is reassigned
+     * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
+     */
+    private volatile Map<String,Format> latestFormatMap;
+
+    /**
+     * A temporary map of proxied class name to proxy class name.  Used during
+     * catalog creation, and then set to null.  This map is used to force proxy
+     * formats to be created prior to proxied formats. [#14665]
+     */
+    private Map<String,String> proxyClassMap;
+
+    private boolean rawAccess;
+    private EntityModel model;
+    private Mutations mutations;
+    private Database db;
+    private int openCount;
+
+    /**
+     * The Store is normally present but may be null in unit tests (for
+     * example, BindingTest).
+     */
+    private Store store;
+
+    /**
+     * The Evolver and catalog Data are non-null during catalog initialization,
+     * and null otherwise.
+     */
+    private Evolver evolver;
+    private Data catalogData;
+
+    /**
+     * Creates a new catalog, opening the database and reading it from a given
+     * catalog database if it already exists.  All predefined formats and
+     * formats for the given model are added.  For modified classes, old
+     * formats are defined based on the rules for compatible class changes and
+     * the given mutations.  If any format is changed or added, and the
+     * database is not read-only, write the initialized catalog to the
+     * database.
+     */
+    public PersistCatalog(Transaction txn,
+                          Environment env,
+                          String storePrefix,
+                          String dbName,
+                          DatabaseConfig dbConfig,
+                          EntityModel modelParam,
+                          Mutations mutationsParam,
+                          boolean rawAccess,
+                          Store store)
+        throws DatabaseException {
+
+        this.rawAccess = rawAccess;
+        this.store = store;
+        /* store may be null for testing. */
+        String[] fileAndDbNames = (store != null) ?
+            store.parseDbName(dbName) :
+            Store.parseDbName(dbName, DatabaseNamer.DEFAULT);
+        try {
+            db = DbCompat.openDatabase
+                (env, txn, fileAndDbNames[0], fileAndDbNames[1],
+                 dbConfig);
+        } catch (FileNotFoundException e) {
+            throw new DatabaseException(e);
+        }
+        openCount = 1;
+        boolean success = false;
+        try {
+            catalogData = readData(txn);
+            mutations = catalogData.mutations;
+            if (mutations == null) {
+                mutations = new Mutations();
+            }
+
+            /*
+             * When the beta version is detected, force a re-write of the
+             * catalog and disallow class changes.  This brings the catalog up
+             * to date so that evolution can proceed correctly from then on.
+             */
+            boolean betaVersion = (catalogData.version == BETA_VERSION);
+            boolean forceWriteData = betaVersion;
+            boolean disallowClassChanges = betaVersion;
+
+            /*
+             * Store the given mutations if they are different from the stored
+             * mutations, and force evolution to apply the new mutations.
+             */
+            boolean forceEvolution = false;
+            if (mutationsParam != null &&
+                !mutations.equals(mutationsParam)) {
+                mutations = mutationsParam;
+                forceWriteData = true;
+                forceEvolution = true;
+            }
+
+            /* Get the existing format list, or copy it from SimpleCatalog. */
+            formatList = catalogData.formatList;
+            if (formatList == null) {
+                formatList = SimpleCatalog.copyFormatList();
+
+                /*
+                 * Special cases: Object and Number are predefined but are not
+                 * simple types.
+                 */
+                Format format = new NonPersistentFormat(Object.class);
+                format.setId(Format.ID_OBJECT);
+                formatList.set(Format.ID_OBJECT, format);
+                format = new NonPersistentFormat(Number.class);
+                format.setId(Format.ID_NUMBER);
+                formatList.set(Format.ID_NUMBER, format);
+            } else {
+                if (SimpleCatalog.copyMissingFormats(formatList)) {
+                    forceWriteData = true;
+                }
+            }
+
+            /* Special handling for JE 3.0.12 beta formats. */
+            if (betaVersion) {
+                Map<String,Format> formatMap = new HashMap<String,Format>();
+                for (Format format : formatList) {
+                    if (format != null) {
+                        formatMap.put(format.getClassName(), format);
+                    }
+                }
+                for (Format format : formatList) {
+                    if (format != null) {
+                        format.migrateFromBeta(formatMap);
+                    }
+                }
+            }
+
+            /*
+             * If we should not use the current model, initialize the stored
+             * model and return.
+             */
+            formatMap = new HashMap<String,Format>(formatList.size());
+            latestFormatMap = new HashMap<String,Format>(formatList.size());
+            if (rawAccess) {
+                for (Format format : formatList) {
+                    if (format != null) {
+                        String name = format.getClassName();
+                        if (format.isCurrentVersion()) {
+                            formatMap.put(name, format);
+                        }
+                        if (format == format.getLatestVersion()) {
+                            latestFormatMap.put(name, format);
+                        }
+                    }
+                }
+                model = new StoredModel(this);
+                for (Format format : formatList) {
+                    if (format != null) {
+                        format.initializeIfNeeded(this, model);
+                    }
+                }
+                success = true;
+                return;
+            }
+
+            /*
+             * We are opening a store that uses the current model. Default to
+             * the AnnotationModel if no model is specified.
+             */
+            if (modelParam != null) {
+                model = modelParam;
+            } else {
+                model = new AnnotationModel();
+            }
+
+            /*
+             * Add all predefined (simple) formats to the format map.  The
+             * current version of other formats will be added below.
+             */
+            for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) {
+                Format simpleFormat = formatList.get(i);
+                if (simpleFormat != null) {
+                    formatMap.put(simpleFormat.getClassName(), simpleFormat);
+                }
+            }
+
+            /*
+             * Known classes are those explicitly registered by the user via
+             * the model, plus the predefined proxy classes.
+             */
+            List<String> knownClasses =
+                new ArrayList<String>(model.getKnownClasses());
+            addPredefinedProxies(knownClasses);
+
+            /*
+             * Create a temporary map of proxied class name to proxy class
+             * name, using all known formats and classes.  This map is used to
+             * force proxy formats to be created prior to proxied formats.
+             * [#14665]
+             */
+            proxyClassMap = new HashMap<String,String>();
+            for (Format oldFormat : formatList) {
+                if (oldFormat == null || Format.isPredefined(oldFormat)) {
+                    continue;
+                }
+                String oldName = oldFormat.getClassName();
+                Renamer renamer = mutations.getRenamer
+                    (oldName, oldFormat.getVersion(), null);
+                String newName =
+                    (renamer != null) ? renamer.getNewName() : oldName;
+                addProxiedClass(newName);
+            }
+            for (String className : knownClasses) {
+                addProxiedClass(className);
+            }
+
+            /*
+             * Add known formats from the model and the predefined proxies.
+             * In general, classes will not be present in an AnnotationModel
+             * until an instance is stored, in which case an old format exists.
+             * However, registered proxy classes are an exception and must be
+             * added in advance.  And the user may choose to register new
+             * classes in advance.  The more formats we define in advance, the
+             * fewer times we have to write to the catalog database.
+             */
+            Map<String,Format> newFormats = new HashMap<String,Format>();
+            for (String className : knownClasses) {
+                createFormat(className, newFormats);
+            }
+
+            /*
+             * Perform class evolution for all old formats, and throw an
+             * exception that contains the messages for all of the errors in
+             * mutations or in the definition of new classes.
+             */
+            evolver = new Evolver
+                (this, storePrefix, mutations, newFormats, forceEvolution,
+                 disallowClassChanges);
+            for (Format oldFormat : formatList) {
+                if (oldFormat == null || Format.isPredefined(oldFormat)) {
+                    continue;
+                }
+                if (oldFormat.isEntity()) {
+                    evolver.evolveFormat(oldFormat);
+                } else {
+                    evolver.addNonEntityFormat(oldFormat);
+                }
+            }
+            evolver.finishEvolution();
+            String errors = evolver.getErrors();
+            if (errors != null) {
+                throw new IncompatibleClassException(errors);
+            }
+
+            /*
+             * Add the new formats remaining.  New formats that are equal to
+             * old formats were removed from the newFormats map above.
+             */
+            for (Format newFormat : newFormats.values()) {
+                addFormat(newFormat);
+            }
+
+            /* Initialize all formats. */
+            for (Format format : formatList) {
+                if (format != null) {
+                    format.initializeIfNeeded(this, model);
+                    if (format == format.getLatestVersion()) {
+                        latestFormatMap.put(format.getClassName(), format);
+                    }
+                }
+            }
+
+            boolean needWrite =
+                 newFormats.size() > 0 ||
+                 evolver.areFormatsChanged();
+
+            /* For unit testing. */
+            if (expectNoClassChanges && needWrite) {
+                throw new IllegalStateException
+                    ("Unexpected changes " +
+                     " newFormats.size=" + newFormats.size() +
+                     " areFormatsChanged=" + evolver.areFormatsChanged());
+            }
+
+            /* Write the catalog if anything changed. */
+            if ((needWrite || forceWriteData) &&
+                !db.getConfig().getReadOnly()) {
+
+                /*
+                 * Only rename/remove databases if we are going to update the
+                 * catalog to reflect those class changes.
+                 */
+                evolver.renameAndRemoveDatabases(store, txn);
+
+                /*
+                 * Note that we use the Data object that was read above, and
+                 * the beta version determines whether to delete the old
+                 * mutations record.
+                 */
+                catalogData.formatList = formatList;
+                catalogData.mutations = mutations;
+                writeData(txn, catalogData);
+            } else if (forceWriteData) {
+                throw new IllegalArgumentException
+                    ("When an upgrade is required the store may not be " +
+                     "opened read-only");
+            }
+
+            success = true;
+        } finally {
+
+            /*
+             * Fields needed only for the duration of this ctor and which
+             * should be null afterwards.
+             */
+            proxyClassMap = null;
+            catalogData = null;
+            evolver = null;
+
+            if (!success) {
+                close();
+            }
+        }
+    }
+
+    public void getEntityFormats(Collection<Format> entityFormats) {
+        for (Format format : formatMap.values()) {
+            if (format.isEntity()) {
+                entityFormats.add(format);
+            }
+        }
+    }
+
+    private void addProxiedClass(String className) {
+        ClassMetadata metadata = model.getClassMetadata(className);
+        if (metadata != null) {
+            String proxiedClassName = metadata.getProxiedClassName();
+            if (proxiedClassName != null) {
+                proxyClassMap.put(proxiedClassName, className);
+            }
+        }
+    }
+
+    private void addPredefinedProxies(List<String> knownClasses) {
+        knownClasses.add(CollectionProxy.ArrayListProxy.class.getName());
+        knownClasses.add(CollectionProxy.LinkedListProxy.class.getName());
+        knownClasses.add(CollectionProxy.HashSetProxy.class.getName());
+        knownClasses.add(CollectionProxy.TreeSetProxy.class.getName());
+        knownClasses.add(MapProxy.HashMapProxy.class.getName());
+        knownClasses.add(MapProxy.TreeMapProxy.class.getName());
+    }
+
+    /**
+     * Returns a map from superclass format to the set of its subclass
+     * formats.  The formats for simple types, enums and class Object are not
+     * included.  Only complex types have superclass formats as defined by
+     * Format.getSuperFormat.
+     */
+    Map<Format,Set<Format>> getSubclassMap() {
+        Map<Format,Set<Format>> subclassMap =
+            new HashMap<Format,Set<Format>>();
+        for (Format format : formatList) {
+            if (format == null || Format.isPredefined(format)) {
+                continue;
+            }
+            Format superFormat = format.getSuperFormat();
+            if (superFormat != null) {
+                Set<Format> subclass = subclassMap.get(superFormat);
+                if (subclass == null) {
+                    subclass = new HashSet<Format>();
+                    subclassMap.put(superFormat, subclass);
+                }
+                subclass.add(format);
+            }
+        }
+        return subclassMap;
+    }
+
+    /**
+     * Returns the model parameter, default model or stored model.
+     */
+    public EntityModel getResolvedModel() {
+        return model;
+    }
+
+    /**
+     * Increments the reference count for a catalog that is already open.
+     */
+    public void openExisting() {
+        openCount += 1;
+    }
+
+    /**
+     * Decrements the reference count and closes the catalog DB when it reaches
+     * zero.  Returns true if the database was closed or false if the reference
+     * count is still non-zero and the database was left open.
+     */
+    public boolean close()
+        throws DatabaseException {
+
+        if (openCount == 0) {
+            throw new IllegalStateException("Catalog is not open");
+        } else {
+            openCount -= 1;
+            if (openCount == 0) {
+                Database dbToClose = db;
+                db = null;
+                dbToClose.close();
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    /**
+     * Returns the current merged mutations.
+     */
+    public Mutations getMutations() {
+        return mutations;
+    }
+
+    /**
+     * Convenience method that gets the class for the given class name and
+     * calls createFormat with the class object.
+     */
+    public Format createFormat(String clsName, Map<String,Format> newFormats) {
+        Class type;
+        try {
+            type = SimpleCatalog.classForName(clsName);
+        } catch (ClassNotFoundException e) {
+            throw new IllegalStateException
+                ("Class does not exist: " + clsName);
+        }
+        return createFormat(type, newFormats);
+    }
+
+    /**
+     * If the given class format is not already present in the given map and
+     * a format for this class name does not already exist, creates an
+     * uninitialized format, adds it to the map, and also collects related
+     * formats in the map.
+     */
+    public Format createFormat(Class type, Map<String,Format> newFormats) {
+        /* Return a new or existing format for this class. */
+        String className = type.getName();
+        Format format = newFormats.get(className);
+        if (format != null) {
+            return format;
+        }
+        format = formatMap.get(className);
+        if (format != null) {
+            return format;
+        }
+        /* Simple types are predefined. */
+        assert !SimpleCatalog.isSimpleType(type) : className;
+        /* Create format of the appropriate type. */
+        String proxyClassName = null;
+        if (proxyClassMap != null) {
+            proxyClassName = proxyClassMap.get(className);
+        }
+        if (proxyClassName != null) {
+            format = new ProxiedFormat(type, proxyClassName);
+        } else if (type.isArray()) {
+            format = type.getComponentType().isPrimitive() ?
+                (new PrimitiveArrayFormat(type)) :
+                (new ObjectArrayFormat(type));
+        } else if (type.isEnum()) {
+            format = new EnumFormat(type);
+        } else if (type == Object.class || type.isInterface()) {
+            format = new NonPersistentFormat(type);
+        } else {
+            ClassMetadata metadata = model.getClassMetadata(className);
+            if (metadata == null) {
+                throw new IllegalArgumentException
+                    ("Class could not be loaded or is not persistent: " +
+                     className);
+            }
+            if (metadata.getCompositeKeyFields() != null &&
+                (metadata.getPrimaryKey() != null ||
+                 metadata.getSecondaryKeys() != null)) {
+                throw new IllegalArgumentException
+                    ("A composite key class may not have primary or" +
+                     " secondary key fields: " + type.getName());
+            }
+            try {
+                type.getDeclaredConstructor();
+            } catch (NoSuchMethodException e) {
+                throw new IllegalArgumentException
+                    ("No default constructor: " + type.getName(), e);
+            }
+            if (metadata.getCompositeKeyFields() != null) {
+                format = new CompositeKeyFormat
+                    (type, metadata, metadata.getCompositeKeyFields());
+            } else {
+                EntityMetadata entityMetadata =
+                    model.getEntityMetadata(className);
+                format = new ComplexFormat(type, metadata, entityMetadata);
+            }
+        }
+        /* Collect new format along with any related new formats. */
+        newFormats.put(className, format);
+        format.collectRelatedFormats(this, newFormats);
+
+        return format;
+    }
+
+    /**
+     * Adds a format and makes it the current format for the class.
+     */
+    private void addFormat(Format format) {
+        addFormat(format, formatList, formatMap);
+    }
+
+    /**
+     * Adds a format to the given format collections, for use when
+     * dynamically adding formats.
+     */
+    private void addFormat(Format format,
+                           List<Format> list,
+                           Map<String,Format> map) {
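+        /* A format's ID is its index in formatList; index zero is unused. */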
+        format.setId(list.size());
+        list.add(format);
+        map.put(format.getClassName(), format);
+    }
+
+    /**
+     * Installs an existing format when no evolution is needed, i.e., when the
+     * new and old formats are identical.
+     */
+    void useExistingFormat(Format oldFormat) {
+        assert oldFormat.isCurrentVersion();
+        formatMap.put(oldFormat.getClassName(), oldFormat);
+    }
+
+    /**
+     * Returns a set of all persistent (non-simple type) class names.
+     */
+    Set<String> getModelClasses() {
+        Set<String> classes = new HashSet<String>();
+        for (Format format : formatMap.values()) {
+            if (format.isModelClass()) {
+                classes.add(format.getClassName());
+            }
+        }
+        return Collections.unmodifiableSet(classes);
+    }
+
+    /**
+     * Returns all formats as RawTypes.
+     */
+    public List<RawType> getAllRawTypes() {
+        List<RawType> list = new ArrayList<RawType>();
+        for (RawType type : formatList) {
+            if (type != null) {
+                list.add(type);
+            }
+        }
+        return Collections.unmodifiableList(list);
+    }
+
+    /**
+     * When a format is initialized, this method is called to get the version
+     * of the serialized object to be initialized.  See Catalog.
+     */
+    public int getInitVersion(Format format, boolean forReader) {
+
+        if (catalogData == null || catalogData.formatList == null ||
+            format.getId() >= catalogData.formatList.size()) {
+
+            /*
+             * For new formats, use the current version.  If catalogData is
+             * null, the Catalog ctor is finished and the format must be new.
+             * If the ctor is in progress, the format is new if its ID is
+             * greater than the ID of all pre-existing formats.
+             */
+            return Catalog.CURRENT_VERSION;
+        } else {
+
+            /*
+             * Get the version of a pre-existing format during execution of the
+             * Catalog ctor.  The catalogData field is non-null, but evolver
+             * may be null if the catalog is opened in raw mode.
+             */
+            assert catalogData != null;
+
+            if (forReader) {
+
+                /*
+                 * Get the version of the evolution reader for a pre-existing
+                 * format.  Use the current version if the format changed
+                 * during class evolution, otherwise use the stored version.
+                 */
+                return (evolver != null && evolver.isFormatChanged(format)) ?
+                       Catalog.CURRENT_VERSION : catalogData.version;
+            } else {
+                /* Always use the stored version for a pre-existing format. */
+                return catalogData.version;
+            }
+        }
+    }
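+
+    /*
+     * Version selection above, in summary (this mirrors the branches; it is
+     * not additional behavior):
+     *
+     *   new format (not in catalogData)          -> Catalog.CURRENT_VERSION
+     *   pre-existing, forReader, format evolved  -> Catalog.CURRENT_VERSION
+     *   pre-existing, all other cases            -> catalogData.version
+     */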
+
+    public Format getFormat(int formatId) {
+        try {
+            Format format = formatList.get(formatId);
+            if (format == null) {
+                throw new DeletedClassException
+                    ("Format does not exist: " + formatId);
+            }
+            return format;
+        } catch (IndexOutOfBoundsException e) {
+            throw new DeletedClassException
+                ("Format does not exist: " + formatId);
+        }
+    }
+
+    /**
+     * Get a format for a given class, creating it if it does not exist.
+     *
+     * <p>This method is called for top level entity instances by
+     * PersistEntityBinding.  When a new entity subclass format is added we
+     * call Store.openSecondaryIndexes so that previously unknown secondary
+     * databases can be created before storing the entity.  We do this here,
+     * rather than in addNewFormat, so that no synchronization mutex is held:
+     * openSecondaryIndexes synchronizes on the Store, which could otherwise
+     * deadlock. [#15247]</p>
+     */
+    public Format getFormat(Class cls, boolean openEntitySubclassIndexes) {
+        Format format = formatMap.get(cls.getName());
+        if (format == null) {
+            if (model != null) {
+                format = addNewFormat(cls);
+                /* Detect and handle new entity subclass. [#15247] */
+                if (openEntitySubclassIndexes && store != null) {
+                    Format entityFormat = format.getEntityFormat();
+                    if (entityFormat != null && entityFormat != format) {
+                        try {
+                            store.openSecondaryIndexes
+                                (null, entityFormat.getEntityMetadata(), null);
+                        } catch (DatabaseException e) {
+                            throw new RuntimeExceptionWrapper(e);
+                        }
+                    }
+                }
+            }
+            if (format == null) {
+                throw new IllegalArgumentException
+                    ("Class is not persistent: " + cls.getName());
+            }
+        }
+        return format;
+    }
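+
+    /*
+     * A minimal usage sketch (hedged; Employee is an illustrative entity
+     * class, not part of this patch).  Bindings call this method for each
+     * stored instance:
+     *
+     *   Format f = catalog.getFormat(Employee.class, true);
+     *   // f is created on first use; for a new entity subclass, the
+     *   // subclass's secondary databases are opened here as well. [#15247]
+     */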
+
+    public Format getFormat(String className) {
+        return formatMap.get(className);
+    }
+
+    public Format getLatestVersion(String className) {
+        return latestFormatMap.get(className);
+    }
+
+    /**
+     * Adds a format for a new class.  Returns the format added for the given
+     * class, or throws an exception if the given class is not persistent.
+     *
+     * <p>This method uses a copy-on-write technique to add new formats without
+     * impacting other threads.</p>
+     */
+    private synchronized Format addNewFormat(Class cls) {
+
+        /*
+         * After synchronizing, check whether another thread has added the
+         * format needed.  Note that this is not the double-check technique
+         * because the formatMap field is volatile and is not itself checked
+         * for null.  (The double-check technique is known to be flawed in
+         * Java.)
+         */
+        Format format = formatMap.get(cls.getName());
+        if (format != null) {
+            return format;
+        }
+
+        /* Copy the read-only format collections. */
+        List<Format> newFormatList = new ArrayList<Format>(formatList);
+        Map<String,Format> newFormatMap =
+            new HashMap<String,Format>(formatMap);
+        Map<String,Format> newLatestFormatMap =
+            new HashMap<String,Format>(latestFormatMap);
+
+        /* Add the new format and all related new formats. */
+        Map<String,Format> newFormats = new HashMap<String,Format>();
+        format = createFormat(cls, newFormats);
+        for (Format newFormat : newFormats.values()) {
+            addFormat(newFormat, newFormatList, newFormatMap);
+        }
+
+        /*
+         * Initialize new formats using a read-only catalog because we can't
+         * update this catalog until after we store it (below).
+         */
+        Catalog newFormatCatalog =
+            new ReadOnlyCatalog(newFormatList, newFormatMap);
+        for (Format newFormat : newFormats.values()) {
+            newFormat.initializeIfNeeded(newFormatCatalog, model);
+            newLatestFormatMap.put(newFormat.getClassName(), newFormat);
+        }
+
+        /*
+         * Write the updated catalog using auto-commit, then assign the new
+         * collections.  The database write must occur before the collections
+         * are used, since a format must be persistent before it can be
+         * referenced by a data record.
+         */
+        try {
+            Data catalogData = new Data();
+            catalogData.formatList = newFormatList;
+            catalogData.mutations = mutations;
+            writeData(null, catalogData);
+        } catch (DatabaseException e) {
+            throw new RuntimeExceptionWrapper(e);
+        }
+        formatList = newFormatList;
+        formatMap = newFormatMap;
+        latestFormatMap = newLatestFormatMap;
+
+        return format;
+    }
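+
+    /*
+     * The copy-on-write pattern above, in outline (a sketch of the idiom,
+     * not additional behavior): readers use the volatile collection fields
+     * without locking, while the synchronized writer mutates private copies
+     * and publishes them only after the catalog record is durably written:
+     *
+     *   List<Format> copy = new ArrayList<Format>(formatList);
+     *   addFormat(newFormat, copy, mapCopy);  // mutate the copies
+     *   writeData(null, catalogData);         // persist first
+     *   formatList = copy;                    // then publish
+     */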
+
+    /**
+     * Used to write the catalog when a format has been changed, for example,
+     * when Store.evolve has updated a Format's EvolveNeeded property.  Uses
+     * auto-commit.
+     */
+    public synchronized void flush()
+        throws DatabaseException {
+
+        Data catalogData = new Data();
+        catalogData.formatList = formatList;
+        catalogData.mutations = mutations;
+        writeData(null, catalogData);
+    }
+
+    /**
+     * Reads catalog Data, converting old versions as necessary.  An empty
+     * Data object is returned if no catalog data currently exists.  Null is
+     * never returned.
+     */
+    private Data readData(Transaction txn)
+        throws DatabaseException {
+
+        Data catalogData;
+        DatabaseEntry key = new DatabaseEntry(DATA_KEY);
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status = db.get(txn, key, data, null);
+        if (status == OperationStatus.SUCCESS) {
+            ByteArrayInputStream bais = new ByteArrayInputStream
+                (data.getData(), data.getOffset(), data.getSize());
+            try {
+                ObjectInputStream ois = new ObjectInputStream(bais);
+                Object object = ois.readObject();
+                assert ois.available() == 0;
+                if (object instanceof Data) {
+                    catalogData = (Data) object;
+                } else {
+                    if (!(object instanceof List)) {
+                        throw new IllegalStateException
+                            (object.getClass().getName());
+                    }
+                    catalogData = new Data();
+                    catalogData.formatList = (List) object;
+                    catalogData.version = BETA_VERSION;
+                }
+                return catalogData;
+            } catch (ClassNotFoundException e) {
+                throw new DatabaseException(e);
+            } catch (IOException e) {
+                throw new DatabaseException(e);
+            }
+        } else {
+            catalogData = new Data();
+            catalogData.version = Catalog.CURRENT_VERSION;
+        }
+        return catalogData;
+    }
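+
+    /*
+     * On-disk layouts handled above, summarized:
+     *
+     *   beta catalog:   a serialized List of Formats; version is set to
+     *                   BETA_VERSION
+     *   current layout: a serialized Data object holding the format list,
+     *                   mutations and version
+     *   no record:      an empty Data object with CURRENT_VERSION
+     */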
+
+    /**
+     * Writes catalog Data.  If txn is null, auto-commit is used.
+     */
+    private void writeData(Transaction txn, Data catalogData)
+        throws DatabaseException {
+
+        /* Catalog data is written in the current version. */
+        boolean wasBetaVersion = (catalogData.version == BETA_VERSION);
+        catalogData.version = CURRENT_VERSION;
+
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try {
+            ObjectOutputStream oos = new ObjectOutputStream(baos);
+            oos.writeObject(catalogData);
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+        DatabaseEntry key = new DatabaseEntry(DATA_KEY);
+        DatabaseEntry data = new DatabaseEntry(baos.toByteArray());
+        db.put(txn, key, data);
+
+        /*
+         * Delete the unused beta mutations record if we read the beta version
+         * record earlier.
+         */
+        if (wasBetaVersion) {
+            key.setData(BETA_MUTATIONS_KEY);
+            db.delete(txn, key);
+        }
+    }
+
+    public boolean isRawAccess() {
+        return rawAccess;
+    }
+
+    public Object convertRawObject(RawObject o, IdentityHashMap converted) {
+        Format format = (Format) o.getType();
+        if (this != format.getCatalog()) {
+
+            /*
+             * Use the corresponding format in this catalog when the external
+             * raw object was created using a different catalog.  Create the
+             * format if it does not already exist, for example, when this
+             * store is empty. [#16253].
+             */
+            String clsName = format.getClassName();
+            Class cls;
+            try {
+                cls = SimpleCatalog.classForName(clsName);
+                format = getFormat(cls, true /*openEntitySubclassIndexes*/);
+            } catch (ClassNotFoundException e) {
+                format = null;
+            }
+            if (format == null) {
+                throw new IllegalArgumentException
+                    ("External raw type not found: " + clsName);
+            }
+        }
+        Format proxiedFormat = format.getProxiedFormat();
+        if (proxiedFormat != null) {
+            format = proxiedFormat;
+        }
+        if (converted == null) {
+            converted = new IdentityHashMap();
+        }
+        return format.convertRawObject(this, false, o, converted);
+    }
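+
+    /*
+     * A hedged usage sketch (index and class names are illustrative): a
+     * RawObject read through another store's catalog can be converted into a
+     * live object against this catalog, e.g. when copying records:
+     *
+     *   RawObject raw = otherIndex.get(key);
+     *   Employee e = (Employee) catalog.convertRawObject(raw, null);
+     */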
+}
diff --git a/src/com/sleepycat/persist/impl/PersistComparator.java b/src/com/sleepycat/persist/impl/PersistComparator.java
new file mode 100644
index 0000000000000000000000000000000000000000..ba2d679150e2a6b2c87c625597bda1dda943aafa
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistComparator.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistComparator.java,v 1.11.2.2 2010/01/04 15:30:38 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.List;
+
+import com.sleepycat.persist.model.FieldMetadata;
+
+/**
+ * The btree comparator for persistent key classes.  The serialized form of
+ * this comparator is stored in the BDB JE database descriptor so that the
+ * comparator can be re-created during recovery.
+ *
+ * @author Mark Hayes
+ */
+public class PersistComparator implements Comparator<byte[]>, Serializable {
+
+    private static final long serialVersionUID = 5221576538843355317L;
+
+    private String keyClassName;
+    private String[] compositeFieldOrder;
+    private transient PersistKeyBinding binding;
+
+    public PersistComparator(String keyClassName,
+                             List<FieldMetadata> compositeKeyFields,
+                             PersistKeyBinding binding) {
+        this.keyClassName = keyClassName;
+        this.binding = binding;
+
+        if (compositeKeyFields != null) {
+            compositeFieldOrder =
+                CompositeKeyFormat.getFieldNameArray(compositeKeyFields);
+        }
+    }
+
+    public int compare(byte[] b1, byte[] b2) {
+
+        /*
+         * The binding will be null after the comparator is deserialized, i.e.,
+         * during BDB JE recovery.  We must construct it here, without access
+         * to the stored catalog since recovery is not complete.
+         */
+        if (binding == null) {
+            Class keyClass;
+            try {
+                keyClass = SimpleCatalog.classForName(keyClassName);
+            } catch (ClassNotFoundException e) {
+                throw new IllegalStateException(e);
+            }
+            binding = new PersistKeyBinding(keyClass, compositeFieldOrder);
+        }
+
+        Comparable k1 = (Comparable) binding.bytesToObject(b1, 0, b1.length);
+        Comparable k2 = (Comparable) binding.bytesToObject(b2, 0, b2.length);
+
+        return k1.compareTo(k2);
+    }
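+
+    /*
+     * For orientation, the kind of key class this comparator orders (a
+     * sketch; NameKey is hypothetical).  A composite key class with a custom
+     * sort order implements Comparable, and compare above delegates to it:
+     *
+     *   @Persistent
+     *   class NameKey implements Comparable<NameKey> {
+     *       @KeyField(1) String last;
+     *       @KeyField(2) String first;
+     *       public int compareTo(NameKey o) { ... custom ordering ... }
+     *   }
+     */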
+}
diff --git a/src/com/sleepycat/persist/impl/PersistEntityBinding.java b/src/com/sleepycat/persist/impl/PersistEntityBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f240471b8f914113f5e7ade5d7248631ab8bb29
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistEntityBinding.java
@@ -0,0 +1,179 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistEntityBinding.java,v 1.20.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * A persistence entity binding for a given entity class.
+ *
+ * @author Mark Hayes
+ */
+public class PersistEntityBinding implements EntityBinding {
+
+    PersistCatalog catalog;
+    Format entityFormat;
+    boolean rawAccess;
+    PersistKeyAssigner keyAssigner;
+
+    /**
+     * Creates an entity binding for a given entity class.
+     */
+    public PersistEntityBinding(PersistCatalog catalog,
+                                String entityClassName,
+                                boolean rawAccess) {
+        this.catalog = catalog;
+        entityFormat = getOrCreateFormat(catalog, entityClassName, rawAccess);
+        if (!entityFormat.isEntity()) {
+            throw new IllegalArgumentException
+                ("Not an entity class: " + entityClassName);
+        }
+        this.rawAccess = rawAccess;
+    }
+
+    public PersistKeyAssigner getKeyAssigner() {
+        return keyAssigner;
+    }
+
+    public Object entryToObject(DatabaseEntry key, DatabaseEntry data) {
+        return readEntity(catalog, key, data, rawAccess);
+    }
+
+    /**
+     * Creates the instance, reads the entity key first to track visited
+     * entities correctly, then reads the data and returns the entity.
+     *
+     * This is a special case of EntityInput.readObject for a top level entity.
+     * Special treatments are:
+     * - The formatId must be >= 0, since a top level instance can neither
+     *   refer to a visited object nor be a null reference.
+     * - The resulting entity is not added to the visited object set; entities
+     *   cannot be referenced by another (or the same) entity.
+     * - Reader.readPriKey must be called prior to calling Reader.readObject.
+     */
+    static Object readEntity(Catalog catalog,
+                             DatabaseEntry key,
+                             DatabaseEntry data,
+                             boolean rawAccess) {
+        RecordInput keyInput = new RecordInput
+            (catalog, rawAccess, null, 0,
+             key.getData(), key.getOffset(), key.getSize());
+        RecordInput dataInput = new RecordInput
+            (catalog, rawAccess, null, 0,
+             data.getData(), data.getOffset(), data.getSize());
+        int formatId = dataInput.readPackedInt();
+        Format format = catalog.getFormat(formatId);
+        Reader reader = format.getReader();
+        Object entity = reader.newInstance(dataInput, rawAccess);
+        reader.readPriKey(entity, keyInput, rawAccess);
+        return reader.readObject(entity, dataInput, rawAccess);
+    }
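+
+    /*
+     * Record layout consumed above, as implied by the reads performed (a
+     * summary, not a normative spec):
+     *
+     *   key entry:  [primary key fields in tuple form]
+     *   data entry: [packed formatId][remaining entity fields]
+     */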
+
+    public void objectToData(Object entity, DatabaseEntry data) {
+        Format format = getValidFormat(entity);
+        writeEntity(format, catalog, entity, data, rawAccess);
+    }
+
+    /**
+     * Writes the formatId and object, and returns the bytes.
+     *
+     * This is a special case of EntityOutput.writeObject for a top level
+     * entity.  Special treatments are:
+     * - The entity may not be null.
+     * - The entity is not added to the visited object set nor checked for
+     *   existence in the visited object set; entities cannot be referenced by
+     *   another (or the same) entity.
+     */
+    static void writeEntity(Format format,
+                            Catalog catalog,
+                            Object entity,
+                            DatabaseEntry data,
+                            boolean rawAccess) {
+        RecordOutput output = new RecordOutput(catalog, rawAccess);
+        output.writePackedInt(format.getId());
+        format.writeObject(entity, output, rawAccess);
+        TupleBase.outputToEntry(output, data);
+    }
+
+    public void objectToKey(Object entity, DatabaseEntry key) {
+
+        /*
+         * Write the primary key field as a special case since the output
+         * format is for a key binding, not entity data.
+         */
+        Format format = getValidFormat(entity);
+        RecordOutput output = new RecordOutput(catalog, rawAccess);
+
+        /* Write the primary key and return the bytes. */
+        format.writePriKey(entity, output, rawAccess);
+        TupleBase.outputToEntry(output, key);
+    }
+
+    /**
+     * Returns the format for the given entity and validates it, throwing an
+     * exception if it is invalid for this binding.
+     */
+    private Format getValidFormat(Object entity) {
+
+        /* A null entity is not allowed. */
+        if (entity == null) {
+            throw new IllegalArgumentException("An entity may not be null");
+        }
+
+        /*
+         * Get the format.  getFormat throws IllegalArgumentException if the
+         * class is not persistent.
+         */
+        Format format;
+        if (rawAccess) {
+            if (!(entity instanceof RawObject)) {
+                throw new IllegalArgumentException
+                    ("Entity must be a RawObject");
+            }
+            format = (Format) ((RawObject) entity).getType();
+        } else {
+            format = catalog.getFormat
+                (entity.getClass(), true /*openEntitySubclassIndexes*/);
+        }
+
+        /* Check that the entity class/subclass is valid for this binding. */
+        if (format.getEntityFormat() != entityFormat) {
+            throw new IllegalArgumentException
+                ("The entity class (" + format.getClassName() +
+                 ") must be this entity class or a subclass of it: " +
+                 entityFormat.getClassName());
+        }
+
+        return format;
+    }
+
+    /**
+     * Utility method for getting or creating a format as appropriate for
+     * bindings and key creators.
+     */
+    static Format getOrCreateFormat(Catalog catalog,
+                                    String clsName,
+                                    boolean rawAccess) {
+        if (rawAccess) {
+            Format format = catalog.getFormat(clsName);
+            if (format == null) {
+                throw new IllegalArgumentException
+                    ("Not a persistent class: " + clsName);
+            }
+            return format;
+        } else {
+            Class cls = SimpleCatalog.keyClassForName(clsName);
+            return catalog.getFormat(cls, true /*openEntitySubclassIndexes*/);
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/PersistKeyAssigner.java b/src/com/sleepycat/persist/impl/PersistKeyAssigner.java
new file mode 100644
index 0000000000000000000000000000000000000000..6f69075b2bd8177df8e56e884ab20edeb84723e7
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistKeyAssigner.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistKeyAssigner.java,v 1.16.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Sequence;
+
+/**
+ * Assigns primary keys from a Sequence.
+ *
+ * This class is used directly by PrimaryIndex, not via an interface.  To avoid
+ * making a public interface, the PersistEntityBinding contains a reference to
+ * a PersistKeyAssigner, and the PrimaryIndex gets the key assigner from the
+ * binding.  See the PrimaryIndex constructor for more information.
+ *
+ * @author Mark Hayes
+ */
+public class PersistKeyAssigner {
+
+    private Catalog catalog;
+    private Format keyFieldFormat;
+    private Format entityFormat;
+    private boolean rawAccess;
+    private Sequence sequence;
+
+    PersistKeyAssigner(PersistKeyBinding keyBinding,
+                       PersistEntityBinding entityBinding,
+                       Sequence sequence) {
+        catalog = keyBinding.catalog;
+        /* getSequenceKeyFormat will validate the field type for a sequence. */
+        keyFieldFormat = keyBinding.keyFormat.getSequenceKeyFormat();
+        entityFormat = entityBinding.entityFormat;
+        rawAccess = entityBinding.rawAccess;
+        this.sequence = sequence;
+    }
+
+    public boolean assignPrimaryKey(Object entity, DatabaseEntry key)
+        throws DatabaseException {
+
+        /*
+         * The keyFieldFormat is the format of a simple integer field.  For a
+         * composite key class it is the contained integer field.  By writing
+         * the Long sequence value using that format, the output data can then
+         * be read to construct the actual key instance, whether it is a simple
+         * or composite key class, and assign it to the primary key field in
+         * the entity object.
+         */
+        if (entityFormat.isPriKeyNullOrZero(entity, rawAccess)) {
+            Long value = sequence.get(null, 1);
+            RecordOutput output = new RecordOutput(catalog, rawAccess);
+            keyFieldFormat.writeObject(value, output, rawAccess);
+            TupleBase.outputToEntry(output, key);
+            EntityInput input = new RecordInput
+                (catalog, rawAccess, null, 0,
+                 key.getData(), key.getOffset(), key.getSize());
+            entityFormat.getReader().readPriKey(entity, input, rawAccess);
+            return true;
+        } else {
+            return false;
+        }
+    }
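+
+    /*
+     * A hedged sketch of the caller's pattern (PrimaryIndex; names are
+     * illustrative):
+     *
+     *   DatabaseEntry keyEntry = new DatabaseEntry();
+     *   if (assigner.assignPrimaryKey(entity, keyEntry)) {
+     *       // keyEntry holds the new sequence value and the entity's
+     *       // primary key field has been set from it
+     *   }
+     */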
+}
diff --git a/src/com/sleepycat/persist/impl/PersistKeyBinding.java b/src/com/sleepycat/persist/impl/PersistKeyBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..04e5d90ec9b10c14fd9489fab1211cacf8ebecf9
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistKeyBinding.java
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistKeyBinding.java,v 1.22.2.4 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A persistence key binding for a given key class.
+ *
+ * @author Mark Hayes
+ */
+public class PersistKeyBinding implements EntryBinding {
+
+    Catalog catalog;
+    Format keyFormat;
+    boolean rawAccess;
+
+    /**
+     * Creates a key binding for a given key class.
+     */
+    public PersistKeyBinding(Catalog catalog,
+                             String clsName,
+                             boolean rawAccess) {
+        this.catalog = catalog;
+        keyFormat = PersistEntityBinding.getOrCreateFormat
+            (catalog, clsName, rawAccess);
+        if (!keyFormat.isSimple() &&
+            (keyFormat.getClassMetadata() == null ||
+             keyFormat.getClassMetadata().getCompositeKeyFields() == null)) {
+            throw new IllegalArgumentException
+                ("Key class is not a simple type or a composite key class " +
+                 "(composite keys must include @KeyField annotations): " +
+                 clsName);
+        }
+        this.rawAccess = rawAccess;
+    }
+
+    /**
+     * Creates a key binding dynamically for use by PersistComparator.  Formats
+     * are created from scratch rather than using a shared catalog.
+     */
+    PersistKeyBinding(Class cls, String[] compositeFieldOrder) {
+        catalog = SimpleCatalog.getInstance();
+        if (compositeFieldOrder != null) {
+            assert !SimpleCatalog.isSimpleType(cls);
+            keyFormat = new CompositeKeyFormat(cls, null, compositeFieldOrder);
+        } else {
+            assert SimpleCatalog.isSimpleType(cls);
+            keyFormat =
+                catalog.getFormat(cls, false /*openEntitySubclassIndexes*/);
+        }
+        keyFormat.initializeIfNeeded(catalog, null /*model*/);
+    }
+
+    /**
+     * Binds bytes to an object for use by PersistComparator as well as
+     * entryToObject.
+     */
+    Object bytesToObject(byte[] bytes, int offset, int length) {
+        return readKey(keyFormat, catalog, bytes, offset, length, rawAccess);
+    }
+
+    /**
+     * Static form of bytesToObject; used by PersistKeyCreator as well as
+     * internally by this binding.
+     */
+    static Object readKey(Format keyFormat,
+                          Catalog catalog,
+                          byte[] bytes,
+                          int offset,
+                          int length,
+                          boolean rawAccess) {
+        EntityInput input = new RecordInput
+            (catalog, rawAccess, null, 0, bytes, offset, length);
+        return input.readKeyObject(keyFormat);
+    }
+
+    public Object entryToObject(DatabaseEntry entry) {
+        return bytesToObject
+            (entry.getData(), entry.getOffset(), entry.getSize());
+    }
+
+    public void objectToEntry(Object object, DatabaseEntry entry) {
+        RecordOutput output = new RecordOutput(catalog, rawAccess);
+        output.writeKeyObject(object, keyFormat);
+        TupleBase.outputToEntry(output, entry);
+    }
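+
+    /*
+     * Minimal round-trip sketch (illustrative; key is any valid key object
+     * for this binding):
+     *
+     *   DatabaseEntry entry = new DatabaseEntry();
+     *   binding.objectToEntry(key, entry);
+     *   Object copy = binding.entryToObject(entry);  // equal key value
+     */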
+}
diff --git a/src/com/sleepycat/persist/impl/PersistKeyCreator.java b/src/com/sleepycat/persist/impl/PersistKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..83c1992a5b4053753451e09325c83331c48cd7cb
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PersistKeyCreator.java
@@ -0,0 +1,169 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistKeyCreator.java,v 1.18.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.Collection;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.ForeignMultiKeyNullifier;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.Relationship;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * A persistence secondary key creator/nullifier.  This class always uses
+ * rawAccess=true to avoid depending on the presence of the proxy class.
+ *
+ * @author Mark Hayes
+ */
+public class PersistKeyCreator implements SecondaryKeyCreator,
+                                          SecondaryMultiKeyCreator,
+                                          ForeignMultiKeyNullifier {
+
+    static boolean isManyType(Class cls) {
+        return cls.isArray() || Collection.class.isAssignableFrom(cls);
+    }
+
+    private Catalog catalog;
+    private int priKeyFormatId;
+    private String keyName;
+    private Format keyFormat;
+    private boolean toMany;
+
+    /**
+     * Creates a key creator/nullifier for a given entity class and key name.
+     */
+    public PersistKeyCreator(Catalog catalog,
+                             EntityMetadata entityMeta,
+                             String keyClassName,
+                             SecondaryKeyMetadata secKeyMeta,
+                             boolean rawAccess) {
+        this.catalog = catalog;
+        Format priKeyFormat = PersistEntityBinding.getOrCreateFormat
+            (catalog, entityMeta.getPrimaryKey().getClassName(), rawAccess);
+        priKeyFormatId = priKeyFormat.getId();
+        keyName = secKeyMeta.getKeyName();
+        keyFormat = PersistEntityBinding.getOrCreateFormat
+            (catalog, keyClassName, rawAccess);
+        if (keyFormat == null) {
+            throw new IllegalArgumentException
+                ("Not a key class: " + keyClassName);
+        }
+        if (keyFormat.isPrimitive()) {
+            throw new IllegalArgumentException
+                ("Use a primitive wrapper class instead of class: " +
+                 keyFormat.getClassName());
+        }
+        Relationship rel = secKeyMeta.getRelationship();
+        toMany = (rel == Relationship.ONE_TO_MANY ||
+                  rel == Relationship.MANY_TO_MANY);
+    }
+
+    public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      DatabaseEntry result)
+        throws DatabaseException {
+
+        if (toMany) {
+            throw new IllegalStateException();
+        }
+        KeyLocation loc = moveToKey(key, data);
+        if (loc != null) {
+            RecordOutput output = new RecordOutput
+                (catalog, true /*rawAccess*/);
+            loc.format.copySecKey(loc.input, output);
+            TupleBase.outputToEntry(output, result);
+            return true;
+        } else {
+            /* Key field is not present or null. */
+            return false;
+        }
+    }
+
+    public void createSecondaryKeys(SecondaryDatabase secondary,
+                                    DatabaseEntry key,
+                                    DatabaseEntry data,
+                                    Set results)
+        throws DatabaseException {
+
+        if (!toMany) {
+            throw new IllegalStateException();
+        }
+        KeyLocation loc = moveToKey(key, data);
+        if (loc != null) {
+            loc.format.copySecMultiKey(loc.input, keyFormat, results);
+        }
+        /* Else key field is not present or null. */
+    }
+
+    public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                     DatabaseEntry key,
+                                     DatabaseEntry data,
+                                     DatabaseEntry secKey)
+        throws DatabaseException {
+
+        /* Deserialize the entity and get its current class format. */
+        RawObject entity = (RawObject) PersistEntityBinding.readEntity
+            (catalog, key, data, true /*rawAccess*/);
+        Format entityFormat = (Format) entity.getType();
+
+        /*
+         * Set the key to null.  For a TO_MANY key, pass the key object to be
+         * removed from the array/collection.
+         */
+        Object secKeyObject = null;
+        if (toMany) {
+            secKeyObject = PersistKeyBinding.readKey
+                (keyFormat, catalog, secKey.getData(), secKey.getOffset(),
+                 secKey.getSize(), true /*rawAccess*/);
+        }
+        if (entityFormat.nullifySecKey
+            (catalog, entity, keyName, secKeyObject)) {
+
+            /*
+             * Using the current format for the entity, serialize the modified
+             * entity back to the data entry.
+             */
+            PersistEntityBinding.writeEntity
+                (entityFormat, catalog, entity, data, true /*rawAccess*/);
+            return true;
+        } else {
+            /* Key field is not present or null. */
+            return false;
+        }
+    }
+
+    /**
+     * Returns the location from which the secondary key field can be copied.
+     */
+    private KeyLocation moveToKey(DatabaseEntry priKey, DatabaseEntry data) {
+
+        RecordInput input = new RecordInput
+            (catalog, true /*rawAccess*/, priKey, priKeyFormatId,
+             data.getData(), data.getOffset(), data.getSize());
+        int formatId = input.readPackedInt();
+        Format entityFormat = catalog.getFormat(formatId);
+        Format fieldFormat = entityFormat.skipToSecKey(input, keyName);
+        if (fieldFormat != null) {
+            /* Returns null if key field is null. */
+            return input.getKeyLocation(fieldFormat);
+        } else {
+            /* Key field is not present in this class. */
+            return null;
+        }
+    }
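+
+    /*
+     * A hedged sketch of how the store wires this class up when opening a
+     * secondary index (the DPL does this internally; the config methods are
+     * the public JE API):
+     *
+     *   SecondaryConfig config = new SecondaryConfig();
+     *   if (toMany) {
+     *       config.setMultiKeyCreator(keyCreator);
+     *   } else {
+     *       config.setKeyCreator(keyCreator);
+     *   }
+     *   config.setForeignMultiKeyNullifier(keyCreator);
+     */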
+}
diff --git a/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java b/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..fa1f9ca55c8a5ada5304d5ff5bbe970154a5a4c9
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrimitiveArrayFormat.java,v 1.24.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * An array of primitives having one dimension.  Multidimensional arrays are
+ * handled by {@link ObjectArrayFormat}.
+ *
+ * @author Mark Hayes
+ */
+public class PrimitiveArrayFormat extends Format {
+
+    private static final long serialVersionUID = 8285299924106073591L;
+
+    private SimpleFormat componentFormat;
+
+    PrimitiveArrayFormat(Class type) {
+        super(type);
+    }
+
+    @Override
+    public boolean isArray() {
+        return true;
+    }
+
+    @Override
+    public int getDimensions() {
+        return 1;
+    }
+
+    @Override
+    public Format getComponentType() {
+        return componentFormat;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+        /* Component type is simple and simple type formats are predefined. */
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+
+        /*
+         * getExistingType is allowed (to support raw mode) because primitive
+         * arrays are always available in Java.
+         */
+        componentFormat = (SimpleFormat)
+            catalog.getFormat(getExistingType().getComponentType(),
+                              false /*openEntitySubclassIndexes*/);
+    }
+
+    @Override
+    Object newArray(int len) {
+        return Array.newInstance(getType(), len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        int len = input.readArrayLength();
+        if (rawAccess) {
+            return new RawObject(this, new Object[len]);
+        } else {
+            return componentFormat.newPrimitiveArray(len, input);
+        }
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        if (rawAccess) {
+            Object[] a = ((RawObject) o).getElements();
+            for (int i = 0; i < a.length; i += 1) {
+                a[i] = componentFormat.newInstance(input, true);
+                componentFormat.readObject(a[i], input, true);
+            }
+        }
+        /* Else, do nothing -- newInstance reads the value. */
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        if (rawAccess) {
+            Object[] a = ((RawObject) o).getElements();
+            output.writeArrayLength(a.length);
+            for (int i = 0; i < a.length; i += 1) {
+                componentFormat.writeObject(a[i], output, true);
+            }
+        } else {
+            componentFormat.writePrimitiveArray(o, output);
+        }
+    }
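+
+    /*
+     * Representation note (a summary of the branches above): with rawAccess
+     * a primitive array travels as RawObject elements, one wrapper object
+     * per element; without it the SimpleFormat reads and writes the
+     * primitive array directly, e.g. an int[] is built by newPrimitiveArray
+     * rather than element-by-element.
+     */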
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        RawArrayInput input = new RawArrayInput
+            (catalog, rawAccess, converted, rawObject, componentFormat);
+        Object a = newInstance(input, rawAccess);
+        converted.put(rawObject, a);
+        return readObject(a, input, rawAccess);
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        int len = input.readPackedInt();
+        componentFormat.skipPrimitiveArray(len, input);
+    }
+
+    @Override
+    void copySecMultiKey(RecordInput input, Format keyFormat, Set results) {
+        int len = input.readPackedInt();
+        componentFormat.copySecMultiKeyPrimitiveArray(len, input, results);
+    }
+
+    @Override
+    boolean evolve(Format newFormat, Evolver evolver) {
+        evolver.useOldFormat(this, newFormat);
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ProxiedFormat.java b/src/com/sleepycat/persist/impl/ProxiedFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..c142be946d1ab0f089ebd028e3ab3503ee3bc071
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ProxiedFormat.java
@@ -0,0 +1,171 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ProxiedFormat.java,v 1.24.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Array;
+import java.util.IdentityHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Format for types proxied by a PersistentProxy.
+ *
+ * @author Mark Hayes
+ */
+public class ProxiedFormat extends Format {
+
+    private static final long serialVersionUID = -1000032651995478768L;
+
+    private Format proxyFormat;
+    private transient String proxyClassName;
+
+    ProxiedFormat(Class proxiedType, String proxyClassName) {
+        super(proxiedType);
+        this.proxyClassName = proxyClassName;
+    }
+
+    /**
+     * Returns the proxy class name.  The proxyClassName field is non-null for
+     * a constructed object and null for a de-serialized object.  Whenever the
+     * proxyClassName field is null (for a de-serialized object), the
+     * proxyFormat will be non-null.
+     */
+    private String getProxyClassName() {
+        if (proxyClassName != null) {
+            return proxyClassName;
+        } else {
+            assert proxyFormat != null;
+            return proxyFormat.getClassName();
+        }
+    }
+
+    /**
+     * If container proxies (which support nested references to the
+     * container) are implemented in the future, this will return false for
+     * a container proxy.  [#15815]
+     */
+    @Override
+    boolean areNestedRefsProhibited() {
+        return true;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+        /* Collect the proxy format. */
+        assert proxyClassName != null;
+        catalog.createFormat(proxyClassName, newFormats);
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+        /* Set the proxy format for a new (never initialized) format. */
+        if (proxyFormat == null) {
+            assert proxyClassName != null;
+            proxyFormat = catalog.getFormat(proxyClassName);
+        }
+        /* Make the linkage from proxy format to proxied format. */
+        proxyFormat.setProxiedFormat(this);
+    }
+
+    @Override
+    Object newArray(int len) {
+        return Array.newInstance(getType(), len);
+    }
+
+    @Override
+    public Object newInstance(EntityInput input, boolean rawAccess) {
+        Reader reader = proxyFormat.getReader();
+        if (rawAccess) {
+            return reader.newInstance(null, true);
+        } else {
+            PersistentProxy proxy =
+                (PersistentProxy) reader.newInstance(null, false);
+            proxy = (PersistentProxy) reader.readObject(proxy, input, false);
+            return proxy.convertProxy();
+        }
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        if (rawAccess) {
+            o = proxyFormat.getReader().readObject(o, input, true);
+        }
+        /* Else, do nothing here -- newInstance reads the value. */
+        return o;
+    }
+
+    @Override
+    void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+        if (rawAccess) {
+            proxyFormat.writeObject(o, output, true);
+        } else {
+            PersistentProxy proxy =
+                (PersistentProxy) proxyFormat.newInstance(null, false);
+            proxy.initializeProxy(o);
+            proxyFormat.writeObject(proxy, output, false);
+        }
+    }
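+
+    /*
+     * For orientation, the shape of a proxy driven by this format (a sketch;
+     * LocaleProxy is hypothetical but follows the public PersistentProxy
+     * contract):
+     *
+     *   @Persistent(proxyFor=Locale.class)
+     *   class LocaleProxy implements PersistentProxy<Locale> {
+     *       String language, country, variant;
+     *       public void initializeProxy(Locale o) { language = ...; }
+     *       public Locale convertProxy() {
+     *           return new Locale(language, country, variant);
+     *       }
+     *   }
+     */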
+
+    @Override
+    Object convertRawObject(Catalog catalog,
+                            boolean rawAccess,
+                            RawObject rawObject,
+                            IdentityHashMap converted) {
+        PersistentProxy proxy = (PersistentProxy) proxyFormat.convertRawObject
+            (catalog, rawAccess, rawObject, converted);
+        Object o = proxy.convertProxy();
+        converted.put(rawObject, o);
+        return o;
+    }
+
+    @Override
+    void skipContents(RecordInput input) {
+        proxyFormat.skipContents(input);
+    }
+
+    @Override
+    void copySecMultiKey(RecordInput input, Format keyFormat, Set results) {
+        CollectionProxy.copyElements(input, this, keyFormat, results);
+    }
+
+    @Override
+    boolean evolve(Format newFormatParam, Evolver evolver) {
+        if (!(newFormatParam instanceof ProxiedFormat)) {
+            evolver.addEvolveError
+                (this, newFormatParam, null,
+                 "A proxied class may not be changed to a different type");
+            return false;
+        }
+        ProxiedFormat newFormat = (ProxiedFormat) newFormatParam;
+        if (!evolver.evolveFormat(proxyFormat)) {
+            return false;
+        }
+        Format newProxyFormat = proxyFormat.getLatestVersion();
+        if (!newProxyFormat.getClassName().equals
+                (newFormat.getProxyClassName())) {
+            evolver.addEvolveError
+                (this, newFormat, null,
+                 "The proxy class for this type has been changed from: " +
+                 newProxyFormat.getClassName() + " to: " +
+                 newFormat.getProxyClassName());
+            return false;
+        }
+        if (newProxyFormat != proxyFormat) {
+            evolver.useEvolvedFormat(this, this, newFormat);
+        } else {
+            evolver.useOldFormat(this, newFormat);
+        }
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/RawAbstractInput.java b/src/com/sleepycat/persist/impl/RawAbstractInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2087c000eaa4e17a51f7d61c350646732d846d3
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawAbstractInput.java
@@ -0,0 +1,199 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawAbstractInput.java,v 1.10.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.math.BigInteger;
+import java.util.IdentityHashMap;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Base class for EntityInput implementations that type-check RawObject
+ * instances and convert them to regular persistent objects, via the
+ * Format.convertRawObject method.
+ *
+ * The subclass implements readNext which should call checkAndConvert before
+ * returning the final value.
+ *
+ * @author Mark Hayes
+ */
+abstract class RawAbstractInput extends AbstractInput {
+
+    private IdentityHashMap converted;
+
+    RawAbstractInput(Catalog catalog,
+                     boolean rawAccess,
+                     IdentityHashMap converted) {
+        super(catalog, rawAccess);
+        this.converted = converted;
+    }
+
+    public Object readObject() {
+        return readNext();
+    }
+
+    public Object readKeyObject(Format format) {
+        return readNext();
+    }
+
+    public void registerPriKeyObject(Object o) {
+    }
+
+    public int readArrayLength() {
+        throw new UnsupportedOperationException();
+    }
+
+    public int readEnumConstant(String[] names) {
+        throw new UnsupportedOperationException();
+    }
+
+    public void skipField(Format declaredFormat) {
+    }
+
+    abstract Object readNext();
+
+    Object checkAndConvert(Object o, Format declaredFormat) {
+        if (o == null) {
+            if (declaredFormat.isPrimitive()) {
+                throw new IllegalArgumentException
+                    ("A primitive type may not be null or missing: " +
+                     declaredFormat.getClassName());
+            }
+        } else if (declaredFormat.isSimple()) {
+            if (declaredFormat.isPrimitive()) {
+                if (o.getClass() !=
+                    declaredFormat.getWrapperFormat().getType()) {
+                    throw new IllegalArgumentException
+                        ("Raw value class: " + o.getClass().getName() +
+                         " must be the wrapper class for a primitive type: " +
+                         declaredFormat.getClassName());
+                }
+            } else {
+                if (o.getClass() != declaredFormat.getType()) {
+                    throw new IllegalArgumentException
+                        ("Raw value class: " + o.getClass().getName() +
+                         " must be the declared class for a simple type: " +
+                         declaredFormat.getClassName());
+                }
+            }
+        } else {
+            if (o instanceof RawObject) {
+                Object o2 = null;
+                if (!rawAccess) {
+                    if (converted != null) {
+                        o2 = converted.get(o);
+                    } else {
+                        converted = new IdentityHashMap();
+                    }
+                }
+                if (o2 != null) {
+                    o = o2;
+                } else {
+                    if (!rawAccess) {
+                        o = catalog.convertRawObject((RawObject) o, converted);
+                    }
+                }
+            } else {
+                if (!SimpleCatalog.isSimpleType(o.getClass())) {
+                    throw new IllegalArgumentException
+                        ("Raw value class: " + o.getClass().getName() +
+                         " must be RawObject a simple type");
+                }
+            }
+            if (rawAccess) {
+                checkRawType(catalog, o, declaredFormat);
+            } else {
+                if (!declaredFormat.getType().isAssignableFrom(o.getClass())) {
+                    throw new IllegalArgumentException
+                        ("Raw value class: " + o.getClass().getName() +
+                         " is not assignable to type: " +
+                         declaredFormat.getClassName());
+                }
+            }
+        }
+        return o;
+    }
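+
+    /*
+     * Note on the converted map used above: it is an IdentityHashMap so that
+     * a RawObject referenced more than once converts to a single shared
+     * instance, while distinct-but-equal RawObjects convert independently.
+     */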
+
+    static Format checkRawType(Catalog catalog,
+                               Object o,
+                               Format declaredFormat) {
+        assert declaredFormat != null;
+        Format format;
+        if (o instanceof RawObject) {
+            format = (Format) ((RawObject) o).getType();
+        } else {
+            format = catalog.getFormat(o.getClass(),
+                                       false /*openEntitySubclassIndexes*/);
+            if (!format.isSimple() || format.isEnum()) {
+                throw new IllegalArgumentException
+                    ("Not a RawObject or a non-enum simple type: " +
+                     format.getClassName());
+            }
+        }
+        if (!format.isAssignableTo(declaredFormat)) {
+            throw new IllegalArgumentException
+                ("Not a subtype of the field's declared class " +
+                 declaredFormat.getClassName() + ": " +
+                 format.getClassName());
+        }
+        if (!format.isCurrentVersion()) {
+            throw new IllegalArgumentException
+                ("Raw type version is not current.  Class: " +
+                 format.getClassName() + " Version: " +
+                 format.getVersion());
+        }
+        Format proxiedFormat = format.getProxiedFormat();
+        if (proxiedFormat != null) {
+            format = proxiedFormat;
+        }
+        return format;
+    }
+
+    /* The following methods are a subset of the methods in TupleInput. */
+
+    public String readString() {
+        return (String) readNext();
+    }
+
+    public char readChar() {
+        return ((Character) readNext()).charValue();
+    }
+
+    public boolean readBoolean() {
+        return ((Boolean) readNext()).booleanValue();
+    }
+
+    public byte readByte() {
+        return ((Byte) readNext()).byteValue();
+    }
+
+    public short readShort() {
+        return ((Short) readNext()).shortValue();
+    }
+
+    public int readInt() {
+        return ((Integer) readNext()).intValue();
+    }
+
+    public long readLong() {
+        return ((Long) readNext()).longValue();
+    }
+
+    public float readSortedFloat() {
+        return ((Float) readNext()).floatValue();
+    }
+
+    public double readSortedDouble() {
+        return ((Double) readNext()).doubleValue();
+    }
+
+    public BigInteger readBigInteger() {
+        return (BigInteger) readNext();
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/RawAccessor.java b/src/com/sleepycat/persist/impl/RawAccessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..6dd53cddc72d62b109d29b428edd41d662ba3150
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawAccessor.java
@@ -0,0 +1,236 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawAccessor.java,v 1.9.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Implements Accessor for RawObject access.
+ *
+ * @author Mark Hayes
+ */
+class RawAccessor implements Accessor {
+
+    private Format parentFormat;
+    private Accessor superAccessor;
+    private FieldInfo priKeyField;
+    private List<FieldInfo> secKeyFields;
+    private List<FieldInfo> nonKeyFields;
+    private boolean isCompositeKey;
+
+    RawAccessor(Format parentFormat,
+                Accessor superAccessor,
+                FieldInfo priKeyField,
+                List<FieldInfo> secKeyFields,
+                List<FieldInfo> nonKeyFields) {
+        this.parentFormat = parentFormat;
+        this.superAccessor = superAccessor;
+        this.priKeyField = priKeyField;
+        this.secKeyFields = secKeyFields;
+        this.nonKeyFields = nonKeyFields;
+    }
+
+    RawAccessor(Format parentFormat,
+                List<FieldInfo> nonKeyFields) {
+        this.parentFormat = parentFormat;
+        this.nonKeyFields = nonKeyFields;
+        secKeyFields = Collections.emptyList();
+        isCompositeKey = true;
+    }
+
+    public Object newInstance() {
+        RawObject superObject;
+        if (superAccessor != null) {
+            superObject = ((RawObject) superAccessor.newInstance());
+        } else {
+            superObject = null;
+        }
+        return new RawObject
+            (parentFormat, new HashMap<String,Object>(), superObject);
+    }
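+
+    /*
+     * Structure produced above (illustrative): each RawObject carries a map
+     * of field name to value for one class in the hierarchy, plus a super
+     * instance, so C extends B extends A becomes:
+     *
+     *   RawObject(C, {...}, RawObject(B, {...}, RawObject(A, {...}, null)))
+     */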
+
+    public Object newArray(int len) {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean isPriKeyFieldNullOrZero(Object o) {
+        if (priKeyField != null) {
+            Object val = getValue(o, priKeyField);
+            Format format = priKeyField.getType();
+            if (format.isPrimitive()) {
+                return ((Number) val).longValue() == 0L;
+            } else {
+                return val == null;
+            }
+        } else if (superAccessor != null) {
+            return superAccessor.isPriKeyFieldNullOrZero(getSuper(o));
+        } else {
+            throw new IllegalStateException("No primary key field");
+        }
+    }
+
+    public void writePriKeyField(Object o, EntityOutput output) {
+        if (priKeyField != null) {
+            Object val = getValue(o, priKeyField);
+            Format format = priKeyField.getType();
+            output.writeKeyObject(val, format);
+        } else if (superAccessor != null) {
+            superAccessor.writePriKeyField(getSuper(o), output);
+        } else {
+            throw new IllegalStateException("No primary key field");
+        }
+    }
+
+    public void readPriKeyField(Object o, EntityInput input) {
+        if (priKeyField != null) {
+            Format format = priKeyField.getType();
+            Object val = input.readKeyObject(format);
+            setValue(o, priKeyField, val);
+        } else if (superAccessor != null) {
+            superAccessor.readPriKeyField(getSuper(o), input);
+        } else {
+            throw new IllegalStateException("No primary key field");
+        }
+    }
+
+    public void writeSecKeyFields(Object o, EntityOutput output) {
+        if (priKeyField != null && !priKeyField.getType().isPrimitive()) {
+            output.registerPriKeyObject(getValue(o, priKeyField));
+        }
+        if (superAccessor != null) {
+            superAccessor.writeSecKeyFields(getSuper(o), output);
+        }
+        for (int i = 0; i < secKeyFields.size(); i += 1) {
+            writeField(o, secKeyFields.get(i), output);
+        }
+    }
+
+    public void readSecKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        if (priKeyField != null && !priKeyField.getType().isPrimitive()) {
+            input.registerPriKeyObject(getValue(o, priKeyField));
+        }
+        if (superLevel != 0 && superAccessor != null) {
+            superAccessor.readSecKeyFields
+                (getSuper(o), input, startField, endField, superLevel - 1);
+        } else {
+            if (superLevel > 0) {
+                throw new IllegalStateException
+                    ("Super class does not exist");
+            }
+        }
+        if (superLevel <= 0) {
+            for (int i = startField;
+                 i <= endField && i < secKeyFields.size();
+                 i += 1) {
+                readField(o, secKeyFields.get(i), input);
+            }
+        }
+    }
+
+    public void writeNonKeyFields(Object o, EntityOutput output) {
+        if (superAccessor != null) {
+            superAccessor.writeNonKeyFields(getSuper(o), output);
+        }
+        for (int i = 0; i < nonKeyFields.size(); i += 1) {
+            writeField(o, nonKeyFields.get(i), output);
+        }
+    }
+
+    public void readNonKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        if (superLevel != 0 && superAccessor != null) {
+            superAccessor.readNonKeyFields
+                (getSuper(o), input, startField, endField, superLevel - 1);
+        } else {
+            if (superLevel > 0) {
+                throw new IllegalStateException
+                    ("Super class does not exist");
+            }
+        }
+        if (superLevel <= 0) {
+            for (int i = startField;
+                 i <= endField && i < nonKeyFields.size();
+                 i += 1) {
+                readField(o, nonKeyFields.get(i), input);
+            }
+        }
+    }
+
+    public Object getField(Object o,
+                           int field,
+                           int superLevel,
+                           boolean isSecField) {
+        if (superLevel > 0) {
+            return superAccessor.getField
+                (getSuper(o), field, superLevel - 1, isSecField);
+        }
+        FieldInfo fld =
+            isSecField ? secKeyFields.get(field) : nonKeyFields.get(field);
+        return getValue(o, fld);
+    }
+
+    public void setField(Object o,
+                         int field,
+                         int superLevel,
+                         boolean isSecField,
+                         Object value) {
+        if (superLevel > 0) {
+            superAccessor.setField
+                (getSuper(o), field, superLevel - 1, isSecField, value);
+            return;
+        }
+        FieldInfo fld =
+            isSecField ? secKeyFields.get(field) : nonKeyFields.get(field);
+        setValue(o, fld, value);
+    }
+
+    private RawObject getSuper(Object o) {
+        return ((RawObject) o).getSuper();
+    }
+
+    private Object getValue(Object o, FieldInfo field) {
+        return ((RawObject) o).getValues().get(field.getName());
+    }
+
+    private void setValue(Object o, FieldInfo field, Object val) {
+        ((RawObject) o).getValues().put(field.getName(), val);
+    }
+
+    private void writeField(Object o, FieldInfo field, EntityOutput output) {
+        Object val = getValue(o, field);
+        Format format = field.getType();
+        if (isCompositeKey || format.isPrimitive()) {
+            output.writeKeyObject(val, format);
+        } else {
+            output.writeObject(val, format);
+        }
+    }
+
+    private void readField(Object o, FieldInfo field, EntityInput input) {
+        Format format = field.getType();
+        Object val;
+        if (isCompositeKey || format.isPrimitive()) {
+            val = input.readKeyObject(format);
+        } else {
+            val = input.readObject();
+        }
+        setValue(o, field, val);
+    }
+}
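
The null-or-zero test above decides whether a primary key still needs to be assigned (for example from a sequence): a primitive key counts as unassigned when it is 0, a reference key when it is null. A minimal standalone sketch, assuming nothing beyond plain Java (the class and method names below are illustrative, not part of the patch):

```java
// Standalone illustration of the isPriKeyFieldNullOrZero convention above.
public class NullOrZeroCheck {

    static boolean isNullOrZero(Object val, boolean primitive) {
        // Primitive numeric keys arrive boxed here; 0 means "not yet assigned".
        return primitive ? ((Number) val).longValue() == 0L : val == null;
    }

    public static void main(String[] args) {
        System.out.println(isNullOrZero(Long.valueOf(0L), true));    // true
        System.out.println(isNullOrZero(Integer.valueOf(42), true)); // false
        System.out.println(isNullOrZero(null, false));               // true
        System.out.println(isNullOrZero("id-1", false));             // false
    }
}
```
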
diff --git a/src/com/sleepycat/persist/impl/RawArrayInput.java b/src/com/sleepycat/persist/impl/RawArrayInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..f7229842f5ea4370a2aae61ea3486716c4921b72
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawArrayInput.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawArrayInput.java,v 1.6.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Extends RawAbstractInput to convert array (ObjectArrayFormat and
+ * PrimitiveArrayFormat) RawObject instances.
+ *
+ * @author Mark Hayes
+ */
+class RawArrayInput extends RawAbstractInput {
+
+    private Object[] array;
+    private int index;
+    private Format componentFormat;
+
+    RawArrayInput(Catalog catalog,
+                  boolean rawAccess,
+                  IdentityHashMap converted,
+                  RawObject raw,
+                  Format componentFormat) {
+        super(catalog, rawAccess, converted);
+        array = raw.getElements();
+        this.componentFormat = componentFormat;
+    }
+
+    @Override
+    public int readArrayLength() {
+        return array.length;
+    }
+
+    @Override
+    Object readNext() {
+        Object o = array[index++];
+        return checkAndConvert(o, componentFormat);
+    }
+}
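
For orientation, the element data consumed by readNext above is the same array exposed by the public RawObject.getElements accessor. A minimal sketch, assuming only the public com.sleepycat.persist.raw API (the helper class is illustrative, not part of the patch):

```java
import com.sleepycat.persist.raw.RawObject;

// Illustrative helper: walks the elements of a RawObject that represents an
// array, i.e. the same data RawArrayInput iterates over via readNext().
final class RawArrayDump {

    static void dump(RawObject arrayObject) {
        Object[] elements = arrayObject.getElements();
        for (int i = 0; i < elements.length; i += 1) {
            // Nested persistent objects appear as RawObject instances,
            // simple values as their wrapper classes.
            System.out.println(i + ": " + elements[i]);
        }
    }
}
```
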
diff --git a/src/com/sleepycat/persist/impl/RawComplexInput.java b/src/com/sleepycat/persist/impl/RawComplexInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..4207ff6666097962180e849a48cb07b03beb014b
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawComplexInput.java
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawComplexInput.java,v 1.6.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Extends RawAbstractInput to convert complex (ComplexFormat and
+ * CompositeKeyFormat) RawObject instances.
+ *
+ * @author Mark Hayes
+ */
+class RawComplexInput extends RawAbstractInput {
+
+    private FieldInfo[] fields;
+    private RawObject[] objects;
+    private int index;
+
+    RawComplexInput(Catalog catalog,
+                    boolean rawAccess,
+                    IdentityHashMap converted,
+                    FieldInfo[] fields,
+                    RawObject[] objects) {
+        super(catalog, rawAccess, converted);
+        this.fields = fields;
+        this.objects = objects;
+    }
+
+    @Override
+    Object readNext() {
+        RawObject raw = objects[index];
+        FieldInfo field = fields[index];
+        index += 1;
+        Format format = field.getType();
+        Object o = raw.getValues().get(field.getName());
+        return checkAndConvert(o, format);
+    }
+}
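
The per-superclass field maps read above are reachable through the public RawObject accessors. A minimal sketch, again assuming only the public com.sleepycat.persist.raw API (the helper is illustrative, not part of the patch):

```java
import com.sleepycat.persist.raw.RawObject;

// Illustrative helper: prints the field map at each superclass level of a
// RawObject, the same values/super structure RawComplexInput consumes.
final class RawHierarchyDump {

    static void dump(RawObject o) {
        for (RawObject level = o; level != null; level = level.getSuper()) {
            System.out.println(level.getType().getClassName() +
                               " -> " + level.getValues());
        }
    }
}
```
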
diff --git a/src/com/sleepycat/persist/impl/RawSingleInput.java b/src/com/sleepycat/persist/impl/RawSingleInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..29722a69ab8ac0e0333313b6036b85197577a7e8
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawSingleInput.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawSingleInput.java,v 1.6.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+
+/**
+ * Extends RawAbstractInput to convert a single value, for example a simple
+ * type or enum value.
+ *
+ * @author Mark Hayes
+ */
+class RawSingleInput extends RawAbstractInput {
+
+    private Object singleValue;
+    private Format declaredFormat;
+
+    RawSingleInput(Catalog catalog,
+                   boolean rawAccess,
+                   IdentityHashMap converted,
+                   Object singleValue,
+                   Format declaredFormat) {
+        super(catalog, rawAccess, converted);
+        this.singleValue = singleValue;
+        this.declaredFormat = declaredFormat;
+    }
+
+    @Override
+    Object readNext() {
+        return checkAndConvert(singleValue, declaredFormat);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java b/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..d88065f3c2a5bdea0d15d2f5c03656608170ff3f
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReadOnlyCatalog.java,v 1.12.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Read-only catalog operations used when initializing new formats.  This
+ * catalog is used temporarily when the main catalog has not been updated yet,
+ * but the new formats need to do catalog lookups.
+ *
+ * @see PersistCatalog#addNewFormat
+ *
+ * @author Mark Hayes
+ */
+class ReadOnlyCatalog implements Catalog {
+
+    private List<Format> formatList;
+    private Map<String,Format> formatMap;
+
+    ReadOnlyCatalog(List<Format> formatList, Map<String,Format> formatMap) {
+        this.formatList = formatList;
+        this.formatMap = formatMap;
+    }
+
+    public int getInitVersion(Format format, boolean forReader) {
+        return Catalog.CURRENT_VERSION;
+    }
+
+    public Format getFormat(int formatId) {
+        try {
+            Format format = formatList.get(formatId);
+            if (format == null) {
+                throw new IllegalStateException
+                    ("Format does not exist: " + formatId);
+            }
+            return format;
+        } catch (IndexOutOfBoundsException e) {
+            throw new IllegalStateException
+                ("Format does not exist: " + formatId);
+        }
+    }
+
+    public Format getFormat(Class cls, boolean openEntitySubclassIndexes) {
+        Format format = formatMap.get(cls.getName());
+        if (format == null) {
+            throw new IllegalArgumentException
+                ("Class is not persistent: " + cls.getName());
+        }
+        return format;
+    }
+
+    public Format getFormat(String className) {
+        return formatMap.get(className);
+    }
+
+    public Format createFormat(String clsName, Map<String,Format> newFormats) {
+        throw new IllegalStateException();
+    }
+
+    public Format createFormat(Class type, Map<String,Format> newFormats) {
+        throw new IllegalStateException();
+    }
+
+    public boolean isRawAccess() {
+        return false;
+    }
+
+    public Object convertRawObject(RawObject o, IdentityHashMap converted) {
+        throw new IllegalStateException();
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Reader.java b/src/com/sleepycat/persist/impl/Reader.java
new file mode 100644
index 0000000000000000000000000000000000000000..220c9b8648e04022df45beb6d3eb28720603ed05
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Reader.java
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Reader.java,v 1.6.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * Interface to the "read object" methods of the Format class.  For the
+ * latest version format, the Format object provides the implementation of
+ * these methods.  For an older version format, an evolver object implements
+ * this interface to convert from the old to new format.
+ *
+ * See {@link Format} for a description of each method.
+ * @author Mark Hayes
+ */
+interface Reader extends Serializable {
+
+    void initializeReader(Catalog catalog,
+                          EntityModel model,
+                          int initVersion,
+                          Format oldFormat);
+
+    Object newInstance(EntityInput input, boolean rawAccess);
+
+    void readPriKey(Object o, EntityInput input, boolean rawAccess);
+
+    Object readObject(Object o, EntityInput input, boolean rawAccess);
+}
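
To make the division of labor concrete, here is a hypothetical pass-through Reader that simply delegates to the current Format, which is roughly what the non-evolution case amounts to; an evolver's Reader would translate old-format data in these same methods instead. This is a sketch only, assuming placement alongside Reader in com.sleepycat.persist.impl; the class name is not part of the patch.

```java
package com.sleepycat.persist.impl; // assumed placement, same package as Reader

import com.sleepycat.persist.model.EntityModel;

// Hypothetical Reader that delegates every call to the target Format.
class PassThroughReader implements Reader {

    private static final long serialVersionUID = 1L;

    private final Format target;

    PassThroughReader(Format target) {
        this.target = target;
    }

    public void initializeReader(Catalog catalog,
                                 EntityModel model,
                                 int initVersion,
                                 Format oldFormat) {
        /* Nothing to cache for a pass-through reader. */
    }

    public Object newInstance(EntityInput input, boolean rawAccess) {
        return target.newInstance(input, rawAccess);
    }

    public void readPriKey(Object o, EntityInput input, boolean rawAccess) {
        target.readPriKey(o, input, rawAccess);
    }

    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
        return target.readObject(o, input, rawAccess);
    }
}
```
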
diff --git a/src/com/sleepycat/persist/impl/RecordInput.java b/src/com/sleepycat/persist/impl/RecordInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..d52039a3b58ec4bc04b53d30fea0bc25cdd68db3
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RecordInput.java
@@ -0,0 +1,273 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecordInput.java,v 1.9.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Implements EntityInput to read record key-data pairs.  Extends TupleInput to
+ * implement the subset of TupleInput methods that are defined in the
+ * EntityInput interface.
+ *
+ * @author Mark Hayes
+ */
+class RecordInput extends TupleInput implements EntityInput {
+
+    /* Initial size of visited map. */
+    static final int VISITED_INIT_SIZE = 50;
+
+    /*
+     * Offset to indicate that the visited object is stored in the primary key
+     * byte array.
+     */
+    static final int PRI_KEY_VISITED_OFFSET = Integer.MAX_VALUE - 1;
+
+    /* Used by RecordOutput to prevent illegal nested references. */
+    static final int PROHIBIT_REF_OFFSET = Integer.MAX_VALUE - 2;
+
+    /* Used by RecordInput to prevent illegal nested references. */
+    static final Object PROHIBIT_REF_OBJECT = new Object();
+
+    static final String PROHIBIT_NESTED_REF_MSG =
+        "Cannot embed a reference to a proxied object in the proxy; for " +
+        "example, a collection may not be an element of the collection " +
+        "because collections are proxied";
+
+    private Catalog catalog;
+    private boolean rawAccess;
+    private Map<Integer,Object> visited;
+    private DatabaseEntry priKeyEntry;
+    private int priKeyFormatId;
+
+    /**
+     * Creates a new input with an empty/null visited map.
+     */
+    RecordInput(Catalog catalog,
+                boolean rawAccess,
+                DatabaseEntry priKeyEntry,
+                int priKeyFormatId,
+                byte[] buffer,
+                int offset,
+                int length) {
+        super(buffer, offset, length);
+        this.catalog = catalog;
+        this.rawAccess = rawAccess;
+        this.priKeyEntry = priKeyEntry;
+        this.priKeyFormatId = priKeyFormatId;
+    }
+
+    /**
+     * Copy constructor where a new offset can be specified.
+     */
+    private RecordInput(RecordInput other, int offset) {
+        this(other.catalog, other.rawAccess, other.priKeyEntry,
+             other.priKeyFormatId, other.buf, offset, other.len);
+        visited = other.visited;
+    }
+
+    /**
+     * Copy constructor where a DatabaseEntry can be specified.
+     */
+    private RecordInput(RecordInput other, DatabaseEntry entry) {
+        this(other.catalog, other.rawAccess, other.priKeyEntry,
+             other.priKeyFormatId, entry.getData(), entry.getOffset(),
+             entry.getSize());
+        visited = other.visited;
+    }
+
+    /**
+     * @see EntityInput#getCatalog
+     */
+    public Catalog getCatalog() {
+        return catalog;
+    }
+
+    /**
+     * @see EntityInput#isRawAccess
+     */
+    public boolean isRawAccess() {
+        return rawAccess;
+    }
+
+    /**
+     * @see EntityInput#setRawAccess
+     */
+    public boolean setRawAccess(boolean rawAccessParam) {
+        boolean original = rawAccess;
+        rawAccess = rawAccessParam;
+        return original;
+    }
+
+    /**
+     * @see EntityInput#readObject
+     */
+    public Object readObject() {
+
+        /* Save the current offset before reading the format ID. */
+        Integer visitedOffset = off;
+        RecordInput useInput = this;
+        int formatId = readPackedInt();
+        Object o = null;
+
+        /* For a zero format ID, return a null instance. */
+        if (formatId == Format.ID_NULL) {
+            return null;
+        }
+
+        /* For a negative format ID, lookup an already visited instance. */
+        if (formatId < 0) {
+            int offset = (-(formatId + 1));
+            if (visited != null) {
+                o = visited.get(offset);
+            }
+            if (o == RecordInput.PROHIBIT_REF_OBJECT) {
+                throw new IllegalArgumentException
+                    (RecordInput.PROHIBIT_NESTED_REF_MSG);
+            }
+            if (o != null) {
+                /* Return a previously visited object. */
+                return o;
+            } else {
+
+                /*
+                 * When reading starts from a non-zero offset, we may have to
+                 * go back in the stream and read the referenced object.  This
+                 * happens when reading secondary key fields.
+                 */
+                visitedOffset = offset;
+                if (offset == RecordInput.PRI_KEY_VISITED_OFFSET) {
+                    assert priKeyEntry != null && priKeyFormatId > 0;
+                    useInput = new RecordInput(this, priKeyEntry);
+                    formatId = priKeyFormatId;
+                } else {
+                    useInput = new RecordInput(this, offset);
+                    formatId = useInput.readPackedInt();
+                }
+            }
+        }
+
+        /*
+         * Add a visited object slot that prohibits nested references to this
+         * object during the call to Reader.newInstance below.  The newInstance
+         * method is allowed to read nested fields (in which case
+         * Reader.readObject further below does nothing) under certain
+         * conditions, but under these conditions we do not support nested
+         * references to the parent object. [#15815]
+         */
+        if (visited == null) {
+            visited = new HashMap<Integer,Object>(VISITED_INIT_SIZE);
+        }
+        visited.put(visitedOffset, RecordInput.PROHIBIT_REF_OBJECT);
+
+        /* Create the object using the format indicated. */
+        Format format = catalog.getFormat(formatId);
+        Reader reader = format.getReader();
+        o = reader.newInstance(useInput, rawAccess);
+
+        /*
+         * Set the newly created object in the map of visited objects.  This
+         * must be done before calling Reader.readObject, which allows the
+         * object to contain a reference to itself.
+         */
+        visited.put(visitedOffset, o);
+
+        /*
+         * Finish reading the object.  Then replace it in the visited map in
+         * case a converted object is returned by readObject.
+         */
+        Object o2 = reader.readObject(o, useInput, rawAccess);
+        if (o != o2) {
+            visited.put(visitedOffset, o2);
+        }
+        return o2;
+    }
+
+    /**
+     * @see EntityInput#readKeyObject
+     */
+    public Object readKeyObject(Format format) {
+
+        /* Create and read the object using the given key format. */
+        Reader reader = format.getReader();
+        Object o = reader.newInstance(this, rawAccess);
+        return reader.readObject(o, this, rawAccess);
+    }
+
+    /**
+     * Called when copying secondary keys, for an input that is positioned on
+     * the secondary key field.  Handles references to previously occurring
+     * objects, returning a different RecordInput than this one if appropriate.
+     */
+    KeyLocation getKeyLocation(Format fieldFormat) {
+        RecordInput input = this;
+        if (!fieldFormat.isPrimitive()) {
+            int formatId = input.readPackedInt();
+            if (formatId == Format.ID_NULL) {
+                /* Key field is null. */
+                return null;
+            }
+            if (formatId < 0) {
+                int offset = (-(formatId + 1));
+                if (offset == RecordInput.PRI_KEY_VISITED_OFFSET) {
+                    assert priKeyEntry != null && priKeyFormatId > 0;
+                    input = new RecordInput(this, priKeyEntry);
+                    formatId = priKeyFormatId;
+                } else {
+                    input = new RecordInput(this, offset);
+                    formatId = input.readPackedInt();
+                }
+            }
+            fieldFormat = catalog.getFormat(formatId);
+        }
+        /* Key field is non-null. */
+        return new KeyLocation(input, fieldFormat);
+    }
+
+    /**
+     * @see EntityInput#registerPriKeyObject
+     */
+    public void registerPriKeyObject(Object o) {
+
+        /*
+         * PRI_KEY_VISITED_OFFSET is used as the visited offset to indicate
+         * that the visited object is stored in the primary key byte array.
+         */
+        if (visited == null) {
+            visited = new HashMap<Integer,Object>(VISITED_INIT_SIZE);
+        }
+        visited.put(RecordInput.PRI_KEY_VISITED_OFFSET, o);
+    }
+
+    /**
+     * @see EntityInput#skipField
+     */
+    public void skipField(Format declaredFormat) {
+        if (declaredFormat != null && declaredFormat.isPrimitive()) {
+            declaredFormat.skipContents(this);
+        } else {
+            int formatId = readPackedInt();
+            if (formatId > 0) {
+                Format format = catalog.getFormat(formatId);
+                format.skipContents(this);
+            }
+        }
+    }
+
+    public int readArrayLength() {
+        return readPackedInt();
+    }
+
+    public int readEnumConstant(String[] names) {
+        return readPackedInt();
+    }
+}
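
The back-reference encoding decoded by readObject above can be summarized in isolation: format ID 0 means null, a positive value is a format ID, and a previously written object at byte offset N is referenced as -(N + 1). A minimal standalone sketch (illustrative only, not part of the patch):

```java
// Standalone illustration of the reference encoding shared by RecordInput and
// RecordOutput: offsets are negated and shifted by one so that zero stays
// reserved for null and positive values for format IDs.
public class RefEncodingDemo {

    static int encodeOffset(int offset) {
        return -(offset + 1);
    }

    static int decodeOffset(int ref) {
        return -(ref + 1);
    }

    public static void main(String[] args) {
        int offset = 17;
        int ref = encodeOffset(offset);
        System.out.println(ref);               // -18
        System.out.println(decodeOffset(ref)); // 17
    }
}
```
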
diff --git a/src/com/sleepycat/persist/impl/RecordOutput.java b/src/com/sleepycat/persist/impl/RecordOutput.java
new file mode 100644
index 0000000000000000000000000000000000000000..99ffd54612ff8753adfb91ec8abe0391825f605a
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RecordOutput.java
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecordOutput.java,v 1.10.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+import java.util.Map;
+
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Implements EntityOutput to write record key-data pairs.  Extends TupleOutput
+ * to implement the subset of TupleOutput methods that are defined in the
+ * EntityOutput interface.
+ *
+ * @author Mark Hayes
+ */
+class RecordOutput extends TupleOutput implements EntityOutput {
+
+    private Catalog catalog;
+    private boolean rawAccess;
+    private Map<Object,Integer> visited;
+
+    /**
+     * Creates a new output with an empty/null visited map.
+     */
+    RecordOutput(Catalog catalog, boolean rawAccess) {
+
+        super();
+        this.catalog = catalog;
+        this.rawAccess = rawAccess;
+    }
+
+    /**
+     * @see EntityOutput#writeObject
+     */
+    public void writeObject(Object o, Format fieldFormat) {
+
+        /* For a null instance, write a zero format ID. */
+        if (o == null) {
+            writePackedInt(Format.ID_NULL);
+            return;
+        }
+
+        /*
+         * For an already visited instance, output a reference to it.  The
+         * reference is the negation of the visited offset minus one.
+         */
+        if (visited != null) {
+            Integer offset = visited.get(o);
+            if (offset != null) {
+                if (offset == RecordInput.PROHIBIT_REF_OFFSET) {
+                    throw new IllegalArgumentException
+                        (RecordInput.PROHIBIT_NESTED_REF_MSG);
+                } else {
+                    writePackedInt(-(offset + 1));
+                    return;
+                }
+            }
+        }
+
+        /*
+         * Get and validate the format.  Catalog.getFormat(Class) throws
+         * IllegalArgumentException if the class is not persistent.  We don't
+         * need to check the fieldFormat (and it will be null) for non-raw
+         * access because field type checking is enforced by Java.
+         */
+        Format format;
+        if (rawAccess) {
+            format = RawAbstractInput.checkRawType(catalog, o, fieldFormat);
+        } else {
+
+            /*
+             * Do not attempt to open subclass indexes in case this is an
+             * embedded entity.  We will detect that error below, but we must
+             * not fail first when attempting to open the secondaries.
+             */
+            format = catalog.getFormat
+                (o.getClass(), false /*openEntitySubclassIndexes*/);
+        }
+        if (format.getProxiedFormat() != null) {
+            throw new IllegalArgumentException
+                ("May not store proxy classes directly: " +
+                 format.getClassName());
+        }
+        /* Check for embedded entity classes and subclasses. */
+        if (format.getEntityFormat() != null) {
+            throw new IllegalArgumentException
+                ("References to entities are not allowed: " +
+                 o.getClass().getName());
+        }
+
+        /*
+         * Remember that we visited this instance.  Certain formats
+         * (ProxiedFormat for example) prohibit nested fields that reference
+         * the parent object. [#15815]
+         */
+        if (visited == null) {
+            visited = new IdentityHashMap<Object,Integer>();
+        }
+        boolean prohibitNestedRefs = format.areNestedRefsProhibited();
+        Integer visitedOffset = size();
+        visited.put(o, prohibitNestedRefs ? RecordInput.PROHIBIT_REF_OFFSET :
+                       visitedOffset);
+
+        /* Finally, write the formatId and object value. */
+        writePackedInt(format.getId());
+        format.writeObject(o, this, rawAccess);
+
+        /* Always allow references from siblings that follow. */
+        if (prohibitNestedRefs) {
+            visited.put(o, visitedOffset);
+        }
+    }
+
+    /**
+     * @see EntityOutput#writeKeyObject
+     */
+    public void writeKeyObject(Object o, Format fieldFormat) {
+
+        /* Key objects must not be null and must be of the declared class. */
+        if (o == null) {
+            throw new IllegalArgumentException
+                ("Key field object may not be null");
+        }
+        Format format;
+        if (rawAccess) {
+            if (o instanceof RawObject) {
+                format = (Format) ((RawObject) o).getType();
+            } else {
+                format = catalog.getFormat
+                    (o.getClass(), false /*openEntitySubclassIndexes*/);
+                /* Expect primitive wrapper class in raw mode. */
+                if (fieldFormat.isPrimitive()) {
+                    fieldFormat = fieldFormat.getWrapperFormat();
+                }
+            }
+        } else {
+            format = catalog.getFormat(o.getClass(),
+                                       false /*openEntitySubclassIndexes*/);
+        }
+        if (fieldFormat != format) {
+            throw new IllegalArgumentException
+                ("The key field object class (" + o.getClass().getName() +
+                 ") must be the field's declared class: " +
+                 fieldFormat.getClassName());
+        }
+
+        /* Write the object value (no formatId is written for keys). */
+        fieldFormat.writeObject(o, this, rawAccess);
+    }
+
+    /**
+     * @see EntityOutput#registerPriKeyObject
+     */
+    public void registerPriKeyObject(Object o) {
+
+        /*
+         * PRI_KEY_VISITED_OFFSET is used as the visited offset to indicate
+         * that the visited object is stored in the primary key byte array.
+         */
+        if (visited == null) {
+            visited = new IdentityHashMap<Object,Integer>();
+        }
+        visited.put(o, RecordInput.PRI_KEY_VISITED_OFFSET);
+    }
+
+    /**
+     * @see EntityOutput#writeArrayLength
+     */
+    public void writeArrayLength(int length) {
+        writePackedInt(length);
+    }
+
+    /**
+     * @see EntityOutput#writeEnumConstant
+     */
+    public void writeEnumConstant(String[] names, int index) {
+        writePackedInt(index);
+    }
+}
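
Note that the visited map above is an IdentityHashMap, so lookups are by object identity rather than equals(); equal-but-distinct instances are therefore written in full instead of as back-references. A minimal sketch of that lookup behavior (illustrative only, not part of the patch):

```java
import java.util.IdentityHashMap;
import java.util.Map;

// Demonstrates the identity-based lookup the visited map relies on.
public class IdentityVisitedDemo {

    public static void main(String[] args) {
        Map<Object,Integer> visited = new IdentityHashMap<Object,Integer>();
        String a = new String("key");
        String b = new String("key"); // equals(a), but a different instance
        visited.put(a, 10);
        System.out.println(visited.containsKey(a)); // true  -> back-reference
        System.out.println(visited.containsKey(b)); // false -> written in full
    }
}
```
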
diff --git a/src/com/sleepycat/persist/impl/ReflectionAccessor.java b/src/com/sleepycat/persist/impl/ReflectionAccessor.java
new file mode 100644
index 0000000000000000000000000000000000000000..0a67d461d69e2d4cc78dd84d214799fd72a6cca5
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ReflectionAccessor.java
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReflectionAccessor.java,v 1.23.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.AccessibleObject;
+import java.lang.reflect.Array;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Modifier;
+import java.util.List;
+
+/**
+ * Implements Accessor using reflection.
+ *
+ * @author Mark Hayes
+ */
+class ReflectionAccessor implements Accessor {
+
+    private static final FieldAccess[] EMPTY_KEYS = {};
+
+    private Class type;
+    private Accessor superAccessor;
+    private Constructor constructor;
+    private FieldAccess priKey;
+    private FieldAccess[] secKeys;
+    private FieldAccess[] nonKeys;
+
+    private ReflectionAccessor(Class type, Accessor superAccessor) {
+        this.type = type;
+        this.superAccessor = superAccessor;
+        try {
+            constructor = type.getDeclaredConstructor();
+        } catch (NoSuchMethodException e) {
+            throw new IllegalStateException(type.getName());
+        }
+        if (!Modifier.isPublic(constructor.getModifiers())) {
+            setAccessible(constructor, type.getName() + "()");
+        }
+    }
+
+    ReflectionAccessor(Catalog catalog,
+                       Class type,
+                       Accessor superAccessor,
+                       FieldInfo priKeyField,
+                       List<FieldInfo> secKeyFields,
+                       List<FieldInfo> nonKeyFields) {
+        this(type, superAccessor);
+        if (priKeyField != null) {
+            priKey = getField(catalog, priKeyField, true, false);
+        } else {
+            priKey = null;
+        }
+        if (secKeyFields.size() > 0) {
+            secKeys = getFields(catalog, secKeyFields, false, false);
+        } else {
+            secKeys = EMPTY_KEYS;
+        }
+        if (nonKeyFields.size() > 0) {
+            nonKeys = getFields(catalog, nonKeyFields, false, false);
+        } else {
+            nonKeys = EMPTY_KEYS;
+        }
+    }
+
+    ReflectionAccessor(Catalog catalog,
+                       Class type,
+                       List<FieldInfo> fieldInfos) {
+        this(type, null);
+        priKey = null;
+        secKeys = EMPTY_KEYS;
+        nonKeys = getFields(catalog, fieldInfos, true, true);
+    }
+
+    private FieldAccess[] getFields(Catalog catalog,
+                                    List<FieldInfo> fieldInfos,
+                                    boolean isRequiredKeyField,
+                                    boolean isCompositeKey) {
+        int index = 0;
+        FieldAccess[] fields = new FieldAccess[fieldInfos.size()];
+        for (FieldInfo info : fieldInfos) {
+            fields[index] = getField
+                (catalog, info, isRequiredKeyField, isCompositeKey);
+            index += 1;
+        }
+        return fields;
+    }
+
+    private FieldAccess getField(Catalog catalog,
+                                 FieldInfo fieldInfo,
+                                 boolean isRequiredKeyField,
+                                 boolean isCompositeKey) {
+        Field field;
+        try {
+            field = type.getDeclaredField(fieldInfo.getName());
+        } catch (NoSuchFieldException e) {
+            throw new IllegalStateException(e);
+        }
+        if (!Modifier.isPublic(field.getModifiers())) {
+            setAccessible(field, field.getName());
+        }
+        Class fieldCls = field.getType();
+        if (fieldCls.isPrimitive()) {
+            assert SimpleCatalog.isSimpleType(fieldCls);
+            return new PrimitiveAccess
+                (field, SimpleCatalog.getSimpleFormat(fieldCls));
+        } else if (isRequiredKeyField) {
+            Format format = catalog.getFormat
+                (fieldCls, false /*openEntitySubclassIndexes*/);
+            if (isCompositeKey && !SimpleCatalog.isSimpleType(fieldCls)) {
+                throw new IllegalArgumentException
+                    ("Composite key class has non-simple type field: " +
+                     type.getName() + '.' + field.getName());
+            }
+            return new KeyObjectAccess(field, format);
+        } else {
+            return new ObjectAccess(field);
+        }
+    }
+
+    private void setAccessible(AccessibleObject object, String memberName) {
+        try {
+            object.setAccessible(true);
+        } catch (SecurityException e) {
+            throw new IllegalStateException
+                ("Unable to access non-public member: " +
+                 type.getName() + '.' + memberName +
+                 ". Please configure the Java Security Manager setting: " +
+                 "ReflectPermission suppressAccessChecks", e);
+        }
+    }
+
+    public Object newInstance() {
+        try {
+            return constructor.newInstance();
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        } catch (InstantiationException e) {
+            throw new IllegalStateException(e);
+        } catch (InvocationTargetException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public Object newArray(int len) {
+        return Array.newInstance(type, len);
+    }
+
+    public boolean isPriKeyFieldNullOrZero(Object o) {
+        try {
+            if (priKey != null) {
+                return priKey.isNullOrZero(o);
+            } else if (superAccessor != null) {
+                return superAccessor.isPriKeyFieldNullOrZero(o);
+            } else {
+                throw new IllegalStateException("No primary key field");
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void writePriKeyField(Object o, EntityOutput output) {
+        try {
+            if (priKey != null) {
+                priKey.write(o, output);
+            } else if (superAccessor != null) {
+                superAccessor.writePriKeyField(o, output);
+            } else {
+                throw new IllegalStateException("No primary key field");
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void readPriKeyField(Object o, EntityInput input) {
+        try {
+            if (priKey != null) {
+                priKey.read(o, input);
+            } else if (superAccessor != null) {
+                superAccessor.readPriKeyField(o, input);
+            } else {
+                throw new IllegalStateException("No primary key field");
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void writeSecKeyFields(Object o, EntityOutput output) {
+        try {
+            if (priKey != null && !priKey.isPrimitive) {
+                output.registerPriKeyObject(priKey.field.get(o));
+            }
+            if (superAccessor != null) {
+                superAccessor.writeSecKeyFields(o, output);
+            }
+            for (int i = 0; i < secKeys.length; i += 1) {
+                secKeys[i].write(o, output);
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void readSecKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        try {
+            if (priKey != null && !priKey.isPrimitive) {
+                input.registerPriKeyObject(priKey.field.get(o));
+            }
+            if (superLevel != 0 && superAccessor != null) {
+                superAccessor.readSecKeyFields
+                    (o, input, startField, endField, superLevel - 1);
+            } else {
+                if (superLevel > 0) {
+                    throw new IllegalStateException
+                        ("Superclass does not exist");
+                }
+            }
+            if (superLevel <= 0) {
+                for (int i = startField;
+                     i <= endField && i < secKeys.length;
+                     i += 1) {
+                    secKeys[i].read(o, input);
+                }
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void writeNonKeyFields(Object o, EntityOutput output) {
+        try {
+            if (superAccessor != null) {
+                superAccessor.writeNonKeyFields(o, output);
+            }
+            for (int i = 0; i < nonKeys.length; i += 1) {
+                nonKeys[i].write(o, output);
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void readNonKeyFields(Object o,
+                                 EntityInput input,
+                                 int startField,
+                                 int endField,
+                                 int superLevel) {
+        try {
+            if (superLevel != 0 && superAccessor != null) {
+                superAccessor.readNonKeyFields
+                    (o, input, startField, endField, superLevel - 1);
+            } else {
+                if (superLevel > 0) {
+                    throw new IllegalStateException
+                        ("Superclass does not exist");
+                }
+            }
+            if (superLevel <= 0) {
+                for (int i = startField;
+                     i <= endField && i < nonKeys.length;
+                     i += 1) {
+                    nonKeys[i].read(o, input);
+                }
+            }
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public Object getField(Object o,
+                           int field,
+                           int superLevel,
+                           boolean isSecField) {
+        if (superLevel > 0) {
+            return superAccessor.getField
+                (o, field, superLevel - 1, isSecField);
+        }
+        try {
+            Field fld =
+                isSecField ? secKeys[field].field : nonKeys[field].field;
+            return fld.get(o);
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void setField(Object o,
+                         int field,
+                         int superLevel,
+                         boolean isSecField,
+                         Object value) {
+        if (superLevel > 0) {
+            superAccessor.setField
+                (o, field, superLevel - 1, isSecField, value);
+            return;
+        }
+        try {
+            Field fld =
+                isSecField ? secKeys[field].field : nonKeys[field].field;
+            fld.set(o, value);
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    /**
+     * Abstract base class for field access classes.
+     */
+    private static abstract class FieldAccess {
+
+        Field field;
+        boolean isPrimitive;
+
+        FieldAccess(Field field) {
+            this.field = field;
+            isPrimitive = field.getType().isPrimitive();
+        }
+
+        /**
+         * Writes a field.
+         */
+        abstract void write(Object o, EntityOutput out)
+            throws IllegalAccessException;
+
+        /**
+         * Reads a field.
+         */
+        abstract void read(Object o, EntityInput in)
+            throws IllegalAccessException;
+
+        /**
+         * Returns whether a field is null (for reference types) or zero (for
+         * primitive integer types).  This implementation handles the reference
+         * types.
+         */
+        boolean isNullOrZero(Object o)
+            throws IllegalAccessException {
+
+            return field.get(o) == null;
+        }
+    }
+
+    /**
+     * Access for fields with object types.
+     */
+    private static class ObjectAccess extends FieldAccess {
+
+        ObjectAccess(Field field) {
+            super(field);
+        }
+
+        @Override
+        void write(Object o, EntityOutput out)
+            throws IllegalAccessException {
+
+            out.writeObject(field.get(o), null);
+        }
+
+        @Override
+        void read(Object o, EntityInput in)
+            throws IllegalAccessException {
+
+            field.set(o, in.readObject());
+        }
+    }
+
+    /**
+     * Access for primary key fields and composite key fields with object
+     * types.
+     */
+    private static class KeyObjectAccess extends FieldAccess {
+
+        private Format format;
+
+        KeyObjectAccess(Field field, Format format) {
+            super(field);
+            this.format = format;
+        }
+
+        @Override
+        void write(Object o, EntityOutput out)
+            throws IllegalAccessException {
+
+            out.writeKeyObject(field.get(o), format);
+        }
+
+        @Override
+        void read(Object o, EntityInput in)
+            throws IllegalAccessException {
+
+            field.set(o, in.readKeyObject(format));
+        }
+    }
+
+    /**
+     * Access for fields with primitive types.
+     */
+    private static class PrimitiveAccess extends FieldAccess {
+
+        private SimpleFormat format;
+
+        PrimitiveAccess(Field field, SimpleFormat format) {
+            super(field);
+            this.format = format;
+        }
+
+        @Override
+        void write(Object o, EntityOutput out)
+            throws IllegalAccessException {
+
+            format.writePrimitiveField(o, out, field);
+        }
+
+        @Override
+        void read(Object o, EntityInput in)
+            throws IllegalAccessException {
+
+            format.readPrimitiveField(o, in, field);
+        }
+
+        @Override
+        boolean isNullOrZero(Object o)
+            throws IllegalAccessException {
+
+            return field.getLong(o) == 0;
+        }
+    }
+}
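
The reflection requirements enforced above translate into the following shape for a persistent class: a no-argument constructor must exist, but neither it nor the fields need to be public, since setAccessible is called on both. A minimal sketch using the public DPL annotations (the Employee class is illustrative, not part of the patch):

```java
import com.sleepycat.persist.model.Entity;
import com.sleepycat.persist.model.PrimaryKey;

// Illustrative entity class satisfying ReflectionAccessor's requirements.
@Entity
class Employee {

    @PrimaryKey
    private long id;          // accessed via Field.setLong/getLong

    private String name;      // accessed via Field.set/get

    private Employee() {      // located via getDeclaredConstructor()
    }

    Employee(long id, String name) {
        this.id = id;
        this.name = name;
    }
}
```
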
diff --git a/src/com/sleepycat/persist/impl/SimpleCatalog.java b/src/com/sleepycat/persist/impl/SimpleCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..dabc8989d72de09537f07a194e71bab24ea3563d
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/SimpleCatalog.java
@@ -0,0 +1,256 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SimpleCatalog.java,v 1.26.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * A static catalog containing simple types only.  Once created, this catalog
+ * is immutable.
+ *
+ * For bindings accessed by a PersistComparator during recovery, the
+ * SimpleCatalog provides formats for all simple types.  To reduce redundant
+ * format objects, the SimpleCatalog's formats are copied when creating a
+ * regular PersistCatalog.
+ *
+ * This class also contains utility methods for dealing with primitives.
+ *
+ * @author Mark Hayes
+ */
+public class SimpleCatalog implements Catalog {
+
+    private static final Map<String,Class> keywordToPrimitive;
+    static {
+        keywordToPrimitive = new HashMap<String,Class>(8);
+        keywordToPrimitive.put("boolean", Boolean.TYPE);
+        keywordToPrimitive.put("char", Character.TYPE);
+        keywordToPrimitive.put("byte", Byte.TYPE);
+        keywordToPrimitive.put("short", Short.TYPE);
+        keywordToPrimitive.put("int", Integer.TYPE);
+        keywordToPrimitive.put("long", Long.TYPE);
+        keywordToPrimitive.put("float", Float.TYPE);
+        keywordToPrimitive.put("double", Double.TYPE);
+    }
+
+    private static final Map<Class,Class> primitiveTypeToWrapper;
+    static {
+        primitiveTypeToWrapper = new HashMap<Class,Class>(8);
+        primitiveTypeToWrapper.put(Boolean.TYPE, Boolean.class);
+        primitiveTypeToWrapper.put(Character.TYPE, Character.class);
+        primitiveTypeToWrapper.put(Byte.TYPE, Byte.class);
+        primitiveTypeToWrapper.put(Short.TYPE, Short.class);
+        primitiveTypeToWrapper.put(Integer.TYPE, Integer.class);
+        primitiveTypeToWrapper.put(Long.TYPE, Long.class);
+        primitiveTypeToWrapper.put(Float.TYPE, Float.class);
+        primitiveTypeToWrapper.put(Double.TYPE, Double.class);
+    }
+
+    private static final SimpleCatalog instance = new SimpleCatalog();
+
+    static SimpleCatalog getInstance() {
+        return instance;
+    }
+
+    static boolean isSimpleType(Class type) {
+        return instance.formatMap.containsKey(type.getName());
+    }
+
+    static Class primitiveToWrapper(Class type) {
+        Class cls = primitiveTypeToWrapper.get(type);
+        if (cls == null) {
+            throw new IllegalStateException(type.getName());
+        }
+        return cls;
+    }
+
+    public static Class keyClassForName(String className) {
+        Class cls = keywordToPrimitive.get(className);
+        if (cls != null) {
+            cls = primitiveTypeToWrapper.get(cls);
+        } else {
+            try {
+                cls = EntityModel.classForName(className);
+            } catch (ClassNotFoundException e) {
+                throw new IllegalArgumentException
+                    ("Key class not found: " + className);
+            }
+        }
+        return cls;
+    }
+
+    public static String keyClassName(String className) {
+        Class cls = keywordToPrimitive.get(className);
+        if (cls != null) {
+            cls = primitiveTypeToWrapper.get(cls);
+            return cls.getName();
+        } else {
+            return className;
+        }
+    }
+
+    public static Class classForName(String className)
+        throws ClassNotFoundException {
+
+        Class cls = keywordToPrimitive.get(className);
+        if (cls == null) {
+            cls = EntityModel.classForName(className);
+        }
+        return cls;
+    }
+
+    static SimpleFormat getSimpleFormat(Class type) {
+        return instance.formatMap.get(type.getName());
+    }
+
+    static List<Format> copyFormatList() {
+        return new ArrayList<Format>(instance.formatList);
+    }
+
+    static boolean copyMissingFormats(List<Format> copyToList) {
+        boolean anyCopied = false;
+        for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) {
+            Format thisFormat = instance.formatList.get(i);
+            Format otherFormat = copyToList.get(i);
+            if (thisFormat != null && otherFormat == null) {
+                copyToList.set(i, thisFormat);
+                anyCopied = true;
+            }
+        }
+        return anyCopied;
+    }
+
+    private List<SimpleFormat> formatList;
+    private Map<String,SimpleFormat> formatMap;
+
+    private SimpleCatalog() {
+
+        /*
+         * Reserve slots for all predefined IDs, so that the next ID assigned
+         * will be Format.ID_PREDEFINED plus one.
+         */
+        int initCapacity = Format.ID_PREDEFINED * 2;
+        formatList = new ArrayList<SimpleFormat>(initCapacity);
+        formatMap = new HashMap<String,SimpleFormat>(initCapacity);
+
+        for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) {
+            formatList.add(null);
+        }
+
+        /* Initialize all predefined formats.  */
+        setFormat(Format.ID_BOOL,     new SimpleFormat.FBool(true));
+        setFormat(Format.ID_BOOL_W,   new SimpleFormat.FBool(false));
+        setFormat(Format.ID_BYTE,     new SimpleFormat.FByte(true));
+        setFormat(Format.ID_BYTE_W,   new SimpleFormat.FByte(false));
+        setFormat(Format.ID_SHORT,    new SimpleFormat.FShort(true));
+        setFormat(Format.ID_SHORT_W,  new SimpleFormat.FShort(false));
+        setFormat(Format.ID_INT,      new SimpleFormat.FInt(true));
+        setFormat(Format.ID_INT_W,    new SimpleFormat.FInt(false));
+        setFormat(Format.ID_LONG,     new SimpleFormat.FLong(true));
+        setFormat(Format.ID_LONG_W,   new SimpleFormat.FLong(false));
+        setFormat(Format.ID_FLOAT,    new SimpleFormat.FFloat(true));
+        setFormat(Format.ID_FLOAT_W,  new SimpleFormat.FFloat(false));
+        setFormat(Format.ID_DOUBLE,   new SimpleFormat.FDouble(true));
+        setFormat(Format.ID_DOUBLE_W, new SimpleFormat.FDouble(false));
+        setFormat(Format.ID_CHAR,     new SimpleFormat.FChar(true));
+        setFormat(Format.ID_CHAR_W,   new SimpleFormat.FChar(false));
+        setFormat(Format.ID_STRING,   new SimpleFormat.FString());
+        setFormat(Format.ID_BIGINT,   new SimpleFormat.FBigInt());
+        /*
+        setFormat(Format.ID_BIGDEC,   new SimpleFormat.FBigDec());
+        */
+        setFormat(Format.ID_DATE,     new SimpleFormat.FDate());
+
+        /* Tell primitives about their wrapper class. */
+        setWrapper(Format.ID_BOOL, Format.ID_BOOL_W);
+        setWrapper(Format.ID_BYTE, Format.ID_BYTE_W);
+        setWrapper(Format.ID_SHORT, Format.ID_SHORT_W);
+        setWrapper(Format.ID_INT, Format.ID_INT_W);
+        setWrapper(Format.ID_LONG, Format.ID_LONG_W);
+        setWrapper(Format.ID_FLOAT, Format.ID_FLOAT_W);
+        setWrapper(Format.ID_DOUBLE, Format.ID_DOUBLE_W);
+        setWrapper(Format.ID_CHAR, Format.ID_CHAR_W);
+    }
+
+    /**
+     * Sets a format for which space in the formatList has been preallocated,
+     * and makes it the current format for the class.
+     */
+    private void setFormat(int id, SimpleFormat format) {
+        format.setId(id);
+        format.initializeIfNeeded(this, null /*model*/);
+        formatList.set(id, format);
+        formatMap.put(format.getClassName(), format);
+    }
+
+    /**
+     * Tells a primitive format about the format for its corresponding
+     * primitive wrapper class.
+     */
+    private void setWrapper(int primitiveId, int wrapperId) {
+        SimpleFormat primitiveFormat = formatList.get(primitiveId);
+        SimpleFormat wrapperFormat = formatList.get(wrapperId);
+        primitiveFormat.setWrapperFormat(wrapperFormat);
+    }
+
+    public int getInitVersion(Format format, boolean forReader) {
+        return Catalog.CURRENT_VERSION;
+    }
+
+    public Format getFormat(int formatId) {
+        Format format;
+        try {
+            format = formatList.get(formatId);
+            if (format == null) {
+                throw new IllegalStateException
+                    ("Not a simple type: " + formatId);
+            }
+            return format;
+        } catch (IndexOutOfBoundsException e) {
+            throw new IllegalStateException
+                ("Not a simple type: " + formatId);
+        }
+    }
+
+    public Format getFormat(Class cls, boolean openEntitySubclassIndexes) {
+        Format format = formatMap.get(cls.getName());
+        if (format == null) {
+            throw new IllegalArgumentException
+                ("Not a simple type: " + cls.getName());
+        }
+        return format;
+    }
+
+    public Format getFormat(String className) {
+        return formatMap.get(className);
+    }
+
+    public Format createFormat(String clsName, Map<String,Format> newFormats) {
+        throw new IllegalStateException();
+    }
+
+    public Format createFormat(Class type, Map<String,Format> newFormats) {
+        throw new IllegalStateException();
+    }
+
+    public boolean isRawAccess() {
+        return false;
+    }
+
+    public Object convertRawObject(RawObject o, IdentityHashMap converted) {
+        throw new IllegalStateException();
+    }
+}
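
A short usage sketch of the key-class resolution above: primitive keywords resolve to their wrapper classes, while any other name is loaded through EntityModel.classForName. (The demo class is illustrative, not part of the patch.)

```java
import com.sleepycat.persist.impl.SimpleCatalog;

// Illustrates SimpleCatalog.keyClassForName behavior.
public class KeyClassDemo {

    public static void main(String[] args) {
        System.out.println(SimpleCatalog.keyClassForName("int"));
        // -> class java.lang.Integer
        System.out.println(SimpleCatalog.keyClassForName("java.lang.String"));
        // -> class java.lang.String
    }
}
```
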
diff --git a/src/com/sleepycat/persist/impl/SimpleFormat.java b/src/com/sleepycat/persist/impl/SimpleFormat.java
new file mode 100644
index 0000000000000000000000000000000000000000..df947bdf5b6527d085d480c14b146a57f35c854b
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/SimpleFormat.java
@@ -0,0 +1,840 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SimpleFormat.java,v 1.24.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.lang.reflect.Field;
+import java.math.BigInteger;
+import java.util.Date;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * Format for simple types, including primitives.  Additional methods are
+ * included to optimize the handling of primitives.  Other classes such as
+ * PrimitiveArrayFormat and ReflectionAccessor take advantage of these methods.
+ *
+ * @author Mark Hayes
+ */
+public abstract class SimpleFormat extends Format {
+
+    private static final long serialVersionUID = 4595245575868697702L;
+
+    private boolean primitive;
+    private SimpleFormat wrapperFormat;
+
+    SimpleFormat(Class type, boolean primitive) {
+        super(type);
+        this.primitive = primitive;
+    }
+
+    void setWrapperFormat(SimpleFormat wrapperFormat) {
+        this.wrapperFormat = wrapperFormat;
+    }
+
+    @Override
+    Format getWrapperFormat() {
+        return wrapperFormat;
+    }
+
+    @Override
+    public boolean isSimple() {
+        return true;
+    }
+
+    @Override
+    public boolean isPrimitive() {
+        return primitive;
+    }
+
+    @Override
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String,Format> newFormats) {
+    }
+
+    @Override
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+    }
+
+    @Override
+    public Object readObject(Object o, EntityInput input, boolean rawAccess) {
+        /* newInstance reads the value -- do nothing here. */
+        return o;
+    }
+
+    @Override
+    boolean evolve(Format newFormat, Evolver evolver) {
+        evolver.useOldFormat(this, newFormat);
+        return true;
+    }
+
+    /* -- Begin methods to be overridden by primitive formats only. -- */
+
+    Object newPrimitiveArray(int len, EntityInput input) {
+        throw new UnsupportedOperationException();
+    }
+
+    void writePrimitiveArray(Object o, EntityOutput output) {
+        throw new UnsupportedOperationException();
+    }
+
+    int getPrimitiveLength() {
+        throw new UnsupportedOperationException();
+    }
+
+    void readPrimitiveField(Object o, EntityInput input, Field field)
+        throws IllegalAccessException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    void writePrimitiveField(Object o, EntityOutput output, Field field)
+        throws IllegalAccessException {
+
+        throw new UnsupportedOperationException();
+    }
+
+    /* -- End methods to be overridden by primitive formats only. -- */
+
+    void skipPrimitiveArray(int len, RecordInput input) {
+        input.skipFast(len * getPrimitiveLength());
+    }
+
+    void copySecMultiKeyPrimitiveArray(int len,
+                                       RecordInput input,
+                                       Set results) {
+        int primLen = getPrimitiveLength();
+        for (int i = 0; i < len; i += 1) {
+            DatabaseEntry entry = new DatabaseEntry
+                (input.getBufferBytes(), input.getBufferOffset(), primLen);
+            results.add(entry);
+            input.skipFast(primLen);
+        }
+    }
+
+    public static class FBool extends SimpleFormat {
+
+        private static final long serialVersionUID = -7724949525068533451L;
+
+        FBool(boolean primitive) {
+            super(primitive ? Boolean.TYPE : Boolean.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Boolean[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Boolean.valueOf(input.readBoolean());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeBoolean(((Boolean) o).booleanValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(1);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            boolean[] a = new boolean[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readBoolean();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            boolean[] a = (boolean[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeBoolean(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 1;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setBoolean(o, input.readBoolean());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeBoolean(field.getBoolean(o));
+        }
+    }
+
+    public static class FByte extends SimpleFormat {
+
+        private static final long serialVersionUID = 3651752958101447257L;
+
+        FByte(boolean primitive) {
+            super(primitive ? Byte.TYPE : Byte.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Byte[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Byte.valueOf(input.readByte());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeByte(((Number) o).byteValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(1);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            byte[] a = new byte[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readByte();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            byte[] a = (byte[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeByte(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 1;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setByte(o, input.readByte());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeByte(field.getByte(o));
+        }
+
+        @Override
+        Format getSequenceKeyFormat() {
+            return this;
+        }
+    }
+
+    public static class FShort extends SimpleFormat {
+
+        private static final long serialVersionUID = -4909138198491785624L;
+
+        FShort(boolean primitive) {
+            super(primitive ? Short.TYPE : Short.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Short[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Short.valueOf(input.readShort());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeShort(((Number) o).shortValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(2);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            short[] a = new short[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readShort();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            short[] a = (short[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeShort(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 2;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setShort(o, input.readShort());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeShort(field.getShort(o));
+        }
+
+        @Override
+        Format getSequenceKeyFormat() {
+            return this;
+        }
+    }
+
+    public static class FInt extends SimpleFormat {
+
+        private static final long serialVersionUID = 2695910006049980013L;
+
+        FInt(boolean primitive) {
+            super(primitive ? Integer.TYPE : Integer.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Integer[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Integer.valueOf(input.readInt());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeInt(((Number) o).intValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(4);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            int[] a = new int[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readInt();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            int[] a = (int[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeInt(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 4;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setInt(o, input.readInt());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeInt(field.getInt(o));
+        }
+
+        @Override
+        Format getSequenceKeyFormat() {
+            return this;
+        }
+    }
+
+    public static class FLong extends SimpleFormat {
+
+        private static final long serialVersionUID = 1872661106534776520L;
+
+        FLong(boolean primitive) {
+            super(primitive ? Long.TYPE : Long.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Long[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Long.valueOf(input.readLong());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeLong(((Number) o).longValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(8);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast
+                (input.getBufferBytes(), input.getBufferOffset(), 8);
+            input.skipFast(8);
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            long[] a = new long[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readLong();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            long[] a = (long[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeLong(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 8;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setLong(o, input.readLong());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeLong(field.getLong(o));
+        }
+
+        @Override
+        Format getSequenceKeyFormat() {
+            return this;
+        }
+    }
+
+    public static class FFloat extends SimpleFormat {
+
+        private static final long serialVersionUID = 1033413049495053602L;
+
+        FFloat(boolean primitive) {
+            super(primitive ? Float.TYPE : Float.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Float[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Float.valueOf(input.readSortedFloat());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeSortedFloat(((Number) o).floatValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(4);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            float[] a = new float[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readSortedFloat();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            float[] a = (float[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeSortedFloat(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 4;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setFloat(o, input.readSortedFloat());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeSortedFloat(field.getFloat(o));
+        }
+    }
+
+    public static class FDouble extends SimpleFormat {
+
+        private static final long serialVersionUID = 646904456811041423L;
+
+        FDouble(boolean primitive) {
+            super(primitive ? Double.TYPE : Double.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Double[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Double.valueOf(input.readSortedDouble());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeSortedDouble(((Number) o).doubleValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(8);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast
+                (input.getBufferBytes(), input.getBufferOffset(), 8);
+            input.skipFast(8);
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            double[] a = new double[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readSortedDouble();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            double[] a = (double[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeSortedDouble(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 8;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setDouble(o, input.readSortedDouble());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeSortedDouble(field.getDouble(o));
+        }
+    }
+
+    public static class FChar extends SimpleFormat {
+
+        private static final long serialVersionUID = -7609118195770005374L;
+
+        FChar(boolean primitive) {
+            super(primitive ? Character.TYPE : Character.class, primitive);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Character[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return Character.valueOf(input.readChar());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeChar(((Character) o).charValue());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(2);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast(input.readFast());
+            output.writeFast(input.readFast());
+        }
+
+        @Override
+        Object newPrimitiveArray(int len, EntityInput input) {
+            char[] a = new char[len];
+            for (int i = 0; i < len; i += 1) {
+                a[i] = input.readChar();
+            }
+            return a;
+        }
+
+        @Override
+        void writePrimitiveArray(Object o, EntityOutput output) {
+            char[] a = (char[]) o;
+            int len = a.length;
+            output.writeArrayLength(len);
+            for (int i = 0; i < len; i += 1) {
+                output.writeChar(a[i]);
+            }
+        }
+
+        @Override
+        int getPrimitiveLength() {
+            return 2;
+        }
+
+        @Override
+        void readPrimitiveField(Object o, EntityInput input, Field field)
+            throws IllegalAccessException {
+
+            field.setChar(o, input.readChar());
+        }
+
+        @Override
+        void writePrimitiveField(Object o, EntityOutput output, Field field)
+            throws IllegalAccessException {
+
+            output.writeChar(field.getChar(o));
+        }
+    }
+
+    public static class FString extends SimpleFormat {
+
+        private static final long serialVersionUID = 5710392786480064612L;
+
+        FString() {
+            super(String.class, false);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new String[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return input.readString();
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeString((String) o);
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(input.getStringByteLength());
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            int len = input.getStringByteLength();
+            output.writeFast
+                (input.getBufferBytes(), input.getBufferOffset(), len);
+            input.skipFast(len);
+        }
+    }
+
+    public static class FBigInt extends SimpleFormat {
+
+        private static final long serialVersionUID = -5027098112507644563L;
+
+        FBigInt() {
+            super(BigInteger.class, false);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new BigInteger[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return input.readBigInteger();
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeBigInteger((BigInteger) o);
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(input.getBigIntegerByteLength());
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            int len = input.getBigIntegerByteLength();
+            output.writeFast
+                (input.getBufferBytes(), input.getBufferOffset(), len);
+            input.skipFast(len);
+        }
+    }
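+
+    /*
+     * Note: unlike the fixed-width formats, FString and FBigInt are
+     * variable-length; their skipContents and copySecKey implementations
+     * first read the encoded byte length from the input (getStringByteLength
+     * and getBigIntegerByteLength) instead of skipping a constant width.
+     */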
+
+    public static class FDate extends SimpleFormat {
+
+        private static final long serialVersionUID = -5665773229869034145L;
+
+        FDate() {
+            super(Date.class, false);
+        }
+
+        @Override
+        Object newArray(int len) {
+            return new Date[len];
+        }
+
+        @Override
+        public Object newInstance(EntityInput input, boolean rawAccess) {
+            return new Date(input.readLong());
+        }
+
+        @Override
+        void writeObject(Object o, EntityOutput output, boolean rawAccess) {
+            output.writeLong(((Date) o).getTime());
+        }
+
+        @Override
+        void skipContents(RecordInput input) {
+            input.skipFast(8);
+        }
+
+        @Override
+        void copySecKey(RecordInput input, RecordOutput output) {
+            output.writeFast
+                (input.getBufferBytes(), input.getBufferOffset(), 8);
+            input.skipFast(8);
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Store.java b/src/com/sleepycat/persist/impl/Store.java
new file mode 100644
index 0000000000000000000000000000000000000000..d2226bebf7043dda057e7ced916ae17824c33a19
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Store.java
@@ -0,0 +1,1394 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Store.java,v 1.36.2.5 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+/* <!-- begin JE only --> */
+import com.sleepycat.je.DatabaseNotFoundException;
+/* <!-- end JE only --> */
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Sequence;
+import com.sleepycat.je.SequenceConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.DatabaseNamer;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.EvolveConfig;
+import com.sleepycat.persist.evolve.EvolveEvent;
+import com.sleepycat.persist.evolve.EvolveInternal;
+import com.sleepycat.persist.evolve.EvolveListener;
+import com.sleepycat.persist.evolve.EvolveStats;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.DeleteAction;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.model.ModelInternal;
+import com.sleepycat.persist.model.PrimaryKeyMetadata;
+import com.sleepycat.persist.model.Relationship;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.util.keyrange.KeyRange;
+
+/**
+ * Base implementation for EntityStore and RawStore.  The methods here
+ * correspond directly to those in EntityStore; see EntityStore documentation
+ * for details.
+ *
+ * @author Mark Hayes
+ */
+public class Store {
+
+    public static final String NAME_SEPARATOR = "#";
+    private static final String NAME_PREFIX = "persist" + NAME_SEPARATOR;
+    private static final String DB_NAME_PREFIX = "com.sleepycat.persist.";
+    private static final String CATALOG_DB = DB_NAME_PREFIX + "formats";
+    private static final String SEQUENCE_DB = DB_NAME_PREFIX + "sequences";
+
+    private static Map<Environment,Map<String,PersistCatalog>> catalogPool =
+        new WeakHashMap<Environment,Map<String,PersistCatalog>>();
+
+    /* For unit testing. */
+    private static SyncHook syncHook;
+
+    private Environment env;
+    private boolean rawAccess;
+    private PersistCatalog catalog;
+    private EntityModel model;
+    private Mutations mutations;
+    private StoreConfig storeConfig;
+    private String storeName;
+    private String storePrefix;
+    private Map<String,PrimaryIndex> priIndexMap;
+    private Map<String,SecondaryIndex> secIndexMap;
+    private Map<String,DatabaseConfig> priConfigMap;
+    private Map<String,SecondaryConfig> secConfigMap;
+    private Map<String,PersistKeyBinding> keyBindingMap;
+    private Map<String,Sequence> sequenceMap;
+    private Map<String,SequenceConfig> sequenceConfigMap;
+    private Database sequenceDb;
+    private IdentityHashMap<Database,Object> deferredWriteDatabases;
+    private Map<String,Set<String>> inverseRelatedEntityMap;
+
+    public Store(Environment env,
+                 String storeName,
+                 StoreConfig config,
+                 boolean rawAccess)
+        throws DatabaseException {
+
+        this.env = env;
+        this.storeName = storeName;
+        this.rawAccess = rawAccess;
+
+        if (env == null || storeName == null) {
+            throw new NullPointerException
+                ("env and storeName parameters must not be null");
+        }
+        if (config != null) {
+            model = config.getModel();
+            mutations = config.getMutations();
+        }
+        if (config == null) {
+            storeConfig = StoreConfig.DEFAULT;
+        } else {
+            storeConfig = config.cloneConfig();
+        }
+
+        storePrefix = NAME_PREFIX + storeName + NAME_SEPARATOR;
+        priIndexMap = new HashMap<String,PrimaryIndex>();
+        secIndexMap = new HashMap<String,SecondaryIndex>();
+        priConfigMap = new HashMap<String,DatabaseConfig>();
+        secConfigMap = new HashMap<String,SecondaryConfig>();
+        keyBindingMap = new HashMap<String,PersistKeyBinding>();
+        sequenceMap = new HashMap<String,Sequence>();
+        sequenceConfigMap = new HashMap<String,SequenceConfig>();
+        deferredWriteDatabases = new IdentityHashMap<Database,Object>();
+
+        if (rawAccess) {
+            /* Open a read-only catalog that uses the stored model. */
+            if (model != null) {
+                throw new IllegalArgumentException
+                    ("A model may not be specified when opening a RawStore");
+            }
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            dbConfig.setTransactional
+                (storeConfig.getTransactional());
+            catalog = new PersistCatalog
+                (null, env, storePrefix, storePrefix + CATALOG_DB, dbConfig,
+                 model, mutations, rawAccess, this);
+        } else {
+            /* Open the shared catalog that uses the current model. */
+            synchronized (catalogPool) {
+                Map<String,PersistCatalog> catalogMap = catalogPool.get(env);
+                if (catalogMap == null) {
+                    catalogMap = new HashMap<String,PersistCatalog>();
+                    catalogPool.put(env, catalogMap);
+                }
+                catalog = catalogMap.get(storeName);
+                if (catalog != null) {
+                    catalog.openExisting();
+                } else {
+                    Transaction txn = null;
+                    if (storeConfig.getTransactional() &&
+                        DbCompat.getThreadTransaction(env) == null) {
+                        txn = env.beginTransaction(null, null);
+                    }
+                    boolean success = false;
+                    try {
+                        DatabaseConfig dbConfig = new DatabaseConfig();
+                        dbConfig.setAllowCreate(storeConfig.getAllowCreate());
+                        /* <!-- begin JE only --> */
+                        dbConfig.setTemporary(storeConfig.getTemporary());
+                        /* <!-- end JE only --> */
+                        dbConfig.setReadOnly(storeConfig.getReadOnly());
+                        dbConfig.setTransactional
+                            (storeConfig.getTransactional());
+                        DbCompat.setTypeBtree(dbConfig);
+                        catalog = new PersistCatalog
+                            (txn, env, storePrefix, storePrefix + CATALOG_DB,
+                             dbConfig, model, mutations, rawAccess, this);
+                        catalogMap.put(storeName, catalog);
+                        success = true;
+                    } finally {
+                        if (txn != null) {
+                            if (success) {
+                                txn.commit();
+                            } else {
+                                txn.abort();
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        /* Get the merged mutations from the catalog. */
+        mutations = catalog.getMutations();
+
+        /*
+         * If there is no model parameter, use the default or stored model
+         * obtained from the catalog.
+         */
+        model = catalog.getResolvedModel();
+
+        /*
+         * Give the model a reference to the catalog to fully initialize the
+         * model.  Only then may we initialize the Converter mutations, which
+         * themselves may call model methods and expect the model to be fully
+         * initialized.
+         */
+        ModelInternal.setCatalog(model, catalog);
+        for (Converter converter : mutations.getConverters()) {
+            converter.getConversion().initialize(model);
+        }
+
+        /*
+         * For each existing entity with a relatedEntity reference, create an
+         * inverse map (back pointer) from the class named in the relatedEntity
+         * to the class containing the secondary key.  This is used to open the
+         * class containing the secondary key whenever we open the
+         * relatedEntity class, to configure foreign key constraints. Note that
+         * we do not need to update this map as new primary indexes are
+         * created, because opening the new index will set up the foreign key
+         * constraints. [#15358]
+         */
+        inverseRelatedEntityMap = new HashMap<String,Set<String>>();
+        List<Format> entityFormats = new ArrayList<Format>();
+        catalog.getEntityFormats(entityFormats);
+        for (Format entityFormat : entityFormats) {
+            EntityMetadata entityMeta = entityFormat.getEntityMetadata();
+            for (SecondaryKeyMetadata secKeyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+                String relatedClsName = secKeyMeta.getRelatedEntity();
+                if (relatedClsName != null) {
+                    Set<String> inverseClassNames =
+                        inverseRelatedEntityMap.get(relatedClsName);
+                    if (inverseClassNames == null) {
+                        inverseClassNames = new HashSet<String>();
+                        inverseRelatedEntityMap.put
+                            (relatedClsName, inverseClassNames);
+                    }
+                    inverseClassNames.add(entityMeta.getClassName());
+                }
+            }
+        }
+    }
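+
+    /*
+     * Illustrative example (hypothetical Employee and Department entity
+     * classes): a secondary key declared in Employee as
+     *
+     *     @SecondaryKey(relate = MANY_TO_ONE, relatedEntity = Department.class)
+     *     long departmentId;
+     *
+     * makes the loop above record the back pointer
+     *
+     *     Department.class.getName() -> { Employee.class.getName() }
+     *
+     * so that opening the Department primary index also opens the Employee
+     * index, which establishes the foreign key constraint.
+     */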
+
+    public Environment getEnvironment() {
+        return env;
+    }
+
+    public StoreConfig getConfig() {
+        return storeConfig.cloneConfig();
+    }
+
+    public String getStoreName() {
+        return storeName;
+    }
+
+    /* <!-- begin JE only --> */
+    public static Set<String> getStoreNames(Environment env)
+        throws DatabaseException {
+
+        Set<String> set = new HashSet<String>();
+        for (Object o : env.getDatabaseNames()) {
+            String s = (String) o;
+            if (s.startsWith(NAME_PREFIX)) {
+                int start = NAME_PREFIX.length();
+                int end = s.indexOf(NAME_SEPARATOR, start);
+                set.add(s.substring(start, end));
+            }
+        }
+        return set;
+    }
+    /* <!-- end JE only --> */
+
+    public EntityModel getModel() {
+        return model;
+    }
+
+    public Mutations getMutations() {
+        return mutations;
+    }
+
+    /**
+     * A getPrimaryIndex with extra parameters for opening a raw store.
+     * primaryKeyClass and entityClass are used for generic typing; for a raw
+     * store, these should always be Object.class and RawObject.class.
+     * primaryKeyClassName is used for consistency checking and should be
+     * null only for a raw store.  entityClassName is used to identify the
+     * store and may not be null.
+     */
+    public synchronized <PK,E> PrimaryIndex<PK,E>
+        getPrimaryIndex(Class<PK> primaryKeyClass,
+                        String primaryKeyClassName,
+                        Class<E> entityClass,
+                        String entityClassName)
+        throws DatabaseException {
+
+        assert (rawAccess && entityClass == RawObject.class) ||
+              (!rawAccess && entityClass != RawObject.class);
+        assert (rawAccess && primaryKeyClassName == null) ||
+              (!rawAccess && primaryKeyClassName != null);
+
+        checkOpen();
+
+        PrimaryIndex<PK,E> priIndex = priIndexMap.get(entityClassName);
+        if (priIndex == null) {
+
+            /* Check metadata. */
+            EntityMetadata entityMeta = checkEntityClass(entityClassName);
+            PrimaryKeyMetadata priKeyMeta = entityMeta.getPrimaryKey();
+            if (primaryKeyClassName == null) {
+                primaryKeyClassName = priKeyMeta.getClassName();
+            } else {
+                String expectClsName =
+                    SimpleCatalog.keyClassName(priKeyMeta.getClassName());
+                if (!primaryKeyClassName.equals(expectClsName)) {
+                    throw new IllegalArgumentException
+                        ("Wrong primary key class: " + primaryKeyClassName +
+                         ".  Correct class is: " + expectClsName);
+                }
+            }
+
+            /* Create bindings. */
+            PersistEntityBinding entityBinding =
+                new PersistEntityBinding(catalog, entityClassName, rawAccess);
+            PersistKeyBinding keyBinding = getKeyBinding(primaryKeyClassName);
+
+            /* If not read-only, get the primary key sequence. */
+            String seqName = priKeyMeta.getSequenceName();
+            if (!storeConfig.getReadOnly() && seqName != null) {
+                entityBinding.keyAssigner = new PersistKeyAssigner
+                    (keyBinding, entityBinding, getSequence(seqName));
+            }
+
+            /*
+             * Use a single transaction for opening the primary DB and its
+             * secondaries.  If opening any secondary fails, abort the
+             * transaction and undo the changes to the state of the store.
+             * Also support undo if the store is non-transactional.
+             */
+            Transaction txn = null;
+            DatabaseConfig dbConfig = getPrimaryConfig(entityMeta);
+            if (dbConfig.getTransactional() &&
+                DbCompat.getThreadTransaction(env) == null) {
+                txn = env.beginTransaction(null, null);
+            }
+            PrimaryOpenState priOpenState =
+                new PrimaryOpenState(entityClassName);
+            boolean success = false;
+            try {
+
+                /* Open the primary database. */
+                String[] fileAndDbNames =
+                    parseDbName(storePrefix + entityClassName);
+                Database db;
+                try {
+                    db = DbCompat.openDatabase
+                        (env, txn, fileAndDbNames[0], fileAndDbNames[1],
+                         dbConfig);
+                } catch (FileNotFoundException e) {
+                    throw new DatabaseException(e);
+                }
+                priOpenState.addDatabase(db);
+
+                /* Create index object. */
+                priIndex = new PrimaryIndex
+                    (db, primaryKeyClass, keyBinding, entityClass,
+                     entityBinding);
+
+                /* Update index and database maps. */
+                priIndexMap.put(entityClassName, priIndex);
+                if (DbCompat.getDeferredWrite(dbConfig)) {
+                    deferredWriteDatabases.put(db, null);
+                }
+
+                /* If not read-only, open all associated secondaries. */
+                if (!dbConfig.getReadOnly()) {
+                    openSecondaryIndexes(txn, entityMeta, priOpenState);
+
+                    /*
+                     * To enable foreign key constraints, also open all primary
+                     * indexes referring to this class via a relatedEntity
+                     * property in another entity. [#15358]
+                     */
+                    Set<String> inverseClassNames =
+                        inverseRelatedEntityMap.get(entityClassName);
+                    if (inverseClassNames != null) {
+                        for (String relatedClsName : inverseClassNames) {
+                            getRelatedIndex(relatedClsName);
+                        }
+                    }
+                }
+                success = true;
+            } finally {
+                if (success) {
+                    if (txn != null) {
+                        txn.commit();
+                    }
+                } else {
+                    if (txn != null) {
+                        txn.abort();
+                    } else {
+                        priOpenState.closeDatabases();
+                    }
+                    priOpenState.undoState();
+                }
+            }
+        }
+        return priIndex;
+    }
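+
+    /*
+     * Illustrative usage (hypothetical Employee entity class):
+     *
+     *     // Typed store: the key class name is checked against metadata.
+     *     PrimaryIndex<Long,Employee> index = store.getPrimaryIndex
+     *         (Long.class, "java.lang.Long",
+     *          Employee.class, Employee.class.getName());
+     *
+     *     // Raw store: generic typing only; no key class name is checked.
+     *     PrimaryIndex<Object,RawObject> rawIndex = store.getPrimaryIndex
+     *         (Object.class, null, RawObject.class, "com.example.Employee");
+     */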
+
+    /**
+     * Holds state information about opening a primary index and its secondary
+     * indexes.  Used to undo the state of this object if the transaction
+     * opening the primary and secondaries aborts.  Also used to close all
+     * databases opened during this process for a non-transactional store.
+     */
+    private class PrimaryOpenState {
+
+        private String entityClassName;
+        private IdentityHashMap<Database,Object> databases;
+        private Set<String> secNames;
+
+        PrimaryOpenState(String entityClassName) {
+            this.entityClassName = entityClassName;
+            databases = new IdentityHashMap<Database,Object>();
+            secNames = new HashSet<String>();
+        }
+
+        /**
+         * Save a database that was opened during this operation.
+         */
+        void addDatabase(Database db) {
+            databases.put(db, null);
+        }
+
+        /**
+         * Save the name of a secondary index that was opened during this
+         * operation.
+         */
+        void addSecondaryName(String secName) {
+            secNames.add(secName);
+        }
+
+        /**
+         * Close any databases opened during this operation when it fails.
+         * This method should be called if a non-transactional operation fails,
+         * since we cannot rely on the transaction abort to clean up any
+         * databases that were opened.
+         */
+        void closeDatabases() {
+            for (Database db : databases.keySet()) {
+                try {
+                    db.close();
+                } catch (Exception ignored) {
+                }
+            }
+        }
+
+        /**
+         * Reset all state information when this operation fails.  This method
+         * should be called for both transactional and non-transactional
+         * operations.
+         */
+        void undoState() {
+            priIndexMap.remove(entityClassName);
+            for (String secName : secNames) {
+                secIndexMap.remove(secName);
+            }
+            for (Database db : databases.keySet()) {
+                deferredWriteDatabases.remove(db);
+            }
+        }
+    }
+
+    /**
+     * Opens a primary index related via a foreign key (relatedEntity).
+     * Related indexes are not opened in the same transaction used by the
+     * caller to open a primary or secondary.  It is OK to leave the related
+     * index open when the caller's transaction aborts.  It is only important
+     * to open a primary and its secondaries atomically.
+     */
+    private PrimaryIndex getRelatedIndex(String relatedClsName)
+        throws DatabaseException {
+
+        PrimaryIndex relatedIndex = priIndexMap.get(relatedClsName);
+        if (relatedIndex == null) {
+            EntityMetadata relatedEntityMeta =
+                checkEntityClass(relatedClsName);
+            Class relatedKeyCls;
+            String relatedKeyClsName;
+            Class relatedCls;
+            if (rawAccess) {
+                relatedCls = RawObject.class;
+                relatedKeyCls = Object.class;
+                relatedKeyClsName = null;
+            } else {
+                try {
+                    relatedCls = EntityModel.classForName(relatedClsName);
+                } catch (ClassNotFoundException e) {
+                    throw new IllegalArgumentException
+                        ("Related entity class not found: " +
+                         relatedClsName);
+                }
+                relatedKeyClsName = SimpleCatalog.keyClassName
+                    (relatedEntityMeta.getPrimaryKey().getClassName());
+                relatedKeyCls =
+                    SimpleCatalog.keyClassForName(relatedKeyClsName);
+            }
+
+            /*
+             * Cycles are prevented here by adding primary indexes to the
+             * priIndexMap as soon as they are created, before opening related
+             * indexes.
+             */
+            relatedIndex = getPrimaryIndex
+                (relatedKeyCls, relatedKeyClsName,
+                 relatedCls, relatedClsName);
+        }
+        return relatedIndex;
+    }
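+
+    /*
+     * Illustrative note (hypothetical entities A and B): if A declares a key
+     * with relatedEntity = B.class and B declares one with relatedEntity =
+     * A.class, opening A's primary index registers A in priIndexMap before B
+     * is opened; B's call back into getRelatedIndex for A then finds the map
+     * entry and returns, so mutually related entities do not recurse
+     * endlessly.
+     */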
+
+    /**
+     * A getSecondaryIndex with extra parameters for opening a raw store.
+     * keyClassName is used for consistency checking and should be null only
+     * for a raw store.
+     */
+    public synchronized <SK,PK,E1,E2 extends E1> SecondaryIndex<SK,PK,E2>
+        getSecondaryIndex(PrimaryIndex<PK,E1> primaryIndex,
+                          Class<E2> entityClass,
+                          String entityClassName,
+                          Class<SK> keyClass,
+                          String keyClassName,
+                          String keyName)
+        throws DatabaseException {
+
+        assert (rawAccess && keyClassName == null) ||
+              (!rawAccess && keyClassName != null);
+
+        checkOpen();
+
+        EntityMetadata entityMeta = null;
+        SecondaryKeyMetadata secKeyMeta = null;
+
+        /* Validate the subclass for a subclass index. */
+        if (entityClass != primaryIndex.getEntityClass()) {
+            entityMeta = model.getEntityMetadata(entityClassName);
+            assert entityMeta != null;
+            secKeyMeta = checkSecKey(entityMeta, keyName);
+            String subclassName = entityClass.getName();
+            String declaringClassName = secKeyMeta.getDeclaringClassName();
+            if (!subclassName.equals(declaringClassName)) {
+                throw new IllegalArgumentException
+                    ("Key for subclass " + subclassName +
+                     " is declared in a different class: " +
+                     makeSecName(declaringClassName, keyName));
+            }
+        }
+
+        /*
+         * Even though the primary is already open, we can't assume the
+         * secondary is open because we don't automatically open all
+         * secondaries when the primary is read-only.  Use auto-commit (a null
+         * transaction) since we're opening only one database.
+         */
+        String secName = makeSecName(entityClassName, keyName);
+        SecondaryIndex<SK,PK,E2> secIndex = secIndexMap.get(secName);
+        if (secIndex == null) {
+            if (entityMeta == null) {
+                entityMeta = model.getEntityMetadata(entityClassName);
+                assert entityMeta != null;
+            }
+            if (secKeyMeta == null) {
+                secKeyMeta = checkSecKey(entityMeta, keyName);
+            }
+
+            /* Check metadata. */
+            if (keyClassName == null) {
+                keyClassName = getSecKeyClass(secKeyMeta);
+            } else {
+                String expectClsName = getSecKeyClass(secKeyMeta);
+                if (!keyClassName.equals(expectClsName)) {
+                    throw new IllegalArgumentException
+                        ("Wrong secondary key class: " + keyClassName +
+                         ".  Correct class is: " + expectClsName);
+                }
+            }
+
+            secIndex = openSecondaryIndex
+                (null, primaryIndex, entityClass, entityMeta,
+                 keyClass, keyClassName, secKeyMeta, secName,
+                 false /*doNotCreate*/, null /*priOpenState*/);
+        }
+        return secIndex;
+    }
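+
+    /*
+     * Illustrative usage (hypothetical Manager subclass of an Employee
+     * entity, with a "level" key declared in Manager):
+     *
+     *     SecondaryIndex<Integer,Long,Manager> byLevel =
+     *         store.getSecondaryIndex
+     *             (employeeIndex, Manager.class, Employee.class.getName(),
+     *              Integer.class, "java.lang.Integer", "level");
+     *
+     * Requesting the key through a subclass other than the declaring one
+     * fails the declaring-class check above.
+     */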
+
+    /**
+     * Opens any secondary indexes defined in the given entity metadata that
+     * are not already open.  This method is called when a new entity subclass
+     * is encountered as an instance of that class is stored, and
+     * EntityStore.getSubclassIndex has not previously been called for that
+     * class. [#15247]
+     */
+    synchronized void openSecondaryIndexes(Transaction txn,
+                                           EntityMetadata entityMeta,
+                                           PrimaryOpenState priOpenState)
+        throws DatabaseException {
+
+        String entityClassName = entityMeta.getClassName();
+        PrimaryIndex<Object,Object> priIndex =
+            priIndexMap.get(entityClassName);
+        assert priIndex != null;
+        Class<Object> entityClass = priIndex.getEntityClass();
+
+        for (SecondaryKeyMetadata secKeyMeta :
+             entityMeta.getSecondaryKeys().values()) {
+            String keyName = secKeyMeta.getKeyName();
+            String secName = makeSecName(entityClassName, keyName);
+            SecondaryIndex<Object,Object,Object> secIndex =
+                secIndexMap.get(secName);
+            if (secIndex == null) {
+                String keyClassName = getSecKeyClass(secKeyMeta);
+                /* RawMode: should not require class. */
+                Class keyClass =
+                    SimpleCatalog.keyClassForName(keyClassName);
+                openSecondaryIndex
+                    (txn, priIndex, entityClass, entityMeta,
+                     keyClass, keyClassName, secKeyMeta,
+                     makeSecName
+                        (entityClassName, secKeyMeta.getKeyName()),
+                     storeConfig.getSecondaryBulkLoad() /*doNotCreate*/,
+                     priOpenState);
+            }
+        }
+    }
+
+    /**
+     * Opens a secondary index with a given transaction and adds it to the
+     * secIndexMap.  We assume that the index is not already open.
+     */
+    private <SK,PK,E1,E2 extends E1> SecondaryIndex<SK,PK,E2>
+        openSecondaryIndex(Transaction txn,
+                           PrimaryIndex<PK,E1> primaryIndex,
+                           Class<E2> entityClass,
+                           EntityMetadata entityMeta,
+                           Class<SK> keyClass,
+                           String keyClassName,
+                           SecondaryKeyMetadata secKeyMeta,
+                           String secName,
+                           boolean doNotCreate,
+                           PrimaryOpenState priOpenState)
+        throws DatabaseException {
+
+        assert !secIndexMap.containsKey(secName);
+        String[] fileAndDbNames = parseDbName(storePrefix + secName);
+        SecondaryConfig config =
+            getSecondaryConfig(secName, entityMeta, keyClassName, secKeyMeta);
+        Database priDb = primaryIndex.getDatabase();
+        DatabaseConfig priConfig = priDb.getConfig();
+
+        String relatedClsName = secKeyMeta.getRelatedEntity();
+        if (relatedClsName != null) {
+            PrimaryIndex relatedIndex = getRelatedIndex(relatedClsName);
+            config.setForeignKeyDatabase(relatedIndex.getDatabase());
+        }
+
+        if (config.getTransactional() != priConfig.getTransactional() ||
+            DbCompat.getDeferredWrite(config) !=
+            DbCompat.getDeferredWrite(priConfig) ||
+            config.getReadOnly() != priConfig.getReadOnly()) {
+            throw new IllegalArgumentException
+                ("One of these properties was changed to be inconsistent" +
+                 " with the associated primary database: " +
+                 "Transactional, DeferredWrite, ReadOnly");
+        }
+
+        PersistKeyBinding keyBinding = getKeyBinding(keyClassName);
+
+        /*
+         * doNotCreate is true when StoreConfig.getSecondaryBulkLoad is true
+         * and we are opening a secondary as a side effect of opening a
+         * primary, i.e., getSecondaryIndex is not being called.  If
+         * doNotCreate is true and the database does not exist, we silently
+         * ignore the DatabaseNotFoundException and return null.  When
+         * getSecondaryIndex is subsequently called, the secondary database
+         * will be created and populated from the primary -- a bulk load.
+         */
+        SecondaryDatabase db;
+        boolean saveAllowCreate = config.getAllowCreate();
+        try {
+            if (doNotCreate) {
+                config.setAllowCreate(false);
+            }
+            db = DbCompat.openSecondaryDatabase
+                (env, txn, fileAndDbNames[0], fileAndDbNames[1], priDb,
+                 config);
+        /* <!-- begin JE only --> */
+        } catch (DatabaseNotFoundException e) {
+            if (doNotCreate) {
+                return null;
+            } else {
+                throw e;
+            }
+        /* <!-- end JE only --> */
+        } catch (FileNotFoundException e) {
+            if (doNotCreate) {
+                return null;
+            } else {
+                throw new DatabaseException(e);
+            }
+        } finally {
+            if (doNotCreate) {
+                config.setAllowCreate(saveAllowCreate);
+            }
+        }
+        SecondaryIndex<SK,PK,E2> secIndex = new SecondaryIndex
+            (db, null, primaryIndex, keyClass, keyBinding);
+
+        /* Update index and database maps. */
+        secIndexMap.put(secName, secIndex);
+        if (DbCompat.getDeferredWrite(config)) {
+            deferredWriteDatabases.put(db, null);
+        }
+        if (priOpenState != null) {
+            priOpenState.addDatabase(db);
+            priOpenState.addSecondaryName(secName);
+        }
+        return secIndex;
+    }
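+
+    /*
+     * Illustrative sequence: with bulk loading enabled via
+     * StoreConfig.setSecondaryBulkLoad(true), opening a primary index skips
+     * creation of any missing secondary (doNotCreate is true and null is
+     * returned above); a later explicit getSecondaryIndex call then creates
+     * the secondary database and populates it from the primary in a single
+     * pass -- the bulk load.
+     */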
+
+    /* <!-- begin JE only --> */
+    public void sync()
+        throws DatabaseException {
+
+        List<Database> dbs = new ArrayList<Database>();
+        synchronized (this) {
+            dbs.addAll(deferredWriteDatabases.keySet());
+        }
+        int nDbs = dbs.size();
+        if (nDbs > 0) {
+            for (int i = 0; i < nDbs; i += 1) {
+                Database db = dbs.get(i);
+                boolean flushLog = (i == nDbs - 1);
+                DbCompat.syncDeferredWrite(db, flushLog);
+                /* Call hook for unit testing. */
+                if (syncHook != null) {
+                    syncHook.onSync(db, flushLog);
+                }
+            }
+        }
+    }
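+
+    /*
+     * Design note: flushLog is true only for the last database, so the final
+     * syncDeferredWrite call performs the single log flush that covers the
+     * records synced for all of the deferred-write databases.
+     */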
+    /* <!-- end JE only --> */
+
+    public void truncateClass(Class entityClass)
+        throws DatabaseException {
+
+        truncateClass(null, entityClass);
+    }
+
+    public synchronized void truncateClass(Transaction txn, Class entityClass)
+        throws DatabaseException {
+
+        checkOpen();
+
+        /* Close primary and secondary databases. */
+        closeClass(entityClass);
+
+        String clsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(clsName);
+
+        /*
+         * Truncate the primary first and let any exceptions propagate
+         * upwards.  Then truncate each secondary, only throwing the first
+         * exception.
+         */
+        boolean primaryExists = truncateIfExists(txn, storePrefix + clsName);
+        if (primaryExists) {
+            DatabaseException firstException = null;
+            for (SecondaryKeyMetadata keyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+                try {
+                    truncateIfExists
+                        (txn,
+                         storePrefix +
+                         makeSecName(clsName, keyMeta.getKeyName()));
+                    /* Ignore secondaries that do not exist. */
+                } catch (DatabaseException e) {
+                    if (firstException == null) {
+                        firstException = e;
+                    }
+                }
+            }
+            if (firstException != null) {
+                throw firstException;
+            }
+        }
+    }
+
+    private boolean truncateIfExists(Transaction txn, String dbName)
+        throws DatabaseException {
+
+        try {
+            String[] fileAndDbNames = parseDbName(dbName);
+            DbCompat.truncateDatabase
+                (env, txn, fileAndDbNames[0], fileAndDbNames[1],
+                 false/*returnCount*/);
+            return true;
+        /* <!-- begin JE only --> */
+        } catch (DatabaseNotFoundException e) {
+            return false;
+        /* <!-- end JE only --> */
+        } catch (FileNotFoundException e) {
+            return false;
+        }
+    }
+
+    public synchronized void closeClass(Class entityClass)
+        throws DatabaseException {
+
+        checkOpen();
+        String clsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(clsName);
+
+        PrimaryIndex priIndex = priIndexMap.get(clsName);
+        if (priIndex != null) {
+            /* Close the secondaries first. */
+            DatabaseException firstException = null;
+            for (SecondaryKeyMetadata keyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+
+                String secName = makeSecName(clsName, keyMeta.getKeyName());
+                SecondaryIndex secIndex = secIndexMap.get(secName);
+                if (secIndex != null) {
+                    Database db = secIndex.getDatabase();
+                    firstException = closeDb(db, firstException);
+                    firstException =
+                        closeDb(secIndex.getKeysDatabase(), firstException);
+                    secIndexMap.remove(secName);
+                    deferredWriteDatabases.remove(db);
+                }
+            }
+            /* Close the primary last. */
+            Database db = priIndex.getDatabase();
+            firstException = closeDb(db, firstException);
+            priIndexMap.remove(clsName);
+            deferredWriteDatabases.remove(db);
+
+            /* Throw the first exception encountered. */
+            if (firstException != null) {
+                throw firstException;
+            }
+        }
+    }
+
+    public synchronized void close()
+        throws DatabaseException {
+
+        checkOpen();
+        DatabaseException firstException = null;
+        try {
+            if (rawAccess) {
+                boolean allClosed = catalog.close();
+                assert allClosed;
+            } else {
+                synchronized (catalogPool) {
+                    Map<String,PersistCatalog> catalogMap =
+                        catalogPool.get(env);
+                    assert catalogMap != null;
+                    if (catalog.close()) {
+                        /* Remove when the reference count goes to zero. */
+                        catalogMap.remove(storeName);
+                    }
+                }
+            }
+            catalog = null;
+        } catch (DatabaseException e) {
+            if (firstException == null) {
+                firstException = e;
+            }
+        }
+        firstException = closeDb(sequenceDb, firstException);
+        for (SecondaryIndex index : secIndexMap.values()) {
+            firstException = closeDb(index.getDatabase(), firstException);
+            firstException = closeDb(index.getKeysDatabase(), firstException);
+        }
+        for (PrimaryIndex index : priIndexMap.values()) {
+            firstException = closeDb(index.getDatabase(), firstException);
+        }
+        if (firstException != null) {
+            throw firstException;
+        }
+    }
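+
+    /*
+     * Note: catalogs are shared per Environment and store name through
+     * catalogPool.  The constructor calls openExisting to add a reference to
+     * a pooled catalog, and close above removes the pool entry only when
+     * catalog.close reports that the last reference has been released.
+     */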
+
+    public synchronized Sequence getSequence(String name)
+        throws DatabaseException {
+
+        checkOpen();
+
+        if (storeConfig.getReadOnly()) {
+            throw new IllegalStateException("Store is read-only");
+        }
+
+        Sequence seq = sequenceMap.get(name);
+        if (seq == null) {
+            if (sequenceDb == null) {
+                String[] fileAndDbNames =
+                    parseDbName(storePrefix + SEQUENCE_DB);
+                DatabaseConfig dbConfig = new DatabaseConfig();
+                dbConfig.setTransactional(storeConfig.getTransactional());
+                dbConfig.setAllowCreate(true);
+                /* <!-- begin JE only --> */
+                dbConfig.setTemporary(storeConfig.getTemporary());
+                /* <!-- end JE only --> */
+                DbCompat.setTypeBtree(dbConfig);
+                try {
+                    sequenceDb = DbCompat.openDatabase
+                        (env, null/*txn*/, fileAndDbNames[0],
+                         fileAndDbNames[1], dbConfig);
+                } catch (FileNotFoundException e) {
+                    throw new DatabaseException(e);
+                }
+            }
+            DatabaseEntry entry = new DatabaseEntry();
+            StringBinding.stringToEntry(name, entry);
+            seq = sequenceDb.openSequence(null, entry, getSequenceConfig(name));
+            sequenceMap.put(name, seq);
+        }
+        return seq;
+    }
+
+    public synchronized SequenceConfig getSequenceConfig(String name) {
+        checkOpen();
+        SequenceConfig config = sequenceConfigMap.get(name);
+        if (config == null) {
+            config = new SequenceConfig();
+            config.setInitialValue(1);
+            config.setRange(1, Long.MAX_VALUE);
+            config.setCacheSize(100);
+            config.setAutoCommitNoSync(true);
+            config.setAllowCreate(!storeConfig.getReadOnly());
+            sequenceConfigMap.put(name, config);
+        }
+        return config;
+    }
+
+    public synchronized void setSequenceConfig(String name,
+                                               SequenceConfig config) {
+        checkOpen();
+        sequenceConfigMap.put(name, config);
+    }
+
+    public synchronized DatabaseConfig getPrimaryConfig(Class entityClass) {
+        checkOpen();
+        String clsName = entityClass.getName();
+        EntityMetadata meta = checkEntityClass(clsName);
+        return getPrimaryConfig(meta).cloneConfig();
+    }
+
+    private synchronized DatabaseConfig getPrimaryConfig(EntityMetadata meta) {
+        String clsName = meta.getClassName();
+        DatabaseConfig config = priConfigMap.get(clsName);
+        if (config == null) {
+            config = new DatabaseConfig();
+            config.setTransactional(storeConfig.getTransactional());
+            config.setAllowCreate(!storeConfig.getReadOnly());
+            config.setReadOnly(storeConfig.getReadOnly());
+            DbCompat.setTypeBtree(config);
+            /* <!-- begin JE only --> */
+            config.setTemporary(storeConfig.getTemporary());
+            config.setDeferredWrite(storeConfig.getDeferredWrite());
+            /* <!-- end JE only --> */
+            setBtreeComparator(config, meta.getPrimaryKey().getClassName());
+            priConfigMap.put(clsName, config);
+        }
+        return config;
+    }
+
+    public synchronized void setPrimaryConfig(Class entityClass,
+                                              DatabaseConfig config) {
+        checkOpen();
+        String clsName = entityClass.getName();
+        if (priIndexMap.containsKey(clsName)) {
+            throw new IllegalStateException
+                ("Cannot set config after DB is open");
+        }
+        EntityMetadata meta = checkEntityClass(clsName);
+        DatabaseConfig dbConfig = getPrimaryConfig(meta);
+        if (config.getSortedDuplicates() ||
+            /* <!-- begin JE only --> */
+            config.getTemporary() != dbConfig.getTemporary() ||
+            /* <!-- end JE only --> */
+            config.getBtreeComparator() != dbConfig.getBtreeComparator()) {
+            throw new IllegalArgumentException
+                ("One of these properties was illegally changed: " +
+                 " SortedDuplicates, Temporary or BtreeComparator");
+        }
+        if (!DbCompat.isTypeBtree(config)) {
+            throw new IllegalArgumentException("Only type BTREE allowed");
+        }
+        priConfigMap.put(clsName, config);
+    }
+
+    public synchronized SecondaryConfig getSecondaryConfig(Class entityClass,
+                                                           String keyName) {
+        checkOpen();
+        String entityClsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(entityClsName);
+        SecondaryKeyMetadata secKeyMeta = checkSecKey(entityMeta, keyName);
+        String keyClassName = getSecKeyClass(secKeyMeta);
+        String secName = makeSecName(entityClass.getName(), keyName);
+        return (SecondaryConfig) getSecondaryConfig
+            (secName, entityMeta, keyClassName, secKeyMeta).cloneConfig();
+    }
+
+    private SecondaryConfig getSecondaryConfig(String secName,
+                                               EntityMetadata entityMeta,
+                                               String keyClassName,
+                                               SecondaryKeyMetadata
+                                               secKeyMeta) {
+        SecondaryConfig config = secConfigMap.get(secName);
+        if (config == null) {
+            /* Set common properties to match the primary DB. */
+            DatabaseConfig priConfig = getPrimaryConfig(entityMeta);
+            config = new SecondaryConfig();
+            config.setTransactional(priConfig.getTransactional());
+            config.setAllowCreate(!priConfig.getReadOnly());
+            config.setReadOnly(priConfig.getReadOnly());
+            DbCompat.setTypeBtree(config);
+            /* <!-- begin JE only --> */
+            config.setTemporary(priConfig.getTemporary());
+            config.setDeferredWrite(priConfig.getDeferredWrite());
+            /* <!-- end JE only --> */
+            /* Set secondary properties based on metadata. */
+            config.setAllowPopulate(true);
+            Relationship rel = secKeyMeta.getRelationship();
+            config.setSortedDuplicates(rel == Relationship.MANY_TO_ONE ||
+                                       rel == Relationship.MANY_TO_MANY);
+            setBtreeComparator(config, secKeyMeta.getClassName());
+            PersistKeyCreator keyCreator = new PersistKeyCreator
+                (catalog, entityMeta, keyClassName, secKeyMeta, rawAccess);
+            if (rel == Relationship.ONE_TO_MANY ||
+                rel == Relationship.MANY_TO_MANY) {
+                config.setMultiKeyCreator(keyCreator);
+            } else {
+                config.setKeyCreator(keyCreator);
+            }
+            DeleteAction deleteAction = secKeyMeta.getDeleteAction();
+            if (deleteAction != null) {
+                ForeignKeyDeleteAction baseDeleteAction;
+                switch (deleteAction) {
+                case ABORT:
+                    baseDeleteAction = ForeignKeyDeleteAction.ABORT;
+                    break;
+                case CASCADE:
+                    baseDeleteAction = ForeignKeyDeleteAction.CASCADE;
+                    break;
+                case NULLIFY:
+                    baseDeleteAction = ForeignKeyDeleteAction.NULLIFY;
+                    break;
+                default:
+                    throw new IllegalStateException(deleteAction.toString());
+                }
+                config.setForeignKeyDeleteAction(baseDeleteAction);
+                if (deleteAction == DeleteAction.NULLIFY) {
+                    config.setForeignMultiKeyNullifier(keyCreator);
+                }
+            }
+            secConfigMap.put(secName, config);
+        }
+        return config;
+    }
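+
+    /*
+     * Illustrative sketch, not part of the original change.  For a field
+     * declared (hypothetically) as:
+     *
+     *   @SecondaryKey(relate = Relationship.MANY_TO_ONE,
+     *                 relatedEntity = Department.class,
+     *                 onRelatedEntityDelete = DeleteAction.NULLIFY)
+     *   long departmentId;
+     *
+     * the config derived above enables sorted duplicates (MANY_TO_ONE), uses
+     * a single-valued PersistKeyCreator, maps NULLIFY to
+     * ForeignKeyDeleteAction.NULLIFY, and registers the key creator as the
+     * foreign multi-key nullifier.
+     */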
+
+    public synchronized void setSecondaryConfig(Class entityClass,
+                                                String keyName,
+                                                SecondaryConfig config) {
+        checkOpen();
+        String entityClsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(entityClsName);
+        SecondaryKeyMetadata secKeyMeta = checkSecKey(entityMeta, keyName);
+        String keyClassName = getSecKeyClass(secKeyMeta);
+        String secName = makeSecName(entityClass.getName(), keyName);
+        if (secIndexMap.containsKey(secName)) {
+            throw new IllegalStateException
+                ("Cannot set config after DB is open");
+        }
+        SecondaryConfig dbConfig =
+            getSecondaryConfig(secName, entityMeta, keyClassName, secKeyMeta);
+        if (config.getSortedDuplicates() != dbConfig.getSortedDuplicates() ||
+            config.getBtreeComparator() != dbConfig.getBtreeComparator() ||
+            config.getDuplicateComparator() != null ||
+            /* <!-- begin JE only --> */
+            config.getTemporary() != dbConfig.getTemporary() ||
+            /* <!-- end JE only --> */
+            config.getAllowPopulate() != dbConfig.getAllowPopulate() ||
+            config.getKeyCreator() != dbConfig.getKeyCreator() ||
+            config.getMultiKeyCreator() != dbConfig.getMultiKeyCreator() ||
+            config.getForeignKeyNullifier() !=
+                dbConfig.getForeignKeyNullifier() ||
+            config.getForeignMultiKeyNullifier() !=
+                dbConfig.getForeignMultiKeyNullifier() ||
+            config.getForeignKeyDeleteAction() !=
+                dbConfig.getForeignKeyDeleteAction() ||
+            config.getForeignKeyDatabase() != null) {
+            throw new IllegalArgumentException
+                ("One of these properties was illegally changed: " +
+                 " SortedDuplicates, BtreeComparator, DuplicateComparator," +
+                 " Temporary, AllowPopulate, KeyCreator, MultiKeyCreator," +
+                 " ForeignKeyNullifer, ForeignMultiKeyNullifier," +
+                 " ForeignKeyDeleteAction, ForeignKeyDatabase");
+        }
+        if (!DbCompat.isTypeBtree(config)) {
+            throw new IllegalArgumentException("Only type BTREE allowed");
+        }
+        secConfigMap.put(secName, config);
+    }
+
+    private static String makeSecName(String entityClsName, String keyName) {
+        return entityClsName + NAME_SEPARATOR + keyName;
+    }
+
+    static String makePriDbName(String storePrefix, String entityClsName) {
+        return storePrefix + entityClsName;
+    }
+
+    static String makeSecDbName(String storePrefix,
+                                String entityClsName,
+                                String keyName) {
+        return storePrefix + makeSecName(entityClsName, keyName);
+    }
+
+    /**
+     * Parses a whole DB name and returns an array of 2 strings where element 0
+     * is the file name (always null for JE, always non-null for DB core) and
+     * element 1 is the logical DB name (always non-null for JE, may be null
+     * for DB core).
+     */
+    public String[] parseDbName(String wholeName) {
+        return parseDbName(wholeName, storeConfig.getDatabaseNamer());
+    }
+
+    /**
+     * Allows passing a namer to a static method for testing.
+     */
+    public static String[] parseDbName(String wholeName, DatabaseNamer namer) {
+        String[] result = new String[2];
+        if (DbCompat.SEPARATE_DATABASE_FILES) {
+            String[] splitName = wholeName.split(NAME_SEPARATOR);
+            assert splitName.length == 3 || splitName.length == 4 : wholeName;
+            assert splitName[0].equals("persist") : wholeName;
+            String storeName = splitName[1];
+            String clsName = splitName[2];
+            String keyName = (splitName.length > 3) ? splitName[3] : null;
+            result[0] = namer.getFileName(storeName, clsName, keyName);
+            result[1] = null;
+        } else {
+            result[0] = null;
+            result[1] = wholeName;
+        }
+        return result;
+    }
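+
+    /*
+     * Illustrative sketch, not part of the original change.  Assuming
+     * NAME_SEPARATOR is "#", a whole DB name has the form
+     * "persist#<storeName>#<entityClass>[#<keyName>]", so for example
+     * (example names are hypothetical):
+     *
+     *   String[] parts =
+     *       parseDbName("persist#MyStore#com.example.Person#ssn", namer);
+     *   // With SEPARATE_DATABASE_FILES (DB core): parts[0] is the file name
+     *   // chosen by the DatabaseNamer and parts[1] is null.
+     *   // Otherwise (JE): parts[0] is null and parts[1] is the whole name.
+     */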
+
+    private void checkOpen() {
+        if (catalog == null) {
+            throw new IllegalStateException("Store has been closed");
+        }
+    }
+
+    private EntityMetadata checkEntityClass(String clsName) {
+        EntityMetadata meta = model.getEntityMetadata(clsName);
+        if (meta == null) {
+            throw new IllegalArgumentException
+                ("Class could not be loaded or is not an entity class: " +
+                 clsName);
+        }
+        return meta;
+    }
+
+    private SecondaryKeyMetadata checkSecKey(EntityMetadata entityMeta,
+                                             String keyName) {
+        SecondaryKeyMetadata secKeyMeta =
+            entityMeta.getSecondaryKeys().get(keyName);
+        if (secKeyMeta == null) {
+            throw new IllegalArgumentException
+                ("Not a secondary key: " +
+                 makeSecName(entityMeta.getClassName(), keyName));
+        }
+        return secKeyMeta;
+    }
+
+    private String getSecKeyClass(SecondaryKeyMetadata secKeyMeta) {
+        String clsName = secKeyMeta.getElementClassName();
+        if (clsName == null) {
+            clsName = secKeyMeta.getClassName();
+        }
+        return SimpleCatalog.keyClassName(clsName);
+    }
+
+    private PersistKeyBinding getKeyBinding(String keyClassName) {
+        PersistKeyBinding binding = keyBindingMap.get(keyClassName);
+        if (binding == null) {
+            binding = new PersistKeyBinding(catalog, keyClassName, rawAccess);
+            keyBindingMap.put(keyClassName, binding);
+        }
+        return binding;
+    }
+
+    private void setBtreeComparator(DatabaseConfig config, String clsName) {
+        if (!rawAccess) {
+            ClassMetadata meta = model.getClassMetadata(clsName);
+            if (meta != null) {
+                List<FieldMetadata> compositeKeyFields =
+                    meta.getCompositeKeyFields();
+                if (compositeKeyFields != null) {
+                    Class keyClass = SimpleCatalog.keyClassForName(clsName);
+                    if (Comparable.class.isAssignableFrom(keyClass)) {
+                        Comparator<byte[]> cmp = new PersistComparator
+                            (clsName, compositeKeyFields,
+                             getKeyBinding(clsName));
+                        config.setBtreeComparator(cmp);
+                    }
+                }
+            }
+        }
+    }
+
+    private DatabaseException closeDb(Database db,
+                                      DatabaseException firstException) {
+        if (db != null) {
+            try {
+                db.close();
+            } catch (DatabaseException e) {
+                if (firstException == null) {
+                    firstException = e;
+                }
+            }
+        }
+        return firstException;
+    }
+
+    public EvolveStats evolve(EvolveConfig config)
+        throws DatabaseException {
+
+        checkOpen();
+        List<Format> toEvolve = new ArrayList<Format>();
+        Set<String> configToEvolve = config.getClassesToEvolve();
+        if (configToEvolve.isEmpty()) {
+            catalog.getEntityFormats(toEvolve);
+        } else {
+            for (String name : configToEvolve) {
+                Format format = catalog.getFormat(name);
+                if (format == null) {
+                    throw new IllegalArgumentException
+                        ("Class to evolve is not persistent: " + name);
+                }
+                if (!format.isEntity()) {
+                    throw new IllegalArgumentException
+                        ("Class to evolve is not an entity class: " + name);
+                }
+                toEvolve.add(format);
+            }
+        }
+
+        EvolveEvent event = EvolveInternal.newEvent();
+        for (Format format : toEvolve) {
+            if (format.getEvolveNeeded()) {
+                evolveIndex(format, event, config.getEvolveListener());
+                format.setEvolveNeeded(false);
+                catalog.flush();
+            }
+        }
+
+        return event.getStats();
+    }
+
+    private void evolveIndex(Format format,
+                             EvolveEvent event,
+                             EvolveListener listener)
+        throws DatabaseException {
+
+        /* We may make this configurable later. */
+        final int WRITES_PER_TXN = 1;
+
+        Class entityClass = format.getType();
+        String entityClassName = format.getClassName();
+        EntityMetadata meta = model.getEntityMetadata(entityClassName);
+        String keyClassName = meta.getPrimaryKey().getClassName();
+        keyClassName = SimpleCatalog.keyClassName(keyClassName);
+        DatabaseConfig dbConfig = getPrimaryConfig(meta);
+
+        PrimaryIndex<Object,Object> index = getPrimaryIndex
+            (Object.class, keyClassName, entityClass, entityClassName);
+        Database db = index.getDatabase();
+
+        EntityBinding binding = index.getEntityBinding();
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        CursorConfig cursorConfig = null;
+        Transaction txn = null;
+        if (dbConfig.getTransactional()) {
+            txn = env.beginTransaction(null, null);
+            cursorConfig = CursorConfig.READ_COMMITTED;
+        }
+
+        Cursor cursor = null;
+        int nWritten = 0;
+        try {
+            cursor = db.openCursor(txn, cursorConfig);
+            OperationStatus status = cursor.getFirst(key, data, null);
+            while (status == OperationStatus.SUCCESS) {
+                boolean oneWritten = false;
+                if (evolveNeeded(key, data, binding)) {
+                    cursor.putCurrent(data);
+                    oneWritten = true;
+                    nWritten += 1;
+                }
+                /* Update event stats, even if no listener. [#17024] */
+                EvolveInternal.updateEvent
+                    (event, entityClassName, 1, oneWritten ? 1 : 0);
+                if (listener != null) {
+                    if (!listener.evolveProgress(event)) {
+                        break;
+                    }
+                }
+                if (txn != null && nWritten >= WRITES_PER_TXN) {
+                    cursor.close();
+                    cursor = null;
+                    txn.commit();
+                    txn = null;
+                    txn = env.beginTransaction(null, null);
+                    cursor = db.openCursor(txn, cursorConfig);
+                    DatabaseEntry saveKey = KeyRange.copy(key);
+                    status = cursor.getSearchKeyRange(key, data, null);
+                    if (status == OperationStatus.SUCCESS &&
+                        KeyRange.equalBytes(key, saveKey)) {
+                        status = cursor.getNext(key, data, null);
+                    }
+                } else {
+                    status = cursor.getNext(key, data, null);
+                }
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (txn != null) {
+                if (nWritten > 0) {
+                    txn.commit();
+                } else {
+                    txn.abort();
+                }
+            }
+        }
+    }
+
+    /**
+     * Checks whether the given data is in the current format by translating it
+     * to/from an object.  If true is returned, data is updated.
+     */
+    private boolean evolveNeeded(DatabaseEntry key,
+                                 DatabaseEntry data,
+                                 EntityBinding binding) {
+        Object entity = binding.entryToObject(key, data);
+        DatabaseEntry newData = new DatabaseEntry();
+        binding.objectToData(entity, newData);
+        if (data.equals(newData)) {
+            return false;
+        } else {
+            byte[] bytes = newData.getData();
+            int off = newData.getOffset();
+            int size = newData.getSize();
+            data.setData(bytes, off, size);
+            return true;
+        }
+    }
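+
+    /*
+     * Illustrative sketch, not part of the original change: how the
+     * evolution pass above (evolve -> evolveIndex -> evolveNeeded) is
+     * typically driven through the public EntityStore API (the entity class
+     * name below is hypothetical).
+     *
+     *   EvolveConfig config = new EvolveConfig();
+     *   config.addClassToEvolve("com.example.Person");
+     *   EvolveStats stats = store.evolve(config);
+     *   // stats.getNRead() and stats.getNConverted() reflect the counts
+     *   // accumulated via EvolveInternal.updateEvent above.
+     */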
+
+    /**
+     * For unit testing.
+     */
+    public static void setSyncHook(SyncHook hook) {
+        syncHook = hook;
+    }
+
+    /**
+     * For unit testing.
+     */
+    public interface SyncHook {
+        void onSync(Database db, boolean flushLog);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/StoredModel.java b/src/com/sleepycat/persist/impl/StoredModel.java
new file mode 100644
index 0000000000000000000000000000000000000000..c637d5dcb84b27d078e421524b494090a8c35ae0
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/StoredModel.java
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: StoredModel.java,v 1.12.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.Set;
+
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * The EntityModel used when a RawStore is opened.  The metadata and raw type
+ * information comes from the catalog directly, without using the current
+ * class definitions.
+ *
+ * @author Mark Hayes
+ */
+class StoredModel extends EntityModel {
+
+    private PersistCatalog catalog;
+    private Set<String> knownClasses;
+
+    StoredModel(PersistCatalog catalog) {
+        this.catalog = catalog;
+    }
+
+    @Override
+    public ClassMetadata getClassMetadata(String className) {
+        ClassMetadata metadata = null;
+        Format format = catalog.getFormat(className);
+        if (format != null && format.isCurrentVersion()) {
+            metadata = format.getClassMetadata();
+        }
+        return metadata;
+    }
+
+    @Override
+    public EntityMetadata getEntityMetadata(String className) {
+        EntityMetadata metadata = null;
+        Format format = catalog.getFormat(className);
+        if (format != null && format.isCurrentVersion()) {
+            metadata = format.getEntityMetadata();
+        }
+        return metadata;
+    }
+
+    @Override
+    public Set<String> getKnownClasses() {
+        if (knownClasses == null) {
+            knownClasses = catalog.getModelClasses();
+        }
+        return knownClasses;
+    }
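+
+    /*
+     * Illustrative sketch, not part of the original change: because this
+     * model reads metadata from the stored catalog rather than from current
+     * class definitions, a raw store can be examined without the entity
+     * classes on the classpath (store and class names are hypothetical).
+     *
+     *   RawStore raw = new RawStore(env, "MyStore", null);
+     *   EntityModel model = raw.getModel();
+     *   EntityMetadata meta = model.getEntityMetadata("com.example.Person");
+     */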
+}
diff --git a/src/com/sleepycat/persist/impl/WidenerInput.java b/src/com/sleepycat/persist/impl/WidenerInput.java
new file mode 100644
index 0000000000000000000000000000000000000000..873061923e4091af6b9bad2f1e1ecda86490fd9d
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/WidenerInput.java
@@ -0,0 +1,544 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: WidenerInput.java,v 1.6.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.math.BigInteger;
+
+/**
+ * Widens a value returned by another input when any readXxx method is called.
+ * Used to cause an Accessor to read a widened value.
+ *
+ * For non-key fields we support all Java primitive widening:
+ * - byte to short, int, long, float, double or BigInteger
+ * - short to int, long, float, double or BigInteger
+ * - char to int, long, float, double or BigInteger
+ * - int to long, float, double or BigInteger
+ * - long to float, double or BigInteger
+ * - float to double
+ *
+ * For non-key fields we also support:
+ * - Java reference widening
+ * - primitive to primitive wrapper
+ * - Java primitive widening to corresponding primitive wrappers
+ * - Java widening of primitive wrapper to primitive wrapper
+ *
+ * For secondary key fields we ONLY support:
+ * - primitive to primitive wrapper
+ *
+ * But for primary keys and composite key fields we ONLY support:
+ * - primitive to primitive wrapper
+ * - primitive wrapper to primitive
+ * These conversions don't require any converter, since the stored format is
+ * not changed.  A WidenerInput is not used for these changes.
+ *
+ * @author Mark Hayes
+ */
+class WidenerInput extends AbstractInput {
+
+    private EntityInput input;
+    private int fromFormatId;
+    private int toFormatId;
+
+    /**
+     * Returns whether widening is supported by this class.  If false is
+     * returned by this method, then widening is disallowed and a field
+     * converter or deleter is necessary.
+     */
+    static boolean isWideningSupported(Format fromFormat,
+                                       Format toFormat,
+                                       boolean isSecKeyField) {
+        int fromFormatId = fromFormat.getId();
+        int toFormatId = toFormat.getId();
+
+        switch (fromFormatId) {
+        case Format.ID_BOOL:
+            switch (toFormatId) {
+            case Format.ID_BOOL_W:
+                return true;
+            default:
+                return false;
+            }
+        case Format.ID_BYTE:
+            switch (toFormatId) {
+            case Format.ID_BYTE_W:
+                return true;
+            case Format.ID_SHORT:
+            case Format.ID_SHORT_W:
+            case Format.ID_INT:
+            case Format.ID_INT_W:
+            case Format.ID_LONG:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_BYTE_W:
+            switch (toFormatId) {
+            case Format.ID_SHORT_W:
+            case Format.ID_INT_W:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_SHORT:
+            switch (toFormatId) {
+            case Format.ID_SHORT_W:
+                return true;
+            case Format.ID_INT:
+            case Format.ID_INT_W:
+            case Format.ID_LONG:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_SHORT_W:
+            switch (toFormatId) {
+            case Format.ID_INT_W:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_INT:
+            switch (toFormatId) {
+            case Format.ID_INT_W:
+                return true;
+            case Format.ID_LONG:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_INT_W:
+            switch (toFormatId) {
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_LONG:
+            switch (toFormatId) {
+            case Format.ID_LONG_W:
+                return true;
+            case Format.ID_FLOAT:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_LONG_W:
+            switch (toFormatId) {
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_FLOAT:
+            switch (toFormatId) {
+            case Format.ID_FLOAT_W:
+                return true;
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_FLOAT_W:
+            switch (toFormatId) {
+            case Format.ID_DOUBLE_W:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_DOUBLE:
+            switch (toFormatId) {
+            case Format.ID_DOUBLE_W:
+                return true;
+            default:
+                return false;
+            }
+        case Format.ID_CHAR:
+            switch (toFormatId) {
+            case Format.ID_CHAR_W:
+                return true;
+            case Format.ID_INT:
+            case Format.ID_INT_W:
+            case Format.ID_LONG:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        case Format.ID_CHAR_W:
+            switch (toFormatId) {
+            case Format.ID_INT_W:
+            case Format.ID_LONG_W:
+            case Format.ID_FLOAT_W:
+            case Format.ID_DOUBLE_W:
+            case Format.ID_BIGINT:
+                return !isSecKeyField;
+            default:
+                return false;
+            }
+        default:
+            return false;
+        }
+    }
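+
+    /*
+     * Illustrative sketch, not part of the original change.  For a
+     * hypothetical non-key field evolved across class versions:
+     *
+     *   @Persistent(version = 0) class Account { int balance; }
+     *   @Persistent(version = 1) class Account { long balance; }
+     *
+     * ID_INT to ID_LONG is covered above, so no Converter mutation is
+     * needed; a WidenerInput widens the stored int when the field is read.
+     * The same change on a secondary key field returns false here, so an
+     * explicit converter or deleter mutation would be required.
+     */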
+
+    WidenerInput(EntityInput input, int fromFormatId, int toFormatId) {
+        super(input.getCatalog(), input.isRawAccess());
+        this.input = input;
+        this.fromFormatId = fromFormatId;
+        this.toFormatId = toFormatId;
+    }
+
+    public void registerPriKeyObject(Object o) {
+        input.registerPriKeyObject(o);
+    }
+
+    public int readArrayLength() {
+        throw new UnsupportedOperationException();
+    }
+
+    public int readEnumConstant(String[] names) {
+        throw new UnsupportedOperationException();
+    }
+
+    public void skipField(Format declaredFormat) {
+        throw new UnsupportedOperationException();
+    }
+
+    public String readString() {
+        throw new UnsupportedOperationException();
+    }
+
+    public Object readKeyObject(Format fromFormat) {
+        return readObject();
+    }
+
+    public Object readObject() {
+        switch (fromFormatId) {
+        case Format.ID_BOOL:
+            checkToFormat(Format.ID_BOOL_W);
+            return input.readBoolean();
+        case Format.ID_BYTE:
+            return byteToObject(input.readByte());
+        case Format.ID_BYTE_W:
+            Byte b = (Byte) input.readObject();
+            return (b != null) ? byteToObject(b) : null;
+        case Format.ID_SHORT:
+            return shortToObject(input.readShort());
+        case Format.ID_SHORT_W:
+            Short s = (Short) input.readObject();
+            return (s != null) ? shortToObject(s) : null;
+        case Format.ID_INT:
+            return intToObject(input.readInt());
+        case Format.ID_INT_W:
+            Integer i = (Integer) input.readObject();
+            return (i != null) ? intToObject(i) : null;
+        case Format.ID_LONG:
+            return longToObject(input.readLong());
+        case Format.ID_LONG_W:
+            Long l = (Long) input.readObject();
+            return (l != null) ? longToObject(l) : null;
+        case Format.ID_FLOAT:
+            return floatToObject(input.readSortedFloat());
+        case Format.ID_FLOAT_W:
+            Float f = (Float) input.readObject();
+            return (f != null) ? floatToObject(f) : null;
+        case Format.ID_DOUBLE:
+            checkToFormat(Format.ID_DOUBLE_W);
+            return input.readSortedDouble();
+        case Format.ID_CHAR:
+            return charToObject(input.readChar());
+        case Format.ID_CHAR_W:
+            Character c = (Character) input.readObject();
+            return (c != null) ? charToObject(c) : null;
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    private Object byteToObject(byte v) {
+        switch (toFormatId) {
+        case Format.ID_BYTE:
+        case Format.ID_BYTE_W:
+            return Byte.valueOf(v);
+        case Format.ID_SHORT:
+        case Format.ID_SHORT_W:
+            return Short.valueOf(v);
+        case Format.ID_INT:
+        case Format.ID_INT_W:
+            return Integer.valueOf(v);
+        case Format.ID_LONG:
+        case Format.ID_LONG_W:
+            return Long.valueOf(v);
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        case Format.ID_BIGINT:
+            return BigInteger.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    private Object shortToObject(short v) {
+        switch (toFormatId) {
+        case Format.ID_SHORT:
+        case Format.ID_SHORT_W:
+            return Short.valueOf(v);
+        case Format.ID_INT:
+        case Format.ID_INT_W:
+            return Integer.valueOf(v);
+        case Format.ID_LONG:
+        case Format.ID_LONG_W:
+            return Long.valueOf(v);
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        case Format.ID_BIGINT:
+            return BigInteger.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    private Object intToObject(int v) {
+        switch (toFormatId) {
+        case Format.ID_INT:
+        case Format.ID_INT_W:
+            return Integer.valueOf(v);
+        case Format.ID_LONG:
+        case Format.ID_LONG_W:
+            return Long.valueOf(v);
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        case Format.ID_BIGINT:
+            return BigInteger.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    private Object longToObject(long v) {
+        switch (toFormatId) {
+        case Format.ID_LONG:
+        case Format.ID_LONG_W:
+            return Long.valueOf(v);
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        case Format.ID_BIGINT:
+            return BigInteger.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    private Object floatToObject(float v) {
+        switch (toFormatId) {
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    private Object charToObject(char v) {
+        switch (toFormatId) {
+        case Format.ID_CHAR:
+        case Format.ID_CHAR_W:
+            return Character.valueOf(v);
+        case Format.ID_INT:
+        case Format.ID_INT_W:
+            return Integer.valueOf(v);
+        case Format.ID_LONG:
+        case Format.ID_LONG_W:
+            return Long.valueOf(v);
+        case Format.ID_FLOAT:
+        case Format.ID_FLOAT_W:
+            return Float.valueOf(v);
+        case Format.ID_DOUBLE:
+        case Format.ID_DOUBLE_W:
+            return Double.valueOf(v);
+        case Format.ID_BIGINT:
+            return BigInteger.valueOf(v);
+        default:
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+
+    public char readChar() {
+        throw new IllegalStateException(String.valueOf(fromFormatId));
+    }
+
+    public boolean readBoolean() {
+        throw new IllegalStateException(String.valueOf(fromFormatId));
+    }
+
+    public byte readByte() {
+        throw new IllegalStateException(String.valueOf(fromFormatId));
+    }
+
+    public short readShort() {
+        checkToFormat(Format.ID_SHORT);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return input.readByte();
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    public int readInt() {
+        checkToFormat(Format.ID_INT);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return input.readByte();
+        case Format.ID_SHORT:
+            return input.readShort();
+        case Format.ID_CHAR:
+            return input.readChar();
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    public long readLong() {
+        checkToFormat(Format.ID_LONG);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return input.readByte();
+        case Format.ID_SHORT:
+            return input.readShort();
+        case Format.ID_INT:
+            return input.readInt();
+        case Format.ID_CHAR:
+            return input.readChar();
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    public float readSortedFloat() {
+        checkToFormat(Format.ID_FLOAT);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return input.readByte();
+        case Format.ID_SHORT:
+            return input.readShort();
+        case Format.ID_INT:
+            return input.readInt();
+        case Format.ID_LONG:
+            return input.readLong();
+        case Format.ID_CHAR:
+            return input.readChar();
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    public double readSortedDouble() {
+        checkToFormat(Format.ID_DOUBLE);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return input.readByte();
+        case Format.ID_SHORT:
+            return input.readShort();
+        case Format.ID_INT:
+            return input.readInt();
+        case Format.ID_LONG:
+            return input.readLong();
+        case Format.ID_FLOAT:
+            return input.readSortedFloat();
+        case Format.ID_CHAR:
+            return input.readChar();
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    public BigInteger readBigInteger() {
+        checkToFormat(Format.ID_BIGINT);
+        switch (fromFormatId) {
+        case Format.ID_BYTE:
+            return BigInteger.valueOf(input.readByte());
+        case Format.ID_SHORT:
+            return BigInteger.valueOf(input.readShort());
+        case Format.ID_INT:
+            return BigInteger.valueOf(input.readInt());
+        case Format.ID_LONG:
+            return BigInteger.valueOf(input.readLong());
+        case Format.ID_CHAR:
+            return BigInteger.valueOf(input.readChar());
+        default:
+            throw new IllegalStateException(String.valueOf(fromFormatId));
+        }
+    }
+
+    private void checkToFormat(int id) {
+        if (toFormatId != id) {
+            throw new IllegalStateException(String.valueOf(toFormatId));
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/model/AnnotationModel.java b/src/com/sleepycat/persist/model/AnnotationModel.java
new file mode 100644
index 0000000000000000000000000000000000000000..9e6217d478ea33acdc40af0767d011113b10b0f9
--- /dev/null
+++ b/src/com/sleepycat/persist/model/AnnotationModel.java
@@ -0,0 +1,406 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: AnnotationModel.java,v 1.25.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * The default annotation-based entity model.  An <code>AnnotationModel</code>
+ * is based on annotations that are specified for entity classes and their key
+ * fields.
+ *
+ * <p>{@code AnnotationModel} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code AnnotationModel} object.</p>
+ *
+ * <p>The set of persistent classes in the annotation model is the set of all
+ * classes with the {@link Persistent} or {@link Entity} annotation.</p>
+ *
+ * <p>The annotations used to define persistent classes are: {@link Entity},
+ * {@link Persistent}, {@link PrimaryKey}, {@link SecondaryKey} and {@link
+ * KeyField}.  A good starting point is {@link Entity}.</p>
+ *
+ * @author Mark Hayes
+ */
+public class AnnotationModel extends EntityModel {
+
+    private static class EntityInfo {
+        PrimaryKeyMetadata priKey;
+        Map<String,SecondaryKeyMetadata> secKeys =
+            new HashMap<String,SecondaryKeyMetadata>();
+    }
+
+    private Map<String,ClassMetadata> classMap;
+    private Map<String,EntityInfo> entityMap;
+
+    /**
+     * Constructs a model for annotated entity classes.
+     */
+    public AnnotationModel() {
+        super();
+        classMap = new HashMap<String,ClassMetadata>();
+        entityMap = new HashMap<String,EntityInfo>();
+    }
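+
+    /*
+     * Illustrative sketch, not part of the original change: a minimal
+     * annotated entity as this model would describe it (names are
+     * hypothetical).
+     *
+     *   @Entity
+     *   class Person {
+     *       @PrimaryKey(sequence = "personId")
+     *       long id;
+     *
+     *       @SecondaryKey(relate = Relationship.MANY_TO_ONE)
+     *       String lastName;
+     *   }
+     *
+     * getClassMetadata for this class returns its primary and secondary key
+     * metadata, and getEntityMetadata aggregates keys declared here and in
+     * any persistent superclasses.
+     */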
+
+    /* EntityModel methods */
+
+    @Override
+    public synchronized Set<String> getKnownClasses() {
+        return Collections.unmodifiableSet
+            (new HashSet<String>(classMap.keySet()));
+    }
+
+    @Override
+    public synchronized EntityMetadata getEntityMetadata(String className) {
+        /* Call getClassMetadata to collect metadata. */
+        getClassMetadata(className);
+        /* Return the collected entity metadata. */
+        EntityInfo info = entityMap.get(className);
+        if (info != null) {
+            return new EntityMetadata
+                (className, info.priKey,
+                 Collections.unmodifiableMap(info.secKeys));
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public synchronized ClassMetadata getClassMetadata(String className) {
+        ClassMetadata metadata = classMap.get(className);
+        if (metadata == null) {
+            Class<?> type;
+            try {
+                type = EntityModel.classForName(className);
+            } catch (ClassNotFoundException e) {
+                return null;
+            }
+            /* Get class annotation. */
+            Entity entity = type.getAnnotation(Entity.class);
+            Persistent persistent = type.getAnnotation(Persistent.class);
+            if (entity == null && persistent == null) {
+                return null;
+            }
+            if (entity != null && persistent != null) {
+                throw new IllegalArgumentException
+                    ("Both @Entity and @Persistent are not allowed: " +
+                     type.getName());
+            }
+            boolean isEntity;
+            int version;
+            String proxiedClassName;
+            if (entity != null) {
+                isEntity = true;
+                version = entity.version();
+                proxiedClassName = null;
+            } else {
+                isEntity = false;
+                version = persistent.version();
+                Class proxiedClass = persistent.proxyFor();
+                proxiedClassName = (proxiedClass != void.class) ?
+                                    proxiedClass.getName() : null;
+            }
+            /* Get instance fields. */
+            List<Field> fields = new ArrayList<Field>();
+            boolean nonDefaultRules = getInstanceFields(fields, type);
+            Collection<FieldMetadata> nonDefaultFields = null;
+            if (nonDefaultRules) {
+                nonDefaultFields = new ArrayList<FieldMetadata>(fields.size());
+                for (Field field : fields) {
+                    nonDefaultFields.add(new FieldMetadata
+                        (field.getName(), field.getType().getName(),
+                         type.getName()));
+                }
+                nonDefaultFields =
+                    Collections.unmodifiableCollection(nonDefaultFields);
+            }
+            /* Get the rest of the metadata and save it. */
+            metadata = new ClassMetadata
+                (className, version, proxiedClassName, isEntity,
+                 getPrimaryKey(type, fields),
+                 getSecondaryKeys(type, fields),
+                 getCompositeKeyFields(type, fields),
+                 nonDefaultFields);
+            classMap.put(className, metadata);
+            /* Add any new information about entities. */
+            updateEntityInfo(metadata);
+        }
+        return metadata;
+    }
+
+    /**
+     * Fills in the fields array and returns true if the default rules for
+     * field persistence were overridden.
+     */
+    private boolean getInstanceFields(List<Field> fields, Class<?> type) {
+        boolean nonDefaultRules = false;
+        for (Field field : type.getDeclaredFields()) {
+            boolean notPersistent =
+                (field.getAnnotation(NotPersistent.class) != null);
+            boolean notTransient = 
+                (field.getAnnotation(NotTransient.class) != null);
+            if (notPersistent && notTransient) {
+                throw new IllegalArgumentException
+                    ("Both @NotTransient and @NotPersistent not allowed");
+            }
+            if (notPersistent || notTransient) {
+                nonDefaultRules = true;
+            }
+            int mods = field.getModifiers();
+
+            if (!Modifier.isStatic(mods) &&
+                !notPersistent &&
+                (!Modifier.isTransient(mods) || notTransient)) {
+                /* Field is DPL persistent. */
+                fields.add(field);
+            } else {
+                /* If non-persistent, no other annotations should be used. */
+                if (field.getAnnotation(PrimaryKey.class) != null ||
+                    field.getAnnotation(SecondaryKey.class) != null ||
+                    field.getAnnotation(KeyField.class) != null) {
+                    throw new IllegalArgumentException
+                        ("@PrimaryKey, @SecondaryKey and @KeyField not " +
+                         "allowed on non-persistent field");
+                }
+            }
+        }
+        return nonDefaultRules;
+    }
+
+    private PrimaryKeyMetadata getPrimaryKey(Class<?> type,
+                                             List<Field> fields) {
+        Field foundField = null;
+        String sequence = null;
+        for (Field field : fields) {
+            PrimaryKey priKey = field.getAnnotation(PrimaryKey.class);
+            if (priKey != null) {
+                if (foundField != null) {
+                    throw new IllegalArgumentException
+                        ("Only one @PrimaryKey allowed: " + type.getName());
+                } else {
+                    foundField = field;
+                    sequence = priKey.sequence();
+                    if (sequence.length() == 0) {
+                        sequence = null;
+                    }
+                }
+            }
+        }
+        if (foundField != null) {
+            return new PrimaryKeyMetadata
+                (foundField.getName(), foundField.getType().getName(),
+                 type.getName(), sequence);
+        } else {
+            return null;
+        }
+    }
+
+    private Map<String,SecondaryKeyMetadata> getSecondaryKeys(Class<?> type,
+                                                         List<Field> fields) {
+        Map<String,SecondaryKeyMetadata> map = null;
+        for (Field field : fields) {
+            SecondaryKey secKey = field.getAnnotation(SecondaryKey.class);
+            if (secKey != null) {
+                Relationship rel = secKey.relate();
+                String elemClassName = null;
+                if (rel == Relationship.ONE_TO_MANY ||
+                    rel == Relationship.MANY_TO_MANY) {
+                    elemClassName = getElementClass(field);
+                }
+                String keyName = secKey.name();
+                if (keyName.length() == 0) {
+                    keyName = field.getName();
+                }
+                Class<?> relatedClass = secKey.relatedEntity();
+                String relatedEntity = (relatedClass != void.class) ?
+                                        relatedClass.getName() : null;
+                DeleteAction deleteAction = (relatedEntity != null) ?
+                                        secKey.onRelatedEntityDelete() : null;
+                SecondaryKeyMetadata metadata = new SecondaryKeyMetadata
+                    (field.getName(), field.getType().getName(),
+                     type.getName(), elemClassName, keyName, rel,
+                     relatedEntity, deleteAction);
+                if (map == null) {
+                    map = new HashMap<String,SecondaryKeyMetadata>();
+                }
+                if (map.put(keyName, metadata) != null) {
+                    throw new IllegalArgumentException
+                        ("Only one @SecondaryKey with the same name allowed: "
+                         + type.getName() + '.' + keyName);
+                }
+            }
+        }
+        if (map != null) {
+            map = Collections.unmodifiableMap(map);
+        }
+        return map;
+    }
+
+    private String getElementClass(Field field) {
+        Class cls = field.getType();
+        if (cls.isArray()) {
+            return cls.getComponentType().getName();
+        }
+        if (Collection.class.isAssignableFrom(cls)) {
+            Type[] typeArgs = null;
+            if (field.getGenericType() instanceof ParameterizedType) {
+                typeArgs = ((ParameterizedType) field.getGenericType()).
+                    getActualTypeArguments();
+            }
+            if (typeArgs == null ||
+                typeArgs.length != 1 ||
+                !(typeArgs[0] instanceof Class)) {
+                throw new IllegalArgumentException
+                    ("Collection typed secondary key field must have a" +
+                     " single generic type argument and a wildcard or" +
+                     " type bound is not allowed: " +
+                     field.getDeclaringClass().getName() + '.' +
+                     field.getName());
+            }
+            return ((Class) typeArgs[0]).getName();
+        }
+        throw new IllegalArgumentException
+            ("ONE_TO_MANY or MANY_TO_MANY secondary key field must have" +
+             " an array or Collection type: " +
+             field.getDeclaringClass().getName() + '.' + field.getName());
+    }
+
+    private List<FieldMetadata> getCompositeKeyFields(Class<?> type,
+                                                      List<Field> fields) {
+        List<FieldMetadata> list = null;
+        for (Field field : fields) {
+            KeyField keyField = field.getAnnotation(KeyField.class);
+            if (keyField != null) {
+                int value = keyField.value();
+                if (value < 1 || value > fields.size()) {
+                    throw new IllegalArgumentException
+                        ("Unreasonable @KeyField index value " + value +
+                         ": " + type.getName());
+                }
+                if (list == null) {
+                    list = new ArrayList<FieldMetadata>(fields.size());
+                }
+                if (value <= list.size() && list.get(value - 1) != null) {
+                    throw new IllegalArgumentException
+                        ("@KeyField index value " + value +
+                         " is used more than once: " + type.getName());
+                }
+                while (value > list.size()) {
+                    list.add(null);
+                }
+                FieldMetadata metadata = new FieldMetadata
+                    (field.getName(), field.getType().getName(),
+                     type.getName());
+                list.set(value - 1, metadata);
+            }
+        }
+        if (list != null) {
+            if (list.size() < fields.size()) {
+                throw new IllegalArgumentException
+                    ("@KeyField is missing on one or more instance fields: " +
+                     type.getName());
+            }
+            for (int i = 0; i < list.size(); i += 1) {
+                if (list.get(i) == null) {
+                    throw new IllegalArgumentException
+                        ("@KeyField is missing for index value " + (i + 1) +
+                         ": " + type.getName());
+                }
+            }
+        }
+        if (list != null) {
+            list = Collections.unmodifiableList(list);
+        }
+        return list;
+    }
+
+    /**
+     * Add newly discovered metadata to our stash of entity info.  This info
+     * is maintained as it is discovered because it would be expensive to
+     * create it on demand -- all class metadata would have to be traversed.
+     */
+    private void updateEntityInfo(ClassMetadata metadata) {
+
+        /*
+         * Find out whether this class or its superclass is an entity.  In the
+         * process, traverse all superclasses to load their metadata -- this
+         * will populate as much entity info as possible.
+         */
+        String entityClass = null;
+        PrimaryKeyMetadata priKey = null;
+        Map<String,SecondaryKeyMetadata> secKeys =
+            new HashMap<String,SecondaryKeyMetadata>();
+        for (ClassMetadata data = metadata; data != null;) {
+            if (data.isEntityClass()) {
+                if (entityClass != null) {
+                    throw new IllegalArgumentException
+                        ("An entity class may not be derived from another" +
+                         " entity class: " + entityClass +
+                         ' ' + data.getClassName());
+                }
+                entityClass = data.getClassName();
+            }
+            /* Save first primary key encountered. */
+            if (priKey == null) {
+                priKey = data.getPrimaryKey();
+            }
+            /* Save all secondary keys encountered by key name. */
+            Map<String,SecondaryKeyMetadata> classSecKeys =
+                data.getSecondaryKeys();
+            if (classSecKeys != null) {
+                for (SecondaryKeyMetadata secKey : classSecKeys.values()) {
+                    secKeys.put(secKey.getKeyName(), secKey);
+                }
+            }
+            /* Load superclass metadata. */
+            Class cls;
+            try {
+                cls = EntityModel.classForName(data.getClassName());
+            } catch (ClassNotFoundException e) {
+                throw new IllegalStateException(e);
+            }
+            cls = cls.getSuperclass();
+            if (cls != Object.class) {
+                data = getClassMetadata(cls.getName());
+                if (data == null) {
+                    throw new IllegalArgumentException
+                        ("Persistent class has non-persistent superclass: " +
+                         cls.getName());
+                }
+            } else {
+                data = null;
+            }
+        }
+
+        /* Add primary and secondary key entity info. */
+        if (entityClass != null) {
+            EntityInfo info = entityMap.get(entityClass);
+            if (info == null) {
+                info = new EntityInfo();
+                entityMap.put(entityClass, info);
+            }
+            if (priKey == null) {
+                throw new IllegalArgumentException
+                    ("Entity class has no primary key: " + entityClass);
+            }
+            info.priKey = priKey;
+            info.secKeys.putAll(secKeys);
+        }
+    }
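+
+    /*
+     * Illustrative sketch, not part of the original change: keys declared on
+     * a persistent superclass are folded into the entity's info by the
+     * traversal above (class names are hypothetical).
+     *
+     *   @Persistent class NamedThing {
+     *       @SecondaryKey(relate = Relationship.MANY_TO_ONE) String name;
+     *   }
+     *   @Entity class Employee extends NamedThing {
+     *       @PrimaryKey long id;
+     *   }
+     *
+     * getEntityMetadata for Employee reports the "name" secondary key even
+     * though it is declared on the superclass; a non-persistent superclass
+     * or a second entity class in the hierarchy is rejected above.
+     */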
+}
diff --git a/src/com/sleepycat/persist/model/BytecodeEnhancer.java b/src/com/sleepycat/persist/model/BytecodeEnhancer.java
new file mode 100644
index 0000000000000000000000000000000000000000..420514d3cc4722a3aa4a8e2adc7e12f33a71afb9
--- /dev/null
+++ b/src/com/sleepycat/persist/model/BytecodeEnhancer.java
@@ -0,0 +1,1563 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BytecodeEnhancer.java,v 1.17.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static com.sleepycat.asm.Opcodes.ACC_ABSTRACT;
+import static com.sleepycat.asm.Opcodes.ACC_PRIVATE;
+import static com.sleepycat.asm.Opcodes.ACC_PUBLIC;
+import static com.sleepycat.asm.Opcodes.ACC_STATIC;
+import static com.sleepycat.asm.Opcodes.ACC_TRANSIENT;
+import static com.sleepycat.asm.Opcodes.ACONST_NULL;
+import static com.sleepycat.asm.Opcodes.ALOAD;
+import static com.sleepycat.asm.Opcodes.ANEWARRAY;
+import static com.sleepycat.asm.Opcodes.ARETURN;
+import static com.sleepycat.asm.Opcodes.BIPUSH;
+import static com.sleepycat.asm.Opcodes.CHECKCAST;
+import static com.sleepycat.asm.Opcodes.DCMPL;
+import static com.sleepycat.asm.Opcodes.DCONST_0;
+import static com.sleepycat.asm.Opcodes.DUP;
+import static com.sleepycat.asm.Opcodes.FCMPL;
+import static com.sleepycat.asm.Opcodes.FCONST_0;
+import static com.sleepycat.asm.Opcodes.GETFIELD;
+import static com.sleepycat.asm.Opcodes.GOTO;
+import static com.sleepycat.asm.Opcodes.ICONST_0;
+import static com.sleepycat.asm.Opcodes.ICONST_1;
+import static com.sleepycat.asm.Opcodes.ICONST_2;
+import static com.sleepycat.asm.Opcodes.ICONST_3;
+import static com.sleepycat.asm.Opcodes.ICONST_4;
+import static com.sleepycat.asm.Opcodes.ICONST_5;
+import static com.sleepycat.asm.Opcodes.IFEQ;
+import static com.sleepycat.asm.Opcodes.IFGT;
+import static com.sleepycat.asm.Opcodes.IFLE;
+import static com.sleepycat.asm.Opcodes.IFNE;
+import static com.sleepycat.asm.Opcodes.IFNONNULL;
+import static com.sleepycat.asm.Opcodes.IF_ICMPNE;
+import static com.sleepycat.asm.Opcodes.ILOAD;
+import static com.sleepycat.asm.Opcodes.INVOKEINTERFACE;
+import static com.sleepycat.asm.Opcodes.INVOKESPECIAL;
+import static com.sleepycat.asm.Opcodes.INVOKESTATIC;
+import static com.sleepycat.asm.Opcodes.INVOKEVIRTUAL;
+import static com.sleepycat.asm.Opcodes.IRETURN;
+import static com.sleepycat.asm.Opcodes.ISUB;
+import static com.sleepycat.asm.Opcodes.LCMP;
+import static com.sleepycat.asm.Opcodes.LCONST_0;
+import static com.sleepycat.asm.Opcodes.NEW;
+import static com.sleepycat.asm.Opcodes.POP;
+import static com.sleepycat.asm.Opcodes.PUTFIELD;
+import static com.sleepycat.asm.Opcodes.RETURN;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.asm.AnnotationVisitor;
+import com.sleepycat.asm.Attribute;
+import com.sleepycat.asm.ClassAdapter;
+import com.sleepycat.asm.ClassVisitor;
+import com.sleepycat.asm.FieldVisitor;
+import com.sleepycat.asm.Label;
+import com.sleepycat.asm.MethodVisitor;
+import com.sleepycat.asm.Type;
+
+/**
+ * An ASM ClassVisitor that examines a class, throws NotPersistentException if
+ * it is not persistent, or enhances it if it is persistent.  A class is
+ * persistent if it contains the @Entity or @Persistent annotations.  A
+ * resulting enhanced class implements the com.sleepycat.persist.impl.Enhanced
+ * interface.
+ *
+ * <p>NotPersistentException is thrown to abort the transformation in order to
+ * avoid making two passes over the class file (one to look for the annotations
+ * and another to enhance the bytecode) or outputting a class that isn't
+ * enhanced.  By aborting the transformation as soon as we detect that the
+ * annotations are missing, we make only one partial pass for a non-persistent
+ * class.</p>
+ *
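+ * <p>Illustration only: a minimal sketch of how this visitor is typically
+ * driven with the bundled ASM classes.  The ClassWriter constructor and
+ * ClassReader.accept arguments shown assume the ASM 2.x-style API used by
+ * this package and may differ in other ASM versions:</p>
+ * <pre>
+ *  byte[] enhance(byte[] classBytes) {
+ *      ClassWriter writer = new ClassWriter(true); // true: compute maxs
+ *      BytecodeEnhancer enhancer = new BytecodeEnhancer(writer);
+ *      try {
+ *          new ClassReader(classBytes).accept(enhancer, false);
+ *          return writer.toByteArray();  // enhanced class bytes
+ *      } catch (BytecodeEnhancer.NotPersistentException e) {
+ *          return null;  // class is not annotated as persistent
+ *      }
+ *  }
+ * </pre>
+ *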
+ * @author Mark Hayes
+ */
+class BytecodeEnhancer extends ClassAdapter {
+
+    /** Thrown when we determine that a class is not persistent. */
+    @SuppressWarnings("serial")
+    static class NotPersistentException extends RuntimeException {}
+
+    /** A static instance is used to avoid fillInStackTrace overhead. */
+    private static final NotPersistentException NOT_PERSISTENT =
+        new NotPersistentException();
+
+    private static final Map<String,Integer> PRIMITIVE_WRAPPERS =
+        new HashMap<String,Integer>();
+    static {
+        PRIMITIVE_WRAPPERS.put(Boolean.class.getName(), Type.BOOLEAN);
+        PRIMITIVE_WRAPPERS.put(Character.class.getName(), Type.CHAR);
+        PRIMITIVE_WRAPPERS.put(Byte.class.getName(), Type.BYTE);
+        PRIMITIVE_WRAPPERS.put(Short.class.getName(), Type.SHORT);
+        PRIMITIVE_WRAPPERS.put(Integer.class.getName(), Type.INT);
+        PRIMITIVE_WRAPPERS.put(Long.class.getName(), Type.LONG);
+        PRIMITIVE_WRAPPERS.put(Float.class.getName(), Type.FLOAT);
+        PRIMITIVE_WRAPPERS.put(Double.class.getName(), Type.DOUBLE);
+    }
+
+    private String className;
+    private String superclassName;
+    private boolean isPersistent;
+    private boolean isAbstract;
+    private boolean hasDefaultConstructor;
+    private boolean hasPersistentSuperclass;
+    private boolean isCompositeKey;
+    private FieldInfo priKeyField;
+    private List<FieldInfo> secKeyFields;
+    private List<FieldInfo> nonKeyFields;
+    private String staticBlockMethod;
+
+    BytecodeEnhancer(ClassVisitor parentVisitor) {
+        super(parentVisitor);
+        secKeyFields = new ArrayList<FieldInfo>();
+        nonKeyFields = new ArrayList<FieldInfo>();
+    }
+
+    @Override
+    public void visit(int version,
+                      int access,
+                      String name,
+                      String sig,
+                      String superName,
+                      String[] interfaces) {
+        className = name;
+        superclassName = superName;
+        final String ENHANCED = "com/sleepycat/persist/impl/Enhanced";
+        if (containsString(interfaces, ENHANCED)) {
+            throw abort();
+        }
+        interfaces = appendString(interfaces, ENHANCED);
+        isAbstract = ((access & ACC_ABSTRACT) != 0);
+        hasPersistentSuperclass =
+            (superName != null && !superName.equals("java/lang/Object"));
+        super.visit(version, access, name, sig, superName, interfaces);
+    }
+
+    @Override
+    public void visitSource(String source, String debug) {
+        super.visitSource(source, debug);
+    }
+
+    @Override
+    public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+        if (desc.equals("Lcom/sleepycat/persist/model/Entity;") ||
+            desc.equals("Lcom/sleepycat/persist/model/Persistent;")) {
+            isPersistent = true;
+        }
+        return super.visitAnnotation(desc, visible);
+    }
+
+    @Override
+    public FieldVisitor visitField(int access,
+                                   String name,
+                                   String desc,
+                                   String sig,
+                                   Object value) {
+        if (!isPersistent) {
+            throw abort();
+        }
+        FieldVisitor ret = super.visitField(access, name, desc, sig, value);
+        if ((access & ACC_STATIC) == 0) {
+            FieldInfo info = new FieldInfo(ret, name, desc,
+                                           (access & ACC_TRANSIENT) != 0);
+            nonKeyFields.add(info);
+            ret = info;
+        }
+        return ret;
+    }
+
+    @Override
+    public MethodVisitor visitMethod(int access,
+                                     String name,
+                                     String desc,
+                                     String sig,
+                                     String[] exceptions) {
+        if (!isPersistent) {
+            throw abort();
+        }
+        if ("<init>".equals(name) && "()V".equals(desc)) {
+            hasDefaultConstructor = true;
+        }
+        if ("<clinit>".equals(name)) {
+            if (staticBlockMethod != null) {
+                throw new IllegalStateException();
+            }
+            staticBlockMethod = "bdbExistingStaticBlock";
+            return cv.visitMethod
+                (ACC_PRIVATE + ACC_STATIC, staticBlockMethod, "()V", null,
+                 null);
+        }
+        return super.visitMethod(access, name, desc, sig, exceptions);
+    }
+
+    @Override
+    public void visitEnd() {
+        if (!isPersistent || !hasDefaultConstructor) {
+            throw abort();
+        }
+        /* Generate new code at the end of the class. */
+        sortFields();
+        genBdbNewInstance();
+        genBdbNewArray();
+        genBdbIsPriKeyFieldNullOrZero();
+        genBdbWritePriKeyField();
+        genBdbReadPriKeyField();
+        genBdbWriteSecKeyFields();
+        genBdbReadSecKeyFields();
+        genBdbWriteNonKeyFields();
+        genBdbReadNonKeyFields();
+        genBdbGetField();
+        genBdbSetField();
+        genStaticBlock();
+        super.visitEnd();
+    }
+
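+    /**
+     * Partitions and orders the instance fields collected by visitField.  If
+     * every field has a KeyField annotation the class is treated as a
+     * composite key and its fields are sorted by KeyField order; otherwise
+     * transient fields are dropped, the primary key field and secondary key
+     * fields are split out, and the resulting lists are sorted by field name.
+     */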
+    private void sortFields() {
+        /*
+        System.out.println("AllFields: " + nonKeyFields);
+        //*/
+        if (nonKeyFields.size() == 0) {
+            return;
+        }
+        isCompositeKey = true;
+        for (FieldInfo field : nonKeyFields) {
+            if (field.order == null) {
+                isCompositeKey = false;
+            }
+        }
+        if (isCompositeKey) {
+            Collections.sort(nonKeyFields, new Comparator<FieldInfo>() {
+                public int compare(FieldInfo f1, FieldInfo f2) {
+                    return f1.order.value - f2.order.value;
+                }
+            });
+        } else {
+            for (int i = 0; i < nonKeyFields.size();) {
+                FieldInfo field = nonKeyFields.get(i);
+                if (field.isTransient) {
+                    nonKeyFields.remove(i);
+                } else if (field.isPriKey) {
+                    if (priKeyField == null) {
+                        priKeyField = field;
+                        nonKeyFields.remove(i);
+                    } else {
+                        /* Skip extra primary keys so the loop advances. */
+                        i += 1;
+                    }
+                } else if (field.isSecKey) {
+                    secKeyFields.add(field);
+                    nonKeyFields.remove(i);
+                } else {
+                    i += 1;
+                }
+            }
+            Comparator<FieldInfo> cmp = new Comparator<FieldInfo>() {
+                public int compare(FieldInfo f1, FieldInfo f2) {
+                    return f1.name.compareTo(f2.name);
+                }
+            };
+            Collections.sort(secKeyFields, cmp);
+            Collections.sort(nonKeyFields, cmp);
+        }
+        /*
+        System.out.println("PriKey: " + priKeyField);
+        System.out.println("SecKeys: " + secKeyFields);
+        System.out.println("NonKeys: " + nonKeyFields);
+        //*/
+    }
+
+    /**
+     * Outputs code in a static block to register the prototype instance:
+     *
+     *  static {
+     *      EnhancedAccessor.registerClass(TheClassName, new TheClass());
+     *      // or for an abstract class:
+     *      EnhancedAccessor.registerClass(TheClassName, null);
+     *  }
+     */
+    private void genStaticBlock() {
+        MethodVisitor mv =
+            cv.visitMethod(ACC_STATIC, "<clinit>", "()V", null, null);
+        mv.visitCode();
+        if (staticBlockMethod != null) {
+            mv.visitMethodInsn
+                (INVOKESTATIC, className, staticBlockMethod, "()V");
+        }
+        mv.visitLdcInsn(className.replace('/', '.'));
+        if (isAbstract) {
+            mv.visitInsn(ACONST_NULL);
+        } else {
+            mv.visitTypeInsn(NEW, className);
+            mv.visitInsn(DUP);
+            mv.visitMethodInsn(INVOKESPECIAL, className, "<init>", "()V");
+        }
+        mv.visitMethodInsn
+            (INVOKESTATIC, "com/sleepycat/persist/impl/EnhancedAccessor",
+             "registerClass",
+             "(Ljava/lang/String;Lcom/sleepycat/persist/impl/Enhanced;)V");
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(3, 0);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public Object bdbNewInstance() {
+     *      return new TheClass();
+     *      // or if abstract:
+     *      return null;
+     *  }
+     */
+    private void genBdbNewInstance() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbNewInstance", "()Ljava/lang/Object;", null, null);
+        mv.visitCode();
+        if (isAbstract) {
+            mv.visitInsn(ACONST_NULL);
+            mv.visitInsn(ARETURN);
+            mv.visitMaxs(1, 1);
+        } else {
+            mv.visitTypeInsn(NEW, className);
+            mv.visitInsn(DUP);
+            mv.visitMethodInsn(INVOKESPECIAL, className, "<init>", "()V");
+            mv.visitInsn(ARETURN);
+            mv.visitMaxs(2, 1);
+        }
+        mv.visitEnd();
+    }
+
+    /**
+     *  public Object bdbNewArray(int len) {
+     *      return new TheClass[len];
+     *      // or if abstract:
+     *      return null;
+     *  }
+     */
+    private void genBdbNewArray() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbNewArray", "(I)Ljava/lang/Object;", null, null);
+        mv.visitCode();
+        if (isAbstract) {
+            mv.visitInsn(ACONST_NULL);
+            mv.visitInsn(ARETURN);
+            mv.visitMaxs(1, 2);
+        } else {
+            mv.visitVarInsn(ILOAD, 1);
+            mv.visitTypeInsn(ANEWARRAY, className);
+            mv.visitInsn(ARETURN);
+            mv.visitMaxs(1, 2);
+        }
+        mv.visitEnd();
+    }
+
+    /**
+     *  public boolean bdbIsPriKeyFieldNullOrZero() {
+     *      return theField == null; // or zero or false, as appropriate
+     *      // or if no primary key but has superclass:
+     *      return super.bdbIsPriKeyFieldNullOrZero();
+     *  }
+     */
+    private void genBdbIsPriKeyFieldNullOrZero() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbIsPriKeyFieldNullOrZero", "()Z", null, null);
+        mv.visitCode();
+        if (priKeyField != null) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitFieldInsn
+                (GETFIELD, className, priKeyField.name,
+                 priKeyField.type.getDescriptor());
+            Label l0 = new Label();
+            if (isRefType(priKeyField.type)) {
+                mv.visitJumpInsn(IFNONNULL, l0);
+            } else {
+                genBeforeCompareToZero(mv, priKeyField.type);
+                mv.visitJumpInsn(IFNE, l0);
+            }
+            mv.visitInsn(ICONST_1);
+            Label l1 = new Label();
+            mv.visitJumpInsn(GOTO, l1);
+            mv.visitLabel(l0);
+            mv.visitInsn(ICONST_0);
+            mv.visitLabel(l1);
+        } else if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, "bdbIsPriKeyFieldNullOrZero",
+                 "()Z");
+        } else {
+            mv.visitInsn(ICONST_0);
+        }
+        mv.visitInsn(IRETURN);
+        mv.visitMaxs(1, 1);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public void bdbWritePriKeyField(EntityOutput output, Format format) {
+     *      output.writeKeyObject(theField, format);
+     *      // or
+     *      output.writeInt(theField); // and other simple types
+     *      // or if no primary key but has superclass:
+     *      return super.bdbWritePriKeyField(output, format);
+     *  }
+     */
+    private void genBdbWritePriKeyField() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbWritePriKeyField",
+             "(Lcom/sleepycat/persist/impl/EntityOutput;" +
+              "Lcom/sleepycat/persist/impl/Format;)V",
+             null, null);
+        mv.visitCode();
+        if (priKeyField != null) {
+            if (!genWriteSimpleKeyField(mv, priKeyField)) {
+                /* For a non-simple type, call EntityOutput.writeKeyObject. */
+                mv.visitVarInsn(ALOAD, 1);
+                mv.visitVarInsn(ALOAD, 0);
+                mv.visitFieldInsn
+                    (GETFIELD, className, priKeyField.name,
+                     priKeyField.type.getDescriptor());
+                mv.visitVarInsn(ALOAD, 2);
+                mv.visitMethodInsn
+                    (INVOKEINTERFACE,
+                     "com/sleepycat/persist/impl/EntityOutput",
+                     "writeKeyObject",
+                     "(Ljava/lang/Object;" +
+                      "Lcom/sleepycat/persist/impl/Format;)V");
+            }
+        } else if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitVarInsn(ALOAD, 2);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, "bdbWritePriKeyField",
+                 "(Lcom/sleepycat/persist/impl/EntityOutput;" +
+                  "Lcom/sleepycat/persist/impl/Format;)V");
+        }
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(3, 3);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public void bdbReadPriKeyField(EntityInput input, Format format) {
+     *      theField = (TheFieldClass) input.readKeyObject(format);
+     *      // or
+     *      theField = input.readInt(); // and other simple types
+     *      // or if no primary key but has superclass:
+     *      super.bdbReadPriKeyField(input, format);
+     *  }
+     */
+    private void genBdbReadPriKeyField() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbReadPriKeyField",
+             "(Lcom/sleepycat/persist/impl/EntityInput;" +
+              "Lcom/sleepycat/persist/impl/Format;)V",
+             null, null);
+        mv.visitCode();
+        if (priKeyField != null) {
+            if (!genReadSimpleKeyField(mv, priKeyField)) {
+                /* For a non-simple type, call EntityInput.readKeyObject. */
+                mv.visitVarInsn(ALOAD, 0);
+                mv.visitVarInsn(ALOAD, 1);
+                mv.visitVarInsn(ALOAD, 2);
+                mv.visitMethodInsn
+                    (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                     "readKeyObject",
+                     "(Lcom/sleepycat/persist/impl/Format;)" +
+                     "Ljava/lang/Object;");
+                mv.visitTypeInsn(CHECKCAST, getTypeInstName(priKeyField.type));
+                mv.visitFieldInsn
+                    (PUTFIELD, className, priKeyField.name,
+                     priKeyField.type.getDescriptor());
+            }
+        } else if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitVarInsn(ALOAD, 2);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, "bdbReadPriKeyField",
+                 "(Lcom/sleepycat/persist/impl/EntityInput;" +
+                  "Lcom/sleepycat/persist/impl/Format;)V");
+        }
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(3, 3);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public void bdbWriteSecKeyFields(EntityOutput output) {
+     *      output.registerPriKeyObject(priKeyField); // if an object
+     *      super.bdbWriteSecKeyFields(output); // if has super
+     *      output.writeInt(secKeyField1);
+     *      output.writeObject(secKeyField2, null);
+     *      // etc
+     *  }
+     */
+    private void genBdbWriteSecKeyFields() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbWriteSecKeyFields",
+             "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null);
+        mv.visitCode();
+        if (priKeyField != null && isRefType(priKeyField.type)) {
+            genRegisterPrimaryKey(mv, false);
+        }
+        if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, "bdbWriteSecKeyFields",
+                 "(Lcom/sleepycat/persist/impl/EntityOutput;)V");
+        }
+        for (FieldInfo field : secKeyFields) {
+            genWriteField(mv, field);
+        }
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(2, 2);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public void bdbReadSecKeyFields(EntityInput input,
+     *                                  int startField,
+     *                                  int endField,
+     *                                  int superLevel) {
+     *      input.registerPriKeyObject(priKeyField); // if an object
+     *      // if has super:
+     *      if (superLevel != 0) {
+     *          super.bdbReadSecKeyFields(..., superLevel - 1);
+     *      }
+     *      if (superLevel <= 0) {
+     *          switch (startField) {
+     *          case 0:
+     *              secKeyField1 = input.readInt();
+     *              if (endField == 0) break;
+     *          case 1:
+     *              secKeyField2 = (String) input.readObject();
+     *              if (endField == 1) break;
+     *          case 2:
+     *              secKeyField3 = input.readInt();
+     *          }
+     *      }
+     *  }
+     */
+    private void genBdbReadSecKeyFields() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbReadSecKeyFields",
+             "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null);
+        mv.visitCode();
+        if (priKeyField != null && isRefType(priKeyField.type)) {
+            genRegisterPrimaryKey(mv, true);
+        }
+        genReadSuperKeyFields(mv, true);
+        genReadFieldSwitch(mv, secKeyFields);
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(5, 5);
+        mv.visitEnd();
+    }
+
+    /**
+     *      output.registerPriKeyObject(priKeyField);
+     *      // or
+     *      input.registerPriKeyObject(priKeyField);
+     */
+    private void genRegisterPrimaryKey(MethodVisitor mv, boolean input) {
+        String entityInputOrOutputClass =
+            input ? "com/sleepycat/persist/impl/EntityInput"
+                  : "com/sleepycat/persist/impl/EntityOutput";
+        mv.visitVarInsn(ALOAD, 1);
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn
+            (GETFIELD, className, priKeyField.name,
+             priKeyField.type.getDescriptor());
+        mv.visitMethodInsn
+            (INVOKEINTERFACE, entityInputOrOutputClass, "registerPriKeyObject",
+             "(Ljava/lang/Object;)V");
+    }
+
+    /**
+     *  public void bdbWriteNonKeyFields(EntityOutput output) {
+     *      super.bdbWriteNonKeyFields(output); // if has super
+     *      output.writeInt(nonKeyField1);
+     *      output.writeObject(nonKeyField2, null);
+     *      // etc
+     *  }
+     */
+    private void genBdbWriteNonKeyFields() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbWriteNonKeyFields",
+             "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null);
+        mv.visitCode();
+        if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, "bdbWriteNonKeyFields",
+                 "(Lcom/sleepycat/persist/impl/EntityOutput;)V");
+        }
+        if (isCompositeKey) {
+            for (FieldInfo field : nonKeyFields) {
+                genWriteSimpleKeyField(mv, field);
+                /* Ignore non-simple (illegal) types for composite key. */
+            }
+        } else {
+            for (FieldInfo field : nonKeyFields) {
+                genWriteField(mv, field);
+            }
+        }
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(2, 2);
+        mv.visitEnd();
+    }
+
+    /**
+     *  public void bdbReadNonKeyFields(EntityInput input,
+     *                                  int startField,
+     *                                  int endField,
+     *                                  int superLevel) {
+     *      // if has super:
+     *      if (superLevel != 0) {
+     *          super.bdbReadNonKeyFields(..., superLevel - 1);
+     *      }
+     *      nonKeyField1 = input.readInt();
+     *      nonKeyField2 = (String) input.readObject();
+     *      // etc
+     *      // or like bdbReadSecKeyFields if not a composite key class
+     *  }
+     */
+    private void genBdbReadNonKeyFields() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbReadNonKeyFields",
+             "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null);
+        mv.visitCode();
+        if (isCompositeKey) {
+            for (FieldInfo field : nonKeyFields) {
+                genReadSimpleKeyField(mv, field);
+                /* Ignore non-simple (illegal) types for composite key. */
+            }
+        } else {
+            genReadSuperKeyFields(mv, false);
+            genReadFieldSwitch(mv, nonKeyFields);
+        }
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(5, 5);
+        mv.visitEnd();
+    }
+
+    /**
+     *      output.writeInt(field); // and other primitives
+     *      // or
+     *      output.writeObject(field, null);
+     */
+    private void genWriteField(MethodVisitor mv, FieldInfo field) {
+        mv.visitVarInsn(ALOAD, 1);
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn
+            (GETFIELD, className, field.name, field.type.getDescriptor());
+        int sort = field.type.getSort();
+        if (sort == Type.OBJECT || sort == Type.ARRAY) {
+            mv.visitInsn(ACONST_NULL);
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeObject",
+                 "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V");
+        } else {
+            genWritePrimitive(mv, sort);
+        }
+    }
+
+    /**
+     * Generates writing of a simple type key field, or returns false if the
+     * key field is not a simple type (i.e., it is a composite key type).
+     *
+     *      output.writeInt(theField); // and other primitives
+     *      // or
+     *      output.writeInt(theField.intValue()); // and other simple types
+     *      // or returns false
+     */
+    private boolean genWriteSimpleKeyField(MethodVisitor mv, FieldInfo field) {
+        if (genWritePrimitiveField(mv, field)) {
+            return true;
+        }
+        String fieldClassName = field.type.getClassName();
+        if (!isSimpleRefType(fieldClassName)) {
+            return false;
+        }
+        mv.visitVarInsn(ALOAD, 1);
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn
+            (GETFIELD, className, field.name, field.type.getDescriptor());
+        Integer sort = PRIMITIVE_WRAPPERS.get(fieldClassName);
+        if (sort != null) {
+            genUnwrapPrimitive(mv, sort);
+            genWritePrimitive(mv, sort);
+        } else if (fieldClassName.equals(Date.class.getName())) {
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/util/Date", "getTime", "()J");
+            genWritePrimitive(mv, Type.LONG);
+        } else if (fieldClassName.equals(String.class.getName())) {
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeString",
+                 "(Ljava/lang/String;)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            mv.visitInsn(POP);
+        } else if (fieldClassName.equals(BigInteger.class.getName())) {
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeBigInteger",
+             "(Ljava/math/BigInteger;)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            mv.visitInsn(POP);
+        } else {
+            throw new IllegalStateException(fieldClassName);
+        }
+        return true;
+    }
+
+    private boolean genWritePrimitiveField(MethodVisitor mv, FieldInfo field) {
+        int sort = field.type.getSort();
+        if (sort == Type.OBJECT || sort == Type.ARRAY) {
+            return false;
+        }
+        mv.visitVarInsn(ALOAD, 1);
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitFieldInsn
+            (GETFIELD, className, field.name, field.type.getDescriptor());
+        genWritePrimitive(mv, sort);
+        return true;
+    }
+
+    /**
+     *      // if has super:
+     *      if (superLevel != 0) {
+     *          super.bdbReadXxxKeyFields(..., superLevel - 1);
+     *      }
+     */
+    private void genReadSuperKeyFields(MethodVisitor mv,
+                                       boolean areSecKeyFields) {
+        if (hasPersistentSuperclass) {
+            Label next = new Label();
+            mv.visitVarInsn(ILOAD, 4);
+            mv.visitJumpInsn(IFEQ, next);
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitVarInsn(ILOAD, 2);
+            mv.visitVarInsn(ILOAD, 3);
+            mv.visitVarInsn(ILOAD, 4);
+            mv.visitInsn(ICONST_1);
+            mv.visitInsn(ISUB);
+            String name = areSecKeyFields ? "bdbReadSecKeyFields"
+                                          : "bdbReadNonKeyFields";
+            mv.visitMethodInsn
+                (INVOKESPECIAL, superclassName, name,
+                 "(Lcom/sleepycat/persist/impl/EntityInput;III)V");
+            mv.visitLabel(next);
+        }
+    }
+
+    /**
+     *  public void bdbReadXxxKeyFields(EntityInput input,
+     *                                  int startField,
+     *                                  int endField,
+     *                                  int superLevel) {
+     *      // ...
+     *      if (superLevel <= 0) {
+     *          switch (startField) {
+     *          case 0:
+     *              keyField1 = input.readInt();
+     *              if (endField == 0) break;
+     *          case 1:
+     *              keyField2 = (String) input.readObject();
+     *              if (endField == 1) break;
+     *          case 2:
+     *              keyField3 = input.readInt();
+     *          }
+     *      }
+     */
+    private void genReadFieldSwitch(MethodVisitor mv, List<FieldInfo> fields) {
+        int nFields = fields.size();
+        if (nFields > 0) {
+            mv.visitVarInsn(ILOAD, 4);
+            Label pastSwitch = new Label();
+            mv.visitJumpInsn(IFGT, pastSwitch);
+            Label[] labels = new Label[nFields];
+            for (int i = 0; i < nFields; i += 1) {
+                labels[i] = new Label();
+            }
+            mv.visitVarInsn(ILOAD, 2);
+            mv.visitTableSwitchInsn(0, nFields - 1, pastSwitch, labels);
+            for (int i = 0; i < nFields; i += 1) {
+                FieldInfo field = fields.get(i);
+                mv.visitLabel(labels[i]);
+                genReadField(mv, field);
+                if (i < nFields - 1) {
+                    Label nextCase = labels[i + 1];
+                    mv.visitVarInsn(ILOAD, 3);
+                    if (i == 0) {
+                        mv.visitJumpInsn(IFNE, nextCase);
+                    } else {
+                        switch (i) {
+                        case 1:
+                            mv.visitInsn(ICONST_1);
+                            break;
+                        case 2:
+                            mv.visitInsn(ICONST_2);
+                            break;
+                        case 3:
+                            mv.visitInsn(ICONST_3);
+                            break;
+                        case 4:
+                            mv.visitInsn(ICONST_4);
+                            break;
+                        case 5:
+                            mv.visitInsn(ICONST_5);
+                            break;
+                        default:
+                            mv.visitIntInsn(BIPUSH, i);
+                        }
+                        mv.visitJumpInsn(IF_ICMPNE, nextCase);
+                    }
+                    mv.visitJumpInsn(GOTO, pastSwitch);
+                }
+            }
+            mv.visitLabel(pastSwitch);
+        }
+    }
+
+    /**
+     *      field = input.readInt(); // and other primitives
+     *      // or
+     *      field = (FieldClass) input.readObject();
+     */
+    private void genReadField(MethodVisitor mv, FieldInfo field) {
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitVarInsn(ALOAD, 1);
+        if (isRefType(field.type)) {
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readObject", "()Ljava/lang/Object;");
+            mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type));
+        } else {
+            genReadPrimitive(mv, field.type.getSort());
+        }
+        mv.visitFieldInsn
+            (PUTFIELD, className, field.name, field.type.getDescriptor());
+    }
+
+    /**
+     * Generates reading of a simple type key field, or returns false if the
+     * key field is not a simple type (i.e., it is a composite key type).
+     *
+     *      field = input.readInt(); // and other primitives
+     *      // or
+     *      field = Integer.valueOf(input.readInt()); // and other simple types
+     *      // or returns false
+     */
+    private boolean genReadSimpleKeyField(MethodVisitor mv, FieldInfo field) {
+        if (genReadPrimitiveField(mv, field)) {
+            return true;
+        }
+        String fieldClassName = field.type.getClassName();
+        if (!isSimpleRefType(fieldClassName)) {
+            return false;
+        }
+        Integer sort = PRIMITIVE_WRAPPERS.get(fieldClassName);
+        if (sort != null) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            genReadPrimitive(mv, sort);
+            genWrapPrimitive(mv, sort);
+        } else if (fieldClassName.equals(Date.class.getName())) {
+            /* Date is a special case because we use NEW instead of valueOf. */
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitTypeInsn(NEW, "java/util/Date");
+            mv.visitInsn(DUP);
+            mv.visitVarInsn(ALOAD, 1);
+            genReadPrimitive(mv, Type.LONG);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, "java/util/Date", "<init>", "(J)V");
+        } else if (fieldClassName.equals(String.class.getName())) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readString", "()Ljava/lang/String;");
+        } else if (fieldClassName.equals(BigInteger.class.getName())) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readBigInteger", "()Ljava/math/BigInteger;");
+        } else {
+            throw new IllegalStateException(fieldClassName);
+        }
+        mv.visitFieldInsn
+            (PUTFIELD, className, field.name, field.type.getDescriptor());
+        return true;
+    }
+
+    private boolean genReadPrimitiveField(MethodVisitor mv, FieldInfo field) {
+        int sort = field.type.getSort();
+        if (sort == Type.OBJECT || sort == Type.ARRAY) {
+            return false;
+        }
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitVarInsn(ALOAD, 1);
+        genReadPrimitive(mv, sort);
+        mv.visitFieldInsn
+            (PUTFIELD, className, field.name, field.type.getDescriptor());
+        return true;
+    }
+
+    /**
+     *  public Object bdbGetField(Object o,
+     *                            int field,
+     *                            int superLevel,
+     *                            boolean isSecField) {
+     *      if (superLevel > 0) {
+     *          // if has superclass:
+     *          return super.bdbGetField
+     *              (o, field, superLevel - 1, isSecField);
+     *      } else if (isSecField) {
+     *          switch (field) {
+     *          case 0:
+     *              return Integer.valueOf(f2);
+     *          case 1:
+     *              return f3;
+     *          case 2:
+     *              return f4;
+     *          }
+     *      } else {
+     *          switch (field) {
+     *          case 0:
+     *              return Integer.valueOf(f5);
+     *          case 1:
+     *              return f6;
+     *          case 2:
+     *              return f7;
+     *          }
+     *      }
+     *      return null;
+     *  }
+     */
+    private void genBdbGetField() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbGetField",
+             "(Ljava/lang/Object;IIZ)Ljava/lang/Object;", null, null);
+        mv.visitCode();
+        mv.visitVarInsn(ILOAD, 3);
+        Label l0 = new Label();
+        mv.visitJumpInsn(IFLE, l0);
+        Label l1 = new Label();
+        if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitVarInsn(ILOAD, 2);
+            mv.visitVarInsn(ILOAD, 3);
+            mv.visitInsn(ICONST_1);
+            mv.visitInsn(ISUB);
+            mv.visitVarInsn(ILOAD, 4);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, className, "bdbGetField",
+                 "(Ljava/lang/Object;IIZ)Ljava/lang/Object;");
+            mv.visitInsn(ARETURN);
+        } else {
+            mv.visitJumpInsn(GOTO, l1);
+        }
+        mv.visitLabel(l0);
+        mv.visitVarInsn(ILOAD, 4);
+        Label l2 = new Label();
+        mv.visitJumpInsn(IFEQ, l2);
+        genGetFieldSwitch(mv, secKeyFields, l1);
+        mv.visitLabel(l2);
+        genGetFieldSwitch(mv, nonKeyFields, l1);
+        mv.visitLabel(l1);
+        mv.visitInsn(ACONST_NULL);
+        mv.visitInsn(ARETURN);
+        mv.visitMaxs(1, 5);
+        mv.visitEnd();
+    }
+
+    /**
+     *  mv.visitVarInsn(ILOAD, 2);
+     *  Label l0 = new Label();
+     *  Label l1 = new Label();
+     *  Label l2 = new Label();
+     *  mv.visitTableSwitchInsn(0, 2, TheDefLabel, new Label[] { l0, l1, l2 });
+     *  mv.visitLabel(l0);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitFieldInsn(GETFIELD, TheClassName, "f2", "I");
+     *  mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "valueOf",
+     *                     "(I)Ljava/lang/Integer;");
+     *  mv.visitInsn(ARETURN);
+     *  mv.visitLabel(l1);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitFieldInsn(GETFIELD, TheClassName, "f3", "Ljava/lang/String;");
+     *  mv.visitInsn(ARETURN);
+     *  mv.visitLabel(l2);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitFieldInsn(GETFIELD, TheClassName, "f4", "Ljava/lang/String;");
+     *  mv.visitInsn(ARETURN);
+     */
+    private void genGetFieldSwitch(MethodVisitor mv,
+                                   List<FieldInfo> fields,
+                                   Label defaultLabel) {
+        int nFields = fields.size();
+        if (nFields == 0) {
+            mv.visitJumpInsn(GOTO, defaultLabel);
+            return;
+        }
+        Label[] labels = new Label[nFields];
+        for (int i = 0; i < nFields; i += 1) {
+            labels[i] = new Label();
+        }
+        mv.visitVarInsn(ILOAD, 2);
+        mv.visitTableSwitchInsn(0, nFields - 1, defaultLabel, labels);
+        for (int i = 0; i < nFields; i += 1) {
+            FieldInfo field = fields.get(i);
+            mv.visitLabel(labels[i]);
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitFieldInsn
+                (GETFIELD, className, field.name, field.type.getDescriptor());
+            if (!isRefType(field.type)) {
+                genWrapPrimitive(mv, field.type.getSort());
+            }
+            mv.visitInsn(ARETURN);
+        }
+    }
+
+    /**
+     *  public void bdbSetField(Object o,
+     *                          int field,
+     *                          int superLevel,
+     *                          boolean isSecField,
+     *                          Object value) {
+     *      if (superLevel > 0) {
+     *          // if has superclass:
+     *          super.bdbSetField
+     *              (o, field, superLevel - 1, isSecField, value);
+     *      } else if (isSecField) {
+     *          switch (field) {
+     *          case 0:
+     *              f2 = ((Integer) value).intValue();
+     *          case 1:
+     *              f3 = (String) value;
+     *          case 2:
+     *              f4 = (String) value;
+     *          }
+     *      } else {
+     *          switch (field) {
+     *          case 0:
+     *              f5 = ((Integer) value).intValue();
+     *          case 1:
+     *              f6 = (String) value;
+     *          case 2:
+     *              f7 = (String) value;
+     *          }
+     *      }
+     *  }
+     */
+    private void genBdbSetField() {
+        MethodVisitor mv = cv.visitMethod
+            (ACC_PUBLIC, "bdbSetField",
+             "(Ljava/lang/Object;IIZLjava/lang/Object;)V", null, null);
+        mv.visitCode();
+        mv.visitVarInsn(ILOAD, 3);
+        Label l0 = new Label();
+        mv.visitJumpInsn(IFLE, l0);
+        if (hasPersistentSuperclass) {
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitVarInsn(ILOAD, 2);
+            mv.visitVarInsn(ILOAD, 3);
+            mv.visitInsn(ICONST_1);
+            mv.visitInsn(ISUB);
+            mv.visitVarInsn(ILOAD, 4);
+            mv.visitVarInsn(ALOAD, 5);
+            mv.visitMethodInsn
+                (INVOKESPECIAL, className, "bdbSetField",
+                 "(Ljava/lang/Object;IIZLjava/lang/Object;)V");
+        }
+        mv.visitInsn(RETURN);
+        mv.visitLabel(l0);
+        mv.visitVarInsn(ILOAD, 4);
+        Label l2 = new Label();
+        mv.visitJumpInsn(IFEQ, l2);
+        Label l1 = new Label();
+        genSetFieldSwitch(mv, secKeyFields, l1);
+        mv.visitLabel(l2);
+        genSetFieldSwitch(mv, nonKeyFields, l1);
+        mv.visitLabel(l1);
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(2, 6);
+        mv.visitEnd();
+    }
+
+    /**
+     *  mv.visitVarInsn(ILOAD, 2);
+     *  Label l0 = new Label();
+     *  Label l1 = new Label();
+     *  Label l2 = new Label();
+     *  mv.visitTableSwitchInsn(0, 2, TheDefLabel, new Label[] { l0, l1, l2 });
+     *  mv.visitLabel(l0);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitVarInsn(ALOAD, 5);
+     *  mv.visitTypeInsn(CHECKCAST, "java/lang/Integer");
+     *  mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Integer", "intValue",
+     *                     "()I");
+     *  mv.visitFieldInsn(PUTFIELD, TheClassName, "f2", "I");
+     *  mv.visitLabel(l1);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitVarInsn(ALOAD, 5);
+     *  mv.visitTypeInsn(CHECKCAST, "java/lang/String");
+     *  mv.visitFieldInsn(PUTFIELD, TheClassName, "f3", "Ljava/lang/String;");
+     *  mv.visitLabel(l2);
+     *  mv.visitVarInsn(ALOAD, 0);
+     *  mv.visitVarInsn(ALOAD, 5);
+     *  mv.visitTypeInsn(CHECKCAST, "java/lang/String");
+     *  mv.visitFieldInsn(PUTFIELD, TheClassName, "f4", "Ljava/lang/String;");
+     */
+    private void genSetFieldSwitch(MethodVisitor mv,
+                                   List<FieldInfo> fields,
+                                   Label defaultLabel) {
+        int nFields = fields.size();
+        if (nFields == 0) {
+            mv.visitJumpInsn(GOTO, defaultLabel);
+            return;
+        }
+        Label[] labels = new Label[nFields];
+        for (int i = 0; i < nFields; i += 1) {
+            labels[i] = new Label();
+        }
+        mv.visitVarInsn(ILOAD, 2);
+        mv.visitTableSwitchInsn(0, nFields - 1, defaultLabel, labels);
+        for (int i = 0; i < nFields; i += 1) {
+            FieldInfo field = fields.get(i);
+            mv.visitLabel(labels[i]);
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 5);
+            if (isRefType(field.type)) {
+                mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type));
+            } else {
+                int sort = field.type.getSort();
+                mv.visitTypeInsn
+                    (CHECKCAST,
+                     getPrimitiveWrapperClass(sort).replace('.', '/'));
+                genUnwrapPrimitive(mv, sort);
+            }
+            mv.visitFieldInsn
+                (PUTFIELD, className, field.name, field.type.getDescriptor());
+            mv.visitInsn(RETURN);
+        }
+    }
+
+    private void genWritePrimitive(MethodVisitor mv, int sort) {
+        switch (sort) {
+        case Type.BOOLEAN:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeBoolean", "(Z)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.CHAR:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeChar", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.BYTE:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeByte", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.SHORT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeShort", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.INT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.LONG:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeLong", "(J)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.FLOAT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeSortedFloat",
+                 "(F)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        case Type.DOUBLE:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput",
+                 "writeSortedDouble",
+                 "(D)Lcom/sleepycat/bind/tuple/TupleOutput;");
+            break;
+        default:
+            throw new IllegalStateException(String.valueOf(sort));
+        }
+        /* The write methods always return 'this' and we always discard it. */
+        mv.visitInsn(POP);
+    }
+
+    private void genReadPrimitive(MethodVisitor mv, int sort) {
+        switch (sort) {
+        case Type.BOOLEAN:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readBoolean", "()Z");
+            break;
+        case Type.CHAR:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readChar", "()C");
+            break;
+        case Type.BYTE:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readByte", "()B");
+            break;
+        case Type.SHORT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readShort", "()S");
+            break;
+        case Type.INT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readInt", "()I");
+            break;
+        case Type.LONG:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readLong", "()J");
+            break;
+        case Type.FLOAT:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readSortedFloat", "()F");
+            break;
+        case Type.DOUBLE:
+            mv.visitMethodInsn
+                (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput",
+                 "readSortedDouble", "()D");
+            break;
+        default:
+            throw new IllegalStateException(String.valueOf(sort));
+        }
+    }
+
+    private void genWrapPrimitive(MethodVisitor mv, int sort) {
+        switch (sort) {
+        case Type.BOOLEAN:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Boolean", "valueOf",
+                 "(Z)Ljava/lang/Boolean;");
+            break;
+        case Type.CHAR:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Character", "valueOf",
+                 "(C)Ljava/lang/Character;");
+            break;
+        case Type.BYTE:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Byte", "valueOf",
+                 "(B)Ljava/lang/Byte;");
+            break;
+        case Type.SHORT:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Short", "valueOf",
+                 "(S)Ljava/lang/Short;");
+            break;
+        case Type.INT:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Integer", "valueOf",
+                 "(I)Ljava/lang/Integer;");
+            break;
+        case Type.LONG:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Long", "valueOf",
+                 "(J)Ljava/lang/Long;");
+            break;
+        case Type.FLOAT:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Float", "valueOf",
+                 "(F)Ljava/lang/Float;");
+            break;
+        case Type.DOUBLE:
+            mv.visitMethodInsn
+                (INVOKESTATIC, "java/lang/Double", "valueOf",
+                 "(D)Ljava/lang/Double;");
+            break;
+        default:
+            throw new IllegalStateException(String.valueOf(sort));
+        }
+    }
+
+    private void genUnwrapPrimitive(MethodVisitor mv, int sort) {
+        switch (sort) {
+        case Type.BOOLEAN:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Boolean", "booleanValue", "()Z");
+            break;
+        case Type.CHAR:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Character", "charValue", "()C");
+            break;
+        case Type.BYTE:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Byte", "byteValue", "()B");
+            break;
+        case Type.SHORT:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Short", "shortValue", "()S");
+            break;
+        case Type.INT:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Integer", "intValue", "()I");
+            break;
+        case Type.LONG:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Long", "longValue", "()J");
+            break;
+        case Type.FLOAT:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Float", "floatValue", "()F");
+            break;
+        case Type.DOUBLE:
+            mv.visitMethodInsn
+                (INVOKEVIRTUAL, "java/lang/Double", "doubleValue", "()D");
+            break;
+        default:
+            throw new IllegalStateException(String.valueOf(sort));
+        }
+    }
+
+    /**
+     * Returns the type name for a visitTypeInsn operand, which is the internal
+     * name for an object type and the descriptor for an array type.  Must not
+     * be called for a non-reference type.
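+     * For example (illustrative values only): the internal name of an object
+     * type is of the form "java/lang/String", while the descriptor of an
+     * array type is of the form "[Ljava/lang/String;".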
+     */
+    private static String getTypeInstName(Type type) {
+        if (type.getSort() == Type.OBJECT) {
+            return type.getInternalName();
+        } else if (type.getSort() == Type.ARRAY) {
+            return type.getDescriptor();
+        } else {
+            throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * Call this method before comparing a non-reference operand to zero as an
+     * int, for example, with IFNE, IFEQ, IFLT, etc.  If the operand is a long,
+     * float or double, this method will compare it to zero and leave the
+     * result as an int operand.
+     */
+    private static void genBeforeCompareToZero(MethodVisitor mv, Type type) {
+        switch (type.getSort()) {
+        case Type.LONG:
+            mv.visitInsn(LCONST_0);
+            mv.visitInsn(LCMP);
+            break;
+        case Type.FLOAT:
+            mv.visitInsn(FCONST_0);
+            mv.visitInsn(FCMPL);
+            break;
+        case Type.DOUBLE:
+            mv.visitInsn(DCONST_0);
+            mv.visitInsn(DCMPL);
+            break;
+        }
+    }
+
+    /**
+     * Returns true if the given class is a primitive wrapper, BigInteger,
+     * Date or String.
+     */
+    static boolean isSimpleRefType(String className) {
+        return (PRIMITIVE_WRAPPERS.containsKey(className) ||
+                className.equals(BigInteger.class.getName()) ||
+                className.equals(Date.class.getName()) ||
+                className.equals(String.class.getName()));
+    }
+
+    /**
+     * Returns the wrapper class for a primitive.
+     */
+    static String getPrimitiveWrapperClass(int primitiveSort) {
+        for (Map.Entry<String,Integer> entry : PRIMITIVE_WRAPPERS.entrySet()) {
+            if (entry.getValue() == primitiveSort) {
+                return entry.getKey();
+            }
+        }
+        throw new IllegalStateException(String.valueOf(primitiveSort));
+    }
+
+    /**
+     * Returns true if the given type is an object or array.
+     */
+    private static boolean isRefType(Type type) {
+        int sort = type.getSort();
+        return (sort == Type.OBJECT || sort == Type.ARRAY);
+    }
+
+    /**
+     * Returns whether a string array contains a given string.
+     */
+    private static boolean containsString(String[] a, String s) {
+        if (a != null) {
+            for (String t : a) {
+                if (s.equals(t)) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Appends a string to a string array.
+     */
+    private static String[] appendString(String[] a, String s) {
+        if (a != null) {
+            int len = a.length;
+            String[] a2 = new String[len + 1];
+            System.arraycopy(a, 0, a2, 0, len);
+            a2[len] = s;
+            return a2;
+        } else {
+            return new String[] { s };
+        }
+    }
+
+    /**
+     * Aborts the enhancement process when we determine that enhancement is
+     * unnecessary or not possible.
+     */
+    private NotPersistentException abort() {
+        return NOT_PERSISTENT;
+    }
+
+    private static class FieldInfo implements FieldVisitor {
+
+        FieldVisitor parent;
+        String name;
+        Type type;
+        OrderInfo order;
+        boolean isPriKey;
+        boolean isSecKey;
+        boolean isTransient;
+
+        FieldInfo(FieldVisitor parent,
+                  String name,
+                  String desc,
+                  boolean isTransient) {
+            this.parent = parent;
+            this.name = name;
+            this.isTransient = isTransient;
+            type = Type.getType(desc);
+        }
+
+        public AnnotationVisitor visitAnnotation(String desc,
+                                                 boolean visible) {
+            AnnotationVisitor ret = parent.visitAnnotation(desc, visible);
+            if (desc.equals
+                    ("Lcom/sleepycat/persist/model/KeyField;")) {
+                order = new OrderInfo(ret);
+                ret = order;
+            } else if (desc.equals
+                    ("Lcom/sleepycat/persist/model/PrimaryKey;")) {
+                isPriKey = true;
+            } else if (desc.equals
+                    ("Lcom/sleepycat/persist/model/SecondaryKey;")) {
+                isSecKey = true;
+            } else if (desc.equals
+                    ("Lcom/sleepycat/persist/model/NotPersistent;")) {
+                isTransient = true;
+            } else if (desc.equals
+                    ("Lcom/sleepycat/persist/model/NotTransient;")) {
+                isTransient = false;
+            }
+            return ret;
+        }
+
+        public void visitAttribute(Attribute attr) {
+            parent.visitAttribute(attr);
+        }
+
+        public void visitEnd() {
+            parent.visitEnd();
+        }
+
+        @Override
+        public String toString() {
+            String label;
+            if (isPriKey) {
+                label = "PrimaryKey";
+            } else if (isSecKey) {
+                label = "SecondaryKey";
+            } else if (order != null) {
+                label = "CompositeKeyField " + order.value;
+            } else {
+                label = "NonKeyField";
+            }
+            return "[" + label + ' ' + name + ' ' + type + ']';
+        }
+    }
+
+    private static class OrderInfo extends AnnotationInfo {
+
+        int value;
+
+        OrderInfo(AnnotationVisitor parent) {
+            super(parent);
+        }
+
+        @Override
+        public void visit(String name, Object value) {
+            if (name.equals("value")) {
+                this.value = (Integer) value;
+            }
+            parent.visit(name, value);
+        }
+    }
+
+    private static abstract class AnnotationInfo implements AnnotationVisitor {
+
+        AnnotationVisitor parent;
+
+        AnnotationInfo(AnnotationVisitor parent) {
+            this.parent = parent;
+        }
+
+        public void visit(String name, Object value) {
+            parent.visit(name, value);
+        }
+
+        public AnnotationVisitor visitAnnotation(String name, String desc) {
+            return parent.visitAnnotation(name, desc);
+        }
+
+        public AnnotationVisitor visitArray(String name) {
+            return parent.visitArray(name);
+        }
+
+        public void visitEnum(String name, String desc, String value) {
+            parent.visitEnum(name, desc, value);
+        }
+
+        public void visitEnd() {
+            parent.visitEnd();
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/model/ClassEnhancer.java b/src/com/sleepycat/persist/model/ClassEnhancer.java
new file mode 100644
index 0000000000000000000000000000000000000000..34ae5b5207be8214f67ac47f2cd4771663394ffa
--- /dev/null
+++ b/src/com/sleepycat/persist/model/ClassEnhancer.java
@@ -0,0 +1,315 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ClassEnhancer.java,v 1.18.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.Instrumentation;
+import java.security.ProtectionDomain;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import com.sleepycat.asm.ClassReader;
+import com.sleepycat.asm.ClassVisitor;
+import com.sleepycat.asm.ClassWriter;
+
+/**
+ * Enhances the bytecode of persistent classes to provide efficient access to
+ * fields and constructors, and to avoid special security policy settings for
+ * accessing non-public members.  Classes are enhanced if they are annotated
+ * with {@link Entity} or {@link Persistent}.
+ *
+ * <p>{@code ClassEnhancer} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code ClassEnhancer} object.</p>
+ *
+ * <p>As described in the <a
+ * href="../package-summary.html#bytecode">package summary</a>, bytecode
+ * enhancement may be used either at runtime or offline (at build time).</p>
+ *
+ * <p>To use enhancement offline, this class may be used as a {@link #main main
+ * program}.
+ * <!-- begin JE only -->
+ * It may also be used via an {@link ClassEnhancerTask ant task}.
+ * <!-- end JE only -->
+ * </p>
+ *
+ * <p>For enhancement at runtime, this class provides the low level support
+ * needed to transform class bytes during class loading.  To configure runtime
+ * enhancement you may use one of the following approaches:</p>
+ * <ol>
+ * <li>For Java 1.5, the {@code je-<version>.jar} file may be used as an instrumentation
+ * agent as follows:
+ * <pre class="code">{@literal java -javaagent:lib/je-<version>.jar=enhance:packageNames ...}</pre>
+ * {@code packageNames} is a comma separated list of packages containing
+ * persistent classes.  Sub-packages of these packages are also searched.  If
+ * {@code packageNames} is omitted then all packages known to the current
+ * classloader are searched.
+ * <p>The "-v" option may be included in the comma separated list to print the
+ * name of each class that is enhanced.</p></li>
+ * <br>
+ * <li>The {@link #enhance} method may be called to implement a class loader
+ * that performs enhancement, as shown in the sketch following this list.
+ * Using this approach, it is the developer's responsibility to implement and
+ * configure the class loader.</li>
+ * </ol>
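+ *
+ * <p>As a rough sketch of the second approach, an enhancing class loader
+ * might override {@code findClass} to pass each class file through {@link
+ * #enhance} before defining the class.  The {@code readClassBytes} helper
+ * below is hypothetical and must be supplied by the application:</p>
+ * <pre class="code">
+ *  class EnhancingLoader extends ClassLoader {
+ *
+ *      private final ClassEnhancer enhancer = new ClassEnhancer();
+ *
+ *      EnhancingLoader(ClassLoader parent) {
+ *          super(parent);
+ *      }
+ *
+ *      protected Class findClass(String name)
+ *          throws ClassNotFoundException {
+ *
+ *          // Load the raw class file bytes (application-specific).
+ *          byte[] bytes = readClassBytes(name);
+ *
+ *          // Enhance if persistent; enhance returns null otherwise.
+ *          byte[] enhanced = enhancer.enhance(name, bytes);
+ *          if (enhanced != null) {
+ *              bytes = enhanced;
+ *          }
+ *          return defineClass(name, bytes, 0, bytes.length);
+ *      }
+ *  }</pre>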
+ *
+ * @author Mark Hayes
+ */
+public class ClassEnhancer implements ClassFileTransformer {
+
+    private static final String AGENT_PREFIX = "enhance:";
+
+    private Set<String> packagePrefixes;
+    private boolean verbose;
+
+    /**
+     * Enhances classes in the directories specified.  The class files are
+     * replaced when they are enhanced, without changing the file modification
+     * date.  For example:
+     *
+     * <pre class="code">java -cp je-&lt;version&gt;.jar com.sleepycat.persist.model.ClassEnhancer ./classes</pre>
+     *
+     * <p>The "-v" argument may be specified to print the name of each class
+     * file that is enhanced.  The total number of class files enhanced will
+     * always be printed.</p>
+     *
+     * @param args one or more directories containing classes to be enhanced.
+     * Subdirectories of these directories will also be searched.  Optionally,
+     * -v may be included to print the name of every class file enhanced.
+     */
+    public static void main(String[] args) throws Exception {
+        try {
+            boolean verbose = false;
+            List<File> fileList = new ArrayList<File>();
+            for (int i = 0; i < args.length; i += 1) {
+                String arg = args[i];
+                if (arg.startsWith("-")) {
+                    if ("-v".equals(args[i])) {
+                        verbose = true;
+                    } else {
+                        throw new IllegalArgumentException
+                            ("Unknown arg: " + arg);
+                    }
+                } else {
+                    fileList.add(new File(arg));
+                }
+            }
+            ClassEnhancer enhancer = new ClassEnhancer();
+            enhancer.setVerbose(verbose);
+            int nFiles = 0;
+            for (File file : fileList) {
+                nFiles += enhancer.enhanceFile(file);
+            }
+            if (nFiles > 0) {
+                System.out.println("Enhanced: " + nFiles + " files");
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /**
+     * Enhances classes as specified by a JVM -javaagent argument.
+     *
+     * @see java.lang.instrument
+     */
+    public static void premain(String args, Instrumentation inst) {
+        if (args == null || !args.startsWith(AGENT_PREFIX)) {
+            throw new IllegalArgumentException
+                ("Unknown javaagent args: " + args +
+                 " Args must start with: \"" + AGENT_PREFIX + '"');
+        }
+        args = args.substring(AGENT_PREFIX.length());
+        Set<String> packageNames = null;
+        boolean verbose = false;
+        if (args.length() > 0) {
+            packageNames = new HashSet<String>();
+            StringTokenizer tokens = new StringTokenizer(args, ",");
+            while (tokens.hasMoreTokens()) {
+                String token = tokens.nextToken();
+                if (token.startsWith("-")) {
+                    if (token.equals("-v")) {
+                        verbose = true;
+                    } else {
+                        throw new IllegalArgumentException
+                            ("Unknown javaagent arg: " + token);
+                    }
+                } else {
+                    packageNames.add(token);
+                }
+            }
+        }
+        ClassEnhancer enhancer = new ClassEnhancer(packageNames);
+        enhancer.setVerbose(verbose);
+        inst.addTransformer(enhancer);
+    }
+
+    /**
+     * Creates a class enhancer that searches all packages.
+     */
+    public ClassEnhancer() {
+    }
+
+    /**
+     * Sets verbose mode.
+     *
+     * <p>True may be specified to print the name of each class file that is
+     * enhanced.  This property is false by default.</p>
+     */
+    public void setVerbose(boolean verbose) {
+        this.verbose = verbose;
+    }
+
+    /**
+     * Gets verbose mode.
+     *
+     * @see #setVerbose
+     */
+    public boolean getVerbose() {
+        return verbose;
+    }
+
+    /**
+     * Creates a class enhancer that searches a given set of packages.
+     *
+     * @param packageNames a set of packages to search for persistent
+     * classes.  Sub-packages of these packages are also searched.  If empty or
+     * null, all packages known to the current classloader are searched.
+     */
+    public ClassEnhancer(Set<String> packageNames) {
+        if (packageNames != null) {
+            packagePrefixes = new HashSet<String>();
+            for (String name : packageNames) {
+                packagePrefixes.add(name + '.');
+            }
+        }
+    }
+
+    public byte[] transform(ClassLoader loader,
+                            String className,
+                            Class<?> classBeingRedefined,
+                            ProtectionDomain protectionDomain,
+                            byte[] classfileBuffer) {
+        className = className.replace('/', '.');
+        byte[] bytes = enhance(className, classfileBuffer);
+        if (verbose && bytes != null) {
+            System.out.println("Enhanced: " + className);
+        }
+        return bytes;
+    }
+
+    /**
+     * Enhances the given class bytes if the class is annotated with {@link
+     * Entity} or {@link Persistent}.
+     *
+     * @param className the class name in binary format; for example,
+     * "my.package.MyClass$Name", or null if no filtering by class name
+     * should be performed.
+     *
+     * @param classBytes are the class file bytes to be enhanced.
+     *
+     * @return the enhanced bytes, or null if no enhancement was performed.
+     */
+    public byte[] enhance(String className, byte[] classBytes) {
+        if (className != null && packagePrefixes != null) {
+            for (String prefix : packagePrefixes) {
+                if (className.startsWith(prefix)) {
+                    return enhanceBytes(classBytes);
+                }
+            }
+            return null;
+        } else {
+            return enhanceBytes(classBytes);
+        }
+    }
+
+    int enhanceFile(File file)
+        throws IOException {
+
+        int nFiles = 0;
+        if (file.isDirectory()) {
+            String[] names = file.list();
+            if (names != null) {
+                for (int i = 0; i < names.length; i += 1) {
+                    nFiles += enhanceFile(new File(file, names[i]));
+                }
+            }
+        } else if (file.getName().endsWith(".class")) {
+            byte[] newBytes = enhanceBytes(readFile(file));
+            if (newBytes != null) {
+                long modified = file.lastModified();
+                writeFile(file, newBytes);
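+                /*
+                 * Preserve the original modification date; the return value
+                 * of setLastModified is intentionally ignored (best effort).
+                 */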
+                file.setLastModified(modified);
+                nFiles += 1;
+                if (verbose) {
+                    System.out.println("Enhanced: " + file);
+                }
+            }
+        }
+        return nFiles;
+    }
+
+    private byte[] readFile(File file)
+        throws IOException {
+
+        byte[] bytes = new byte[(int) file.length()];
+        FileInputStream in = new FileInputStream(file);
+        try {
+            /* A single read call may return fewer bytes than requested. */
+            int off = 0;
+            while (off < bytes.length) {
+                int count = in.read(bytes, off, bytes.length - off);
+                if (count < 0) {
+                    throw new IOException("Unexpected EOF: " + file);
+                }
+                off += count;
+            }
+        } finally {
+            in.close();
+        }
+        return bytes;
+    }
+
+    private void writeFile(File file, byte[] bytes)
+        throws IOException {
+
+        FileOutputStream out = new FileOutputStream(file);
+        try {
+            out.write(bytes);
+        } finally {
+            out.close();
+        }
+    }
+
+    private byte[] enhanceBytes(byte[] bytes) {
+
+        /*
+         * The writer is at the end of the visitor chain.  Pass true to
+         * calculate stack size, for safety.
+         */
+        ClassWriter writer = new ClassWriter(true);
+        ClassVisitor visitor = writer;
+
+        /* The enhancer is at the beginning of the visitor chain. */
+        visitor = new BytecodeEnhancer(visitor);
+
+        /* The reader processes the class and invokes the visitors. */
+        ClassReader reader = new ClassReader(bytes);
+        try {
+
+            /*
+             * Pass false for skipDebug since we are rewriting the class and
+             * should include all information.
+             */
+            reader.accept(visitor, false);
+            return writer.toByteArray();
+        } catch (BytecodeEnhancer.NotPersistentException e) {
+            /* The class is not persistent and should not be enhanced. */
+            return null;
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/model/ClassEnhancerTask.java b/src/com/sleepycat/persist/model/ClassEnhancerTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..7bf0515833229f18be4b925d64d47d258b9b6944
--- /dev/null
+++ b/src/com/sleepycat/persist/model/ClassEnhancerTask.java
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ClassEnhancerTask.java,v 1.11.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.tools.ant.BuildException;
+import org.apache.tools.ant.DirectoryScanner;
+import org.apache.tools.ant.Task;
+import org.apache.tools.ant.types.FileSet;
+
+/**
+ * An {@code ant} task for running the {@link ClassEnhancer}.
+ *
+ * <p>{@code ClassEnhancerTask} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code ClassEnhancerTask} object.</p>
+ *
+ * <p>The class enhancer task element accepts a single optional {@code
+ * verbose} attribute, described below.  It may contain one or more nested
+ * {@code fileset} elements specifying the classes to be enhanced.
+ * The class files are replaced when they are enhanced, without changing the
+ * file modification date.  For example:</p>
+ *
+ * <pre class="code">
+ * {@literal <taskdef name="enhance-persistent-classes"}
+ *          {@literal classname="com.sleepycat.persist.model.ClassEnhancerTask"}
+ *          {@literal classpath="${je.home}/lib/je-<version>.jar"/>}
+ *
+ * {@literal <target name="main">}
+ *     {@literal <enhance-persistent-classes verbose="no">}
+ *         {@literal <fileset dir="classes"/>}
+ *     {@literal </enhance-persistent-classes>}
+ * {@literal </target>}</pre>
+ *
+ * <p>The verbose attribute may be specified as "true", "yes" or "on" (like
+ * other Ant boolean attributes) to print the name of each class file that is
+ * enhanced.  The total number of class files enhanced will always be
+ * printed.</p>
+ *
+ * @author Mark Hayes
+ */
+public class ClassEnhancerTask extends Task {
+
+    private List<FileSet> fileSets = new ArrayList<FileSet>();
+    private boolean verbose;
+
+    public void execute() throws BuildException {
+        if (fileSets.size() == 0) {
+            throw new BuildException("At least one fileset must be specified");
+        }
+        try {
+            int nFiles = 0;
+            ClassEnhancer enhancer = new ClassEnhancer();
+            enhancer.setVerbose(verbose);
+            for (FileSet fileSet : fileSets) {
+                DirectoryScanner scanner =
+                    fileSet.getDirectoryScanner(getProject());
+                String[] fileNames = scanner.getIncludedFiles();
+                for (String fileName : fileNames) {
+                    File file = new File(scanner.getBasedir(), fileName);
+                    try {
+                        nFiles += enhancer.enhanceFile(file);
+                    } catch (IOException e) {
+                        throw new BuildException(e);
+                    }
+                }
+            }
+            if (nFiles > 0) {
+                System.out.println("Enhanced: " + nFiles + " files");
+            }
+        } catch (RuntimeException e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    public void addConfiguredFileset(FileSet files) {
+        fileSets.add(files);
+    }
+
+    public void setVerbose(boolean verbose) {
+        this.verbose = verbose;
+    }
+}
diff --git a/src/com/sleepycat/persist/model/ClassMetadata.java b/src/com/sleepycat/persist/model/ClassMetadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..88ae3067f5d7d22420872bc180c0e28870153424
--- /dev/null
+++ b/src/com/sleepycat/persist/model/ClassMetadata.java
@@ -0,0 +1,201 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ClassMetadata.java,v 1.14.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The metadata for a persistent class.  A persistent class may be specified
+ * with the {@link Entity} or {@link Persistent} annotation.
+ *
+ * <p>{@code ClassMetadata} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code ClassMetadata} object.</p>
+ *
+ * <p>This and other metadata classes are classes rather than interfaces to
+ * allow adding properties to the model at a future date without causing
+ * incompatibilities.  Any such property will be given a default value and
+ * its use will be optional.</p>
+ *
+ * @author Mark Hayes
+ */
+public class ClassMetadata implements Serializable {
+
+    private static final long serialVersionUID = -2520207423701776679L;
+
+    private String className;
+    private int version;
+    private String proxiedClassName;
+    private boolean entityClass;
+    private PrimaryKeyMetadata primaryKey;
+    private Map<String,SecondaryKeyMetadata> secondaryKeys;
+    private List<FieldMetadata> compositeKeyFields;
+    private Collection<FieldMetadata> persistentFields;
+
+    /**
+     * Used by an {@code EntityModel} to construct persistent class metadata.
+     * The optional {@link #getPersistentFields} property will be set to null.
+     */
+    public ClassMetadata(String className,
+                         int version,
+                         String proxiedClassName,
+                         boolean entityClass,
+                         PrimaryKeyMetadata primaryKey,
+                         Map<String,SecondaryKeyMetadata> secondaryKeys,
+                         List<FieldMetadata> compositeKeyFields) {
+
+        this(className, version, proxiedClassName, entityClass, primaryKey,
+             secondaryKeys, compositeKeyFields, null /*persistentFields*/);
+    }
+
+    /**
+     * Used by an {@code EntityModel} to construct persistent class metadata.
+     */
+    public ClassMetadata(String className,
+                         int version,
+                         String proxiedClassName,
+                         boolean entityClass,
+                         PrimaryKeyMetadata primaryKey,
+                         Map<String,SecondaryKeyMetadata> secondaryKeys,
+                         List<FieldMetadata> compositeKeyFields,
+                         Collection<FieldMetadata> persistentFields) {
+        this.className = className;
+        this.version = version;
+        this.proxiedClassName = proxiedClassName;
+        this.entityClass = entityClass;
+        this.primaryKey = primaryKey;
+        this.secondaryKeys = secondaryKeys;
+        this.compositeKeyFields = compositeKeyFields;
+        this.persistentFields = persistentFields;
+    }
+
+    /**
+     * Returns the name of the persistent class.
+     */
+    public String getClassName() {
+        return className;
+    }
+
+    /**
+     * Returns the version of this persistent class.  This may be specified
+     * using the {@link Entity#version} or {@link Persistent#version}
+     * annotation.
+     */
+    public int getVersion() {
+        return version;
+    }
+
+    /**
+     * Returns the class name of the proxied class if this class is a {@link
+     * PersistentProxy}, or null otherwise.
+     */
+    public String getProxiedClassName() {
+        return proxiedClassName;
+    }
+
+    /**
+     * Returns whether this class is an entity class.
+     */
+    public boolean isEntityClass() {
+        return entityClass;
+    }
+
+    /**
+     * Returns the primary key metadata for a key declared in this class, or
+     * null if none is declared.  This may be specified using the {@link
+     * PrimaryKey} annotation.
+     */
+    public PrimaryKeyMetadata getPrimaryKey() {
+        return primaryKey;
+    }
+
+    /**
+     * Returns an unmodifiable map of field name to secondary key metadata for
+     * all secondary keys declared in this class, or null if no secondary keys
+     * are declared in this class.  This metadata may be specified using {@link
+     * SecondaryKey} annotations.
+     */
+    public Map<String,SecondaryKeyMetadata> getSecondaryKeys() {
+        return secondaryKeys;
+    }
+
+    /**
+     * Returns an unmodifiable list of metadata for the fields making up a
+     * composite key, or null if this is not a composite key class.  The
+     * order of the fields in the returned list determines their stored order
+     * and may be specified using the {@link KeyField} annotation.  When the
+     * composite key class does not implement {@link Comparable}, the order of
+     * the fields is the relative sort order.
+     */
+    public List<FieldMetadata> getCompositeKeyFields() {
+        return compositeKeyFields;
+    }
+
+    /**
+     * Returns an unmodifiable list of metadata for the persistent fields in
+     * this class, or null if the default rules for persistent fields should be
+     * used.  All fields returned must be declared in this class and must be
+     * non-static.
+     *
+     * <p>By default (if null is returned) the persistent fields of a class
+     * will be all declared instance fields that are non-transient (are not
+     * declared with the <code>transient</code> keyword).  The default rules
+     * may be overridden by an {@link EntityModel}.  For example, the {@link
+     * AnnotationModel} overrides the default rules when the {@link
+     * NotPersistent} or {@link NotTransient} annotation is specified.</p>
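+     *
+     * <p>For example, under the {@link AnnotationModel} the following
+     * hypothetical class stores the {@code stored} field but not the
+     * {@code cachedHash} field:</p>
+     * <pre class="code">
+     *  {@literal @Persistent}
+     *  class Item {
+     *      {@literal @NotPersistent}
+     *      int cachedHash;        // excluded without the transient keyword
+     *
+     *      {@literal @NotTransient}
+     *      transient int stored;  // included despite the transient keyword
+     *  }</pre>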
+     */
+    public Collection<FieldMetadata> getPersistentFields() {
+        return persistentFields;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof ClassMetadata) {
+            ClassMetadata o = (ClassMetadata) other;
+            return version == o.version &&
+                   entityClass == o.entityClass &&
+                   nullOrEqual(className, o.className) &&
+                   nullOrEqual(proxiedClassName, o.proxiedClassName) &&
+                   nullOrEqual(primaryKey, o.primaryKey) &&
+                   nullOrEqual(secondaryKeys, o.secondaryKeys) &&
+                   nullOrEqual(compositeKeyFields, o.compositeKeyFields);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return version +
+               (entityClass ? 1 : 0) +
+               hashCode(className) +
+               hashCode(proxiedClassName) +
+               hashCode(primaryKey) +
+               hashCode(secondaryKeys) +
+               hashCode(compositeKeyFields);
+    }
+
+    static boolean nullOrEqual(Object o1, Object o2) {
+        if (o1 == null) {
+            return o2 == null;
+        } else {
+            return o1.equals(o2);
+        }
+    }
+
+    static int hashCode(Object o) {
+        if (o != null) {
+            return o.hashCode();
+        } else {
+            return 0;
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/model/DeleteAction.java b/src/com/sleepycat/persist/model/DeleteAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..b842a0b484f2ec1bcea7ac1fc4abef7cb9c856e6
--- /dev/null
+++ b/src/com/sleepycat/persist/model/DeleteAction.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeleteAction.java,v 1.8.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import com.sleepycat.je.DatabaseException;
+
+/**
+ * Specifies the action to take when a related entity is deleted and its
+ * primary key value exists as a secondary key value for this entity.  This
+ * action can be specified using the {@link
+ * SecondaryKey#onRelatedEntityDelete} annotation property.
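+ *
+ * <p>For example, the following sketch (the {@code Order} and {@code
+ * Customer} classes are hypothetical) causes each {@code Order} to be
+ * deleted when its related {@code Customer} is deleted:</p>
+ * <pre class="code">
+ *  {@literal @Entity}
+ *  class Order {
+ *      {@literal @PrimaryKey}
+ *      long orderId;
+ *
+ *      {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Customer.class,}
+ *      {@literal                onRelatedEntityDelete=CASCADE)}
+ *      long customerId;
+ *  }</pre>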
+ *
+ * @author Mark Hayes
+ */
+public enum DeleteAction {
+
+    /**
+     * The default action, {@code ABORT}, means that a {@link
+     * DatabaseException} is thrown in order to abort the current transaction.
+     */
+    ABORT,
+
+    /**
+     * If {@code CASCADE} is specified, then this entity will be deleted also,
+     * which could in turn trigger further deletions, causing a cascading
+     * effect.
+     */
+    CASCADE,
+
+    /**
+     * If {@code NULLIFY} is specified, then the secondary key in this entity
+     * is set to null and this entity is updated.  For a secondary key field
+     * that has an array or collection type, the array or collection element
+     * will be removed by this action.  The secondary key field must have a
+     * reference (not a primitive) type in order to specify this action.
+     */
+    NULLIFY;
+}
diff --git a/src/com/sleepycat/persist/model/Entity.java b/src/com/sleepycat/persist/model/Entity.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e8a927d14fc56d3b5cbfe60a8f0f15eda6db2e9
--- /dev/null
+++ b/src/com/sleepycat/persist/model/Entity.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Entity.java,v 1.15.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.evolve.IncompatibleClassException;
+import com.sleepycat.persist.evolve.Mutations;
+
+/**
+ * Indicates a persistent entity class.  For each entity class, a {@link
+ * PrimaryIndex} can be used to store and access instances of that class.
+ * Optionally, one or more {@link SecondaryIndex} objects may be used to access
+ * entity instances by secondary key.
+ *
+ * <p><strong>Entity Subclasses and Superclasses</strong></p>
+ *
+ * <p>An entity class may have any number of subclasses and superclasses;
+ * however, none of these may themselves be entity classes (annotated with
+ * {@code Entity}).</p>
+ *
+ * <p>Entity superclasses are used to share common definitions and instance
+ * data.  For example, fields in an entity superclass may be defined as primary
+ * or secondary keys.</p>
+ *
+ * <p>Entity subclasses are used to provide polymorphism within a single {@code
+ * PrimaryIndex}.  Instances of the entity class and its subclasses are stored
+ * in the same {@code PrimaryIndex}.  Fields in an entity subclass may be
+ * defined as secondary keys.</p>
+ *
+ * <p>For example, the following {@code BaseClass} defines the primary key for
+ * any number of entity classes, using a single sequence to assign primary key
+ * values.  The entity class {@code Pet} extends the base class, implicitly
+ * defining a primary index that will contain instances of it and its
+ * subclasses, including {@code Cat} which is defined below.  The primary key
+ * ({@code id}) and secondary key ({@code name}) can be used to retrieve any
+ * {@code Pet} instance.</p>
+ * <pre class="code">
+ *  {@literal @Persistent}
+ *  class BaseClass {
+ *      {@literal @PrimaryKey(sequence="ID")}
+ *      long id;
+ *  }
+ *
+ *  {@literal @Entity}
+ *  class Pet extends BaseClass {
+ *      {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+ *      String name;
+ *      float height;
+ *      float weight;
+ *  }</pre>
+ *
+ * <p>The entity subclass {@code Cat} defines a secondary key ({@code
+ * finickyness}) that only applies to {@code Cat} instances.  Querying by this
+ * key will never retrieve a {@code Dog} instance, if such a subclass existed,
+ * because a {@code Dog} instance will never contain a {@code finickyness}
+ * key.</p>
+ * <pre class="code">
+ *  {@literal @Persistent}
+ *  class Cat extends Pet {
+ *      {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *      int finickyness;
+ *  }</pre>
+ *
+ * <p><strong>Persistent Fields and Types</strong></p>
+ *
+ * <p>All non-transient instance fields of an entity class, as well as its
+ * superclasses and subclasses, are persistent.  {@code static} and {@code
+ * transient} fields are not persistent.  The persistent fields of a class may
+ * be {@code private}, package-private (default access), {@code protected} or
+ * {@code public}.</p>
+ *
+ * <p>It is worthwhile to note the reasons that object persistence is defined
+ * in terms of fields rather than properties (getters and setters).  This
+ * allows business methods (getters and setters) to be defined independently of
+ * the persistent state of an object; for example, a setter method may perform
+ * validation that could not be performed if it were called during object
+ * deserialization.  Similarly, this allows public methods to evolve somewhat
+ * independently of the (typically non-public) persistent fields.</p>
+ *
+ * <p><a name="simpleTypes"><strong>Simple Types</strong></a></p>
+ *
+ * <p>Persistent types are divided into simple types, enum types, complex
+ * types, and array types.  Simple types and enum types are single valued,
+ * while array types may contain multiple elements and complex types may
+ * contain one or more named fields.</p>
+ *
+ * <p>Simple types include:</p>
+ * <ul>
+ * <li>Java primitive types: {@code boolean, char, byte, short, int, long,
+ * float, double}</li>
+ * <li>The wrapper classes for Java primitive types</li>
+ * <!--
+ * <li>{@link java.math.BigDecimal}</li>
+ * -->
+ * <li>{@link java.math.BigInteger}</li>
+ * <li>{@link java.lang.String}</li>
+ * <li>{@link java.util.Date}</li>
+ * </ul>
+ *
+ * <p>When null values are required (for optional key fields, for example),
+ * primitive wrapper classes must be used instead of primitive types.</p>
+ *
+ * <p>Simple types, enum types and array types do not require annotations to
+ * make them persistent.</p>
+ *
+ * <p><a name="proxyTypes"><strong>Complex and Proxy Types</strong></a></p>
+ *
+ * <p>Complex persistent classes must be annotated with {@link Entity} or
+ * {@link Persistent}, or must be proxied by a persistent proxy class
+ * (described below).  This includes entity classes, subclasses and
+ * superclasses, and all other complex classes referenced via fields of these
+ * classes.</p>
+ *
+ * <p>All complex persistent classes must have a default constructor.  The
+ * default constructor may be {@code private}, package-private (default
+ * access), {@code protected}, or {@code public}.  Other constructors are
+ * allowed but are not used by the persistence mechanism.</p>
+ *
+ * <p>It is sometimes desirable to store instances of a type that is externally
+ * defined and cannot be annotated or does not have a default constructor; for
+ * example, a class defined in the Java standard libraries or a third-party
+ * library.  In this case, a {@link PersistentProxy} class may be used to
+ * represent the stored values for the externally defined type.  The proxy
+ * class itself must be annotated with {@link Persistent} like other persistent
+ * classes, and the {@link Persistent#proxyFor} property must be specified.</p>
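+ *
+ * <p>For example, the following sketch shows how {@code java.util.Locale}
+ * might be proxied, assuming no built-in proxy existed for it:</p>
+ * <pre class="code">
+ *  {@literal @Persistent(proxyFor=Locale.class)}
+ *  class LocaleProxy implements {@literal PersistentProxy<Locale>} {
+ *
+ *      String language;
+ *      String country;
+ *      String variant;
+ *
+ *      private LocaleProxy() {}
+ *
+ *      public void initializeProxy(Locale object) {
+ *          language = object.getLanguage();
+ *          country = object.getCountry();
+ *          variant = object.getVariant();
+ *      }
+ *
+ *      public Locale convertProxy() {
+ *          return new Locale(language, country, variant);
+ *      }
+ *  }</pre>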
+ *
+ * <p>For convenience, built-in proxy classes are included for several common
+ * classes (listed below) in the Java library.  If you wish, you may define
+ * your own {@link PersistentProxy} to override these built-in proxies.</p>
+ * <ul>
+ * <li>{@link java.util.HashSet}</li>
+ * <li>{@link java.util.TreeSet}</li>
+ * <li>{@link java.util.HashMap}</li>
+ * <li>{@link java.util.TreeMap}</li>
+ * <li>{@link java.util.ArrayList}</li>
+ * <li>{@link java.util.LinkedList}</li>
+ * </ul>
+ *
+ * <p>Complex persistent types should in general be application-defined
+ * classes.  This gives the application control over the persistent state and
+ * its evolution over time.</p>
+ *
+ * <p><strong>Other Type Restrictions</strong></p>
+ *
+ * <p>Entity classes and subclasses may not be used in field declarations for
+ * persistent types.  Fields of entity classes and subclasses must be simple
+ * types or non-entity persistent types (annotated with {@link Persistent} not
+ * with {@link Entity}).</p>
+ *
+ * <p>Entity classes, subclasses and superclasses may be {@code abstract} and
+ * may implement arbitrary interfaces.  Interfaces do not need to be annotated
+ * with {@link Persistent} in order to be used in a persistent class, since
+ * interfaces do not contain instance fields.</p>
+ *
+ * <p>Persistent instances of static nested classes are allowed, but the nested
+ * class must be annotated with {@link Persistent} or {@link Entity}.  Inner
+ * classes (non-static nested classes, including anonymous classes) are not
+ * currently allowed as persistent types.</p>
+ *
+ * <p>Arrays of simple and persistent complex types are allowed as fields of
+ * persistent types.  Arrays may be multidimensional.  However, an array may
+ * not be stored as a top level instance in a primary index.  Only instances of
+ * entity classes and subclasses may be top level instances in a primary
+ * index.</p>
+ *
+ * <p><strong>Embedded Objects</strong></p>
+ *
+ * <p>As stated above, the embedded (or member) non-transient non-static fields
+ * of an entity class are themselves persistent and are stored along with their
+ * parent entity object.  This allows embedded objects to be stored in an
+ * entity to an arbitrary depth.</p>
+ *
+ * <p>There is no arbitrary limit to the nesting depth of embedded objects
+ * within an entity; however, there is a practical limit.  When an entity is
+ * marshalled, each level of nesting is implemented internally via recursive
+ * method calls.  If the nesting depth is large enough, a {@code
+ * StackOverflowError} can occur.  In practice, this has been observed with a
+ * nesting depth of 12,000, using the default Java stack size.</p>
+ *
+ * <p>This restriction on the nesting depth of embedded objects does not apply
+ * to cyclic references, since these are handled specially as described
+ * below.</p>
+ *
+ * <p><strong>Object Graphs</strong></p>
+ *
+ * <p>When an entity instance is stored, the graph of objects referenced via
+ * its fields is stored and retrieved as a graph.  In other words, if a single
+ * instance is referenced by two or more fields when the entity is stored, the
+ * same will be true when the entity is retrieved.</p>
+ *
+ * <p>When a reference to a particular object is stored as a member field
+ * inside that object or one of its embedded objects, this is called a cyclic
+ * reference.  Because multiple references to a single object are stored as
+ * such, cycles are also represented correctly and do not cause infinite
+ * recursion or infinite processing loops.  If an entity containing a cyclic
+ * reference is stored, the cyclic reference will be present when the entity is
+ * retrieved.</p>
+ *
+ * <p>Note that the stored object graph is restricted in scope to a single
+ * entity instance.  This is because each entity instance is stored separately.
+ * If two entities have a reference to the same object when stored, they will
+ * refer to two separate instances when the entities are retrieved.</p>
+ *
+ * @see Persistent
+ * @see PrimaryKey
+ * @see SecondaryKey
+ * @see KeyField
+ *
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(TYPE)
+public @interface Entity {
+
+    /**
+     * Identifies a new version of a class when an incompatible class change
+     * has been made.  Prior versions of a class are referred to by version
+     * number to perform class evolution and conversion using {@link
+     * Mutations}.
+     *
+     * <p>The first version of a class is version zero, which is assumed when
+     * {@link #version} is not specified.  When an incompatible class change
+     * is made, a version
+     * number must be assigned using {@link #version} that is higher than the
+     * previous version number for the class.  If this is not done, an {@link
+     * IncompatibleClassException} will be thrown when the store is opened.</p>
+     */
+    int version() default 0;
+}
diff --git a/src/com/sleepycat/persist/model/EntityMetadata.java b/src/com/sleepycat/persist/model/EntityMetadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..dcc8453df6535216aa49aa0ffb431e080825f49c
--- /dev/null
+++ b/src/com/sleepycat/persist/model/EntityMetadata.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityMetadata.java,v 1.13.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.io.Serializable;
+import java.util.Map;
+
+/**
+ * The metadata for a persistent entity class.  An entity class may be
+ * specified with the {@link Entity} annotation.
+ *
+ * <p>{@code EntityMetadata} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code EntityMetadata} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class EntityMetadata implements Serializable {
+
+    private static final long serialVersionUID = 4224509631681963159L;
+
+    private String className;
+    private PrimaryKeyMetadata primaryKey;
+    private Map<String,SecondaryKeyMetadata> secondaryKeys;
+
+    /**
+     * Used by an {@code EntityModel} to construct entity metadata.
+     */
+    public EntityMetadata(String className,
+                          PrimaryKeyMetadata primaryKey,
+                          Map<String,SecondaryKeyMetadata> secondaryKeys) {
+        this.className = className;
+        this.primaryKey = primaryKey;
+        this.secondaryKeys = secondaryKeys;
+    }
+
+    /**
+     * Returns the name of the entity class.
+     */
+    public String getClassName() {
+        return className;
+    }
+
+    /**
+     * Returns the primary key metadata for this entity.  Note that the primary
+     * key field may be declared in this class or in a subclass. This metadata
+     * may be specified using the {@link PrimaryKey} annotation.
+     */
+    public PrimaryKeyMetadata getPrimaryKey() {
+        return primaryKey;
+    }
+
+    /**
+     * Returns an unmodifiable map of key name to secondary key metadata, or
+     * an empty map if no secondary keys are defined for this entity.  The
+     * returned map contains a mapping for each secondary key of this entity,
+     * including secondary keys declared in subclasses and superclasses.  This
+     * metadata may be specified using {@link SecondaryKey} annotations.
+     */
+    public Map<String,SecondaryKeyMetadata> getSecondaryKeys() {
+        return secondaryKeys;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof EntityMetadata) {
+            EntityMetadata o = (EntityMetadata) other;
+            return ClassMetadata.nullOrEqual(className, o.className) &&
+                   ClassMetadata.nullOrEqual(primaryKey, o.primaryKey) &&
+                   ClassMetadata.nullOrEqual(secondaryKeys, o.secondaryKeys);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return ClassMetadata.hashCode(className) +
+               ClassMetadata.hashCode(primaryKey) +
+               ClassMetadata.hashCode(secondaryKeys);
+    }
+}
diff --git a/src/com/sleepycat/persist/model/EntityModel.java b/src/com/sleepycat/persist/model/EntityModel.java
new file mode 100644
index 0000000000000000000000000000000000000000..d17037cd2ba00750193ef986996dfcaa8e96db15
--- /dev/null
+++ b/src/com/sleepycat/persist/model/EntityModel.java
@@ -0,0 +1,261 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntityModel.java,v 1.20.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.impl.PersistCatalog;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * The base class for classes that provide entity model metadata.  An {@link
+ * EntityModel} defines entity classes, primary keys, secondary keys, and
+ * relationships between entities.  For each entity class that is part of the
+ * model, a single {@link PrimaryIndex} object and zero or more {@link
+ * SecondaryIndex} objects may be accessed via an {@link EntityStore}.
+ *
+ * <p>The built-in entity model, the {@link AnnotationModel}, is based on
+ * annotations that are added to entity classes and their key fields.
+ * Annotations are used in the examples in this package, and it is expected
+ * that annotations will normally be used; most readers should therefore skip
+ * to the {@link AnnotationModel} class.  However, a custom entity model class
+ * may define its own metadata.  This can be used to define entity classes and
+ * keys using mechanisms other than annotations.</p>
+ *
+ * <p>A concrete entity model class should extend this class and implement the
+ * {@link #getClassMetadata}, {@link #getEntityMetadata} and {@link
+ * #getKnownClasses} methods.</p>
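+ *
+ * <p>As a rough sketch, a custom model might hold hand-built metadata in maps
+ * keyed by class name.  The {@code MapModel} class and its maps are
+ * hypothetical, and a real model must populate them with complete
+ * metadata:</p>
+ * <pre class="code">
+ *  class MapModel extends EntityModel {
+ *
+ *      {@literal private final Map<String,ClassMetadata> classMap =}
+ *          {@literal new HashMap<String,ClassMetadata>();}
+ *      {@literal private final Map<String,EntityMetadata> entityMap =}
+ *          {@literal new HashMap<String,EntityMetadata>();}
+ *
+ *      public ClassMetadata getClassMetadata(String className) {
+ *          return classMap.get(className);
+ *      }
+ *
+ *      public EntityMetadata getEntityMetadata(String className) {
+ *          return entityMap.get(className);
+ *      }
+ *
+ *      {@literal public Set<String> getKnownClasses()} {
+ *          return Collections.unmodifiableSet(classMap.keySet());
+ *      }
+ *  }</pre>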
+ *
+ * <p>This is an abstract class rather than an interface to allow adding
+ * capabilities to the model at a future date without causing
+ * incompatibilities.  For example, a method may be added in the future for
+ * returning new information about the model and subclasses may override this
+ * method to return the new information.  Any new methods will have default
+ * implementations that return default values, and the use of the new
+ * information will be optional.</p>
+ *
+ * @author Mark Hayes
+ */
+public abstract class EntityModel {
+
+    private PersistCatalog catalog;
+
+    /**
+     * The default constructor for use by subclasses.
+     */
+    protected EntityModel() {
+    }
+
+    /**
+     * Returns whether the model is associated with an open store.
+     *
+     * <p>The {@link #registerClass} method may only be called when the model
+     * is not yet open.  Certain other methods may only be called when the
+     * model is open:</p>
+     * <ul>
+     * <li>{@link #convertRawObject}</li>
+     * <li>{@link #getAllRawTypeVersions}</li>
+     * <li>{@link #getRawType}</li>
+     * <li>{@link #getRawTypeVersion}</li>
+     * </ul>
+     */
+    public final boolean isOpen() {
+        return catalog != null;
+    }
+
+    /**
+     * Registers a persistent class, most importantly, a {@link
+     * PersistentProxy} class.  Any persistent class may be registered in
+     * advance of using it, to avoid the overhead of updating the catalog
+     * database when an instance of the class is first stored.  This method
+     * <em>must</em> be called to register {@link PersistentProxy} classes.
+     * This method must be called before opening a store based on this model.
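+     *
+     * <p>For example, assuming a hypothetical proxy class {@code MyProxy}:</p>
+     * <pre class="code">
+     *  EntityModel model = new AnnotationModel();
+     *  model.registerClass(MyProxy.class);
+     *
+     *  StoreConfig config = new StoreConfig();
+     *  config.setModel(model);
+     *  EntityStore store = new EntityStore(env, "myStore", config);</pre>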
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is associated with an open store.
+     *
+     * @throws IllegalArgumentException if the given class is not persistent
+     * or has a different class loader than previously registered classes.
+     */
+    public final void registerClass(Class persistentClass) {
+        if (catalog != null) {
+            throw new IllegalStateException("Store is already open");
+        } else {
+            String className = persistentClass.getName();
+            ClassMetadata meta = getClassMetadata(className);
+            if (meta == null) {
+                throw new IllegalArgumentException
+                    ("Class is not persistent: " + className);
+            }
+        }
+    }
+
+    /**
+     * Gives this model access to the catalog, which is used for returning
+     * raw type information.
+     */
+    void setCatalog(PersistCatalog catalog) {
+        this.catalog = catalog;
+    }
+
+    /**
+     * Returns the metadata for a given persistent class name, including proxy
+     * classes and entity classes.
+     *
+     * @return the metadata or null if the class is not persistent or does not
+     * exist.
+     */
+    public abstract ClassMetadata getClassMetadata(String className);
+
+    /**
+     * Returns the metadata for a given entity class name.
+     *
+     * @return the metadata or null if the class is not an entity class or does
+     * not exist.
+     */
+    public abstract EntityMetadata getEntityMetadata(String className);
+
+    /**
+     * Returns the names of all known persistent classes.  A type becomes
+     * known when an instance of the type is stored for the first time, or
+     * when metadata or type information is queried for a specific class name.
+     *
+     * @return an unmodifiable set of class names.
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is not associated with an open store.
+     */
+    public abstract Set<String> getKnownClasses();
+
+    /**
+     * Returns the type information for the current version of a given class,
+     * or null if the class is not currently persistent.
+     *
+     * @param className the name of the current version of the class.
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is not associated with an open store.
+     */
+    public final RawType getRawType(String className) {
+        if (catalog != null) {
+            return catalog.getFormat(className);
+        } else {
+            throw new IllegalStateException("Store is not open");
+        }
+    }
+
+    /**
+     * Returns the type information for a given version of a given class,
+     * or null if the given version of the class is unknown.
+     *
+     * @param className the name of the latest version of the class.
+     *
+     * @param version the desired version of the class.
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is not associated with an open store.
+     */
+    public final RawType getRawTypeVersion(String className, int version) {
+        if (catalog != null) {
+            Format format = catalog.getLatestVersion(className);
+            while (format != null) {
+                if (version == format.getVersion()) {
+                    return format;
+                }
+                /* Step back to the prior version; without this the loop
+                   would never terminate. */
+                format = format.getPreviousVersion();
+            }
+            return null;
+        } else {
+            throw new IllegalStateException("Store is not open");
+        }
+    }
+
+    /**
+     * Returns all known versions of type information for a given class name,
+     * or null if no persistent version of the class is known.
+     *
+     * @param className the name of the latest version of the class.
+     *
+     * @return an unmodifiable list of types for the given class name in order
+     * from most recent to least recent.
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is not associated with an open store.
+     */
+    public final List<RawType> getAllRawTypeVersions(String className) {
+        if (catalog != null) {
+            Format format = catalog.getLatestVersion(className);
+            if (format != null) {
+                List<RawType> list = new ArrayList<RawType>();
+                while (format != null) {
+                    list.add(format);
+                    format = format.getPreviousVersion();
+                }
+                return Collections.unmodifiableList(list);
+            } else {
+                return null;
+            }
+        } else {
+            throw new IllegalStateException("Store is not open");
+        }
+    }
+
+    /**
+     * Returns all versions of all known types.
+     *
+     * @return an unmodifiable list of types.
+     *
+     * @throws IllegalStateException if this method is called for a model that
+     * is not associated with an open store.
+     */
+    public final List<RawType> getAllRawTypes() {
+        if (catalog != null) {
+            return catalog.getAllRawTypes();
+        } else {
+            throw new IllegalStateException("Store is not open");
+        }
+    }
+
+    /**
+     * Converts a given raw object to a live object according to the current
+     * class definitions.
+     *
+     * <p>The given raw object must conform to the current class definitions.
+     * However, the raw type ({@link RawObject#getType}) is allowed to be from
+     * a different store, as long as the class names and the value types match.
+     * This allows converting raw objects that are read from one store to live
+     * objects in another store, for example, in a conversion program.</p>
+     */
+    public final Object convertRawObject(RawObject raw) {
+        return catalog.convertRawObject(raw, null);
+    }
+
+    /**
+     * Calls Class.forName with the current thread context class loader.  This
+     * method should be called by entity model implementations instead of
+     * calling Class.forName whenever loading an application class.
+     */
+    public static Class classForName(String className)
+        throws ClassNotFoundException {
+
+        try {
+            return Class.forName
+                (className, true /*initialize*/,
+                 Thread.currentThread().getContextClassLoader());
+        } catch (ClassNotFoundException e) {
+            return Class.forName(className);
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/model/FieldMetadata.java b/src/com/sleepycat/persist/model/FieldMetadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..7018da6dbe47b71ac26ea9086416ac69d4eb9132
--- /dev/null
+++ b/src/com/sleepycat/persist/model/FieldMetadata.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FieldMetadata.java,v 1.13.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import java.io.Serializable;
+
+/**
+ * The metadata for a key field.  This class defines common properties for
+ * singular and composite key fields.
+ *
+ * <p>{@code FieldMetadata} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code FieldMetadata} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class FieldMetadata implements Serializable {
+
+    private static final long serialVersionUID = -9037650229184174279L;
+
+    private String name;
+    private String className;
+    private String declaringClassName;
+
+    /**
+     * Used by an {@code EntityModel} to construct field metadata.
+     */
+    public FieldMetadata(String name,
+                         String className,
+                         String declaringClassName) {
+        this.name = name;
+        this.className = className;
+        this.declaringClassName = declaringClassName;
+    }
+
+    /**
+     * Returns the field name.
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Returns the class name of the field type.
+     */
+    public String getClassName() {
+        return className;
+    }
+
+    /**
+     * Returns the name of the class where the field is declared.
+     */
+    public String getDeclaringClassName() {
+        return declaringClassName;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof FieldMetadata) {
+            FieldMetadata o = (FieldMetadata) other;
+            return ClassMetadata.nullOrEqual(name, o.name) &&
+                   ClassMetadata.nullOrEqual(className, o.className) &&
+                   ClassMetadata.nullOrEqual(declaringClassName,
+                                             o.declaringClassName);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return ClassMetadata.hashCode(name) +
+               ClassMetadata.hashCode(className) +
+               ClassMetadata.hashCode(declaringClassName);
+    }
+
+    @Override
+    public String toString() {
+        return "[FieldMetadata name: " + name + " className: " + className +
+               " declaringClassName: " + declaringClassName + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/model/KeyField.java b/src/com/sleepycat/persist/model/KeyField.java
new file mode 100644
index 0000000000000000000000000000000000000000..3072cd0f65338c9645917edf12d38e313d1cc794
--- /dev/null
+++ b/src/com/sleepycat/persist/model/KeyField.java
@@ -0,0 +1,132 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyField.java,v 1.12.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import com.sleepycat.je.Environment;
+
+/**
+ * Indicates the sorting position of a key field in a composite key class when
+ * the {@code Comparable} interface is not implemented.  The {@code KeyField}
+ * integer value specifies the sort order of this field within the set of
+ * fields in the composite key.
+ *
+ * <p>If the field type of a {@link PrimaryKey} or {@link SecondaryKey} is a
+ * composite key class containing more than one key field, then a {@code
+ * KeyField} annotation must be present on each non-transient instance field of
+ * the composite key class.  The {@code KeyField} value must be a number
+ * between one and the number of non-transient instance fields declared in the
+ * composite key class.</p>
+ *
+ * <p>Note that a composite key class is a flat container for one or more
+ * simple type fields.  All non-transient instance fields in the composite key
+ * class are key fields, and its superclass must be {@code Object}.</p>
+ *
+ * <p>For example:</p>
+ * <pre class="code">
+ *  {@literal @Entity}
+ *  class Animal {
+ *      {@literal @PrimaryKey}
+ *      Classification classification;
+ *      ...
+ *  }
+ *
+ *  {@literal @Persistent}
+ *  class Classification {
+ *      {@literal @KeyField(1) String kingdom;}
+ *      {@literal @KeyField(2) String phylum;}
+ *      {@literal @KeyField(3) String clazz;}
+ *      {@literal @KeyField(4) String order;}
+ *      {@literal @KeyField(5) String family;}
+ *      {@literal @KeyField(6) String genus;}
+ *      {@literal @KeyField(7) String species;}
+ *      {@literal @KeyField(8) String subspecies;}
+ *      ...
+ *  }</pre>
+ *
+ * <p>This causes entities to be sorted first by {@code kingdom}, then by
+ * {@code phylum} within {@code kingdom}, and so on.</p>
+ *
+ * <p>The fields in a composite key class may not be null.</p>
+ *
+ * <p><a name="comparable"><strong>Custom Sort Order</strong></a></p>
+ *
+ * <p>To override the default sort order, a composite key class may implement
+ * the {@link Comparable} interface.  This allows overriding the sort order and
+ * is therefore useful even when there is only one key field in the composite
+ * key class.  For example, the following class sorts Strings using a Canadian
+ * collator:</p>
+ *
+ * <pre class="code">
+ *  import java.text.Collator;
+ *  import java.util.Locale;
+ *
+ *  {@literal @Entity}
+ *  class Animal {
+ *      ...
+ *      {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+ *      CollatedString canadianName;
+ *      ...
+ *  }
+ *
+ *  {@literal @Persistent}
+ *  {@literal class CollatedString implements Comparable<CollatedString>} {
+ *
+ *      static Collator collator = Collator.getInstance(Locale.CANADA);
+ *
+ *      {@literal @KeyField(1)}
+ *      String value;
+ *
+ *      CollatedString(String value) { this.value = value; }
+ *
+ *      private CollatedString() {}
+ *
+ *      public int compareTo(CollatedString o) {
+ *          return collator.compare(value, o.value);
+ *      }
+ *  }</pre>
+ *
+ * <p>Several important rules should be considered when implementing a custom
+ * comparison method.  Failure to follow these rules may result in the primary
+ * or secondary index becoming unusable; in other words, the store will not be
+ * able to function.</p>
+ * <ol>
+ * <li>The comparison method must always return the same result, given the same
+ * inputs.  The behavior of the comparison method must not change over
+ * time.</li>
+ * <br>
+ * <li>A corollary to the first rule is that the behavior of the comparison
+ * method must not depend on state which may change over time.  For example,
+ * if the above collation method used the default Java locale and the default
+ * locale were later changed, the sort order would change (see the sketch
+ * following this list).</li>
+ * <br>
+ * <li>The comparison method must not assume that it is called after the store
+ * has been opened.  With Berkeley DB Java Edition, the comparison method is
+ * called during database recovery, which occurs in the {@link Environment}
+ * constructor.</li>
+ * <br>
+ * <li>The comparison method must not assume that it will only be called with
+ * keys that are currently present in the database.  The comparison method will
+ * occasionally be called with deleted keys or with keys for records that were
+ * not part of a committed transaction.</li>
+ * </ol>
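+ *
+ * <p>As a hypothetical illustration of the second rule, the following
+ * variation of the {@code CollatedString.compareTo} method shown above would
+ * be unsafe, because the sort order would depend on the JVM's default locale
+ * at the time of each comparison rather than on a fixed collator:</p>
+ * <pre class="code">
+ *      public int compareTo(CollatedString o) {
+ *          // Unsafe: Collator.getInstance() uses the default locale, which is
+ *          // mutable global state and may change between comparisons.
+ *          return Collator.getInstance().compare(value, o.value);
+ *      }</pre>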
+ *
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(FIELD)
+public @interface KeyField {
+
+    int value();
+}
diff --git a/src/com/sleepycat/persist/model/ModelInternal.java b/src/com/sleepycat/persist/model/ModelInternal.java
new file mode 100644
index 0000000000000000000000000000000000000000..8df141e39c47e3d97dad1e1aedd31463a49d5cef
--- /dev/null
+++ b/src/com/sleepycat/persist/model/ModelInternal.java
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ModelInternal.java,v 1.9.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import com.sleepycat.persist.impl.PersistCatalog;
+
+/**
+ * <!-- begin JE only -->
+ * @hidden
+ * <!-- end JE only -->
+ * Internal access class that should not be used by applications.
+ *
+ * @author Mark Hayes
+ */
+public class ModelInternal {
+
+    /**
+     * Internal access method that should not be used by applications.
+     */
+    public static void setCatalog(EntityModel model, PersistCatalog catalog) {
+        model.setCatalog(catalog);
+    }
+}
diff --git a/src/com/sleepycat/persist/model/NotPersistent.java b/src/com/sleepycat/persist/model/NotPersistent.java
new file mode 100644
index 0000000000000000000000000000000000000000..185fadf0dbf9ca34e5b90ca011117a0e02040b89
--- /dev/null
+++ b/src/com/sleepycat/persist/model/NotPersistent.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NotPersistent.java,v 1.1.2.3 2010/03/22 21:53:33 mark Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+/**
+ * Overrides the default rules for field persistence and defines a field as
+ * being non-persistent even when it is not declared with the
+ * <code>transient</code> keyword.
+ *
+ * <p>By default, the persistent fields of a class are all declared instance
+ * fields that are non-transient (are not declared with the
+ * <code>transient</code> keyword).  The default rules may be overridden by
+ * specifying the {@link NotPersistent} or {@link NotTransient} annotation.</p>
+ *
+ * <p>For example, the following field is non-transient (persistent) with
+ * respect to Java serialization but is transient with respect to the DPL.</p>
+ *
+ * <pre class="code">
+ *      {@code @NotPersistent}
+ *      int myField;
+ * </pre>
+ *
+ * @see NotTransient
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(FIELD)
+public @interface NotPersistent {
+}
diff --git a/src/com/sleepycat/persist/model/NotTransient.java b/src/com/sleepycat/persist/model/NotTransient.java
new file mode 100644
index 0000000000000000000000000000000000000000..e4822764e750f4bb5174d25ddf089fb88900aa9c
--- /dev/null
+++ b/src/com/sleepycat/persist/model/NotTransient.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NotTransient.java,v 1.1.2.3 2010/03/22 21:53:33 mark Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+/**
+ * Overrides the default rules for field persistence and defines a field as
+ * being persistent even when it is declared with the <code>transient</code>
+ * keyword.
+ *
+ * <p>By default, the persistent fields of a class are all declared instance
+ * fields that are non-transient (are not declared with the
+ * <code>transient</code> keyword).  The default rules may be overridden by
+ * specifying the {@link NotPersistent} or {@link NotTransient} annotation.</p>
+ *
+ * <p>For example, the following field is transient with respect to Java
+ * serialization but is persistent with respect to the DPL.</p>
+ *
+ * <pre class="code">
+ *      {@code @NotTransient}
+ *      transient int myField;
+ * </pre>
+ *
+ * @see NotPersistent
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(FIELD)
+public @interface NotTransient {
+}
diff --git a/src/com/sleepycat/persist/model/Persistent.java b/src/com/sleepycat/persist/model/Persistent.java
new file mode 100644
index 0000000000000000000000000000000000000000..123757d9d83710e2e1498e097ca2f0f6ebe8c904
--- /dev/null
+++ b/src/com/sleepycat/persist/model/Persistent.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Persistent.java,v 1.8.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+/**
+ * Identifies a persistent class that is not an {@link Entity} class or a
+ * {@link <a href="Entity.html#simpleTypes">simple type</a>}.
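+ *
+ * <p>For example, a sketch of a persistent value class (mirroring the
+ * {@code Address} class used in the package documentation):</p>
+ * <pre class="code">
+ *  {@literal @Persistent}
+ *  class Address {
+ *      String street;
+ *      String city;
+ *      String state;
+ *      int zipCode;
+ *      private Address() {} // For bindings
+ *  }</pre>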
+ *
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(TYPE)
+public @interface Persistent {
+
+    /**
+     * Identifies a new version of a class when an incompatible class change
+     * has been made.
+     *
+     * @see Entity#version
+     */
+    int version() default 0;
+
+    /**
+     * Specifies the class that is proxied by this {@link PersistentProxy}
+     * instance.
+     *
+     * @see PersistentProxy
+     */
+    Class proxyFor() default void.class;
+}
diff --git a/src/com/sleepycat/persist/model/PersistentProxy.java b/src/com/sleepycat/persist/model/PersistentProxy.java
new file mode 100644
index 0000000000000000000000000000000000000000..2fbad21ad5d4a9de1454261df5327cd0d7a0554f
--- /dev/null
+++ b/src/com/sleepycat/persist/model/PersistentProxy.java
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PersistentProxy.java,v 1.17.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import com.sleepycat.persist.evolve.Converter; // for javadoc
+import com.sleepycat.persist.raw.RawStore; // for javadoc
+
+/**
+ * Implemented by a proxy class to represent the persistent state of a
+ * (non-persistent) proxied class.  Normally classes that are outside the scope
+ * of the developer's control must be proxied since they cannot be annotated,
+ * and because it is desirable to insulate the stored format from changes to
+ * the instance fields of the proxied class.  This is useful for classes in the
+ * standard Java libraries, for example.
+ *
+ * <p>{@code PersistentProxy} objects are not required to be thread-safe.  A
+ * single thread will create and call the methods of a given {@code
+ * PersistentProxy} object.</p>
+ *
+ * <p>There are three requirements for a proxy class:</p>
+ * <ol>
+ * <li>It must implement the <code>PersistentProxy</code> interface.</li>
+ * <li>It must be specified as a persistent proxy class in the entity model.
+ * When using the {@link AnnotationModel}, a proxy class is indicated by the
+ * {@link Persistent} annotation with the {@link Persistent#proxyFor}
+ * property.</li>
+ * <li>It must be explicitly registered by calling {@link
+ * EntityModel#registerClass} before opening the store.</li>
+ * </ol>
+ *
+ * <p>In order to serialize an instance of the proxied class before it is
+ * stored, an instance of the proxy class is created.  The proxied instance is
+ * then passed to the proxy's {@link #initializeProxy initializeProxy} method.
+ * When this method returns, the proxy instance contains the state of the
+ * proxied instance.  The proxy instance is then serialized and stored in the
+ * same way as for any persistent object.</p>
+ *
+ * <p>When an instance of the proxy object is deserialized after it is
+ * retrieved from storage, its {@link #convertProxy} method is called.  The
+ * instance of the proxied class returned by this method is then returned as a
+ * field in the persistent instance.</p>
+ *
+ * <p>For example:</p>
+ * <pre class="code">
+ *  import java.util.Locale;
+ *
+ *  {@literal @Persistent(proxyFor=Locale.class)}
+ *  class LocaleProxy implements {@literal PersistentProxy<Locale>} {
+ *
+ *      String language;
+ *      String country;
+ *      String variant;
+ *
+ *      private LocaleProxy() {}
+ *
+ *      public void initializeProxy(Locale object) {
+ *          language = object.getLanguage();
+ *          country = object.getCountry();
+ *          variant = object.getVariant();
+ *      }
+ *
+ *      public Locale convertProxy() {
+ *          return new Locale(language, country, variant);
+ *      }
+ *  }</pre>
+ *
+ * <p>The above definition allows the {@code Locale} class to be used in any
+ * persistent class, for example:</p>
+ * <pre class="code">
+ *  {@literal @Persistent}
+ *  class LocalizedText {
+ *      String text;
+ *      Locale locale;
+ *  }</pre>
+ *
+ * <p>A proxy for proxied class P does not handle instances of subclasses of P.
+ * To proxy a subclass of P, a separate proxy class is needed.</p>
+ *
+ * <p>Several {@link <a href="Entity.html#proxyTypes">built-in proxy types</a>}
+ * are used implicitly.  An application-defined proxy will be used instead of a
+ * built-in proxy, if both exist for the same proxied class.</p>
+ *
+ * <p>With respect to class evolution, a proxy instance is no different than
+ * any other persistent instance.  When using a {@link RawStore} or {@link
+ * Converter}, only the raw data of the proxy instance will be visible.  Raw
+ * data for the proxied instance never exists.</p>
+ *
+ * <p>Currently a proxied object may not contain a reference to itself.  For
+ * simple proxied objects such as the Locale class shown above, this naturally
+ * won't occur.  But for proxied objects that are containers -- the built-in
+ * Collection and Map classes for example -- this can occur if the container is
+ * added as an element of itself.  This should be avoided.  If an attempt to
+ * store such an object is made, an {@code IllegalArgumentException} will be
+ * thrown.</p>
+ *
+ * <p>Note that a proxy class may not be a subclass of an entity class.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface PersistentProxy<T> {
+
+    /**
+     * Copies the state of a given proxied class instance to this proxy
+     * instance.
+     */
+    void initializeProxy(T object);
+
+    /**
+     * Returns a new proxied class instance to which the state of this proxy
+     * instance has been copied.
+     */
+    T convertProxy();
+}
diff --git a/src/com/sleepycat/persist/model/PrimaryKey.java b/src/com/sleepycat/persist/model/PrimaryKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..e3f5ab8c0a1ba62f48019143754ea412a5e92b92
--- /dev/null
+++ b/src/com/sleepycat/persist/model/PrimaryKey.java
@@ -0,0 +1,174 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrimaryKey.java,v 1.15.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+
+/**
+ * Indicates the primary key field of an entity class.  The value of the
+ * primary key field is the unique identifier for the entity in a {@link
+ * PrimaryIndex}.
+ *
+ * <p>{@link PrimaryKey} may appear on at most one declared field per
+ * class.</p>
+ *
+ * <p>Primary key values may be automatically assigned as sequential integers
+ * using a {@link #sequence}.  In this case the type of the key field is
+ * restricted to a simple integer type.</p>
+ *
+ * <p>A primary key field may not be null, unless it is being assigned from a
+ * sequence.</p>
+ *
+ * <p><a name="keyTypes"><strong>Key Field Types</strong></a></p>
+ *
+ * <p>The type of a key field must be one of the following:</p>
+ * <ul>
+ * <li>Any of the {@link <a href="Entity.html#simpleTypes">simple
+ * types</a>}.</li>
+ * <li>A composite key class containing one or more simple type fields.</li>
+ * </ul>
+ * <p>Enum types and array types are not allowed.</p>
+ *
+ * <p>When using a composite key class, each field of the composite key class
+ * must be annotated with {@link KeyField} to identify the storage order and
+ * default sort order.  See {@link KeyField} for an example and more
+ * information on composite keys.</p>
+ *
+ * <p><a name="sortOrder"><strong>Key Sort Order</strong></a></p>
+ *
+ * <p>Key field types, being simple types, have a well defined and reasonable
+ * default sort order, described below.  This sort order is based on a storage
+ * encoding that allows a fast byte-by-byte comparison.</p>
+ * <ul>
+ * <li>All simple types except for {@code String} are encoded so that they are
+ * sorted as expected, that is, as if the {@link Comparable#compareTo} method
+ * of their class (or, for primitives, their wrapper class) is called.</li>
+ * <br>
+ * <li>Strings are encoded as UTF-8 byte arrays.  Zero (0x0000) character
+ * values are UTF encoded as non-zero values, and therefore embedded zeros in
+ * the string are supported.  The sequence {@literal {0xC0,0x80}} is used to
+ * encode a zero character.  This UTF encoding is the same one used by native
+ * Java UTF libraries.  However, this encoding of zero does impact the
+ * lexicographical ordering, and zeros will not be sorted first (the natural
+ * order) or last.  For all character values other than zero, the default UTF
+ * byte ordering is the same as the Unicode lexicographical character
+ * ordering.</li>
+ * </ul>
+ *
+ * <p>When using a composite key class with more than one field, the sorting
+ * order among fields is determined by the {@link KeyField} annotations.  To
+ * override the default sort order, you can use a composite key class that
+ * implements {@link Comparable}.  This allows overriding the sort order and is
+ * therefore useful even when there is only one key field in the composite key
+ * class.  See {@link <a href="KeyField.html#comparable">Custom Sort Order</a>}
+ * for more information on sorting of composite keys.</p>
+ *
+ * <p><a name="inherit"><strong>Inherited Primary Key</strong></a></p>
+ *
+ * <p>If it does not appear on a declared field in the entity class, {@code
+ * PrimaryKey} must appear on a field of an entity superclass.  In the
+ * following example, the primary key on the base class is used:</p>
+ *
+ * <pre class="code">
+ * {@literal @Persistent}
+ * class BaseClass {
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *     ...
+ * }
+ * {@literal @Entity}
+ * class Employee extends BaseClass {
+ *     // inherits id primary key
+ *     ...
+ * }</pre>
+ *
+ * <p>If more than one class with {@code PrimaryKey} is present in a class
+ * hierarchy, the key in the most derived class is used.  In this case, primary
+ * key fields in superclasses are "shadowed" and are not persistent.  In the
+ * following example, the primary key in the base class is not used and is not
+ * persistent:</p>
+ * <pre class="code">
+ * {@literal @Persistent}
+ * class BaseClass {
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *     ...
+ * }
+ * {@literal @Entity}
+ * class Employee extends BaseClass {
+ *     // overrides id primary key
+ *     {@literal @PrimaryKey}
+ *     String uuid;
+ *     ...
+ * }</pre>
+ *
+ * <p>Note that a {@code PrimaryKey} is not allowed on entity subclasses.  The
+ * following is illegal and will cause an {@code IllegalArgumentException} when
+ * trying to store an {@code Employee} instance:</p>
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Person {
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *     ...
+ * }
+ * {@literal @Persistent}
+ * class Employee extends Person {
+ *     {@literal @PrimaryKey}
+ *     String uuid;
+ *     ...
+ * }</pre>
+ *
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(FIELD)
+public @interface PrimaryKey {
+
+    /**
+     * The name of a sequence from which to assign primary key values
+     * automatically.  If a non-empty string is specified, sequential integers
+     * will be assigned from the named sequence.
+     *
+     * <p>A single sequence may be used for more than one entity class by
+     * specifying the same sequence name for each {@code PrimaryKey}.  For
+     * each named sequence, a {@link com.sleepycat.je.Sequence} will be used to
+     * assign key values.  For more information on configuring sequences, see
+     * {@link EntityStore#setSequenceConfig EntityStore.setSequenceConfig}.</p>
+     *
+     * <p>To use a sequence, the type of the key field must be a primitive
+     * integer type ({@code byte}, {@code short}, {@code int} or {@code long})
+     * or the primitive wrapper class for one of these types.  A composite key
+     * class may also be used to override sort order, but it may contain only a
+     * single key field that has one of the types previously mentioned.</p>
+     *
+     * <p>When an entity with a primary key sequence is stored using one of the
+     * <code>put</code> methods in the {@link PrimaryIndex}, a new key will be
+     * assigned if the primary key field in the entity instance is null (for a
+     * reference type) or zero (for a primitive integer type).  Specifying zero
+     * for a primitive integer key field is allowed because the initial value
+     * of the sequence is one (not zero) by default.  If the sequence
+     * configuration is changed such that zero is part of the sequence, then
+     * the field type must be a primitive wrapper class and the field value
+     * must be null to cause a new key to be assigned.</p>
+     *
+     * <p>When one of the <code>put</code> methods in the {@link PrimaryIndex}
+     * is called and a new key is assigned, the assigned value is returned to
+     * the caller via the key field of the entity object that is passed as a
+     * parameter.</p>
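+     *
+     * <p>For example, a minimal sketch (the {@code Employer} class and the
+     * {@code employerById} primary index are borrowed from the package
+     * documentation):</p>
+     * <pre class="code">
+     *  {@literal @Entity}
+     *  class Employer {
+     *      {@literal @PrimaryKey(sequence="ID")}
+     *      long id;   // zero means: assign from the sequence
+     *      ...
+     *  }
+     *
+     *  Employer employer = new Employer("Gizmo Inc"); // id is initially zero
+     *  employerById.put(employer);                    // a key is assigned from "ID"
+     *  long assignedId = employer.id;                 // the assigned value is now set</pre>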
+     */
+    String sequence() default "";
+}
diff --git a/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java b/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..3befb195bf390170825eb5d0d40e63856daa5bd3
--- /dev/null
+++ b/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PrimaryKeyMetadata.java,v 1.14.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+
+/**
+ * The metadata for a primary key field.  A primary key may be specified with
+ * the {@link PrimaryKey} annotation.
+ *
+ * <p>{@code PrimaryKeyMetadata} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code PrimaryKeyMetadata} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class PrimaryKeyMetadata extends FieldMetadata {
+
+    private static final long serialVersionUID = 2946863622972437018L;
+
+    private String sequenceName;
+
+    /**
+     * Used by an {@code EntityModel} to construct primary key metadata.
+     */
+    public PrimaryKeyMetadata(String name,
+                              String className,
+                              String declaringClassName,
+                              String sequenceName) {
+        super(name, className, declaringClassName);
+        this.sequenceName = sequenceName;
+    }
+
+    /**
+     * Returns the name of the sequence for assigning key values.  This may be
+     * specified using the {@link PrimaryKey#sequence} annotation.
+     */
+    public String getSequenceName() {
+        return sequenceName;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof PrimaryKeyMetadata) {
+            PrimaryKeyMetadata o = (PrimaryKeyMetadata) other;
+            return super.equals(o) &&
+                   ClassMetadata.nullOrEqual(sequenceName, o.sequenceName);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return super.hashCode() + ClassMetadata.hashCode(sequenceName);
+    }
+}
diff --git a/src/com/sleepycat/persist/model/Relationship.java b/src/com/sleepycat/persist/model/Relationship.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2ed8867030ac88871397ee3a390a01d9d5e3a3a
--- /dev/null
+++ b/src/com/sleepycat/persist/model/Relationship.java
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Relationship.java,v 1.6.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+/**
+ * Defines the relationship between instances of the entity class and the
+ * secondary keys.  This can be specified using a {@link SecondaryKey#relate}
+ * annotation.
+ *
+ * @author Mark Hayes
+ */
+public enum Relationship {
+
+    /**
+     * Relates many entities to one secondary key.
+     *
+     * <p>The secondary index will have non-unique keys; in other words,
+     * duplicates will be allowed.</p>
+     *
+     * <p>The secondary key field is singular; in other words, it may not be an
+     * array or collection type.</p>
+     */
+    MANY_TO_ONE,
+
+    /**
+     * Relates one entity to many secondary keys.
+     *
+     * <p>The secondary index will have unique keys; in other words, duplicates
+     * will not be allowed.</p>
+     *
+     * <p>The secondary key field must be an array or collection type.</p>
+     */
+    ONE_TO_MANY,
+
+    /**
+     * Relates many entities to many secondary keys.
+     *
+     * <p>The secondary index will have non-unique keys; in other words,
+     * duplicates will be allowed.</p>
+     *
+     * <p>The secondary key field must be an array or collection type.</p>
+     */
+    MANY_TO_MANY,
+
+    /**
+     * Relates one entity to one secondary key.
+     *
+     * <p>The secondary index will have unique keys; in other words, duplicates
+     * will not be allowed.</p>
+     *
+     * <p>The secondary key field is singular; in other words, it may not be an
+     * array or collection type.</p>
+     */
+    ONE_TO_ONE;
+}
diff --git a/src/com/sleepycat/persist/model/SecondaryKey.java b/src/com/sleepycat/persist/model/SecondaryKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..4dcd230f2148759330816b0f4571e1e17f51fabf
--- /dev/null
+++ b/src/com/sleepycat/persist/model/SecondaryKey.java
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryKey.java,v 1.12.2.2 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+import static java.lang.annotation.ElementType.FIELD;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex; // for javadoc
+import com.sleepycat.persist.StoreConfig;
+
+/**
+ * Indicates a secondary key field of an entity class.  The value of the
+ * secondary key field is a unique or non-unique identifier for the entity and
+ * is accessed via a {@link SecondaryIndex}.
+ *
+ * <p>{@code SecondaryKey} may appear on any number of fields in an entity
+ * class, its subclasses, and its superclasses.  For a secondary key field in the entity
+ * class or one of its superclasses, all entity instances will be indexed by
+ * that field (if it is non-null).  For a secondary key field in an entity
+ * subclass, only instances of that subclass will be indexed by that field (if
+ * it is non-null).</p>
+ *
+ * <p>If a secondary key field is null, the entity will not be indexed by that
+ * key.  In other words, the entity cannot be queried by that secondary key nor
+ * can the entity be found by iterating through the secondary index.</p>
+ *
+ * <p>For a given entity class and its superclasses and subclasses, no two
+ * secondary keys may have the same name.  By default, the field name
+ * identifies the secondary key and the secondary index for a given entity
+ * class.  {@link #name} may be specified to override this default.</p>
+ *
+ * <p>Using {@link #relate}, instances of the entity class are related to
+ * secondary keys in a many-to-one, one-to-many, many-to-many, or one-to-one
+ * relationship.  This required property specifies the <em>cardinality</em> of
+ * each side of the relationship.</p>
+ *
+ * <p>A secondary key may optionally be used to form a relationship with
+ * instances of another entity class using {@link #relatedEntity} and {@link
+ * #onRelatedEntityDelete}.  This establishes <em>foreign key constraints</em>
+ * for the secondary key.</p>
+ *
+ * <p>The secondary key field type must be an array or collection type when an
+ * <em>x-to-many</em> relationship is used or a singular type when an
+ * <em>x-to-one</em> relationship is used; see {@link #relate}.</p>
+ *
+ * <p>The field type (or element type, when an array or collection type is
+ * used) of a secondary key field must follow the same rules as for a {@link
+ * <a href="PrimaryKey.html#keyTypes">primary key type</a>}.  The {@link <a
+ * href="PrimaryKey.html#sortOrder">key sort order</a>} is also the same.</p>
+ *
+ * <p>For a secondary key field with a collection type, a type parameter must
+ * be used to specify the element type.  For example {@code Collection<String>}
+ * is allowed but {@code Collection} is not.</p>
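+ *
+ * <p>For example, the following sketch (a hypothetical {@code Person} entity,
+ * similar to the one in the package documentation) declares a singular
+ * many-to-one key and a one-to-many key with a collection type:</p>
+ * <pre class="code">
+ *  {@literal @Entity}
+ *  class Person {
+ *
+ *      {@literal @PrimaryKey}
+ *      String ssn;
+ *
+ *      {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *      String employerName;
+ *
+ *      {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+ *      {@literal Set<String> emailAddresses = new HashSet<String>();}
+ *
+ *      private Person() {}
+ *  }</pre>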
+ *
+ * @author Mark Hayes
+ */
+@Documented @Retention(RUNTIME) @Target(FIELD)
+public @interface SecondaryKey {
+
+    /**
+     * Defines the relationship between instances of the entity class and the
+     * secondary keys.
+     *
+     * <p>The table below summarizes how to create all four variations of
+     * relationships.</p>
+     * <div>
+     * <table border="yes">
+     *     <tr><th>Relationship</th>
+     *         <th>Field type</th>
+     *         <th>Key type</th>
+     *         <th>Example</th>
+     *     </tr>
+     *     <tr><td>{@link Relationship#ONE_TO_ONE}</td>
+     *         <td>Singular</td>
+     *         <td>Unique</td>
+     *         <td>A person record with a unique social security number
+     *             key.</td>
+     *     </tr>
+     *     <tr><td>{@link Relationship#MANY_TO_ONE}</td>
+     *         <td>Singular</td>
+     *         <td>Duplicates</td>
+     *         <td>A person record with a non-unique employer key.</td>
+     *     </tr>
+     *     <tr><td>{@link Relationship#ONE_TO_MANY}</td>
+     *         <td>Array/Collection</td>
+     *         <td>Unique</td>
+     *         <td>A person record with multiple unique email address keys.</td>
+     *     </tr>
+     *     <tr><td>{@link Relationship#MANY_TO_MANY}</td>
+     *         <td>Array/Collection</td>
+     *         <td>Duplicates</td>
+     *         <td>A person record with multiple non-unique organization
+     *             keys.</td>
+     *     </tr>
+     * </table>
+     * </div>
+     *
+     * <p>For a <em>many-to-x</em> relationship, the secondary index will
+     * have non-unique keys; in other words, duplicates will be allowed.
+     * Conversely, for a <em>one-to-x</em> relationship, the secondary index
+     * will have unique keys.</p>
+     *
+     * <p>For an <em>x-to-one</em> relationship, the secondary key field is
+     * singular; in other words, it may not be an array or collection type.
+     * Conversely, for an <em>x-to-many</em> relationship, the secondary key
+     * field must be an array or collection type.  A collection type is any
+     * implementation of {@link java.util.Collection}.</p>
+     */
+    Relationship relate();
+
+    /**
+     * Specifies the entity to which this entity is related, for establishing
+     * foreign key constraints.  Values of this secondary key will be
+     * constrained to the set of primary key values for the given entity class.
+     *
+     * <p>The given class must be an entity class.  This class is called the
+     * <em>related entity</em> or <em>foreign entity</em>.</p>
+     *
+     * <p>When a related entity class is specified, a check (foreign key
+     * constraint) is made every time a new secondary key value is stored for
+     * this entity, and every time a related entity is deleted.</p>
+     *
+     * <p>Whenever a new secondary key value is stored for this entity, it is
+     * checked to ensure it exists as a primary key value of the related
+     * entity.  If it does not, a {@link DatabaseException} will be thrown
+     * by the {@link PrimaryIndex} {@code put} method.</p>
+     *
+     * <p>Whenever a related entity is deleted and its primary key value exists
+     * as a secondary key value for this entity, the action is taken that is
+     * specified using the {@link #onRelatedEntityDelete} property.</p>
+     *
+     * <p>Together, these two checks guarantee that a secondary key value for
+     * this entity will always exist as a primary key value for the related
+     * entity.  Note, however, that a transactional store must be configured
+     * to guarantee this to be true in the face of a crash; see {@link
+     * StoreConfig#setTransactional}.</p>
+     */
+    Class relatedEntity() default void.class;
+
+    /**
+     * Specifies the action to take when a related entity is deleted having a
+     * primary key value that exists as a secondary key value for this entity.
+     *
+     * <p><em>Note:</em> This property only applies when {@link #relatedEntity}
+     * is specified to define the related entity.</p>
+     *
+     * <p>The default action, {@link DeleteAction#ABORT ABORT}, means that a
+     * {@link DatabaseException} is thrown in order to abort the current
+     * transaction.</p>
+     *
+     * <p>If {@link DeleteAction#CASCADE CASCADE} is specified, then this
+     * entity will be deleted also.  This in turn could trigger further
+     * deletions, causing a cascading effect.</p>
+     *
+     * <p>If {@link DeleteAction#NULLIFY NULLIFY} is specified, then the
+     * secondary key in this entity is set to null and this entity is updated.
+     * If the key field type is singular, the field value is set to null;
+     * therefore, to specify {@code NULLIFY} for a singular key field type, a
+     * primitive wrapper type must be used instead of a primitive type.  If the
+     * key field type is an array or collection type, the key is deleted from
+     * the array (the array is resized) or from the collection (using {@link
+     * java.util.Collection#remove Collection.remove}).</p>
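+     *
+     * <p>For example, the following sketch declares a singular key that is
+     * set to null when the related {@code Employer} entity is deleted (a
+     * wrapper type is used so that the field can be set to null; the {@code
+     * Employer} class is borrowed from the package documentation):</p>
+     * <pre class="code">
+     *  {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employer.class,
+     *                          onRelatedEntityDelete=NULLIFY)}
+     *  Long employerId;</pre>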
+     */
+    DeleteAction onRelatedEntityDelete() default DeleteAction.ABORT;
+
+    /**
+     * Specifies the name of the key in order to use a name that is different
+     * than the field name.
+     *
+     * <p>This is convenient when prefixes or suffixes are used on field names.
+     * For example:</p>
+     * <pre class="code">
+     *  class Person {
+     *      {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class, name="parentSsn")}
+     *      String m_parentSsn;
+     *  }</pre>
+     *
+     * <p>It can also be used to uniquely name a key when multiple secondary
+     * keys for a single entity class have the same field name.  For example,
+     * an entity class and its subclass may both have a field named 'date',
+     * and both fields are used as secondary keys.  The {@code name} property
+     * can be specified for one or both fields to give each key a unique
+     * name.</p>
+     */
+    String name() default "";
+}
diff --git a/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java b/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..5996826093d0016d70c3f61cc6ee08b59858bb30
--- /dev/null
+++ b/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java
@@ -0,0 +1,122 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryKeyMetadata.java,v 1.15.2.3 2010/01/04 15:30:39 cwl Exp $
+ */
+
+package com.sleepycat.persist.model;
+
+
+/**
+ * The metadata for a secondary key field.  A secondary key may be specified
+ * with the {@link SecondaryKey} annotation.
+ *
+ * <p>{@code SecondaryKeyMetadata} objects are thread-safe.  Multiple threads
+ * may safely call the methods of a shared {@code SecondaryKeyMetadata}
+ * object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class SecondaryKeyMetadata extends FieldMetadata {
+
+    private static final long serialVersionUID = 8118924993396722502L;
+
+    private String keyName;
+    private Relationship relationship;
+    private String elementClassName;
+    private String relatedEntity;
+    private DeleteAction deleteAction;
+
+    /**
+     * Used by an {@code EntityModel} to construct secondary key metadata.
+     */
+    public SecondaryKeyMetadata(String name,
+                                String className,
+                                String declaringClassName,
+                                String elementClassName,
+                                String keyName,
+                                Relationship relationship,
+                                String relatedEntity,
+                                DeleteAction deleteAction) {
+        super(name, className, declaringClassName);
+        this.elementClassName = elementClassName;
+        this.keyName = keyName;
+        this.relationship = relationship;
+        this.relatedEntity = relatedEntity;
+        this.deleteAction = deleteAction;
+    }
+
+    /**
+     * Returns the class name of the array or collection element for a {@link
+     * Relationship#ONE_TO_MANY ONE_TO_MANY} or {@link
+     * Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, or null for a
+     * Relationship#ONE_TO_ONE ONE_TO_ONE} or {@link Relationship#MANY_TO_ONE
+     * MANY_TO_ONE} relationship.
+     */
+    public String getElementClassName() {
+        return elementClassName;
+    }
+
+    /**
+     * Returns the key name, which may be different from the field name.
+     */
+    public String getKeyName() {
+        return keyName;
+    }
+
+    /**
+     * Returns the relationship between instances of the entity class and the
+     * secondary keys.  This may be specified using the {@link
+     * SecondaryKey#relate} annotation.
+     */
+    public Relationship getRelationship() {
+        return relationship;
+    }
+
+    /**
+     * Returns the class name of the related (foreign) entity, for which
+     * foreign key constraints are specified using the {@link
+     * SecondaryKey#relatedEntity} annotation.
+     */
+    public String getRelatedEntity() {
+        return relatedEntity;
+    }
+
+    /**
+     * Returns the action to take when a related entity is deleted having a
+     * primary key value that exists as a secondary key value for this entity.
+     * This may be specified using the {@link
+     * SecondaryKey#onRelatedEntityDelete} annotation.
+     */
+    public DeleteAction getDeleteAction() {
+        return deleteAction;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof SecondaryKeyMetadata) {
+            SecondaryKeyMetadata o = (SecondaryKeyMetadata) other;
+            return super.equals(o) &&
+                   relationship == o.relationship &&
+                   ClassMetadata.nullOrEqual(deleteAction, o.deleteAction) &&
+                   ClassMetadata.nullOrEqual(keyName, o.keyName) &&
+                   ClassMetadata.nullOrEqual(elementClassName,
+                                             o.elementClassName) &&
+                   ClassMetadata.nullOrEqual(relatedEntity, o.relatedEntity);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return super.hashCode() +
+               relationship.hashCode() +
+               ClassMetadata.hashCode(deleteAction) +
+               ClassMetadata.hashCode(keyName) +
+               ClassMetadata.hashCode(elementClassName) +
+               ClassMetadata.hashCode(relatedEntity);
+    }
+}
diff --git a/src/com/sleepycat/persist/model/package.html b/src/com/sleepycat/persist/model/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..28033640a6997ebe67b4ebe6afee36400b0a3739
--- /dev/null
+++ b/src/com/sleepycat/persist/model/package.html
@@ -0,0 +1,6 @@
+<!-- $Id: package.html,v 1.2 2008/02/05 23:28:22 mark Exp $ -->
+<html>
+<body>
+Annotations for defining a persistent object model.
+</body>
+</html>
diff --git a/src/com/sleepycat/persist/package.html b/src/com/sleepycat/persist/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..81f4a3d8dd06522881b9292312fbdfced3fa7972
--- /dev/null
+++ b/src/com/sleepycat/persist/package.html
@@ -0,0 +1,610 @@
+<!-- $Id: package.html,v 1.9 2008/02/05 23:28:21 mark Exp $ -->
+<html>
+<body>
+The Direct Persistence Layer (DPL) adds a persistent object model to the
+Berkeley DB transactional engine.
+
+<h1>Package Specification</h1>
+
+<ul>
+<li><a href="#intro">Introduction</a></li>
+<li><a href="#model">The Entity Model</a></li>
+<li><a href="#example">A brief example</a></li>
+<li><a href="#whichAPI">Which API to use?</a></li>
+<li><a href="#java14and15">Java 1.5 dependencies</a>
+    <ul>
+    <li><a href="#genericTypes">Generic Types</a></li>
+    <li><a href="#annotations">Annotations</a></li>
+    </ul>
+</li>
+<li><a href="#bytecode">Bytecode Enhancement</a></li>
+</ul>
+
+<a name="intro"><h2>Introduction</h2></a>
+
+<p>The Direct Persistence Layer (DPL) was designed to meet the following
+requirements.</p>
+<ul>
+<li>A type safe and convenient API is provided for accessing persistent
+objects.  The use of Java generic types, although optional, is fully exploited
+to provide type safety.  For example:
+<pre class="code">
+{@literal PrimaryIndex<Long,Employer> employerById = ...;}
+long employerId = ...;
+Employer employer = employerById.get(employerId);</pre>
+</li>
+<li>All Java types are allowed to be persistent without requiring that they
+implement special interfaces.  Persistent fields may be {@code private},
+package-private (default access), {@code protected}, or {@code public}.  No
+hand-coding of bindings is required.  However, each persistent class must have
+a default constructor.  For example:
+<pre class="code">
+{@literal @Persistent}
+class Address {
+    String street;
+    String city;
+    String state;
+    int zipCode;
+    private Address() {}
+}</pre>
+</li>
+<li>Bytecode enhancement provides fully optimized bindings that do not use Java
+reflection.</li>
+<li>It is easy to define primary and secondary keys.  No external schema is
+required and Java annotations may be used for defining all metadata.
+Extensions may derive metadata from other sources.  For example, the following
+Employer class is defined as a persistent entity with a primary key field
+{@code id} and the secondary key field {@code name}:
+<pre class="code">
+{@literal @Entity}
+class Employer {
+
+    {@literal @PrimaryKey(sequence="ID")}
+    long id;
+
+    {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+    String name;
+
+    Address address;
+
+    private Employer() {}
+}</pre>
+</li>
+<li>Interoperability with external components is supported via the Java
+collections framework.  Any primary or secondary index can be accessed using a
+standard <code>java.util</code> collection.  For example:
+<pre class="code">{@literal java.util.SortedMap<String,Employer> map = employerByName.sortedMap();}</pre>
+</li>
+<li>Class evolution is explicitly supported.  Compatible changes (adding fields
+and type widening) are performed automatically and transparently.  For example,
+without any special configuration a {@code street2} field may be added to the
+{@code Address} class and the type of the {@code zipCode} field may be changed
+from {@code int} to {@code long}:
+<pre class="code">
+{@literal @Persistent}
+class Address {
+    String street;
+    String street2;
+    String city;
+    String state;
+    long zipCode;
+    private Address() {}
+}</pre>
+Many incompatible class changes, such as renaming fields or refactoring a
+single class, can be performed using {@link
+com.sleepycat.persist.evolve.Mutations Mutations}.  Mutations are automatically
+applied lazily as data is accessed, avoiding downtime to convert large
+databases during a software upgrade.
+<p>Complex refactoring involving multiple classes may be performed using a
+<a href="package-summary.html#storeConversion">store conversion</a>.  The DPL
+always provides access to your data via a {@code RawStore}, no matter what
+changes have been made to persistent classes.</p>
+</li>
+<br>
+<li>The performance of the Berkeley DB transactional engine is not compromised.
+Operations are internally mapped directly to the engine API, object bindings
+are lightweight, and all engine tuning parameters are available.  For example,
+a "dirty read" may be performed using an optional {@link
+com.sleepycat.je.LockMode LockMode} parameter:
+<pre class="code">Employer employer = employerByName.get(null, "Gizmo Inc", LockMode.READ_UNCOMMITTED);</pre>
+For high performance applications, {@link com.sleepycat.je.DatabaseConfig
+DatabaseConfig} parameters may be used to tune the performance of the Berkeley
+DB engine.  For example, the size of an internal Btree node can be specified
+as follows:
+<pre class="code">
+DatabaseConfig config = store.getPrimaryConfig(Employer.class);
+config.setNodeMaxEntries(64);
+store.setPrimaryConfig(config);</pre>
+</li>
+</ul>
+
+<a name="model"><h2>The Entity Model</h2></a>
+
+<p>The DPL is intended for applications that represent persistent domain
+objects using Java classes.  An <em>entity class</em> is an ordinary Java class
+that has a primary key and is stored and accessed using a primary index.  It
+may also have any number of secondary keys, and entities may be accessed by
+secondary key using a secondary index.</p>
+
+<p>An entity class may be defined with the {@link
+com.sleepycat.persist.model.Entity Entity} annotation.  For each entity class,
+its primary key may be defined using the {@link
+com.sleepycat.persist.model.PrimaryKey PrimaryKey} annotation and any number of
+secondary keys may be defined using the {@link
+com.sleepycat.persist.model.SecondaryKey SecondaryKey} annotation.</p>
+
+<p>In the following example, the {@code Person.ssn} (social security number)
+field is the primary key and the {@code Person.employerIds} field is a
+many-to-many secondary key.</p>
+<pre class="code">
+{@literal @Entity}
+class Person {
+
+    {@literal @PrimaryKey}
+    String ssn;
+
+    String name;
+    Address address;
+
+    {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class)}
+    {@literal Set<Long> employerIds = new HashSet<Long>();}
+
+    private Person() {} // For bindings
+}</pre>
+
+<p>A set of entity classes constitutes an <em>entity model</em>.  In addition
+to isolated entity classes, an entity model may contain relationships between
+entities.  Relationships may be defined using the {@link
+com.sleepycat.persist.model.SecondaryKey SecondaryKey} annotation.
+Many-to-one, one-to-many, many-to-many and one-to-one relationships are
+supported, as well as foreign key constraints.</p>
+
+<p>In the example above, a relationship between the {@code Person} and {@code
+Employer} entities is defined via the {@code Person.employerIds} field.  The
+{@code relatedEntity=Employer.class} annotation property establishes foreign
+key constraints to guarantee that every element of the {@code employerIds} set
+is a valid {@code Employer} primary key.</p>
+
+<p>For more information on the entity model, see the {@link
+com.sleepycat.persist.model.AnnotationModel AnnotationModel} and the {@link
+com.sleepycat.persist.model.Entity Entity} annotation.</p>
+
+<p>The root object in the DPL is the {@link com.sleepycat.persist.EntityStore
+EntityStore}.  An entity store manages any number of objects for each entity
+class defined in the model.  The store provides access to the primary and
+secondary indices for each entity class, for example:</p>
+
+<pre class="code">
+EntityStore store = new EntityStore(...);
+
+{@literal PrimaryIndex<String,Person> personBySsn} =
+    store.getPrimaryIndex(String.class, Person.class);</pre>
+
+<a name="example"><h2>A brief example</h2></a>
+
+<p>The following example shows how to define an entity model and how to store
+and access persistent objects.  Exception handling is omitted for brevity.</p>
+
+<pre class="code">
+import java.io.File;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
+
+// An entity class.
+//
+{@literal @Entity}
+class Person {
+
+    {@literal @PrimaryKey}
+    String ssn;
+
+    String name;
+    Address address;
+
+    {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class)}
+    String parentSsn;
+
+    {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+    {@literal Set<String> emailAddresses = new HashSet<String>();}
+
+    {@code @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class,
+                                       onRelatedEntityDelete=NULLIFY)}
+    {@code Set<Long> employerIds = new HashSet<Long>();}
+
+    Person(String name, String ssn, String parentSsn) {
+        this.name = name;
+        this.ssn = ssn;
+        this.parentSsn = parentSsn;
+    }
+
+    private Person() {} // For bindings
+}
+
+// Another entity class.
+//
+{@literal @Entity}
+class Employer {
+
+    {@literal @PrimaryKey(sequence="ID")}
+    long id;
+
+    {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+    String name;
+
+    Address address;
+
+    Employer(String name) {
+        this.name = name;
+    }
+
+    private Employer() {} // For bindings
+}
+
+// A persistent class used in other classes.
+//
+{@literal @Persistent}
+class Address {
+    String street;
+    String city;
+    String state;
+    int zipCode;
+    private Address() {} // For bindings
+}
+
+// The data accessor class for the entity model.
+//
+class PersonAccessor {
+
+    // Person accessors
+    //
+    {@literal PrimaryIndex<String,Person> personBySsn;}
+    {@literal SecondaryIndex<String,String,Person> personByParentSsn;}
+    {@literal SecondaryIndex<String,String,Person> personByEmailAddresses;}
+    {@literal SecondaryIndex<Long,String,Person> personByEmployerIds;}
+
+    // Employer accessors
+    //
+    {@literal PrimaryIndex<Long,Employer> employerById;}
+    {@literal SecondaryIndex<String,Long,Employer> employerByName;}
+
+    // Opens all primary and secondary indices.
+    //
+    public PersonAccessor(EntityStore store)
+        throws DatabaseException {
+
+        personBySsn = store.getPrimaryIndex(
+            String.class, Person.class);
+
+        personByParentSsn = store.getSecondaryIndex(
+            personBySsn, String.class, "parentSsn");
+
+        personByEmailAddresses = store.getSecondaryIndex(
+            personBySsn, String.class, "emailAddresses");
+
+        personByEmployerIds = store.getSecondaryIndex(
+            personBySsn, Long.class, "employerIds");
+
+        employerById = store.getPrimaryIndex(
+            Long.class, Employer.class);
+
+        employerByName = store.getSecondaryIndex(
+            employerById, String.class, "name"); 
+    }
+}
+
+// Open a transactional Berkeley DB engine environment.
+//
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+Environment env = new Environment(new File("/my/data"), envConfig);
+
+// Open a transactional entity store.
+//
+StoreConfig storeConfig = new StoreConfig();
+storeConfig.setAllowCreate(true);
+storeConfig.setTransactional(true);
+EntityStore store = new EntityStore(env, "PersonStore", storeConfig);
+
+// Initialize the data access object.
+//
+PersonAccessor dao = new PersonAccessor(store);
+
+// Add a parent and two children using the Person primary index.  Specifying a
+// non-null parentSsn adds the child Person to the sub-index of children for
+// that parent key.
+//
+dao.personBySsn.put(new Person("Bob Smith", "111-11-1111", null));
+dao.personBySsn.put(new Person("Mary Smith", "333-33-3333", "111-11-1111"));
+dao.personBySsn.put(new Person("Jack Smith", "222-22-2222", "111-11-1111"));
+
+// Print the children of a parent using a sub-index and a cursor.
+//
+{@literal EntityCursor<Person> children =}
+    dao.personByParentSsn.subIndex("111-11-1111").entities();
+try {
+    for (Person child : children) {
+        System.out.println(child.ssn + ' ' + child.name);
+    }
+} finally {
+    children.close();
+}
+
+// Get Bob by primary key using the primary index.
+//
+Person bob = dao.personBySsn.get("111-11-1111");
+assert bob != null;
+
+// Create two employers.  Their primary keys are assigned from a sequence.
+//
+Employer gizmoInc = new Employer("Gizmo Inc");
+Employer gadgetInc = new Employer("Gadget Inc");
+dao.employerById.put(gizmoInc);
+dao.employerById.put(gadgetInc);
+
+// Bob has two jobs and two email addresses.
+//
+bob.employerIds.add(gizmoInc.id);
+bob.employerIds.add(gadgetInc.id);
+bob.emailAddresses.add("bob@bob.com");
+bob.emailAddresses.add("bob@gmail.com");
+
+// Update Bob's record.
+//
+dao.personBySsn.put(bob);
+
+// Bob can now be found by both email addresses.
+//
+bob = dao.personByEmailAddresses.get("bob@bob.com");
+assert bob != null;
+bob = dao.personByEmailAddresses.get("bob@gmail.com");
+assert bob != null;
+
+// Bob can also be found as an employee of both employers.
+//
+{@literal EntityIndex<String,Person> employees;}
+employees = dao.personByEmployerIds.subIndex(gizmoInc.id);
+assert employees.contains("111-11-1111");
+employees = dao.personByEmployerIds.subIndex(gadgetInc.id);
+assert employees.contains("111-11-1111");
+
+// When an employer is deleted, the onRelatedEntityDelete=NULLIFY for the
+// employerIds key causes the deleted ID to be removed from Bob's employerIds.
+//
+dao.employerById.delete(gizmoInc.id);
+bob = dao.personBySsn.get("111-11-1111");
+assert !bob.employerIds.contains(gizmoInc.id);
+
+store.close();
+env.close();
+</pre>
+<p>The example illustrates several characteristics of the DPL:</p>
+<ul>
+<li>Persistent data and keys are defined in terms of instance fields.  For
+brevity the example does not show getter and setter methods, although these
+would normally exist to provide encapsulation.  The DPL accesses fields during
+object serialization and deserialization, rather than calling getter/setter
+methods, leaving business methods free to enforce arbitrary validation rules.
+For example: 
+<pre class="code">
+{@literal @Persistent}
+public class ConstrainedValue {
+
+    private int min;
+    private int max;
+    private int value;
+
+    private ConstrainedValue() {} // For bindings
+
+    public ConstrainedValue(int min, int max) {
+        this.min = min;
+        this.max = max;
+        value = min;
+    }
+
+    public void setValue(int value) {
+        if (value &lt; min || value &gt; max) {
+            throw new IllegalArgumentException("out of range");
+        }
+        this.value = value;
+    }
+}
+</pre>
+The above {@code setValue} method would not work if it were called during
+object deserialization, since the order of setting fields is arbitrary.  The
+{@code min} and {@code max} fields might not yet have been set when the
+{@code value} field is set.
+</li>
+<br>
+<li>The example creates a transactional store and therefore all operations are
+transaction protected.  Because no explicit transactions are used, auto-commit
+is used implicitly.
+
+<p>Explicit transactions may also be used to group multiple operations in a
+single transaction, and all access methods have optional transaction
+parameters.  For example, the following two operations are performed atomically
+in a transaction:
+<pre class="code">
+Transaction txn = env.beginTransaction(null, null);
+dao.employerById.put(txn, gizmoInc);
+dao.employerById.put(txn, gadgetInc);
+txn.commit();
+</pre>
+</li>
+<li>To provide maximum performance, the DPL operations map directly to the
+Btree operations of the Berkeley DB engine.  Unlike other persistence
+approaches, keys and indices are exposed for direct access and performance
+tuning.
+<p>Queries are implemented by calling methods of the primary and secondary
+indices.  An {@link com.sleepycat.persist.EntityJoin EntityJoin} class is also
+available for performing equality joins.  For example, the following code
+queries all of Bob's children that work for Gizmo Inc:
+<pre class="code">
+{@literal EntityJoin<String,Person> join = new EntityJoin(dao.personBySsn);}
+
+join.addCondition(dao.personByParentSsn, "111-11-1111");
+join.addCondition(dao.personByEmployerIds, gizmoInc.id);
+
+{@literal ForwardCursor<Person> results = join.entities();}
+try {
+    for (Person person : results) {
+        System.out.println(person.ssn + ' ' + person.name);
+    }
+} finally {
+    results.close();
+}
+</pre>
+</li>
+<li>Object relationships are based on keys.  When a {@code Person} with a given
+employer ID in its {@code employerIds} set is stored, the {@code Person} object
+becomes part of the collection of employees for that employer.  This collection
+of employees is accessed using a {@link
+com.sleepycat.persist.SecondaryIndex#subIndex SecondaryIndex.subIndex} for the
+employer ID, as shown below:
+<pre class="code">
+{@literal EntityCursor<Person> employees =}
+    dao.personByEmployerIds.subIndex(gizmoInc.id).entities();
+try {
+    for (Person employee : employees) {
+        System.out.println(employee.ssn + ' ' + employee.name);
+    }
+} finally {
+    employees.close();
+}
+</pre></li>
+<li>Note that when Bob's employer is deleted in the example, the {@code Person}
+object for Bob is refetched to see the change to its {@code employerIds}.  This
+is because objects are accessed by value, not by reference.  In other words, no
+object cache or "persistence context" is maintained by the DPL; a short sketch
+illustrating this follows the list.  The low level caching of the embedded
+Berkeley DB engine, combined with lightweight object bindings, provides maximum
+performance.</li>
+</ul>
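+
+<p>The following sketch illustrates this behavior, reusing the {@code dao}
+accessor from the example above (before the store is closed).  Each access
+deserializes a distinct instance:</p>
+<pre class="code">
+Person first = dao.personBySsn.get("111-11-1111");
+Person second = dao.personBySsn.get("111-11-1111");
+
+// No object cache is maintained, so each get() returns a new instance.
+assert first != second;
+assert first.ssn.equals(second.ssn);
+</pre>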
+
+<a name="whichAPI"><h2>Which API to use?</h2></a>
+
+<p>The Berkeley DB engine has a {@link com.sleepycat.je Base API}, a {@link
+com.sleepycat.collections Collections API} and a {@link com.sleepycat.persist
+Direct Persistence Layer (DPL)}.  Follow these guidelines if you are not sure
+which API to use:</p>
+<ul>
+<li>When Java classes are used to represent domain objects in an application,
+the DPL is recommended.  The more domain classes, the more value there is in
+using annotations to define your schema.</li>
+<br>
+<li>When porting an application between Berkeley DB and Berkeley DB Java
+Edition, or when you've chosen not to use Java classes to represent domain
+objects, then the Base API is recommended.  You may also prefer to use this API
+if you have very few domain classes.</li>
+<br>
+<li>The Collections API is useful for interoperating with external components
+because it conforms to the standard Java Collections Framework.  It is
+therefore useful in combination with both the Base API and the DPL.  You may
+prefer this API because it provides the familiar Java Collections
+interface.</li>
+</ul>
+
+<a name="java14and15"><h2>Java 1.5 dependencies</h2></a>
+
+<p><em>NOTE:</em> The current release of the DPL requires compiling and
+deploying with Java 1.5 or greater.  Support for Java 1.4 may be added in a
+future release, based on user demand.</p>
+
+<p>The DPL uses two features of Java 1.5: generic types and annotations.  If
+you wish to avoid using these two Java 1.5 features, the DPL provides options
+for doing so.</p>
+
+<a name="genericTypes"><h3>Generic Types</h3></a>
+
+<p>Generic types are used to provide type safety, especially for the {@link
+com.sleepycat.persist.PrimaryIndex PrimaryIndex}, {@link
+com.sleepycat.persist.SecondaryIndex SecondaryIndex}, and {@link
+com.sleepycat.persist.EntityCursor EntityCursor} classes.  If you prefer not to
+use generic types, simply declare your index and cursor objects without generic
+type parameters.  This is the same as using the Java 1.5 Collections Framework
+without generic types.</p>
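+
+<p>For example, the following sketch, which assumes the {@code Person} entity
+and the open {@code EntityStore} ({@code store}) from the example above,
+declares the same primary index with and without generic type parameters:</p>
+<pre class="code">
+// With generic types: no casts are needed.
+{@literal PrimaryIndex<String,Person> bySsn =}
+    store.getPrimaryIndex(String.class, Person.class);
+Person person = bySsn.get("111-11-1111");
+
+// Without generic types: raw index types and a cast are used instead.
+PrimaryIndex bySsnRaw = store.getPrimaryIndex(String.class, Person.class);
+Person personRaw = (Person) bySsnRaw.get("111-11-1111");
+</pre>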
+
+<a name="annotations"><h3>Annotations</h3></a>
+
+<p>If you don't wish to use annotations, you can provide another source of
+metadata by implementing an {@link com.sleepycat.persist.model.EntityModel
+EntityModel} class.  For example, naming conventions, static members, or an XML
+configuration file might be used as a source of metadata.  However, if you
+don't use annotations then you won't be able to use bytecode enhancement, which
+is described next.</p>
+
+<a name="bytecode"><h2>Bytecode Enhancement</h2></a>
+
+<p>The persistent fields of a class may be private, package-private, protected
+or public.  The DPL can access persistent fields either by bytecode enhancement
+or by reflection.</p>
+
+<p>Bytecode enhancement may be used to fully optimize binding performance and
+to avoid the use of Java reflection.  In applications that are CPU bound,
+avoiding Java reflection can have a significant performance impact.</p>
+
+<p>Bytecode enhancement may be performed either at runtime or at build time
+(offline).  When enhancement is performed at runtime, persistent classes are
+enhanced as they are loaded.  When enhancement is performed offline, class
+files are enhanced during a post-compilation step.
+<!-- begin JE only -->
+Both a main program and an Ant task are provided for performing offline
+enhancement.
+<!-- end JE only -->
+Enhanced classes are used to efficiently access all fields and default
+constructors, including non-public members.</p>
+
+<p>See {@link com.sleepycat.persist.model.ClassEnhancer ClassEnhancer} for
+bytecode enhancement configuration details.</p>
+
+<p>If bytecode enhancement is not used as described above, the DPL will use
+reflection for accessing persistent fields and the default constructor.  The
+{@link java.lang.reflect.AccessibleObject#setAccessible
+AccessibleObject.setAccessible} method is called by the DPL to enable access to
+non-public fields and constructors.  If you are running under a Java security
+manager you must configure your security policy to allow the following
+permission:</p>
+
+<p>{@code permission java.lang.reflect.ReflectPermission "suppressAccessChecks";}</p>
+
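+<p>For example, a policy file grant along the following lines would enable this
+permission; the {@code codeBase} shown is only illustrative:</p>
+<pre class="code">
+// The codeBase below is illustrative; substitute your application's location.
+grant codeBase "file:/path/to/application/-" {
+    permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+};
+</pre>
+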
+<p>There are three cases where setting the above permission is <em>not</em>
+required:</p>
+<ol>
+<li>If you are not running under a Java Security Manager, then access to
+non-public members via reflection is not restricted.  This is the default for
+J2SE.</li>
+<br>
+<li>If all persistent fields and default constructors are {@code public} then
+they can be accessed via reflection without special permissions, even when
+running under a Java Security Manager.  However, declaring {@code public}
+instance fields is not recommended because it discourages encapsulation.</li>
+<br>
+<li>If bytecode enhancement is used as described above, then reflection will
+not be used.</li>
+</ol>
+
+<p>It is well known that executing generated code is faster than reflection.
+However, this performance difference may or may not impact a given application
+since it may be overshadowed by other factors.  Performance testing in a
+realistic usage scenario is the best way to determine the impact.  If you are
+determined to avoid the use of reflection then option 3 above is
+recommended.</p>
+
+</body>
+</html>
diff --git a/src/com/sleepycat/persist/raw/RawField.java b/src/com/sleepycat/persist/raw/RawField.java
new file mode 100644
index 0000000000000000000000000000000000000000..0c49fc28a7ab1c1af1f349c089d7ab718d5f9e45
--- /dev/null
+++ b/src/com/sleepycat/persist/raw/RawField.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawField.java,v 1.10.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.persist.raw;
+
+/**
+ * The definition of a field in a {@link RawType}.
+ *
+ * <p>{@code RawField} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code RawField} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface RawField {
+
+    /**
+     * Returns the name of the field.
+     */
+    String getName();
+
+    /**
+     * Returns the type of the field, without expanding parameterized types,
+     * or null if the type is an interface type or the Object class.
+     */
+    RawType getType();
+}
diff --git a/src/com/sleepycat/persist/raw/RawObject.java b/src/com/sleepycat/persist/raw/RawObject.java
new file mode 100644
index 0000000000000000000000000000000000000000..958d4fcc3710c58bfe55380717e62963f5b43b8a
--- /dev/null
+++ b/src/com/sleepycat/persist/raw/RawObject.java
@@ -0,0 +1,323 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawObject.java,v 1.15.2.3 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.persist.raw;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.TreeSet;
+
+import com.sleepycat.persist.evolve.Conversion;
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * A raw instance that can be used with a {@link RawStore} or {@link
+ * Conversion}.  A <code>RawObject</code> is used to represent instances of
+ * complex types (persistent classes with fields), arrays, and enum values.  It
+ * is not used to represent non-enum simple types, which are represented as
+ * simple objects.  This includes primitives, which are represented as
+ * instances of their wrapper class.
+ *
+ * <p>{@code RawObject} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code RawObject} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public class RawObject {
+
+    private static final String INDENT = "  ";
+
+    private RawType type;
+    private Map<String,Object> values;
+    private Object[] elements;
+    private String enumConstant;
+    private RawObject superObject;
+
+    /**
+     * Creates a raw object with a given set of field values for a complex
+     * type.
+     *
+     * @param type the type of this raw object.
+     *
+     * @param values a map of field name to value for each declared field in
+     * the class; may not be null.  Each value in the map is a
+     * {@link RawObject}, a {@link <a
+     * href="../model/Entity.html#simpleTypes">simple type</a>} instance, or
+     * null.
+     *
+     * @param superObject the instance of the superclass, or null if the
+     * superclass is {@code Object}.
+     *
+     * @throws IllegalArgumentException if the type argument is an array type.
+     */
+    public RawObject(RawType type,
+                     Map<String,Object> values,
+                     RawObject superObject) {
+        if (type == null || values == null) {
+            throw new NullPointerException();
+        }
+        this.type = type;
+        this.values = values;
+        this.superObject = superObject;
+    }
+
+    /**
+     * Creates a raw object with the given array elements for an array type.
+     *
+     * @param type the type of this raw object.
+     *
+     * @param elements an array of elements.  Each element in the array is a
+     * {@link RawObject}, a {@link <a
+     * href="../model/Entity.html#simpleTypes">simple type</a>} instance, or
+     * null.
+     *
+     * @throws IllegalArgumentException if the type argument is not an array
+     * type.
+     */
+    public RawObject(RawType type, Object[] elements) {
+        if (type == null || elements == null) {
+            throw new NullPointerException();
+        }
+        this.type = type;
+        this.elements = elements;
+    }
+
+    /**
+     * Creates a raw object with the given enum value for an enum type.
+     *
+     * @param type the type of this raw object.
+     *
+     * @param enumConstant the String value of this enum constant; must be
+     * one of the Strings returned by {@link RawType#getEnumConstants}.
+     *
+     * @throws IllegalArgumentException if the type argument is not an enum
+     * type.
+     */
+    public RawObject(RawType type, String enumConstant) {
+        if (type == null || enumConstant == null) {
+            throw new NullPointerException();
+        }
+        this.type = type;
+        this.enumConstant = enumConstant;
+    }
+
+    /**
+     * Returns the raw type information for this raw object.
+     *
+     * <p>Note that if this object is unevolved, the returned type may be
+     * different from the current type returned by {@link
+     * EntityModel#getRawType EntityModel.getRawType} for the same class name.
+     * This can only occur in a {@link Conversion#convert
+     * Conversion.convert}.</p>
+     */
+    public RawType getType() {
+        return type;
+    }
+
+    /**
+     * Returns a map of field name to value for a complex type, or null for an
+     * array type or an enum type.  The map contains a String key for each
+     * declared field in the class.  Each value in the map is a {@link
+     * RawObject}, a {@link <a href="../model/Entity.html#simpleTypes">simple
+     * type</a>} instance, or null.
+     *
+     * <p>There will be an entry in the map for every field declared in this
+     * type, as determined by {@link RawType#getFields} for the type returned
+     * by {@link #getType}.  Values in the map may be null for fields with
+     * non-primitive types.</p>
+     */
+    public Map<String,Object> getValues() {
+        return values;
+    }
+
+    /**
+     * Returns the array of elements for an array type, or null for a complex
+     * type or an enum type.  Each element in the array is a {@link RawObject},
+     * a {@link <a href="../model/Entity.html#simpleTypes">simple type</a>}
+     * instance, or null.
+     */
+    public Object[] getElements() {
+        return elements;
+    }
+
+    /**
+     * Returns the enum constant String for an enum type, or null for a complex
+     * type or an array type.  The String returned will be one of the Strings
+     * returned by {@link RawType#getEnumConstants}.
+     */
+    public String getEnum() {
+        return enumConstant;
+    }
+
+    /**
+     * Returns the instance of the superclass, or null if the superclass is
+     * {@code Object} or {@code Enum}.
+     */
+    public RawObject getSuper() {
+        return superObject;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other == this) {
+            return true;
+        }
+        if (!(other instanceof RawObject)) {
+            return false;
+        }
+        RawObject o = (RawObject) other;
+        if (type != o.type) {
+            return false;
+        }
+        if (!Arrays.deepEquals(elements, o.elements)) {
+            return false;
+        }
+        if (enumConstant != null) {
+            if (!enumConstant.equals(o.enumConstant)) {
+                return false;
+            }
+        } else {
+            if (o.enumConstant != null) {
+                return false;
+            }
+        }
+        if (values != null) {
+            if (!values.equals(o.values)) {
+                return false;
+            }
+        } else {
+            if (o.values != null) {
+                return false;
+            }
+        }
+        if (superObject != null) {
+            if (!superObject.equals(o.superObject)) {
+                return false;
+            }
+        } else {
+            if (o.superObject != null) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return System.identityHashCode(type) +
+               Arrays.deepHashCode(elements) +
+               (enumConstant != null ? enumConstant.hashCode() : 0) +
+               (values != null ? values.hashCode() : 0) +
+               (superObject != null ? superObject.hashCode() : 0);
+    }
+
+    /**
+     * Returns an XML representation of the raw object.
+     */
+    @Override
+    public String toString() {
+        StringBuffer buf = new StringBuffer(500);
+        formatRawObject(buf, "", null, false);
+        return buf.toString();
+    }
+
+    private void formatRawObject(StringBuffer buf,
+                                 String indent,
+                                 String id,
+                                 boolean isSuper) {
+        if (type.isEnum()) {
+            buf.append(indent);
+            buf.append("<Enum");
+            formatId(buf, id);
+            buf.append(" class=\"");
+            buf.append(type.getClassName());
+            buf.append("\" typeId=\"");
+            buf.append(type.getId());
+            buf.append("\">");
+            buf.append(enumConstant);
+            buf.append("</Enum>\n");
+        } else {
+            String indent2 = indent + INDENT;
+            String endTag;
+            buf.append(indent);
+            if (type.isArray()) {
+                buf.append("<Array");
+                endTag = "</Array>";
+            } else if (isSuper) {
+                buf.append("<Super");
+                endTag = "</Super>";
+            } else {
+                buf.append("<Object");
+                endTag = "</Object>";
+            }
+            formatId(buf, id);
+            if (type.isArray()) {
+                buf.append(" length=\"");
+                buf.append(elements.length);
+                buf.append('"');
+            }
+            buf.append(" class=\"");
+            buf.append(type.getClassName());
+            buf.append("\" typeId=\"");
+            buf.append(type.getId());
+            buf.append("\">\n");
+
+            if (superObject != null) {
+                superObject.formatRawObject(buf, indent2, null, true);
+            }
+            if (type.isArray()) {
+                for (int i = 0; i < elements.length; i += 1) {
+                    formatValue(buf, indent2, String.valueOf(i), elements[i]);
+                }
+            } else {
+                TreeSet<String> keys = new TreeSet<String>(values.keySet());
+                for (String name : keys) {
+                    formatValue(buf, indent2, name, values.get(name));
+                }
+            }
+            buf.append(indent);
+            buf.append(endTag);
+            buf.append("\n");
+        }
+    }
+
+    private static void formatValue(StringBuffer buf,
+                                    String indent,
+                                    String id,
+                                    Object val) {
+        if (val == null) {
+            buf.append(indent);
+            buf.append("<Null");
+            formatId(buf, id);
+            buf.append("/>\n");
+        } else if (val instanceof RawObject) {
+            ((RawObject) val).formatRawObject(buf, indent, id, false);
+        } else {
+            buf.append(indent);
+            buf.append("<Value");
+            formatId(buf, id);
+            buf.append(" class=\"");
+            buf.append(val.getClass().getName());
+            buf.append("\">");
+            buf.append(val.toString());
+            buf.append("</Value>\n");
+        }
+    }
+
+    private static void formatId(StringBuffer buf, String id) {
+        if (id != null) {
+            if (Character.isDigit(id.charAt(0))) {
+                buf.append(" index=\"");
+            } else {
+                buf.append(" field=\"");
+            }
+            buf.append(id);
+            buf.append('"');
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/raw/RawStore.java b/src/com/sleepycat/persist/raw/RawStore.java
new file mode 100644
index 0000000000000000000000000000000000000000..2280cefcc2df9f5035b8f511a3a51e773a9cf72d
--- /dev/null
+++ b/src/com/sleepycat/persist/raw/RawStore.java
@@ -0,0 +1,135 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawStore.java,v 1.16.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.persist.raw;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.impl.Store;
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * Provides access to the raw data in a store for use by general purpose tools.
+ * A <code>RawStore</code> provides access to stored entities without using
+ * entity classes or key classes.  Keys are represented as simple type objects
+ * or, for composite keys, as {@link RawObject} instances, and entities are
+ * represented as {@link RawObject} instances.
+ *
+ * <p>{@code RawStore} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code RawStore} object.</p>
+ *
+ * <p>When using a {@code RawStore}, the current persistent class definitions
+ * are not used.  Instead, the previously stored metadata and class definitions
+ * are used.  This has several implications:</p>
+ * <ol>
+ * <li>An {@code EntityModel} may not be specified using {@link
+ * StoreConfig#setModel}.  In other words, the configured model must be
+ * null (the default).</li>
+ * <li>When storing entities, their format will not automatically be evolved
+ * to the current class definition, even if the current class definition has
+ * changed.</li>
+ * </ol>
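+ *
+ * <p>For example, a minimal sketch of raw access, assuming an open
+ * {@code Environment} ({@code env}); the store name and entity class name
+ * below are hypothetical:</p>
+ * <pre>
+ *  RawStore rawStore = new RawStore(env, "PersonStore", null);
+ *  {@literal PrimaryIndex<Object,RawObject> index =}
+ *      rawStore.getPrimaryIndex("com.example.Person");
+ *  RawObject person = index.get("111-11-1111");   // hypothetical key value
+ *  System.out.println(person);                    // XML representation
+ *  rawStore.close();
+ * </pre>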
+ *
+ * @author Mark Hayes
+ */
+public class RawStore {
+
+    private Store store;
+
+    /**
+     * Opens an entity store for raw data access.
+     *
+     * @param env an open Berkeley DB environment.
+     *
+     * @param storeName the name of the entity store within the given
+     * environment.
+     *
+     * @param config the store configuration, or null to use default
+     * configuration properties.
+     *
+     * @throws IllegalArgumentException if the <code>Environment</code> is
+     * read-only and the <code>ReadOnly</code> property of the given config is
+     * false.
+     */
+    public RawStore(Environment env, String storeName, StoreConfig config)
+        throws DatabaseException {
+
+        store = new Store(env, storeName, config, true /*rawAccess*/);
+    }
+
+    /**
+     * Opens the primary index for a given entity class.
+     */
+    public PrimaryIndex<Object,RawObject> getPrimaryIndex(String entityClass)
+        throws DatabaseException {
+
+        return store.getPrimaryIndex
+            (Object.class, null, RawObject.class, entityClass);
+    }
+
+    /**
+     * Opens the secondary index for a given entity class and secondary key
+     * name.
+     */
+    public SecondaryIndex<Object,Object,RawObject>
+        getSecondaryIndex(String entityClass, String keyName)
+        throws DatabaseException {
+
+        return store.getSecondaryIndex
+            (getPrimaryIndex(entityClass), RawObject.class, entityClass,
+             Object.class, null, keyName);
+    }
+
+    /**
+     * Returns the environment associated with this store.
+     */
+    public Environment getEnvironment() {
+        return store.getEnvironment();
+    }
+
+    /**
+     * Returns a copy of the entity store configuration.
+     */
+    public StoreConfig getConfig() {
+        return store.getConfig();
+    }
+
+    /**
+     * Returns the name of this store.
+     */
+    public String getStoreName() {
+        return store.getStoreName();
+    }
+
+    /**
+     * Returns the last configured and stored entity model for this store.
+     */
+    public EntityModel getModel() {
+        return store.getModel();
+    }
+
+    /**
+     * Returns the set of mutations that were configured and stored previously.
+     */
+    public Mutations getMutations() {
+        return store.getMutations();
+    }
+
+    /**
+     * Closes all databases and sequences that were opened via this raw store.
+     * No databases opened via this store may be in use.
+     */
+    public void close()
+        throws DatabaseException {
+
+        store.close();
+    }
+}
diff --git a/src/com/sleepycat/persist/raw/RawType.java b/src/com/sleepycat/persist/raw/RawType.java
new file mode 100644
index 0000000000000000000000000000000000000000..4be6114161fc2bc41e38abb33b4b494f269eb338
--- /dev/null
+++ b/src/com/sleepycat/persist/raw/RawType.java
@@ -0,0 +1,159 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RawType.java,v 1.12.2.3 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.persist.raw;
+
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.Persistent;
+
+/**
+ * The type definition for a simple or complex persistent type, or an array
+ * of persistent types.
+ *
+ * <p>{@code RawType} objects are thread-safe.  Multiple threads may safely
+ * call the methods of a shared {@code RawType} object.</p>
+ *
+ * @author Mark Hayes
+ */
+public interface RawType {
+
+    /**
+     * Returns the class name for this type in the format specified by {@link
+     * Class#getName}.
+     *
+     * <p>If this class currently exists (has not been removed or renamed) then
+     * the class name may be passed to {@link Class#forName} to get the current
+     * {@link Class} object.  However, if this raw type is not the current
+     * version of the class, this type information may differ from that of the
+     * current {@link Class}.</p>
+     */
+    String getClassName();
+
+    /**
+     * Returns the class version for this type.  For simple types, zero is
+     * always returned.
+     *
+     * @see Entity#version
+     * @see Persistent#version
+     */
+    int getVersion();
+
+    /**
+     * Returns the internal unique ID for this type.
+     */
+    int getId();
+
+    /**
+     * Returns whether this is a {@link <a
+     * href="../model/Entity.html#simpleTypes">simple type</a>}: primitive,
+     * primitive wrapper, BigInteger, String or Date.
+     * <!--
+     * primitive wrapper, BigInteger, BigDecimal, String or Date.
+     * -->
+     *
+     * <p>If true is returned, {@link #isPrimitive} can be called for more
+     * information, and a raw value of this type is represented as a simple
+     * type object (not as a {@link RawObject}).</p>
+     *
+     * <p>If false is returned, this is a complex type, an array type (see
+     * {@link #isArray}), or an enum type, and a raw value of this type is
+     * represented as a {@link RawObject}.</p>
+     */
+    boolean isSimple();
+
+    /**
+     * Returns whether this type is a Java primitive: char, byte, short, int,
+     * long, float or double.
+     *
+     * <p>If true is returned, this is also a simple type.  In other words,
+     * primitive types are a subset of simple types.</p>
+     *
+     * <p>If true is returned, a raw value of this type is represented as a
+     * non-null instance of the primitive type's wrapper class.  For example,
+     * an <code>int</code> raw value is represented as an
+     * <code>Integer</code>.</p>
+     */
+    boolean isPrimitive();
+
+    /**
+     * Returns whether this is an enum type.
+     *
+     * <p>If true is returned, a value of this type is a {@link RawObject} and
+     * the enum constant String is available via {@link RawObject#getEnum}.</p>
+     *
+     * <p>If false is returned, then this is a complex type, an array type (see
+     * {@link #isArray}), or a simple type (see {@link #isSimple}).</p>
+     */
+    boolean isEnum();
+
+    /**
+     * Returns an unmodifiable list of the names of the enum instances, or null
+     * if this is not an enum type.
+     */
+    List<String> getEnumConstants();
+
+    /**
+     * Returns whether this is an array type.  Raw value arrays are represented
+     * as {@link RawObject} instances.
+     *
+     * <p>If true is returned, the array component type is returned by {@link
+     * #getComponentType} and the number of array dimensions is returned by
+     * {@link #getDimensions}.</p>
+     *
+     * <p>If false is returned, then this is a complex type, an enum type (see
+     * {@link #isEnum}), or a simple type (see {@link #isSimple}).</p>
+     */
+    boolean isArray();
+
+    /**
+     * Returns the number of array dimensions, or zero if this is not an array
+     * type.
+     */
+    int getDimensions();
+
+    /**
+     * Returns the array component type, or null if this is not an array type.
+     */
+    RawType getComponentType();
+
+    /**
+     * Returns a map of field name to raw field for each non-static
+     * non-transient field declared in this class, or null if this is not a
+     * complex type (in other words, this is a simple type or an array type).
+     */
+    Map<String,RawField> getFields();
+
+    /**
+     * Returns the type of the superclass, or null if the superclass is Object
+     * or this is not a complex type (in other words, this is a simple type or
+     * an array type).
+     */
+    RawType getSuperType();
+
+    /**
+     * Returns the original model class metadata used to create this class, or
+     * null if this is not a model class.
+     */
+    ClassMetadata getClassMetadata();
+
+    /**
+     * Returns the original model entity metadata used to create this class, or
+     * null if this is not an entity class.
+     */
+    EntityMetadata getEntityMetadata();
+
+    /**
+     * Returns an XML representation of the raw type.
+     */
+    String toString();
+}
diff --git a/src/com/sleepycat/persist/raw/package.html b/src/com/sleepycat/persist/raw/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..adde2cb59105b62ded550d0aae815214c87c261b
--- /dev/null
+++ b/src/com/sleepycat/persist/raw/package.html
@@ -0,0 +1,6 @@
+<!-- $Id: package.html,v 1.2 2008/02/05 23:28:23 mark Exp $ -->
+<html>
+<body>
+Raw data access for general purpose tools and manual conversions.
+</body>
+</html>
diff --git a/src/com/sleepycat/util/ExceptionUnwrapper.java b/src/com/sleepycat/util/ExceptionUnwrapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..741d6dcf1cb8f6e7787e348872fbe90605a71a29
--- /dev/null
+++ b/src/com/sleepycat/util/ExceptionUnwrapper.java
@@ -0,0 +1,68 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionUnwrapper.java,v 1.18 2008/01/07 14:29:00 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+/**
+ * Unwraps nested exceptions by calling the {@link
+ * ExceptionWrapper#getCause()} method for exceptions that implement the
+ * {@link ExceptionWrapper} interface.  Does not currently support the Java 1.4
+ * <code>Throwable.getCause()</code> method.
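+ *
+ * <p>For example, a minimal sketch using a hypothetical wrapped cause:</p>
+ * <pre>
+ *  Exception cause = new java.io.IOException("disk full");
+ *  RuntimeException wrapped = new RuntimeExceptionWrapper(cause);
+ *  Exception unwrapped = ExceptionUnwrapper.unwrap(wrapped);
+ *  // unwrapped is the original IOException
+ * </pre>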
+ *
+ * @author Mark Hayes
+ */
+public class ExceptionUnwrapper {
+
+    /**
+     * Unwraps an Exception and returns the underlying Exception, or throws an
+     * Error if the underlying Throwable is an Error.
+     *
+     * @param e is the Exception to unwrap.
+     *
+     * @return the underlying Exception.
+     *
+     * @throws Error if the underlying Throwable is an Error.
+     *
+     * @throws IllegalArgumentException if the underlying Throwable is not an
+     * Exception or an Error.
+     */
+    public static Exception unwrap(Exception e) {
+
+        Throwable t = unwrapAny(e);
+        if (t instanceof Exception) {
+            return (Exception) t;
+        } else if (t instanceof Error) {
+            throw (Error) t;
+        } else {
+            throw new IllegalArgumentException("Not Exception or Error: " + t);
+        }
+    }
+
+    /**
+     * Unwraps an Exception and returns the underlying Throwable.
+     *
+     * @param e is the Exception to unwrap.
+     *
+     * @return the underlying Throwable.
+     */
+    public static Throwable unwrapAny(Throwable e) {
+
+        while (true) {
+            if (e instanceof ExceptionWrapper) {
+                Throwable e2 = ((ExceptionWrapper) e).getCause();
+                if (e2 == null) {
+                    return e;
+                } else {
+                    e = e2;
+                }
+            } else {
+                return e;
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/util/ExceptionWrapper.java b/src/com/sleepycat/util/ExceptionWrapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..620cb8f378dd7c93a9d18edb5f83c9083105de26
--- /dev/null
+++ b/src/com/sleepycat/util/ExceptionWrapper.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionWrapper.java,v 1.18 2008/01/07 14:29:00 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+/**
+ * Interface implemented by exceptions that can contain nested exceptions.
+ *
+ * @author Mark Hayes
+ */
+public interface ExceptionWrapper {
+
+    /**
+     * Returns the nested exception or null if none is present.
+     *
+     * @return the nested exception or null if none is present.
+     *
+     * @deprecated replaced by {@link #getCause}.
+     */
+    Throwable getDetail();
+
+    /**
+     * Returns the nested exception or null if none is present.
+     *
+     * <p>This method is intentionally defined to be the same signature as the
+     * <code>java.lang.Throwable.getCause</code> method in Java 1.4 and
+     * greater.  By defining this method to return a nested exception, the Java
+     * 1.4 runtime will print the nested stack trace.</p>
+     *
+     * @return the nested exception or null if none is present.
+     */
+    Throwable getCause();
+}
diff --git a/src/com/sleepycat/util/FastInputStream.java b/src/com/sleepycat/util/FastInputStream.java
new file mode 100644
index 0000000000000000000000000000000000000000..1170d9776acc586b34c0df4ead7bd19bda0808c4
--- /dev/null
+++ b/src/com/sleepycat/util/FastInputStream.java
@@ -0,0 +1,195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: FastInputStream.java,v 1.21 2008/06/10 02:52:17 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A replacement for ByteArrayInputStream that does not synchronize every
+ * byte read.
+ *
+ * <p>This class extends {@link InputStream} and its <code>read()</code>
+ * methods allow it to be used as a standard input stream.  In addition, it
+ * provides <code>readFast()</code> methods that are not declared to throw
+ * <code>IOException</code>.  <code>IOException</code> is never thrown by this
+ * class.</p>
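+ *
+ * <p>For example, a minimal sketch:</p>
+ * <pre>
+ *  byte[] data = { 1, 2, 3 };
+ *  FastInputStream in = new FastInputStream(data);
+ *  int first = in.readFast();     // returns 1; no IOException is declared
+ *  byte[] rest = new byte[2];
+ *  int count = in.readFast(rest); // copies {2, 3} and returns 2
+ * </pre>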
+ *
+ * @author Mark Hayes
+ */
+public class FastInputStream extends InputStream {
+
+    protected int len;
+    protected int off;
+    protected int mark;
+    protected byte[] buf;
+
+    /**
+     * Creates an input stream.
+     *
+     * @param buffer the data to read.
+     */
+    public FastInputStream(byte[] buffer) {
+
+        buf = buffer;
+        len = buffer.length;
+    }
+
+    /**
+     * Creates an input stream.
+     *
+     * @param buffer the data to read.
+     *
+     * @param offset the byte offset at which to begin reading.
+     *
+     * @param length the number of bytes to read.
+     */
+    public FastInputStream(byte[] buffer, int offset, int length) {
+
+        buf = buffer;
+        off = offset;
+        len = offset + length;
+    }
+
+    // --- begin ByteArrayInputStream compatible methods ---
+
+    @Override
+    public int available() {
+
+        return len - off;
+    }
+
+    @Override
+    public boolean markSupported() {
+
+        return true;
+    }
+
+    @Override
+    public void mark(int readLimit) {
+
+        mark = off;
+    }
+
+    @Override
+    public void reset() {
+
+        off = mark;
+    }
+
+    @Override
+    public long skip(long count) {
+
+        int myCount = (int) count;
+        if (myCount + off > len) {
+            myCount = len - off;
+        }
+        skipFast(myCount);
+        return myCount;
+    }
+
+    @Override
+    public int read() throws IOException {
+
+        return readFast();
+    }
+
+    @Override
+    public int read(byte[] toBuf) throws IOException {
+
+        return readFast(toBuf, 0, toBuf.length);
+    }
+
+    @Override
+    public int read(byte[] toBuf, int offset, int length) throws IOException {
+
+        return readFast(toBuf, offset, length);
+    }
+
+    // --- end ByteArrayInputStream compatible methods ---
+
+    /**
+     * Equivalent to <code>skip()</code> but takes an int parameter instead of a
+     * long, and does not check whether the count given is larger than the
+     * number of remaining bytes.
+     * @see #skip(long)
+     */
+    public final void skipFast(int count) {
+        off += count;
+    }
+
+    /**
+     * Equivalent to <code>read()</code> but does not throw
+     * <code>IOException</code>.
+     * @see #read()
+     */
+    public final int readFast() {
+
+        return (off < len) ? (buf[off++] & 0xff) : (-1);
+    }
+
+    /**
+     * Equivalent to <code>read(byte[])</code> but does not throw
+     * <code>IOException</code>.
+     * @see #read(byte[])
+     */
+    public final int readFast(byte[] toBuf) {
+
+        return readFast(toBuf, 0, toBuf.length);
+    }
+
+    /**
+     * Equivalent to <code>read(byte[],int,int)</code> but does not throw
+     * <code>IOException</code>.
+     * @see #read(byte[],int,int)
+     */
+    public final int readFast(byte[] toBuf, int offset, int length) {
+
+        int avail = len - off;
+        if (avail <= 0) {
+            return -1;
+        }
+        if (length > avail) {
+            length = avail;
+        }
+        System.arraycopy(buf, off, toBuf, offset, length);
+        off += length;
+        return length;
+    }
+
+    /**
+     * Returns the underlying data being read.
+     *
+     * @return the underlying data.
+     */
+    public final byte[] getBufferBytes() {
+
+        return buf;
+    }
+
+    /**
+     * Returns the offset at which data is being read from the buffer.
+     *
+     * @return the offset at which data is being read.
+     */
+    public final int getBufferOffset() {
+
+        return off;
+    }
+
+    /**
+     * Returns the end of the buffer being read.
+     *
+     * @return the end of the buffer.
+     */
+    public final int getBufferLength() {
+
+        return len;
+    }
+}
diff --git a/src/com/sleepycat/util/FastOutputStream.java b/src/com/sleepycat/util/FastOutputStream.java
new file mode 100644
index 0000000000000000000000000000000000000000..fb8bf9733fe6d4ad20a9aa4c33966fb54f72ad98
--- /dev/null
+++ b/src/com/sleepycat/util/FastOutputStream.java
@@ -0,0 +1,283 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: FastOutputStream.java,v 1.24 2008/06/10 02:52:17 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+
+/**
+ * A replacement for ByteArrayOutputStream that does not synchronize every
+ * byte written.
+ *
+ * <p>This class extends {@link OutputStream} and its <code>write()</code>
+ * methods allow it to be used as a standard output stream.  In addition, it
+ * provides <code>writeFast()</code> methods that are not declared to throw
+ * <code>IOException</code>.  <code>IOException</code> is never thrown by this
+ * class.</p>
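+ *
+ * <p>For example, a minimal sketch:</p>
+ * <pre>
+ *  FastOutputStream out = new FastOutputStream();
+ *  out.writeFast(1);                      // no IOException is declared
+ *  out.writeFast(new byte[] { 2, 3 });
+ *  byte[] copy = out.toByteArray();       // {1, 2, 3}
+ * </pre>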
+ *
+ * @author Mark Hayes
+ */
+public class FastOutputStream extends OutputStream {
+
+    /**
+     * The default initial size of the buffer if no initialSize parameter is
+     * specified.  This constant is 100 bytes.
+     */
+    public static final int DEFAULT_INIT_SIZE = 100;
+
+    /**
+     * The default amount that the buffer is increased when it is full.  This
+     * constant is zero, which means to double the current buffer size.
+     */
+    public static final int DEFAULT_BUMP_SIZE = 0;
+
+    private int len;
+    private int bumpLen;
+    private byte[] buf;
+
+    /*
+     * We can return the same byte[] for 0 length arrays.
+     */
+    private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];
+
+    /**
+     * Creates an output stream with default sizes.
+     */
+    public FastOutputStream() {
+
+        initBuffer(DEFAULT_INIT_SIZE, DEFAULT_BUMP_SIZE);
+    }
+
+    /**
+     * Creates an output stream with a default bump size and a given initial
+     * size.
+     *
+     * @param initialSize the initial size of the buffer.
+     */
+    public FastOutputStream(int initialSize) {
+
+        initBuffer(initialSize, DEFAULT_BUMP_SIZE);
+    }
+
+    /**
+     * Creates an output stream with a given bump size and initial size.
+     *
+     * @param initialSize the initial size of the buffer.
+     *
+     * @param bumpSize the amount to increment the buffer.
+     */
+    public FastOutputStream(int initialSize, int bumpSize) {
+
+        initBuffer(initialSize, bumpSize);
+    }
+
+    /**
+     * Creates an output stream with a given initial buffer and a default
+     * bump size.
+     *
+     * @param buffer the initial buffer; will be owned by this object.
+     */
+    public FastOutputStream(byte[] buffer) {
+
+        buf = buffer;
+        bumpLen = DEFAULT_BUMP_SIZE;
+    }
+
+    /**
+     * Creates an output stream with a given initial buffer and a given
+     * bump size.
+     *
+     * @param buffer the initial buffer; will be owned by this object.
+     *
+     * @param bumpSize the amount to increment the buffer.  If zero (the
+     * default), the current buffer size will be doubled when the buffer is
+     * full.
+     */
+    public FastOutputStream(byte[] buffer, int bumpSize) {
+
+        buf = buffer;
+        bumpLen = bumpSize;
+    }
+
+    private void initBuffer(int bufferSize, int bumpLen) {
+        buf = new byte[bufferSize];
+        this.bumpLen = bumpLen;
+    }
+
+    // --- begin ByteArrayOutputStream compatible methods ---
+
+    public int size() {
+
+        return len;
+    }
+
+    public void reset() {
+
+        len = 0;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+
+        writeFast(b);
+    }
+
+    @Override
+    public void write(byte[] fromBuf) throws IOException {
+
+        writeFast(fromBuf);
+    }
+
+    @Override
+    public void write(byte[] fromBuf, int offset, int length)
+        throws IOException {
+
+        writeFast(fromBuf, offset, length);
+    }
+
+    public void writeTo(OutputStream out) throws IOException {
+
+        out.write(buf, 0, len);
+    }
+
+    @Override
+    public String toString() {
+
+        return new String(buf, 0, len);
+    }
+
+    public String toString(String encoding)
+        throws UnsupportedEncodingException {
+
+        return new String(buf, 0, len, encoding);
+    }
+
+    public byte[] toByteArray() {
+
+        if (len == 0) {
+            return ZERO_LENGTH_BYTE_ARRAY;
+        } else {
+            byte[] toBuf = new byte[len];
+            System.arraycopy(buf, 0, toBuf, 0, len);
+
+            return toBuf;
+        }
+    }
+
+    // --- end ByteArrayOutputStream compatible methods ---
+
+    /**
+     * Equivalent to <code>write(int)</code> but does not throw
+     * <code>IOException</code>.
+     * @see #write(int)
+     */
+    public final void writeFast(int b) {
+
+        if (len + 1 > buf.length)
+            bump(1);
+
+        buf[len++] = (byte) b;
+    }
+
+    /**
+     * Equivalent to <code>write(byte[])</code> but does not throw
+     * <code>IOException</code>.
+     * @see #write(byte[])
+     */
+    public final void writeFast(byte[] fromBuf) {
+
+        int needed = len + fromBuf.length - buf.length;
+        if (needed > 0)
+            bump(needed);
+
+        System.arraycopy(fromBuf, 0, buf, len, fromBuf.length);
+        len += fromBuf.length;
+    }
+
+    /**
+     * Equivalent to <code>write(byte[],int,int)</code> but does not throw
+     * <code>IOException</code>.
+     * @see #write(byte[],int,int)
+     */
+    public final void writeFast(byte[] fromBuf, int offset, int length) {
+
+        int needed = len + length - buf.length;
+        if (needed > 0)
+            bump(needed);
+
+        System.arraycopy(fromBuf, offset, buf, len, length);
+        len += length;
+    }
+
+    /**
+     * Returns the buffer owned by this object.
+     *
+     * @return the buffer.
+     */
+    public byte[] getBufferBytes() {
+
+        return buf;
+    }
+
+    /**
+     * Returns the offset of the internal buffer.
+     *
+     * @return always zero currently.
+     */
+    public int getBufferOffset() {
+
+        return 0;
+    }
+
+    /**
+     * Returns the length used in the internal buffer, i.e., the offset at
+     * which data will be written next.
+     *
+     * @return the buffer length.
+     */
+    public int getBufferLength() {
+
+        return len;
+    }
+
+    /**
+     * Ensure that at least the given number of bytes are available in the
+     * internal buffer.
+     *
+     * @param sizeNeeded the number of bytes desired.
+     */
+    public void makeSpace(int sizeNeeded) {
+
+        int needed = len + sizeNeeded - buf.length;
+        if (needed > 0)
+            bump(needed);
+    }
+
+    /**
+     * Skip the given number of bytes in the buffer.
+     *
+     * @param sizeAdded number of bytes to skip.
+     */
+    public void addSize(int sizeAdded) {
+
+        len += sizeAdded;
+    }
+
+    private void bump(int needed) {
+
+        /* Double the buffer if the bumpLen is zero. */
+        int bump = (bumpLen > 0) ? bumpLen : buf.length;
+
+        byte[] toBuf = new byte[buf.length + needed + bump];
+
+        System.arraycopy(buf, 0, toBuf, 0, len);
+
+        buf = toBuf;
+    }
+}
diff --git a/src/com/sleepycat/util/IOExceptionWrapper.java b/src/com/sleepycat/util/IOExceptionWrapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..e37b39e05dc5ec45c13e319d346626deb5bb823b
--- /dev/null
+++ b/src/com/sleepycat/util/IOExceptionWrapper.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: IOExceptionWrapper.java,v 1.20 2008/06/10 02:52:17 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+import java.io.IOException;
+
+/**
+ * An IOException that can contain nested exceptions.
+ *
+ * @author Mark Hayes
+ */
+public class IOExceptionWrapper
+    extends IOException implements ExceptionWrapper {
+
+    private Throwable e;
+
+    public IOExceptionWrapper(Throwable e) {
+
+        super(e.getMessage());
+        this.e = e;
+    }
+
+    /**
+     * @deprecated replaced by {@link #getCause}.
+     */
+    public Throwable getDetail() {
+
+        return e;
+    }
+
+    @Override
+    public Throwable getCause() {
+
+        return e;
+    }
+}
diff --git a/src/com/sleepycat/util/PackedInteger.java b/src/com/sleepycat/util/PackedInteger.java
new file mode 100644
index 0000000000000000000000000000000000000000..df8ffe67b5c37a1c1ab5422c63a11c9f6d5322af
--- /dev/null
+++ b/src/com/sleepycat/util/PackedInteger.java
@@ -0,0 +1,382 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: PackedInteger.java,v 1.8 2008/01/07 14:29:00 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+/**
+ * Static methods for reading and writing packed integers.
+ *
+ * <p>Note that packed integers are not sorted naturally for a byte-by-byte
+ * comparison because they have a preceding length and are little endian;
+ * therefore, they are typically not used for keys.</p>
+ *
+ * <p>Values in the inclusive range [-119,119] are stored in a single byte.
+ * For values outside that range, the first byte stores the sign and the number
+ * of additional bytes.  The additional bytes store (abs(value) - 119) as an
+ * unsigned little endian integer.</p>
+ *
+ * <p>To read and write packed integer values, call {@link #readInt} and {@link
+ * #writeInt} or for long values {@link #readLong} and {@link #writeLong}.  To
+ * get the length of a packed integer without reading it, call {@link
+ * #getReadIntLength} or {@link #getReadLongLength}.  To get the length of an
+ * unpacked integer without writing it, call {@link #getWriteIntLength} or
+ * {@link #getWriteLongLength}.</p>
+ *
+ * <p>Because the same packed format is used for int and long values, stored
+ * int values may be expanded to long values without introducing a format
+ * incompatibility.  You can treat previously stored packed int values as long
+ * values by calling {@link #readLong} and {@link #getReadLongLength}.</p>
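+ *
+ * <p>For example, a minimal sketch of a write/read round trip:</p>
+ * <pre>
+ *  byte[] buf = new byte[PackedInteger.MAX_LONG_LENGTH];
+ *  int nextOffset = PackedInteger.writeLong(buf, 0, 123456789L);
+ *  // nextOffset equals PackedInteger.getWriteLongLength(123456789L)
+ *  long value = PackedInteger.readLong(buf, 0);   // 123456789L
+ * </pre>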
+ *
+ * @author Mark Hayes
+ */
+public class PackedInteger {
+
+    /**
+     * The maximum number of bytes needed to store an int value (5).
+     */
+    public static final int MAX_LENGTH = 5;
+
+    /**
+     * The maximum number of bytes needed to store a long value (9).
+     */
+    public static final int MAX_LONG_LENGTH = 9;
+
+    /**
+     * Reads a packed integer at the given buffer offset and returns it.
+     *
+     * @param buf the buffer to read from.
+     *
+     * @param off the offset in the buffer at which to start reading.
+     *
+     * @return the integer that was read.
+     */
+    public static int readInt(byte[] buf, int off) {
+
+        boolean negative;
+        int byteLen;
+
+        int b1 = buf[off++];
+        if (b1 < -119) {
+            negative = true;
+            byteLen = -b1 - 119;
+        } else if (b1 > 119) {
+            negative = false;
+            byteLen = b1 - 119;
+        } else {
+            return b1;
+        }
+
+        int value = buf[off++] & 0xFF;
+        if (byteLen > 1) {
+            value |= (buf[off++] & 0xFF) << 8;
+            if (byteLen > 2) {
+                value |= (buf[off++] & 0xFF) << 16;
+                if (byteLen > 3) {
+                    value |= (buf[off++] & 0xFF) << 24;
+                }
+            }
+        }
+
+        return negative ? (-value - 119) : (value + 119);
+    }
+
+    /**
+     * Reads a packed long integer at the given buffer offset and returns it.
+     *
+     * @param buf the buffer to read from.
+     *
+     * @param off the offset in the buffer at which to start reading.
+     *
+     * @return the long integer that was read.
+     */
+    public static long readLong(byte[] buf, int off) {
+
+        boolean negative;
+        int byteLen;
+
+        int b1 = buf[off++];
+        if (b1 < -119) {
+            negative = true;
+            byteLen = -b1 - 119;
+        } else if (b1 > 119) {
+            negative = false;
+            byteLen = b1 - 119;
+        } else {
+            return b1;
+        }
+
+        long value = buf[off++] & 0xFFL;
+        if (byteLen > 1) {
+            value |= (buf[off++] & 0xFFL) << 8;
+            if (byteLen > 2) {
+                value |= (buf[off++] & 0xFFL) << 16;
+                if (byteLen > 3) {
+                    value |= (buf[off++] & 0xFFL) << 24;
+                    if (byteLen > 4) {
+                        value |= (buf[off++] & 0xFFL) << 32;
+                        if (byteLen > 5) {
+                            value |= (buf[off++] & 0xFFL) << 40;
+                            if (byteLen > 6) {
+                                value |= (buf[off++] & 0xFFL) << 48;
+                                if (byteLen > 7) {
+                                    value |= (buf[off++] & 0xFFL) << 56;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        return negative ? (-value - 119) : (value + 119);
+    }
+
+    /**
+     * Returns the number of bytes that would be read by {@link #readInt}.
+     *
+     * <p>Because the length is stored in the first byte, this method may be
+     * called with only the first byte of the packed integer in the given
+     * buffer.  This method only accesses one byte at the given offset.</p>
+     *
+     * @param buf the buffer to read from.
+     *
+     * @param off the offset in the buffer at which to start reading.
+     *
+     * @return the number of bytes that would be read.
+     */
+    public static int getReadIntLength(byte[] buf, int off) {
+
+        int b1 = buf[off];
+        if (b1 < -119) {
+            return -b1 - 119 + 1;
+        } else if (b1 > 119) {
+            return b1 - 119 + 1;
+        } else {
+            return 1;
+        }
+    }
+
+    /**
+     * Returns the number of bytes that would be read by {@link #readLong}.
+     *
+     * <p>Because the length is stored in the first byte, this method may be
+     * called with only the first byte of the packed integer in the given
+     * buffer.  This method only accesses one byte at the given offset.</p>
+     *
+     * @param buf the buffer to read from.
+     *
+     * @param off the offset in the buffer at which to start reading.
+     *
+     * @return the number of bytes that would be read.
+     */
+    public static int getReadLongLength(byte[] buf, int off) {
+
+        /* The length is stored in the same way for int and long. */
+        return getReadIntLength(buf, off);
+    }
+
+    /**
+     * Writes a packed integer starting at the given buffer offset and returns
+     * the next offset to be written.
+     *
+     * @param buf the buffer to write to.
+     *
+     * @param offset the offset in the buffer at which to start writing.
+     *
+     * @param value the integer to be written.
+     *
+     * @return the offset past the bytes written.
+     */
+    public static int writeInt(byte[] buf, int offset, int value) {
+
+        int byte1Off = offset;
+        boolean negative;
+
+        if (value < -119) {
+            negative = true;
+            value = -value - 119;
+        } else if (value > 119) {
+            negative = false;
+            value = value - 119;
+        } else {
+            buf[offset++] = (byte) value;
+            return offset;
+        }
+        offset++;
+
+        buf[offset++] = (byte) value;
+        if ((value & 0xFFFFFF00) == 0) {
+            buf[byte1Off] = negative ? (byte) -120 : (byte) 120;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 8);
+        if ((value & 0xFFFF0000) == 0) {
+            buf[byte1Off] = negative ? (byte) -121 : (byte) 121;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 16);
+        if ((value & 0xFF000000) == 0) {
+            buf[byte1Off] = negative ? (byte) -122 : (byte) 122;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 24);
+        buf[byte1Off] = negative ? (byte) -123 : (byte) 123;
+        return offset;
+    }
+
+    /**
+     * Writes a packed long integer starting at the given buffer offset and
+     * returns the next offset to be written.
+     *
+     * @param buf the buffer to write to.
+     *
+     * @param offset the offset in the buffer at which to start writing.
+     *
+     * @param value the long integer to be written.
+     *
+     * @return the offset past the bytes written.
+     */
+    public static int writeLong(byte[] buf, int offset, long value) {
+
+        int byte1Off = offset;
+        boolean negative;
+
+        if (value < -119) {
+            negative = true;
+            value = -value - 119;
+        } else if (value > 119) {
+            negative = false;
+            value = value - 119;
+        } else {
+            buf[offset++] = (byte) value;
+            return offset;
+        }
+        offset++;
+
+        buf[offset++] = (byte) value;
+        if ((value & 0xFFFFFFFFFFFFFF00L) == 0) {
+            buf[byte1Off] = negative ? (byte) -120 : (byte) 120;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 8);
+        if ((value & 0xFFFFFFFFFFFF0000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -121 : (byte) 121;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 16);
+        if ((value & 0xFFFFFFFFFF000000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -122 : (byte) 122;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 24);
+        if ((value & 0xFFFFFFFF00000000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -123 : (byte) 123;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 32);
+        if ((value & 0xFFFFFF0000000000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -124 : (byte) 124;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 40);
+        if ((value & 0xFFFF000000000000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -125 : (byte) 125;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 48);
+        if ((value & 0xFF00000000000000L) == 0) {
+            buf[byte1Off] = negative ? (byte) -126 : (byte) 126;
+            return offset;
+        }
+
+        buf[offset++] = (byte) (value >>> 56);
+        buf[byte1Off] = negative ? (byte) -127 : (byte) 127;
+        return offset;
+    }
+
+    /**
+     * Returns the number of bytes that would be written by {@link #writeInt}.
+     *
+     * @param value the integer to be written.
+     *
+     * @return the number of bytes that would be used to write the given
+     * integer.
+     */
+    public static int getWriteIntLength(int value) {
+
+        if (value < -119) {
+            value = -value - 119;
+        } else if (value > 119) {
+            value = value - 119;
+        } else {
+            return 1;
+        }
+
+        if ((value & 0xFFFFFF00) == 0) {
+            return 2;
+        }
+        if ((value & 0xFFFF0000) == 0) {
+            return 3;
+        }
+        if ((value & 0xFF000000) == 0) {
+            return 4;
+        }
+        return 5;
+    }
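+
+    /*
+     * Worked boundary cases, checkable against the code above:
+     *
+     *   getWriteIntLength(119) == 1  // unpacked, single byte
+     *   getWriteIntLength(374) == 2  // 374 - 119 == 255 fits one payload byte
+     *   getWriteIntLength(375) == 3  // 375 - 119 == 256 needs two payload bytes
+     */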
+
+    /**
+     * Returns the number of bytes that would be written by {@link #writeLong}.
+     *
+     * @param value the long integer to be written.
+     *
+     * @return the number of bytes that would be used to write the given long
+     * integer.
+     */
+    public static int getWriteLongLength(long value) {
+
+        if (value < -119) {
+            value = -value - 119;
+        } else if (value > 119) {
+            value = value - 119;
+        } else {
+            return 1;
+        }
+
+        if ((value & 0xFFFFFFFFFFFFFF00L) == 0) {
+            return 2;
+        }
+        if ((value & 0xFFFFFFFFFFFF0000L) == 0) {
+            return 3;
+        }
+        if ((value & 0xFFFFFFFFFF000000L) == 0) {
+            return 4;
+        }
+        if ((value & 0xFFFFFFFF00000000L) == 0) {
+            return 5;
+        }
+        if ((value & 0xFFFFFF0000000000L) == 0) {
+            return 6;
+        }
+        if ((value & 0xFFFF000000000000L) == 0) {
+            return 7;
+        }
+        if ((value & 0xFF00000000000000L) == 0) {
+            return 8;
+        }
+        return 9;
+    }
+}
diff --git a/src/com/sleepycat/util/RuntimeExceptionWrapper.java b/src/com/sleepycat/util/RuntimeExceptionWrapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2b791fc627d8075c18b266b63ff04b30373c55f
--- /dev/null
+++ b/src/com/sleepycat/util/RuntimeExceptionWrapper.java
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: RuntimeExceptionWrapper.java,v 1.19 2008/06/10 02:52:17 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+/**
+ * A RuntimeException that can contain nested exceptions.
+ *
+ * @author Mark Hayes
+ */
+public class RuntimeExceptionWrapper extends RuntimeException
+    implements ExceptionWrapper {
+
+    private Throwable e;
+
+    public RuntimeExceptionWrapper(Throwable e) {
+
+        super(e.getMessage());
+        this.e = e;
+    }
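+
+    /*
+     * Typical wrapping sketch: rethrow a checked exception across an API
+     * that only permits runtime exceptions, preserving it as the cause.
+     *
+     *   try {
+     *       in.close();
+     *   } catch (java.io.IOException e) {
+     *       throw new RuntimeExceptionWrapper(e);
+     *   }
+     */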
+
+    /**
+     * @deprecated replaced by {@link #getCause}.
+     */
+    public Throwable getDetail() {
+
+        return e;
+    }
+
+    @Override
+    public Throwable getCause() {
+
+        return e;
+    }
+}
diff --git a/src/com/sleepycat/util/UtfOps.java b/src/com/sleepycat/util/UtfOps.java
new file mode 100644
index 0000000000000000000000000000000000000000..26bcafbd8d2d1156b7bc16dc241d373457000e33
--- /dev/null
+++ b/src/com/sleepycat/util/UtfOps.java
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: UtfOps.java,v 1.19 2008/01/07 14:29:00 cwl Exp $
+ */
+
+package com.sleepycat.util;
+
+/**
+ * UTF operations with more flexibility than is provided by DataInput and
+ * DataOutput.
+ *
+ * @author Mark Hayes
+ */
+public class UtfOps {
+
+    private static final byte[] EMPTY_BYTES = {};
+    private static final String EMPTY_STRING = "";
+
+    /**
+     * Returns the byte length of a null terminated UTF string, not including
+     * the terminator.
+     *
+     * @param bytes the data containing the UTF string.
+     *
+     * @param offset the beginning of the string to measure.
+     *
+     * @throws IndexOutOfBoundsException if no zero terminator is found.
+     *
+     * @return the number of bytes.
+     */
+    public static int getZeroTerminatedByteLength(byte[] bytes, int offset)
+        throws IndexOutOfBoundsException {
+
+        int len = 0;
+        while (bytes[offset++] != 0) {
+            len++;
+        }
+        return len;
+    }
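+
+    /*
+     * Example: for bytes {0x61, 0x62, 0x00} and offset 0 this returns 2,
+     * the length of "ab" without its zero terminator.
+     */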
+
+    /**
+     * Returns the byte length of the UTF string that would be created by
+     * converting the given characters to UTF.
+     *
+     * @param chars the characters that would be converted.
+     *
+     * @return the byte length of the equivalent UTF data.
+     */
+    public static int getByteLength(char[] chars) {
+
+        return getByteLength(chars, 0, chars.length);
+    }
+
+    /**
+     * Returns the byte length of the UTF string that would be created by
+     * converting the given characters to UTF.
+     *
+     * @param chars the characters that would be converted.
+     *
+     * @param offset the first character to be converted.
+     *
+     * @param length the number of characters to be converted.
+     *
+     * @return the byte length of the equivalent UTF data.
+     */
+    public static int getByteLength(char[] chars, int offset, int length) {
+
+        int len = 0;
+        length += offset;
+        for (int i = offset; i < length; i++) {
+            int c = chars[i];
+            if ((c >= 0x0001) && (c <= 0x007F)) {
+                len++;
+            } else if (c > 0x07FF) {
+                len += 3;
+            } else {
+                len += 2;
+            }
+        }
+        return len;
+    }
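+
+    /*
+     * Worked example of the per-character costs: 'A' (U+0041) encodes in
+     * 1 byte, '\u00E9' in 2, and '\u20AC' in 3, so
+     * getByteLength(new char[] {'A', '\u00E9', '\u20AC'}) == 6.
+     */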
+
+    /**
+     * Returns the number of characters represented by the given UTF string.
+     *
+     * @param bytes the UTF string.
+     *
+     * @return the number of characters.
+     *
+     * @throws IndexOutOfBoundsException if a UTF character sequence at the end
+     * of the data is not complete.
+     *
+     * @throws IllegalArgumentException if an illegal UTF sequence is
+     * encountered.
+     */
+    public static int getCharLength(byte[] bytes)
+        throws IllegalArgumentException, IndexOutOfBoundsException {
+
+        return getCharLength(bytes, 0, bytes.length);
+    }
+
+    /**
+     * Returns the number of characters represented by the given UTF string.
+     *
+     * @param bytes the data containing the UTF string.
+     *
+     * @param offset the first byte to be converted.
+     *
+     * @param length the number of bytes to be converted.
+     *
+     * @return the number of characters.
+     *
+     * @throws IndexOutOfBoundsException if a UTF character sequence at the end
+     * of the data is not complete.
+     *
+     * @throws IllegalArgumentException if an illegal UTF sequence is
+     * encountered.
+     */
+    public static int getCharLength(byte[] bytes, int offset, int length)
+        throws IllegalArgumentException, IndexOutOfBoundsException {
+
+        int charCount = 0;
+        length += offset;
+        while (offset < length) {
+            switch ((bytes[offset] & 0xff) >> 4) {
+            case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+                offset++;
+                break;
+            case 12: case 13:
+                offset += 2;
+                break;
+            case 14:
+                offset += 3;
+                break;
+            default:
+                throw new IllegalArgumentException();
+            }
+            charCount++;
+        }
+        return charCount;
+    }
+
+    /**
+     * Converts byte arrays into character arrays.
+     *
+     * @param bytes the source byte data to convert
+     *
+     * @param byteOffset the offset into the byte array at which
+     * to start the conversion
+     *
+     * @param chars the destination array
+     *
+     * @param charOffset the offset into chars at which to begin the copy
+     *
+     * @param len the amount of information to copy into chars
+     *
+     * @param isByteLen if true then len is a measure of bytes, otherwise
+     * len is a measure of characters
+     *
+     * @return the byte offset after the last byte converted.
+     *
+     * @throws IndexOutOfBoundsException if a UTF character sequence at the end
+     * of the data is not complete.
+     *
+     * @throws IllegalArgumentException if an illegal UTF sequence is
+     * encountered.
+     */
+    public static int bytesToChars(byte[] bytes, int byteOffset,
+                                   char[] chars, int charOffset,
+                                   int len, boolean isByteLen)
+        throws IllegalArgumentException, IndexOutOfBoundsException {
+
+        int char1, char2, char3;
+        len += isByteLen ? byteOffset : charOffset;
+        while ((isByteLen ? byteOffset : charOffset) < len) {
+            char1 = bytes[byteOffset++] & 0xff;
+            switch ((char1 & 0xff) >> 4) {
+            case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+                chars[charOffset++] = (char) char1;
+                break;
+            case 12: case 13:
+                char2 = bytes[byteOffset++];
+                if ((char2 & 0xC0) != 0x80) {
+                    throw new IllegalArgumentException();
+                }
+                chars[charOffset++] = (char)(((char1 & 0x1F) << 6) |
+                                             (char2 & 0x3F));
+                break;
+            case 14:
+                char2 = bytes[byteOffset++];
+                char3 = bytes[byteOffset++];
+                if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80))
+                    throw new IllegalArgumentException();
+                chars[charOffset++] = (char)(((char1 & 0x0F) << 12) |
+                                             ((char2 & 0x3F) << 6)  |
+                                             ((char3 & 0x3F) << 0));
+                break;
+            default:
+                throw new IllegalArgumentException();
+            }
+        }
+        return byteOffset;
+    }
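+
+    /*
+     * Decoding sketch: {0x63, (byte) 0xC3, (byte) 0xA9} holds 'c' followed
+     * by the two-byte sequence for '\u00E9'.
+     *
+     *   byte[] bytes = {0x63, (byte) 0xC3, (byte) 0xA9};
+     *   char[] out = new char[2];
+     *   int end = bytesToChars(bytes, 0, out, 0, 3, true);
+     *   // end == 3 and out now holds {'c', '\u00E9'}
+     */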
+
+    /**
+     * Converts character arrays into byte arrays.
+     *
+     * @param chars the source character data to convert
+     *
+     * @param charOffset the offset into the character array at which
+     * to start the conversion
+     *
+     * @param bytes the destination array
+     *
+     * @param byteOffset the offset into bytes at which to begin the copy
+     *
+     * @param charLength the length of characters to copy into bytes
+     */
+    public static void charsToBytes(char[] chars, int charOffset,
+                                    byte[] bytes, int byteOffset,
+                                    int charLength) {
+        charLength += charOffset;
+        for (int i = charOffset; i < charLength; i++) {
+            int c = chars[i];
+            if ((c >= 0x0001) && (c <= 0x007F)) {
+                bytes[byteOffset++] = (byte) c;
+            } else if (c > 0x07FF) {
+                bytes[byteOffset++] = (byte) (0xE0 | ((c >> 12) & 0x0F));
+                bytes[byteOffset++] = (byte) (0x80 | ((c >>  6) & 0x3F));
+                bytes[byteOffset++] = (byte) (0x80 | ((c >>  0) & 0x3F));
+            } else {
+                bytes[byteOffset++] = (byte) (0xC0 | ((c >>  6) & 0x1F));
+                bytes[byteOffset++] = (byte) (0x80 | ((c >>  0) & 0x3F));
+            }
+        }
+    }
+
+    /**
+     * Converts byte arrays into strings.
+     *
+     * @param bytes the source byte data to convert
+     *
+     * @param offset the offset into the byte array at which
+     * to start the conversion
+     *
+     * @param length the number of bytes to be converted.
+     *
+     * @return the string.
+     *
+     * @throws IndexOutOfBoundsException if a UTF character sequence at the end
+     * of the data is not complete.
+     *
+     * @throws IllegalArgumentException if an illegal UTF sequence is
+     * encountered.
+     */
+    public static String bytesToString(byte[] bytes, int offset, int length)
+        throws IllegalArgumentException, IndexOutOfBoundsException {
+
+        if (length == 0) return EMPTY_STRING;
+        int charLen = UtfOps.getCharLength(bytes, offset, length);
+        char[] chars = new char[charLen];
+        UtfOps.bytesToChars(bytes, offset, chars, 0, length, true);
+        return new String(chars, 0, charLen);
+    }
+
+    /**
+     * Converts strings to byte arrays.
+     *
+     * @param string the string to convert.
+     *
+     * @return the UTF byte array.
+     */
+    public static byte[] stringToBytes(String string) {
+
+        if (string.length() == 0) return EMPTY_BYTES;
+        char[] chars = string.toCharArray();
+        byte[] bytes = new byte[UtfOps.getByteLength(chars)];
+        UtfOps.charsToBytes(chars, 0, bytes, 0, chars.length);
+        return bytes;
+    }
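+
+    /*
+     * Round-trip sketch: stringToBytes and bytesToString are inverses.
+     *
+     *   byte[] utf = UtfOps.stringToBytes("caf\u00E9");      // 5 bytes
+     *   String s = UtfOps.bytesToString(utf, 0, utf.length); // "caf\u00E9"
+     */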
+}
diff --git a/src/com/sleepycat/util/keyrange/KeyRange.java b/src/com/sleepycat/util/keyrange/KeyRange.java
new file mode 100644
index 0000000000000000000000000000000000000000..affbca52a440f581d9a794b7a3335c1f100d4c65
--- /dev/null
+++ b/src/com/sleepycat/util/keyrange/KeyRange.java
@@ -0,0 +1,351 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyRange.java,v 1.9.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.util.keyrange;
+
+import java.util.Comparator;
+
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Encapsulates a key range for use with a RangeCursor.
+ */
+public class KeyRange {
+
+    /*
+     * We can return the same shared byte[] for zero-length arrays.
+     */
+    public static final byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];
+
+    Comparator<byte[]> comparator;
+    DatabaseEntry beginKey;
+    DatabaseEntry endKey;
+    boolean singleKey;
+    boolean beginInclusive;
+    boolean endInclusive;
+
+    /**
+     * Creates an unconstrained key range.
+     */
+    public KeyRange(Comparator<byte[]> comparator) {
+        this.comparator = comparator;
+    }
+
+    /**
+     * Creates a range for a single key.
+     */
+    public KeyRange subRange(DatabaseEntry key)
+        throws KeyRangeException {
+
+        if (!check(key)) {
+            throw new KeyRangeException("singleKey out of range");
+        }
+        KeyRange range = new KeyRange(comparator);
+        range.beginKey = key;
+        range.endKey = key;
+        range.beginInclusive = true;
+        range.endInclusive = true;
+        range.singleKey = true;
+        return range;
+    }
+
+    /**
+     * Creates a range that is the intersection of this range and the given
+     * range parameters.
+     */
+    public KeyRange subRange(DatabaseEntry beginKey, boolean beginInclusive,
+                             DatabaseEntry endKey, boolean endInclusive)
+        throws KeyRangeException {
+
+        if (beginKey == null) {
+            beginKey = this.beginKey;
+            beginInclusive = this.beginInclusive;
+        } else if (!check(beginKey, beginInclusive)) {
+            throw new KeyRangeException("beginKey out of range");
+        }
+        if (endKey == null) {
+            endKey = this.endKey;
+            endInclusive = this.endInclusive;
+        } else if (!check(endKey, endInclusive)) {
+            throw new KeyRangeException("endKey out of range");
+        }
+        KeyRange range = new KeyRange(comparator);
+        range.beginKey = beginKey;
+        range.endKey = endKey;
+        range.beginInclusive = beginInclusive;
+        range.endInclusive = endInclusive;
+        return range;
+    }
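+
+    /*
+     * Usage sketch (hypothetical entries): narrowing an unconstrained range
+     * to [begin, end), i.e. inclusive begin and exclusive end.
+     *
+     *   KeyRange all = new KeyRange(null);  // default unsigned-byte ordering
+     *   KeyRange sub = all.subRange(beginEntry, true, endEntry, false);
+     */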
+
+    /**
+     * Returns whether this is a single-key range.
+     */
+    public final boolean isSingleKey() {
+        return singleKey;
+    }
+
+    /**
+     * Returns the key of a single-key range, or null if not a single-key
+     * range.
+     */
+    public final DatabaseEntry getSingleKey() {
+
+        return singleKey ? beginKey : null;
+    }
+
+    /**
+     * Returns whether this range has a begin or end bound.
+     */
+    public final boolean hasBound() {
+
+        return endKey != null || beginKey != null;
+    }
+
+    /**
+     * Formats this range as a string for debugging.
+     */
+    @Override
+    public String toString() {
+
+        return "[KeyRange " + beginKey + ' ' + beginInclusive +
+                              endKey + ' ' + endInclusive +
+                              (singleKey ? " single" : "");
+    }
+
+    /**
+     * Returns whether a given key is within range.
+     */
+    public boolean check(DatabaseEntry key) {
+
+        if (singleKey) {
+            return (compare(key, beginKey) == 0);
+        } else {
+            return checkBegin(key, true) && checkEnd(key, true);
+        }
+    }
+
+    /**
+     * Returns whether a given key is within range.
+     */
+    public boolean check(DatabaseEntry key, boolean inclusive) {
+
+        if (singleKey) {
+            return (compare(key, beginKey) == 0);
+        } else {
+            return checkBegin(key, inclusive) && checkEnd(key, inclusive);
+        }
+    }
+
+    /**
+     * Returns whether the given key is within range with respect to the
+     * beginning of the range.
+     *
+     * <p>The inclusive parameter should be true for checking a key read from
+     * the database; this will require that the key is within range.  When
+     * inclusive=false the key is allowed to be equal to the beginKey for the
+     * range; this is used for checking a new exclusive bound of a
+     * sub-range.</p>
+     *
+     * <p>Note that when inclusive=false and beginInclusive=true our check is
+     * not exactly correct because in theory we should allow the key to be "one
+     * less" than the existing bound; however, checking for "one less"  is
+     * impossible so we do the best we can and test the bounds
+     * conservatively.</p>
+     */
+    public boolean checkBegin(DatabaseEntry key, boolean inclusive) {
+
+        if (beginKey == null) {
+            return true;
+        } else if (!beginInclusive && inclusive) {
+            return compare(key, beginKey) > 0;
+        } else {
+            return compare(key, beginKey) >= 0;
+        }
+    }
+
+    /**
+     * Returns whether the given key is within range with respect to the
+     * end of the range.  See checkBegin for details.
+     */
+    public boolean checkEnd(DatabaseEntry key, boolean inclusive) {
+
+        if (endKey == null) {
+            return true;
+        } else if (!endInclusive && inclusive) {
+            return compare(key, endKey) < 0;
+        } else {
+            return compare(key, endKey) <= 0;
+        }
+    }
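+
+    /*
+     * Boundary sketch: for a range with beginKey == K and beginInclusive ==
+     * false, checkBegin(K, true) is false because a key read from the
+     * database must lie strictly past an exclusive bound, while
+     * checkBegin(K, false) is true because a new exclusive sub-range bound
+     * may equal the existing one.
+     */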
+
+    /**
+     * Compares two keys, using the user comparator if there is one.
+     */
+    public int compare(DatabaseEntry key1, DatabaseEntry key2) {
+
+        if (comparator != null) {
+            return comparator.compare(getByteArray(key1), getByteArray(key2));
+        } else {
+            return compareBytes
+                    (key1.getData(), key1.getOffset(), key1.getSize(),
+                     key2.getData(), key2.getOffset(), key2.getSize());
+
+        }
+    }
+
+    /**
+     * Copies a byte array.
+     */
+    public static byte[] copyBytes(byte[] bytes) {
+
+        byte[] a = new byte[bytes.length];
+        System.arraycopy(bytes, 0, a, 0, a.length);
+        return a;
+    }
+
+    /**
+     * Compares two keys as unsigned byte arrays, which is the default
+     * comparison used by JE/DB.
+     */
+    public static int compareBytes(byte[] data1, int offset1, int size1,
+                                   byte[] data2, int offset2, int size2) {
+
+        for (int i = 0; i < size1 && i < size2; i++) {
+
+            int b1 = 0xFF & data1[offset1 + i];
+            int b2 = 0xFF & data2[offset2 + i];
+            if (b1 < b2)
+                return -1;
+            else if (b1 > b2)
+                return 1;
+        }
+
+        if (size1 < size2)
+            return -1;
+        else if (size1 > size2)
+            return 1;
+        else
+            return 0;
+    }
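+
+    /*
+     * Worked examples of the unsigned ordering:
+     *
+     *   {0x01} vs {(byte) 0xFF}  ->  negative, since 1 < 255 unsigned
+     *   {0x01} vs {0x01, 0x02}   ->  negative, since a prefix sorts first
+     */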
+
+    /**
+     * Compares two byte arrays for equality.
+     */
+    public static boolean equalBytes(byte[] data1, int offset1, int size1,
+                                     byte[] data2, int offset2, int size2) {
+        if (size1 != size2) {
+            return false;
+        }
+        for (int i = 0; i < size1; i += 1) {
+            if (data1[i + offset1] != data2[i + offset2]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns a copy of an entry.
+     */
+    public static DatabaseEntry copy(DatabaseEntry from) {
+        return new DatabaseEntry(getByteArray(from));
+    }
+
+    /**
+     * Copies one entry to another.
+     */
+    public static void copy(DatabaseEntry from, DatabaseEntry to) {
+        to.setData(getByteArray(from));
+        to.setOffset(0);
+    }
+
+    /**
+     * Returns a copy of an entry's byte array, or null if the entry has no
+     * data.
+     */
+    public static byte[] getByteArray(DatabaseEntry entry) {
+        return getByteArrayInternal(entry, Integer.MAX_VALUE);
+    }
+
+    public static byte[] getByteArray(DatabaseEntry entry, int maxBytes) {
+        return getByteArrayInternal(entry, maxBytes);
+    }
+
+    private static byte[] getByteArrayInternal(DatabaseEntry entry,
+                                               int maxBytes) {
+
+        byte[] bytes = entry.getData();
+        if (bytes == null) return null;
+        int size = Math.min(entry.getSize(), maxBytes);
+        byte[] data;
+        if (size == 0) {
+            data = ZERO_LENGTH_BYTE_ARRAY;
+        } else {
+            data = new byte[size];
+            System.arraycopy(bytes, entry.getOffset(), data, 0, size);
+        }
+        return data;
+    }
+
+    /**
+     * Returns whether the two DatabaseEntry objects have the same data
+     * value.
+     */
+    public static boolean equalBytes(DatabaseEntry e1, DatabaseEntry e2) {
+
+        if (e1 == null && e2 == null) {
+            return true;
+        }
+        if (e1 == null || e2 == null) {
+            return false;
+        }
+
+        byte[] d1 = e1.getData();
+        byte[] d2 = e2.getData();
+        int s1 = e1.getSize();
+        int s2 = e2.getSize();
+        int o1 = e1.getOffset();
+        int o2 = e2.getOffset();
+
+        if (d1 == null && d2 == null) {
+            return true;
+        }
+        if (d1 == null || d2 == null) {
+            return false;
+        }
+        if (s1 != s2) {
+            return false;
+        }
+        for (int i = 0; i < s1; i += 1) {
+            if (d1[o1 + i] != d2[o2 + i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Converts the byte array of this thang to a string of two-digit
+     * hexadecimal values, one per byte.
+     *
+     * @param dbt the thang to convert.
+     *
+     * @return the resulting string.
+     */
+    public static String toString(DatabaseEntry dbt) {
+
+        int len = dbt.getOffset() + dbt.getSize();
+        StringBuffer buf = new StringBuffer(len * 2);
+        byte[] data = dbt.getData();
+        for (int i = dbt.getOffset(); i < len; i++) {
+            String num = Integer.toHexString(data[i] & 0xFF);
+            if (num.length() < 2) buf.append('0');
+            buf.append(num);
+        }
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/util/keyrange/KeyRangeException.java b/src/com/sleepycat/util/keyrange/KeyRangeException.java
new file mode 100644
index 0000000000000000000000000000000000000000..f02b22c100a8df53915089ed671dce322d906c11
--- /dev/null
+++ b/src/com/sleepycat/util/keyrange/KeyRangeException.java
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: KeyRangeException.java,v 1.7 2008/05/20 17:52:38 linda Exp $
+ */
+
+package com.sleepycat.util.keyrange;
+
+/**
+ * An exception thrown when a key is out of range.
+ *
+ * @author Mark Hayes
+ */
+public class KeyRangeException extends IllegalArgumentException {
+
+    /**
+     * Creates a key range exception.
+     */
+    public KeyRangeException(String msg) {
+
+        super(msg);
+    }
+}
diff --git a/src/com/sleepycat/util/keyrange/RangeCursor.java b/src/com/sleepycat/util/keyrange/RangeCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..b5af3d17d9dd233e105f3962317ae378dec82114
--- /dev/null
+++ b/src/com/sleepycat/util/keyrange/RangeCursor.java
@@ -0,0 +1,1060 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RangeCursor.java,v 1.10.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.util.keyrange;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+
+/**
+ * A cursor-like interface that enforces a key range.  The method signatures
+ * are actually those of SecondaryCursor, but the pKey parameter may be null.
+ * It was done this way to avoid doubling the number of methods.
+ *
+ * <p>This is not a fully general implementation of a range cursor and should
+ * not be used directly by applications; however, it may evolve into a
+ * generally useful range cursor some day.</p>
+ *
+ * @author Mark Hayes
+ */
+public class RangeCursor implements Cloneable {
+
+    /**
+     * The cursor and secondary cursor are the same object.  The secCursor is
+     * null if the database is not a secondary database.
+     */
+    private Cursor cursor;
+    private SecondaryCursor secCursor;
+
+    /**
+     * The range is always non-null, but may be unbounded meaning that it is
+     * open and not used.
+     */
+    private KeyRange range;
+
+    /**
+     * The pkRange may be non-null only if the range is a single-key range
+     * and the cursor is a secondary cursor.  It further restricts the range of
+     * primary keys in a secondary database.
+     */
+    private KeyRange pkRange;
+
+    /**
+     * If the DB supports sorted duplicates, then calling
+     * Cursor.getSearchBothRange is allowed.
+     */
+    private boolean sortedDups;
+
+    /**
+     * The privXxx entries are used only when the range is bounded.  We read
+     * into these private entries to avoid modifying the caller's entry
+     * parameters in the case where we read successfully but the key is out of
+     * range.  In that case we return NOTFOUND and we want to leave the entry
+     * parameters unchanged.
+     */
+    private DatabaseEntry privKey;
+    private DatabaseEntry privPKey;
+    private DatabaseEntry privData;
+
+    /**
+     * The initialized flag is set to true whenever we successfully position
+     * the cursor.  It is used to implement the getNext/Prev logic for doing a
+     * getFirst/Last when the cursor is not initialized.  We can't rely on
+     * Cursor to do that for us, since if we position the underlying cursor
+     * successfully but the key is out of range, we have no way to set the
+     * underlying cursor to uninitialized.  A range cursor always starts in the
+     * uninitialized state.
+     */
+    private boolean initialized;
+
+    /**
+     * Creates a range cursor, optionally with a primary-key (duplicate)
+     * range.
+     */
+    public RangeCursor(KeyRange range,
+                       KeyRange pkRange,
+                       boolean sortedDups,
+                       Cursor cursor)
+        throws DatabaseException {
+
+        if (pkRange != null && !range.singleKey) {
+            throw new IllegalArgumentException();
+        }
+        this.range = range;
+        this.pkRange = pkRange;
+        this.sortedDups = sortedDups;
+        this.cursor = cursor;
+        init();
+        if (pkRange != null && secCursor == null) {
+            throw new IllegalArgumentException();
+        }
+    }
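+
+    /*
+     * Construction sketch (hypothetical names): a range cursor over the
+     * keys in [begin, end) of a plain, non-secondary database.
+     *
+     *   KeyRange range =
+     *       new KeyRange(null).subRange(beginEntry, true, endEntry, false);
+     *   RangeCursor rc = new RangeCursor(range, null, false, cursor);
+     */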
+
+    /**
+     * Creates a cloned range cursor.  The caller must clone the underlying
+     * cursor before calling this method, because cursor open/close is
+     * handled specially for CDS cursors outside this class.
+     */
+    public RangeCursor dup(boolean samePosition)
+        throws DatabaseException {
+
+        try {
+            RangeCursor c = (RangeCursor) super.clone();
+            c.cursor = dupCursor(cursor, samePosition);
+            c.init();
+            return c;
+        } catch (CloneNotSupportedException neverHappens) {
+            return null;
+        }
+    }
+
+    /**
+     * Used for opening and duping (cloning).
+     */
+    private void init() {
+
+        if (cursor instanceof SecondaryCursor) {
+            secCursor = (SecondaryCursor) cursor;
+        } else {
+            secCursor = null;
+        }
+
+        if (range.hasBound()) {
+            privKey = new DatabaseEntry();
+            privPKey = new DatabaseEntry();
+            privData = new DatabaseEntry();
+        } else {
+            privKey = null;
+            privPKey = null;
+            privData = null;
+        }
+    }
+
+    /**
+     * Returns whether the cursor is initialized at a valid position.
+     */
+    public boolean isInitialized() {
+        return initialized;
+    }
+
+    /**
+     * Returns the underlying cursor.  Used for cloning.
+     */
+    public Cursor getCursor() {
+        return cursor;
+    }
+
+    /**
+     * When an unbounded range is used, this method is called to use the
+     * caller's entry parameters directly, to avoid the extra step of copying
+     * between the private entries and the caller's entries.
+     */
+    private void setParams(DatabaseEntry key, DatabaseEntry pKey,
+                           DatabaseEntry data) {
+        privKey = key;
+        privPKey = pKey;
+        privData = data;
+    }
+
+    /**
+     * Dups the cursor, sets the cursor and secCursor fields to the duped
+     * cursor, and returns the old cursor.  Always call endOperation in a
+     * finally clause after calling beginOperation.
+     *
+     * <p>If the returned cursor == the cursor field, the cursor is
+     * uninitialized and was not duped; this case is handled correctly by
+     * endOperation.</p>
+     */
+    private Cursor beginOperation()
+        throws DatabaseException {
+
+        Cursor oldCursor = cursor;
+        if (initialized) {
+            cursor = dupCursor(cursor, true);
+            if (secCursor != null) {
+                secCursor = (SecondaryCursor) cursor;
+            }
+        } else {
+            return cursor;
+        }
+        return oldCursor;
+    }
+
+    /**
+     * If the operation succeeded, leaves the duped cursor in place and closes
+     * the oldCursor.  If the operation failed, moves the oldCursor back in
+     * place and closes the duped cursor.  oldCursor may be null if
+     * beginOperation was not called, in cases where we don't need to dup
+     * the cursor.  Always call endOperation when a successful operation ends,
+     * in order to set the initialized field.
+     */
+    private void endOperation(Cursor oldCursor, OperationStatus status,
+                              DatabaseEntry key, DatabaseEntry pKey,
+                              DatabaseEntry data)
+        throws DatabaseException {
+
+        if (status == OperationStatus.SUCCESS) {
+            if (oldCursor != null && oldCursor != cursor) {
+                closeCursor(oldCursor);
+            }
+            if (key != null) {
+                swapData(key, privKey);
+            }
+            if (pKey != null && secCursor != null) {
+                swapData(pKey, privPKey);
+            }
+            if (data != null) {
+                swapData(data, privData);
+            }
+            initialized = true;
+        } else {
+            if (oldCursor != null && oldCursor != cursor) {
+                closeCursor(cursor);
+                cursor = oldCursor;
+                if (secCursor != null) {
+                    secCursor = (SecondaryCursor) cursor;
+                }
+            }
+        }
+    }
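+
+    /*
+     * Canonical call pattern (a sketch of what the range-checked get
+     * methods below do): beginOperation and endOperation always pair in
+     * try/finally so a failed read restores the original cursor.
+     *
+     *   OperationStatus status = OperationStatus.NOTFOUND;
+     *   Cursor oldCursor = beginOperation();
+     *   try {
+     *       status = doGetNext(lockMode);  // any doGetXxx variant
+     *       if (status == OperationStatus.SUCCESS && !range.check(privKey)) {
+     *           status = OperationStatus.NOTFOUND;
+     *       }
+     *   } finally {
+     *       endOperation(oldCursor, status, key, pKey, data);
+     *   }
+     */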
+
+    /**
+     * Swaps the contents of the two entries.  Used to return entry data to
+     * the caller when the operation was successful.
+     */
+    private static void swapData(DatabaseEntry e1, DatabaseEntry e2) {
+
+        byte[] d1 = e1.getData();
+        int o1 = e1.getOffset();
+        int s1 = e1.getSize();
+
+        e1.setData(e2.getData(), e2.getOffset(), e2.getSize());
+        e2.setData(d1, o1, s1);
+    }
+
+    /**
+     * Shares the same byte array, offset and size between two entries.
+     * Used when copying the entry data is not necessary because it is known
+     * that the underlying operation will not modify the entry, for example,
+     * with getSearchKey.
+     */
+    private static void shareData(DatabaseEntry from, DatabaseEntry to) {
+
+        if (from != null) {
+            to.setData(from.getData(), from.getOffset(), from.getSize());
+        }
+    }
+
+    public OperationStatus getFirst(DatabaseEntry key,
+                                    DatabaseEntry pKey,
+                                    DatabaseEntry data,
+                                    LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetFirst(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (pkRange != null) {
+            KeyRange.copy(range.beginKey, privKey);
+            if (pkRange.singleKey) {
+                KeyRange.copy(pkRange.beginKey, privPKey);
+                status = doGetSearchBoth(lockMode);
+                endOperation(null, status, key, pKey, data);
+            } else {
+                status = OperationStatus.NOTFOUND;
+                Cursor oldCursor = beginOperation();
+                try {
+                    if (pkRange.beginKey == null || !sortedDups) {
+                        status = doGetSearchKey(lockMode);
+                    } else {
+                        KeyRange.copy(pkRange.beginKey, privPKey);
+                        status = doGetSearchBothRange(lockMode);
+                        if (status == OperationStatus.SUCCESS &&
+                            !pkRange.beginInclusive &&
+                            pkRange.compare(privPKey, pkRange.beginKey) == 0) {
+                            status = doGetNextDup(lockMode);
+                        }
+                    }
+                    if (status == OperationStatus.SUCCESS &&
+                        !pkRange.check(privPKey)) {
+                        status = OperationStatus.NOTFOUND;
+                    }
+                } finally {
+                    endOperation(oldCursor, status, key, pKey, data);
+                }
+            }
+        } else if (range.singleKey) {
+            KeyRange.copy(range.beginKey, privKey);
+            status = doGetSearchKey(lockMode);
+            endOperation(null, status, key, pKey, data);
+        } else {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                if (range.beginKey == null) {
+                    status = doGetFirst(lockMode);
+                } else {
+                    KeyRange.copy(range.beginKey, privKey);
+                    status = doGetSearchKeyRange(lockMode);
+                    if (status == OperationStatus.SUCCESS &&
+                        !range.beginInclusive &&
+                        range.compare(privKey, range.beginKey) == 0) {
+                        status = doGetNextNoDup(lockMode);
+                    }
+                }
+                if (status == OperationStatus.SUCCESS &&
+                    !range.check(privKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        }
+        return status;
+    }
+
+    public OperationStatus getLast(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetLast(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        Cursor oldCursor = beginOperation();
+        try {
+            if (pkRange != null) {
+                KeyRange.copy(range.beginKey, privKey);
+                boolean doLast = false;
+                if (!sortedDups) {
+                    status = doGetSearchKey(lockMode);
+                } else if (pkRange.endKey == null) {
+                    doLast = true;
+                } else {
+                    KeyRange.copy(pkRange.endKey, privPKey);
+                    status = doGetSearchBothRange(lockMode);
+                    if (status == OperationStatus.SUCCESS) {
+                        if (!pkRange.endInclusive ||
+                            pkRange.compare(pkRange.endKey, privPKey) != 0) {
+                            status = doGetPrevDup(lockMode);
+                        }
+                    } else {
+                        KeyRange.copy(range.beginKey, privKey);
+                        doLast = true;
+                    }
+                }
+                if (doLast) {
+                    status = doGetSearchKey(lockMode);
+                    if (status == OperationStatus.SUCCESS) {
+                        status = doGetNextNoDup(lockMode);
+                        if (status == OperationStatus.SUCCESS) {
+                            status = doGetPrev(lockMode);
+                        } else {
+                            status = doGetLast(lockMode);
+                        }
+                    }
+                }
+                if (status == OperationStatus.SUCCESS &&
+                    !pkRange.check(privPKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } else if (range.endKey == null) {
+                status = doGetLast(lockMode);
+            } else {
+                KeyRange.copy(range.endKey, privKey);
+                status = doGetSearchKeyRange(lockMode);
+                if (status == OperationStatus.SUCCESS) {
+                    if (range.endInclusive &&
+                        range.compare(range.endKey, privKey) == 0) {
+                        /* Skip this step if dups are not configured? */
+                        status = doGetNextNoDup(lockMode);
+                        if (status == OperationStatus.SUCCESS) {
+                            status = doGetPrev(lockMode);
+                        } else {
+                            status = doGetLast(lockMode);
+                        }
+                    } else {
+                        status = doGetPrev(lockMode);
+                    }
+                } else {
+                    status = doGetLast(lockMode);
+                }
+            }
+            if (status == OperationStatus.SUCCESS &&
+                !range.checkBegin(privKey, true)) {
+                status = OperationStatus.NOTFOUND;
+            }
+        } finally {
+            endOperation(oldCursor, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getNext(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!initialized) {
+            return getFirst(key, pKey, data, lockMode);
+        }
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetNext(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (pkRange != null) {
+            if (pkRange.endKey == null) {
+                status = doGetNextDup(lockMode);
+                endOperation(null, status, key, pKey, data);
+            } else {
+                status = OperationStatus.NOTFOUND;
+                Cursor oldCursor = beginOperation();
+                try {
+                    status = doGetNextDup(lockMode);
+                    if (status == OperationStatus.SUCCESS &&
+                        !pkRange.checkEnd(privPKey, true)) {
+                        status = OperationStatus.NOTFOUND;
+                    }
+                } finally {
+                    endOperation(oldCursor, status, key, pKey, data);
+                }
+            }
+        } else if (range.singleKey) {
+            status = doGetNextDup(lockMode);
+            endOperation(null, status, key, pKey, data);
+        } else {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetNext(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !range.check(privKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        }
+        return status;
+    }
+
+    public OperationStatus getNextNoDup(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!initialized) {
+            return getFirst(key, pKey, data, lockMode);
+        }
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetNextNoDup(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (range.singleKey) {
+            status = OperationStatus.NOTFOUND;
+        } else {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetNextNoDup(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !range.check(privKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        }
+        return status;
+    }
+
+    public OperationStatus getPrev(DatabaseEntry key,
+                                   DatabaseEntry pKey,
+                                   DatabaseEntry data,
+                                   LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!initialized) {
+            return getLast(key, pKey, data, lockMode);
+        }
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetPrev(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (pkRange != null) {
+            if (pkRange.beginKey == null) {
+                status = doGetPrevDup(lockMode);
+                endOperation(null, status, key, pKey, data);
+            } else {
+                status = OperationStatus.NOTFOUND;
+                Cursor oldCursor = beginOperation();
+                try {
+                    status = doGetPrevDup(lockMode);
+                    if (status == OperationStatus.SUCCESS &&
+                        !pkRange.checkBegin(privPKey, true)) {
+                        status = OperationStatus.NOTFOUND;
+                    }
+                } finally {
+                    endOperation(oldCursor, status, key, pKey, data);
+                }
+            }
+        } else if (range.singleKey) {
+            status = doGetPrevDup(lockMode);
+            endOperation(null, status, key, pKey, data);
+        } else {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetPrev(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !range.check(privKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        }
+        return status;
+    }
+
+    public OperationStatus getPrevNoDup(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!initialized) {
+            return getLast(key, pKey, data, lockMode);
+        }
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetPrevNoDup(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (range.singleKey) {
+            status = OperationStatus.NOTFOUND;
+        } else {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetPrevNoDup(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !range.check(privKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        }
+        return status;
+    }
+
+    public OperationStatus getSearchKey(DatabaseEntry key,
+                                        DatabaseEntry pKey,
+                                        DatabaseEntry data,
+                                        LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetSearchKey(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (!range.check(key)) {
+            status = OperationStatus.NOTFOUND;
+        } else if (pkRange != null) {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                shareData(key, privKey);
+                status = doGetSearchKey(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !pkRange.check(privPKey)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        } else {
+            shareData(key, privKey);
+            status = doGetSearchKey(lockMode);
+            endOperation(null, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getSearchBoth(DatabaseEntry key,
+                                         DatabaseEntry pKey,
+                                         DatabaseEntry data,
+                                         LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetSearchBoth(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (!range.check(key) ||
+            (pkRange != null && !pkRange.check(pKey))) {
+            status = OperationStatus.NOTFOUND;
+        } else {
+            shareData(key, privKey);
+            if (secCursor != null) {
+                shareData(pKey, privPKey);
+            } else {
+                shareData(data, privData);
+            }
+            status = doGetSearchBoth(lockMode);
+            endOperation(null, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getSearchKeyRange(DatabaseEntry key,
+                                             DatabaseEntry pKey,
+                                             DatabaseEntry data,
+                                             LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetSearchKeyRange(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        Cursor oldCursor = beginOperation();
+        try {
+            shareData(key, privKey);
+            status = doGetSearchKeyRange(lockMode);
+            if (status == OperationStatus.SUCCESS &&
+                (!range.check(privKey) ||
+                 (pkRange != null && !pkRange.check(pKey)))) {
+                status = OperationStatus.NOTFOUND;
+            }
+        } finally {
+            endOperation(oldCursor, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getSearchBothRange(DatabaseEntry key,
+                                              DatabaseEntry pKey,
+                                              DatabaseEntry data,
+                                              LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status = OperationStatus.NOTFOUND;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetSearchBothRange(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        Cursor oldCursor = beginOperation();
+        try {
+            shareData(key, privKey);
+            if (secCursor != null) {
+                shareData(pKey, privPKey);
+            } else {
+                shareData(data, privData);
+            }
+            status = doGetSearchBothRange(lockMode);
+            if (status == OperationStatus.SUCCESS &&
+                (!range.check(privKey) ||
+                 (pkRange != null && !pkRange.check(pKey)))) {
+                status = OperationStatus.NOTFOUND;
+            }
+        } finally {
+            endOperation(oldCursor, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getSearchRecordNumber(DatabaseEntry key,
+                                                 DatabaseEntry pKey,
+                                                 DatabaseEntry data,
+                                                 LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetSearchRecordNumber(lockMode);
+            endOperation(null, status, null, null, null);
+            return status;
+        }
+        if (!range.check(key)) {
+            status = OperationStatus.NOTFOUND;
+        } else {
+            shareData(key, privKey);
+            status = doGetSearchRecordNumber(lockMode);
+            endOperation(null, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getNextDup(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        if (!initialized) {
+            throw new DatabaseException("Cursor not initialized");
+        }
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetNextDup(lockMode);
+            endOperation(null, status, null, null, null);
+        } else if (pkRange != null && pkRange.endKey != null) {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetNextDup(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !pkRange.checkEnd(privPKey, true)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        } else {
+            status = doGetNextDup(lockMode);
+            endOperation(null, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getPrevDup(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        if (!initialized) {
+            throw new DatabaseException("Cursor not initialized");
+        }
+        OperationStatus status;
+        if (!range.hasBound()) {
+            setParams(key, pKey, data);
+            status = doGetPrevDup(lockMode);
+            endOperation(null, status, null, null, null);
+        } else if (pkRange != null && pkRange.beginKey != null) {
+            status = OperationStatus.NOTFOUND;
+            Cursor oldCursor = beginOperation();
+            try {
+                status = doGetPrevDup(lockMode);
+                if (status == OperationStatus.SUCCESS &&
+                    !pkRange.checkBegin(privPKey, true)) {
+                    status = OperationStatus.NOTFOUND;
+                }
+            } finally {
+                endOperation(oldCursor, status, key, pKey, data);
+            }
+        } else {
+            status = doGetPrevDup(lockMode);
+            endOperation(null, status, key, pKey, data);
+        }
+        return status;
+    }
+
+    public OperationStatus getCurrent(DatabaseEntry key,
+                                      DatabaseEntry pKey,
+                                      DatabaseEntry data,
+                                      LockMode lockMode)
+        throws DatabaseException {
+
+        if (!initialized) {
+            throw new DatabaseException("Cursor not initialized");
+        }
+        if (secCursor != null && pKey != null) {
+            return secCursor.getCurrent(key, pKey, data, lockMode);
+        } else {
+            return cursor.getCurrent(key, data, lockMode);
+        }
+    }
+
+    /*
+     * Pass-thru methods.
+     */
+
+    public void close()
+        throws DatabaseException {
+
+        closeCursor(cursor);
+    }
+
+    public int count()
+        throws DatabaseException {
+
+        return cursor.count();
+    }
+
+    public OperationStatus delete()
+        throws DatabaseException {
+
+        return cursor.delete();
+    }
+
+    public OperationStatus put(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        return cursor.put(key, data);
+    }
+
+    public OperationStatus putNoOverwrite(DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        return cursor.putNoOverwrite(key, data);
+    }
+
+    public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        return cursor.putNoDupData(key, data);
+    }
+
+    public OperationStatus putCurrent(DatabaseEntry data)
+        throws DatabaseException {
+
+        return cursor.putCurrent(data);
+    }
+
+    public OperationStatus putAfter(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        return DbCompat.putAfter(cursor, key, data);
+    }
+
+    public OperationStatus putBefore(DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        return DbCompat.putBefore(cursor, key, data);
+    }
+
+    private OperationStatus doGetFirst(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getFirst(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getFirst(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetLast(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getLast(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getLast(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetNext(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getNext(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getNext(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetNextDup(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getNextDup(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getNextDup(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetNextNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getNextNoDup(privKey, privPKey, privData,
+                                          lockMode);
+        } else {
+            return cursor.getNextNoDup(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetPrev(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getPrev(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getPrev(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetPrevDup(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getPrevDup(privKey, privPKey, privData, lockMode);
+        } else {
+            return cursor.getPrevDup(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetPrevNoDup(LockMode lockMode)
+        throws DatabaseException {
+
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getPrevNoDup(privKey, privPKey, privData,
+                                          lockMode);
+        } else {
+            return cursor.getPrevNoDup(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetSearchKey(LockMode lockMode)
+        throws DatabaseException {
+
+        if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) {
+            return OperationStatus.NOTFOUND;
+        }
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getSearchKey(privKey, privPKey, privData,
+                                          lockMode);
+        } else {
+            return cursor.getSearchKey(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetSearchKeyRange(LockMode lockMode)
+        throws DatabaseException {
+
+        if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) {
+            return OperationStatus.NOTFOUND;
+        }
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getSearchKeyRange(privKey, privPKey, privData,
+                                               lockMode);
+        } else {
+            return cursor.getSearchKeyRange(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetSearchBoth(LockMode lockMode)
+        throws DatabaseException {
+
+        if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) {
+            return OperationStatus.NOTFOUND;
+        }
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getSearchBoth(privKey, privPKey, privData,
+                                           lockMode);
+        } else {
+            return cursor.getSearchBoth(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetSearchBothRange(LockMode lockMode)
+        throws DatabaseException {
+
+        if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) {
+            return OperationStatus.NOTFOUND;
+        }
+        if (secCursor != null && privPKey != null) {
+            return secCursor.getSearchBothRange(privKey, privPKey,
+                                                privData, lockMode);
+        } else {
+            return cursor.getSearchBothRange(privKey, privData, lockMode);
+        }
+    }
+
+    private OperationStatus doGetSearchRecordNumber(LockMode lockMode)
+        throws DatabaseException {
+
+        if (DbCompat.getRecordNumber(privKey) <= 0) {
+            return OperationStatus.NOTFOUND;
+        }
+        if (secCursor != null && privPKey != null) {
+            return DbCompat.getSearchRecordNumber(secCursor, privKey, privPKey,
+                                                  privData, lockMode);
+        } else {
+            return DbCompat.getSearchRecordNumber(cursor, privKey, privData,
+                                                  lockMode);
+        }
+    }
+
+    /*
+     * Protected methods for duping and closing cursors.  These are overridden
+     * by the collections API to implement cursor pooling for CDS.
+     */
+
+    /**
+     * Dups the given cursor.
+     */
+    protected Cursor dupCursor(Cursor cursor, boolean samePosition)
+        throws DatabaseException {
+
+        return cursor.dup(samePosition);
+    }
+
+    /**
+     * Closes the given cursor.
+     */
+    protected void closeCursor(Cursor cursor)
+        throws DatabaseException {
+
+        cursor.close();
+    }
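+
+    /*
+     * Illustrative sketch only, not part of this change: a subclass that
+     * pools cursors for CDS might override the two methods above roughly as
+     * follows.  The "pool" field and its borrow/release methods are
+     * hypothetical names used for illustration.
+     *
+     *   protected Cursor dupCursor(Cursor cursor, boolean samePosition)
+     *       throws DatabaseException {
+     *       return pool.borrow(cursor, samePosition);
+     *   }
+     *
+     *   protected void closeCursor(Cursor cursor)
+     *       throws DatabaseException {
+     *       pool.release(cursor);
+     *   }
+     */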
+
+    /**
+     * If the database is a RECNO or QUEUE database, we know its keys are
+     * record numbers.  We treat a non-positive record number as out of
+     * bounds: we return NOTFOUND rather than throwing the
+     * IllegalArgumentException that would result from passing a non-positive
+     * record number to the DB cursor.  This behavior is required by the
+     * collections interface.
+     */
+    protected boolean checkRecordNumber() {
+        return false;
+    }
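+
+    /*
+     * Illustrative sketch only: a subclass whose underlying database uses
+     * record-number keys (RECNO or QUEUE) would enable the bounds checks in
+     * the doGetSearch* methods above by overriding this method.  The
+     * "recordNumberDatabase" field is a hypothetical name.
+     *
+     *   protected boolean checkRecordNumber() {
+     *       return recordNumberDatabase;
+     *   }
+     */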
+}
diff --git a/src/com/sleepycat/util/package.html b/src/com/sleepycat/util/package.html
new file mode 100644
index 0000000000000000000000000000000000000000..a3cd49492f71401aa75f6723a1b4cd1a7a117d9a
--- /dev/null
+++ b/src/com/sleepycat/util/package.html
@@ -0,0 +1,6 @@
+<!-- $Id: package.html,v 1.15 2004/03/29 19:06:06 linda Exp $ -->
+<html>
+<body>
+General utilities used throughout Berkeley DB.
+</body>
+</html>
diff --git a/test/com/sleepycat/bind/serial/test/MarshalledObject.java b/test/com/sleepycat/bind/serial/test/MarshalledObject.java
new file mode 100644
index 0000000000000000000000000000000000000000..fcc4a1a20d1c9a5d728e991bc517869804c50a19
--- /dev/null
+++ b/test/com/sleepycat/bind/serial/test/MarshalledObject.java
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledObject.java,v 1.23.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
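+ * Simple Serializable entity used by the serial binding tests.  The primary
+ * key is transient and is marshalled separately as a tuple key; secondary
+ * keys "1" and "2" are written only when non-empty, and nullifyForeignKey
+ * clears them.
+ *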
+ * @author Mark Hayes
+ */
+@SuppressWarnings("serial")
+public class MarshalledObject
+    implements Serializable, MarshalledTupleKeyEntity {
+
+    private String data;
+    private transient String primaryKey;
+    private String indexKey1;
+    private String indexKey2;
+
+    public MarshalledObject(String data, String primaryKey,
+                            String indexKey1, String indexKey2) {
+        this.data = data;
+        this.primaryKey = primaryKey;
+        this.indexKey1 = indexKey1;
+        this.indexKey2 = indexKey2;
+    }
+
+    public boolean equals(Object o) {
+
+        try {
+            MarshalledObject other = (MarshalledObject) o;
+
+            return this.data.equals(other.data) &&
+                   this.primaryKey.equals(other.primaryKey) &&
+                   this.indexKey1.equals(other.indexKey1) &&
+                   this.indexKey2.equals(other.indexKey2);
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
+    public String getData() {
+
+        return data;
+    }
+
+    public String getPrimaryKey() {
+
+        return primaryKey;
+    }
+
+    public String getIndexKey1() {
+
+        return indexKey1;
+    }
+
+    public String getIndexKey2() {
+
+        return indexKey2;
+    }
+
+    public int expectedKeyLength() {
+
+        return primaryKey.length() + 1;
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(primaryKey);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        primaryKey = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                keyOutput.writeString(indexKey1);
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                keyOutput.writeString(indexKey2);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                indexKey1 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                indexKey2 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+}
diff --git a/test/com/sleepycat/bind/serial/test/NullClassCatalog.java b/test/com/sleepycat/bind/serial/test/NullClassCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..38299c7b582cc40e4a3c478b464a78abb8cf5b17
--- /dev/null
+++ b/test/com/sleepycat/bind/serial/test/NullClassCatalog.java
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NullClassCatalog.java,v 1.20.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.ObjectStreamClass;
+import java.math.BigInteger;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.bind.serial.ClassCatalog;
+
+/**
+ * NullClassCatalog is a dummy ClassCatalog implementation that simply
+ * returns large (8 byte) class IDs so that ObjectOutput
+ * can be simulated when computing a serialized size.
+ *
+ * @author Mark Hayes
+ */
+class NullClassCatalog implements ClassCatalog {
+
+    private long id = Long.MAX_VALUE;
+
+    public void close()
+        throws DatabaseException {
+    }
+
+    public byte[] getClassID(ObjectStreamClass classFormat)
+        throws DatabaseException {
+
+        return BigInteger.valueOf(id--).toByteArray();
+    }
+
+    public ObjectStreamClass getClassFormat(byte[] classID)
+        throws DatabaseException, ClassNotFoundException {
+
+        return null; // ObjectInput not supported
+    }
+}
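+
+/*
+ * Illustrative sketch only: the catalog above can be used to estimate the
+ * serialized size of an object without a real catalog database.  The
+ * SerialOutput and FastOutputStream usage mirrors BindingSpeedTest in this
+ * change; "myObject" is a placeholder.
+ *
+ *   FastOutputStream fo = new FastOutputStream();
+ *   SerialOutput out = new SerialOutput(fo, new NullClassCatalog());
+ *   out.writeObject(myObject);
+ *   int serializedSize = fo.toByteArray().length;
+ */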
diff --git a/test/com/sleepycat/bind/serial/test/SerialBindingTest.java b/test/com/sleepycat/bind/serial/test/SerialBindingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c637d3a19f1c0322ac4b18b17cbb1ae3de8e532d
--- /dev/null
+++ b/test/com/sleepycat/bind/serial/test/SerialBindingTest.java
@@ -0,0 +1,328 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SerialBindingTest.java,v 1.37.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.Serializable;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
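+ * Round-trip tests for SerialBinding, SerialSerialBinding and
+ * TupleSerialMarshalledBinding, including buffer sizing, buffer override,
+ * and class-loader override behavior.
+ *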
+ * @author Mark Hayes
+ */
+public class SerialBindingTest extends TestCase {
+
+    private ClassCatalog catalog;
+    private DatabaseEntry buffer;
+    private DatabaseEntry keyBuffer;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(SerialBindingTest.class);
+        return suite;
+    }
+
+    public SerialBindingTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("SerialBindingTest." + getName());
+        catalog = new TestClassCatalog();
+        buffer = new DatabaseEntry();
+        keyBuffer = new DatabaseEntry();
+    }
+
+    public void tearDown() {
+
+        /* Ensure that GC can cleanup. */
+        catalog = null;
+        buffer = null;
+        keyBuffer = null;
+    }
+
+    public void runTest()
+        throws Throwable {
+
+        try {
+            super.runTest();
+        } catch (Exception e) {
+            throw ExceptionUnwrapper.unwrap(e);
+        }
+    }
+
+    private void primitiveBindingTest(Object val) {
+
+        Class cls = val.getClass();
+        SerialBinding binding = new SerialBinding(catalog, cls);
+
+        binding.objectToEntry(val, buffer);
+        assertTrue(buffer.getSize() > 0);
+
+        Object val2 = binding.entryToObject(buffer);
+        assertSame(cls, val2.getClass());
+        assertEquals(val, val2);
+
+        Object valWithWrongCls = (cls == String.class)
+                      ? ((Object) new Integer(0)) : ((Object) new String(""));
+        try {
+            binding.objectToEntry(valWithWrongCls, buffer);
+        } catch (IllegalArgumentException expected) {}
+    }
+
+    public void testPrimitiveBindings() {
+
+        primitiveBindingTest("abc");
+        primitiveBindingTest(new Character('a'));
+        primitiveBindingTest(new Boolean(true));
+        primitiveBindingTest(new Byte((byte) 123));
+        primitiveBindingTest(new Short((short) 123));
+        primitiveBindingTest(new Integer(123));
+        primitiveBindingTest(new Long(123));
+        primitiveBindingTest(new Float(123.123));
+        primitiveBindingTest(new Double(123.123));
+    }
+
+    public void testNullObjects() {
+
+        SerialBinding binding = new SerialBinding(catalog, null);
+        buffer.setSize(0);
+        binding.objectToEntry(null, buffer);
+        assertTrue(buffer.getSize() > 0);
+        assertEquals(null, binding.entryToObject(buffer));
+    }
+
+    public void testSerialSerialBinding() {
+
+        SerialBinding keyBinding = new SerialBinding(catalog, String.class);
+        SerialBinding valueBinding = new SerialBinding(catalog, String.class);
+        EntityBinding binding = new MySerialSerialBinding(keyBinding,
+                                                          valueBinding);
+
+        String val = "key#value?indexKey";
+        binding.objectToData(val, buffer);
+        assertTrue(buffer.getSize() > 0);
+        binding.objectToKey(val, keyBuffer);
+        assertTrue(keyBuffer.getSize() > 0);
+
+        Object result = binding.entryToObject(keyBuffer, buffer);
+        assertEquals(val, result);
+    }
+
+    // also tests TupleSerialBinding since TupleSerialMarshalledBinding extends
+    // it
+    public void testTupleSerialMarshalledBinding() {
+
+        SerialBinding valueBinding = new SerialBinding(catalog,
+                                                    MarshalledObject.class);
+        EntityBinding binding =
+            new TupleSerialMarshalledBinding(valueBinding);
+
+        MarshalledObject val = new MarshalledObject("abc", "primary",
+                                                    "index1", "index2");
+        binding.objectToData(val, buffer);
+        assertTrue(buffer.getSize() > 0);
+        binding.objectToKey(val, keyBuffer);
+        assertEquals(val.expectedKeyLength(), keyBuffer.getSize());
+
+        Object result = binding.entryToObject(keyBuffer, buffer);
+        assertTrue(result instanceof MarshalledObject);
+        val = (MarshalledObject) result;
+        assertEquals("abc", val.getData());
+        assertEquals("primary", val.getPrimaryKey());
+        assertEquals("index1", val.getIndexKey1());
+        assertEquals("index2", val.getIndexKey2());
+    }
+
+    public void testBufferSize() {
+
+        CaptureSizeBinding binding =
+            new CaptureSizeBinding(catalog, String.class);
+
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize);
+
+        binding.setSerialBufferSize(1000);
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertEquals(1000, binding.bufSize);
+    }
+
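+    /*
+     * Records the length of the buffer returned by getSerialOutput so that
+     * testBufferSize can verify the default size and the configured size.
+     */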
+    private static class CaptureSizeBinding extends SerialBinding {
+
+        int bufSize;
+
+        CaptureSizeBinding(ClassCatalog classCatalog, Class baseClass) {
+            super(classCatalog, baseClass);
+        }
+
+        public FastOutputStream getSerialOutput(Object object) {
+            FastOutputStream fos = super.getSerialOutput(object);
+            bufSize = fos.getBufferBytes().length;
+            return fos;
+        }
+    }
+
+    public void testBufferOverride() {
+
+        FastOutputStream out = new FastOutputStream(10);
+        CachedOutputBinding binding =
+            new CachedOutputBinding(catalog, String.class, out);
+
+        binding.used = false;
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+
+        binding.used = false;
+        binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer);
+        assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+
+        binding.used = false;
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+    }
+
+    private static class CachedOutputBinding extends SerialBinding {
+
+        FastOutputStream out;
+        boolean used;
+
+        CachedOutputBinding(ClassCatalog classCatalog,
+                            Class baseClass,
+                            FastOutputStream out) {
+            super(classCatalog, baseClass);
+            this.out = out;
+        }
+
+        public FastOutputStream getSerialOutput(Object object) {
+            out.reset();
+            used = true;
+            return out;
+        }
+    }
+
+    private static class MySerialSerialBinding extends SerialSerialBinding {
+
+        private MySerialSerialBinding(SerialBinding keyBinding,
+                                      SerialBinding valueBinding) {
+
+            super(keyBinding, valueBinding);
+        }
+
+        public Object entryToObject(Object keyInput, Object valueInput) {
+
+            return "" + keyInput + '#' + valueInput;
+        }
+
+        public Object objectToKey(Object object) {
+
+            String s = (String) object;
+            int i = s.indexOf('#');
+            if (i < 0 || i == s.length() - 1) {
+                throw new IllegalArgumentException(s);
+            } else {
+                return s.substring(0, i);
+            }
+        }
+
+        public Object objectToData(Object object) {
+
+            String s = (String) object;
+            int i = s.indexOf('#');
+            if (i < 0 || i == s.length() - 1) {
+                throw new IllegalArgumentException(s);
+            } else {
+                return s.substring(i + 1);
+            }
+        }
+    }
+
+    /**
+     * Tests that overriding SerialBinding.getClassLoader is possible.  This is
+     * a crude test because to create a truly working class loader is a large
+     * undertaking.
+     */
+    public void testClassloaderOverride()
+        throws Exception {
+
+        DatabaseEntry entry = new DatabaseEntry();
+
+        SerialBinding binding = new CustomLoaderBinding
+            (catalog, null, new FailureClassLoader());
+
+        try {
+            binding.objectToEntry(new MyClass(), entry);
+            binding.entryToObject(entry);
+            fail();
+        } catch (RuntimeException e) {
+            assertTrue(e.getMessage().startsWith("expect failure"));
+        }
+    }
+
+    private static class CustomLoaderBinding extends SerialBinding {
+
+        private ClassLoader loader;
+
+        CustomLoaderBinding(ClassCatalog classCatalog,
+                            Class baseClass,
+                            ClassLoader loader) {
+
+            super(classCatalog, baseClass);
+            this.loader = loader;
+        }
+
+        public ClassLoader getClassLoader() {
+            return loader;
+        }
+    }
+
+    private static class FailureClassLoader extends ClassLoader {
+
+        public Class loadClass(String name)
+            throws ClassNotFoundException {
+
+            throw new RuntimeException("expect failure: " + name);
+        }
+    }
+
+    @SuppressWarnings("serial")
+    private static class MyClass implements Serializable {
+    }
+}
diff --git a/test/com/sleepycat/bind/serial/test/TestClassCatalog.java b/test/com/sleepycat/bind/serial/test/TestClassCatalog.java
new file mode 100644
index 0000000000000000000000000000000000000000..d10e7ff6df23dc9485a0ea03925405d943361020
--- /dev/null
+++ b/test/com/sleepycat/bind/serial/test/TestClassCatalog.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestClassCatalog.java,v 1.18.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.ObjectStreamClass;
+import java.util.HashMap;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.je.DatabaseException;
+
+/**
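+ * Simple in-memory ClassCatalog that assigns each class description a small
+ * sequential string ID, for tests that need a catalog without opening a
+ * database.
+ *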
+ * @author Mark Hayes
+ */
+public class TestClassCatalog implements ClassCatalog {
+
+    private HashMap idToDescMap = new HashMap();
+    private HashMap nameToIdMap = new HashMap();
+    private int nextId = 1;
+
+    public TestClassCatalog() {
+    }
+
+    public void close()
+        throws DatabaseException {
+    }
+
+    public synchronized byte[] getClassID(ObjectStreamClass desc)
+        throws DatabaseException {
+
+        String className = desc.getName();
+        byte[] id = (byte[]) nameToIdMap.get(className);
+        if (id == null) {
+            String strId = String.valueOf(nextId);
+            id = strId.getBytes();
+            nextId += 1;
+
+            idToDescMap.put(strId, desc);
+            nameToIdMap.put(className, id);
+        }
+        return id;
+    }
+
+    public synchronized ObjectStreamClass getClassFormat(byte[] id)
+        throws DatabaseException {
+
+        String strId = new String(id);
+        ObjectStreamClass desc = (ObjectStreamClass) idToDescMap.get(strId);
+        if (desc == null) {
+            throw new DatabaseException("classID not found");
+        }
+        return desc;
+    }
+}
diff --git a/test/com/sleepycat/bind/test/BindingSpeedTest.java b/test/com/sleepycat/bind/test/BindingSpeedTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e219ea5839c22f5f69b3e46daa4d142d58c44614
--- /dev/null
+++ b/test/com/sleepycat/bind/test/BindingSpeedTest.java
@@ -0,0 +1,492 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BindingSpeedTest.java,v 1.31.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.test;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+import java.io.Writer;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+import javax.xml.parsers.SAXParserFactory;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.xml.sax.InputSource;
+import org.xml.sax.XMLReader;
+
+import com.sleepycat.bind.serial.SerialInput;
+import com.sleepycat.bind.serial.SerialOutput;
+import com.sleepycat.bind.serial.test.TestClassCatalog;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.util.FastInputStream;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
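+ * Compares the serialized size and round-trip time of several binding
+ * approaches (unshared and shared Java serialization, Externalizable, XML
+ * SAX, tuple, and method/field reflection) applied to the same Data object.
+ *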
+ * @author Mark Hayes
+ */
+public class BindingSpeedTest extends TestCase {
+
+    static final String JAVA_UNSHARED = "java-unshared".intern();
+    static final String JAVA_SHARED = "java-shared".intern();
+    static final String JAVA_EXTERNALIZABLE = "java-externalizable".intern();
+    static final String XML_SAX = "xml-sax".intern();
+    static final String TUPLE = "tuple".intern();
+    static final String REFLECT_METHOD = "reflectMethod".intern();
+    static final String REFLECT_FIELD = "reflectField".intern();
+
+    static final int RUN_COUNT = 1000;
+    static final boolean VERBOSE = false;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite() {
+
+        TestSuite suite = new TestSuite();
+        suite.addTest(new BindingSpeedTest(JAVA_UNSHARED));
+        suite.addTest(new BindingSpeedTest(JAVA_SHARED));
+        suite.addTest(new BindingSpeedTest(JAVA_EXTERNALIZABLE));
+        suite.addTest(new BindingSpeedTest(XML_SAX));
+        suite.addTest(new BindingSpeedTest(TUPLE));
+        suite.addTest(new BindingSpeedTest(REFLECT_METHOD));
+        suite.addTest(new BindingSpeedTest(REFLECT_FIELD));
+        return suite;
+    }
+
+    private String command;
+    private FastOutputStream fo;
+    private TupleOutput to;
+    private TestClassCatalog jtc;
+    private byte[] buf;
+    private XMLReader parser;
+    private Method[] getters;
+    private Method[] setters;
+    private Field[] fields;
+
+    public BindingSpeedTest(String name) {
+
+        super("BindingSpeedTest." + name);
+        command = name;
+    }
+
+    public void runTest()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+
+        boolean isTuple = false;
+        boolean isReflectMethod = false;
+        boolean isReflectField = false;
+        boolean isXmlSax = false;
+        boolean isSerial = false;
+        boolean isShared = false;
+        boolean isExternalizable = false;
+
+        if (command == TUPLE) {
+            isTuple = true;
+        } else if (command == REFLECT_METHOD) {
+            isReflectMethod = true;
+        } else if (command == REFLECT_FIELD) {
+            isReflectField = true;
+        } else if (command == XML_SAX) {
+            isXmlSax = true;
+        } else if (command == JAVA_UNSHARED) {
+            isSerial = true;
+        } else if (command == JAVA_SHARED) {
+            isSerial = true;
+            isShared = true;
+        } else if (command == JAVA_EXTERNALIZABLE) {
+            isSerial = true;
+            isShared = true;
+            isExternalizable = true;
+        } else {
+            throw new Exception("invalid command: " + command);
+        }
+
+        // Do initialization
+
+        if (isTuple) {
+            initTuple();
+        } else if (isReflectMethod) {
+            initReflectMethod();
+        } else if (isReflectField) {
+            initReflectField();
+        } else if (isXmlSax) {
+            initXmlSax();
+        } else if (isSerial) {
+            if (isShared) {
+                initSerialShared();
+            } else {
+                initSerialUnshared();
+            }
+        }
+
+        // Warm up (let the JIT compile the hot paths) before timing
+
+        int size = 0;
+        for (int i = 0; i < RUN_COUNT; i += 1) {
+
+            if (isTuple) {
+                size = runTuple();
+            } else if (isReflectMethod) {
+                size = runReflectMethod();
+            } else if (isReflectField) {
+                size = runReflectField();
+            } else if (isXmlSax) {
+                size = runXmlSax();
+            } else if (isSerial) {
+                if (isShared) {
+                    if (isExternalizable) {
+                        size = runSerialExternalizable();
+                    } else {
+                        size = runSerialShared();
+                    }
+                } else {
+                    size = runSerialUnshared();
+                }
+            }
+        }
+
+        // Then run the timing tests
+
+        long startTime = System.currentTimeMillis();
+
+        for (int i = 0; i < RUN_COUNT; i += 1) {
+            if (isTuple) {
+                size = runTuple();
+            } else if (isReflectMethod) {
+                size = runReflectMethod();
+            } else if (isReflectField) {
+                size = runReflectField();
+            } else if (isXmlSax) {
+                size = runXmlSax();
+            } else if (isSerial) {
+                if (isShared) {
+                    if (isExternalizable) {
+                        size = runSerialExternalizable();
+                    } else {
+                        size = runSerialShared();
+                    }
+                } else {
+                    size = runSerialUnshared();
+                }
+            }
+        }
+
+        long stopTime = System.currentTimeMillis();
+
+        assertTrue("data size too big", size < 250);
+
+        if (VERBOSE) {
+            System.out.println(command);
+            System.out.println("data size: " + size);
+            System.out.println("run time:  " +
+                ((stopTime - startTime) / (double) RUN_COUNT));
+        }
+    }
+
+    public void tearDown() {
+
+        /* Ensure that GC can cleanup. */
+        command = null;
+        fo = null;
+        to = null;
+        jtc = null;
+        buf = null;
+        parser = null;
+    }
+
+    void initSerialUnshared()
+        throws Exception {
+
+        fo = new FastOutputStream();
+    }
+
+    int runSerialUnshared()
+        throws Exception {
+
+        fo.reset();
+        ObjectOutputStream oos = new ObjectOutputStream(fo);
+        oos.writeObject(new Data());
+        byte[] bytes = fo.toByteArray();
+        FastInputStream fi = new FastInputStream(bytes);
+        ObjectInputStream ois = new ObjectInputStream(fi);
+        ois.readObject();
+        return bytes.length;
+    }
+
+    void initSerialShared()
+        throws Exception {
+
+        jtc = new TestClassCatalog();
+        fo = new FastOutputStream();
+    }
+
+    int runSerialShared()
+        throws Exception {
+
+        fo.reset();
+        SerialOutput oos = new SerialOutput(fo, jtc);
+        oos.writeObject(new Data());
+        byte[] bytes = fo.toByteArray();
+        FastInputStream fi = new FastInputStream(bytes);
+        SerialInput ois = new SerialInput(fi, jtc);
+        ois.readObject();
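+        /*
+         * Subtract the fixed serialization stream header so that the
+         * reported size reflects only the per-object data.
+         */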
+        return (bytes.length - SerialOutput.getStreamHeader().length);
+    }
+
+    int runSerialExternalizable()
+        throws Exception {
+
+        fo.reset();
+        SerialOutput oos = new SerialOutput(fo, jtc);
+        oos.writeObject(new Data2());
+        byte[] bytes = fo.toByteArray();
+        FastInputStream fi = new FastInputStream(bytes);
+        SerialInput ois = new SerialInput(fi, jtc);
+        ois.readObject();
+        return (bytes.length - SerialOutput.getStreamHeader().length);
+    }
+
+    void initTuple()
+        throws Exception {
+
+        buf = new byte[500];
+        to = new TupleOutput(buf);
+    }
+
+    int runTuple()
+        throws Exception {
+
+        to.reset();
+        new Data().writeTuple(to);
+
+        TupleInput ti = new TupleInput(
+                          to.getBufferBytes(), to.getBufferOffset(),
+                          to.getBufferLength());
+        new Data().readTuple(ti);
+
+        return to.getBufferLength();
+    }
+
+    void initReflectMethod()
+        throws Exception {
+
+        initTuple();
+
+        Class cls = Data.class;
+
+        getters = new Method[5];
+        getters[0] = cls.getMethod("getField1", new Class[0]);
+        getters[1] = cls.getMethod("getField2", new Class[0]);
+        getters[2] = cls.getMethod("getField3", new Class[0]);
+        getters[3] = cls.getMethod("getField4", new Class[0]);
+        getters[4] = cls.getMethod("getField5", new Class[0]);
+
+        setters = new Method[5];
+        setters[0] = cls.getMethod("setField1", new Class[] {String.class});
+        setters[1] = cls.getMethod("setField2", new Class[] {String.class});
+        setters[2] = cls.getMethod("setField3", new Class[] {Integer.TYPE});
+        setters[3] = cls.getMethod("setField4", new Class[] {Integer.TYPE});
+        setters[4] = cls.getMethod("setField5", new Class[] {String.class});
+    }
+
+    int runReflectMethod()
+        throws Exception {
+
+        to.reset();
+        Data data = new Data();
+        to.writeString((String) getters[0].invoke(data, (Object[]) null));
+        to.writeString((String) getters[1].invoke(data, (Object[]) null));
+        to.writeInt(((Integer) getters[2].invoke(data, (Object[]) null)).intValue());
+        to.writeInt(((Integer) getters[3].invoke(data, (Object[]) null)).intValue());
+        to.writeString((String) getters[4].invoke(data, (Object[]) null));
+
+        TupleInput ti = new TupleInput(
+                          to.getBufferBytes(), to.getBufferOffset(),
+                          to.getBufferLength());
+        data = new Data();
+        setters[0].invoke(data, new Object[] {ti.readString()});
+        setters[1].invoke(data, new Object[] {ti.readString()});
+        setters[2].invoke(data, new Object[] {new Integer(ti.readInt())});
+        setters[3].invoke(data, new Object[] {new Integer(ti.readInt())});
+        setters[4].invoke(data, new Object[] {ti.readString()});
+
+        return to.getBufferLength();
+    }
+
+    void initReflectField()
+        throws Exception {
+
+        initTuple();
+
+        Class cls = Data.class;
+
+        fields = new Field[5];
+        fields[0] = cls.getField("field1");
+        fields[1] = cls.getField("field2");
+        fields[2] = cls.getField("field3");
+        fields[3] = cls.getField("field4");
+        fields[4] = cls.getField("field5");
+    }
+
+    int runReflectField()
+        throws Exception {
+
+        to.reset();
+        Data data = new Data();
+        to.writeString((String) fields[0].get(data));
+        to.writeString((String) fields[1].get(data));
+        to.writeInt(((Integer) fields[2].get(data)).intValue());
+        to.writeInt(((Integer) fields[3].get(data)).intValue());
+        to.writeString((String) fields[4].get(data));
+
+        TupleInput ti = new TupleInput(
+                          to.getBufferBytes(), to.getBufferOffset(),
+                          to.getBufferLength());
+        data = new Data();
+        fields[0].set(data, ti.readString());
+        fields[1].set(data, ti.readString());
+        fields[2].set(data, new Integer(ti.readInt()));
+        fields[3].set(data, new Integer(ti.readInt()));
+        fields[4].set(data, ti.readString());
+
+        return to.getBufferLength();
+    }
+
+    void initXmlSax()
+        throws Exception {
+
+        buf = new byte[500];
+        fo = new FastOutputStream();
+        SAXParserFactory saxFactory = SAXParserFactory.newInstance();
+        saxFactory.setNamespaceAware(true);
+        parser = saxFactory.newSAXParser().getXMLReader();
+    }
+
+    int runXmlSax()
+        throws Exception {
+
+        fo.reset();
+        OutputStreamWriter writer = new OutputStreamWriter(fo);
+        new Data().writeXmlText(writer);
+
+        byte[] bytes = fo.toByteArray();
+        FastInputStream fi = new FastInputStream(bytes);
+        InputSource input = new InputSource(fi);
+        parser.parse(input);
+
+        //InputStreamReader reader = new InputStreamReader(fi);
+        //new Data().readXmlText(??);
+
+        return bytes.length;
+    }
+
+    static class Data2 extends Data implements Externalizable {
+
+        public Data2() {}
+
+        public void readExternal(ObjectInput in)
+            throws IOException, ClassNotFoundException {
+
+            field1 = in.readUTF();
+            field2 = in.readUTF();
+            field3 = in.readInt();
+            field4 = in.readInt();
+            field5 = in.readUTF();
+        }
+
+        public void writeExternal(ObjectOutput out)
+            throws IOException {
+
+            out.writeUTF(field1);
+            out.writeUTF(field2);
+            out.writeInt(field3);
+            out.writeInt(field4);
+            out.writeUTF(field5);
+        }
+    }
+
+    @SuppressWarnings("serial")
+    static class Data implements Serializable {
+
+        public String field1 = "field1";
+        public String field2 = "field2";
+        public int field3 = 333;
+        public int field4 = 444;
+        public String field5 = "field5";
+
+        public String getField1() { return field1; }
+        public String getField2() { return field2; }
+        public int getField3() { return field3; }
+        public int getField4() { return field4; }
+        public String getField5() { return field5; }
+
+        public void setField1(String v) { field1 = v; }
+        public void setField2(String v) { field2 = v; }
+        public void setField3(int v) { field3 = v; }
+        public void setField4(int v) { field4 = v; }
+        public void setField5(String v) { field5 = v; }
+
+        void readTuple(TupleInput _input) {
+
+            field1 = _input.readString();
+            field2 = _input.readString();
+            field3 = _input.readInt();
+            field4 = _input.readInt();
+            field5 = _input.readString();
+        }
+
+        void writeTuple(TupleOutput _output) {
+
+            _output.writeString(field1);
+            _output.writeString(field2);
+            _output.writeInt(field3);
+            _output.writeInt(field4);
+            _output.writeString(field5);
+        }
+
+        void writeXmlText(Writer writer) throws IOException {
+
+            writer.write("<Data><Field1>");
+            writer.write(field1);
+            writer.write("</Field1><Field2>");
+            writer.write(field2);
+            writer.write("</Field2><Field3>");
+            writer.write(String.valueOf(field3));
+            writer.write("</Field3><Field4>");
+            writer.write(String.valueOf(field4));
+            writer.write("</Field4><Field5>");
+            writer.write(field5);
+            writer.write("</Field5></Data>");
+            writer.flush();
+        }
+    }
+}
diff --git a/test/com/sleepycat/bind/tuple/test/MarshalledObject.java b/test/com/sleepycat/bind/tuple/test/MarshalledObject.java
new file mode 100644
index 0000000000000000000000000000000000000000..1f56b4de4c65f1a1611ac380dd16ac6d838bc1fb
--- /dev/null
+++ b/test/com/sleepycat/bind/tuple/test/MarshalledObject.java
@@ -0,0 +1,137 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MarshalledObject.java,v 1.22.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
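+ * Simple entity implementing MarshalledTupleEntry and
+ * MarshalledTupleKeyEntity for the tuple binding tests; secondary keys "1"
+ * and "2" are written only when non-empty, and nullifyForeignKey clears
+ * them.
+ *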
+ * @author Mark Hayes
+ */
+public class MarshalledObject
+    implements MarshalledTupleEntry, MarshalledTupleKeyEntity {
+
+    private String data;
+    private String primaryKey;
+    private String indexKey1;
+    private String indexKey2;
+
+    public MarshalledObject() {
+    }
+
+    MarshalledObject(String data, String primaryKey,
+                     String indexKey1, String indexKey2) {
+
+        this.data = data;
+        this.primaryKey = primaryKey;
+        this.indexKey1 = indexKey1;
+        this.indexKey2 = indexKey2;
+    }
+
+    String getData() {
+
+        return data;
+    }
+
+    String getPrimaryKey() {
+
+        return primaryKey;
+    }
+
+    String getIndexKey1() {
+
+        return indexKey1;
+    }
+
+    String getIndexKey2() {
+
+        return indexKey2;
+    }
+
+    int expectedDataLength() {
+
+        return data.length() + 1 +
+               indexKey1.length() + 1 +
+               indexKey2.length() + 1;
+    }
+
+    int expectedKeyLength() {
+
+        return primaryKey.length() + 1;
+    }
+
+    public void marshalEntry(TupleOutput dataOutput) {
+
+        dataOutput.writeString(data);
+        dataOutput.writeString(indexKey1);
+        dataOutput.writeString(indexKey2);
+    }
+
+    public void unmarshalEntry(TupleInput dataInput) {
+
+        data = dataInput.readString();
+        indexKey1 = dataInput.readString();
+        indexKey2 = dataInput.readString();
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(primaryKey);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        primaryKey = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                keyOutput.writeString(indexKey1);
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                keyOutput.writeString(indexKey2);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                indexKey1 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                indexKey2 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+}
+
diff --git a/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java b/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b46a61f3cb68724b773f284983acb2e90c10800e
--- /dev/null
+++ b/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java
@@ -0,0 +1,420 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TupleBindingTest.java,v 1.41.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+import java.math.BigInteger;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.BooleanBinding;
+import com.sleepycat.bind.tuple.ByteBinding;
+import com.sleepycat.bind.tuple.CharacterBinding;
+import com.sleepycat.bind.tuple.DoubleBinding;
+import com.sleepycat.bind.tuple.FloatBinding;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.bind.tuple.ShortBinding;
+import com.sleepycat.bind.tuple.BigIntegerBinding;
+import com.sleepycat.bind.tuple.SortedDoubleBinding;
+import com.sleepycat.bind.tuple.SortedFloatBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleInputBinding;
+import com.sleepycat.bind.tuple.TupleMarshalledBinding;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
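+ * Round-trip tests for the primitive tuple bindings, TupleInputBinding,
+ * TupleMarshalledBinding and TupleTupleMarshalledBinding, including buffer
+ * sizing and buffer override behavior.
+ *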
+ * @author Mark Hayes
+ */
+public class TupleBindingTest extends TestCase {
+
+    private DatabaseEntry buffer;
+    private DatabaseEntry keyBuffer;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(TupleBindingTest.class);
+        return suite;
+    }
+
+    public TupleBindingTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("TupleBindingTest." + getName());
+        buffer = new DatabaseEntry();
+        keyBuffer = new DatabaseEntry();
+    }
+
+    public void tearDown() {
+
+        /* Ensure that GC can cleanup. */
+        buffer = null;
+        keyBuffer = null;
+    }
+
+    public void runTest()
+        throws Throwable {
+
+        try {
+            super.runTest();
+        } catch (Exception e) {
+            throw ExceptionUnwrapper.unwrap(e);
+        }
+    }
+
+    private void primitiveBindingTest(Class primitiveCls, Class compareCls,
+                                      Object val, int byteSize) {
+
+        TupleBinding binding = TupleBinding.getPrimitiveBinding(primitiveCls);
+
+        /* Test standard object binding. */
+
+        binding.objectToEntry(val, buffer);
+        assertEquals(byteSize, buffer.getSize());
+
+        Object val2 = binding.entryToObject(buffer);
+        assertSame(compareCls, val2.getClass());
+        assertEquals(val, val2);
+
+        Object valWithWrongCls = (primitiveCls == String.class)
+                      ? ((Object) new Integer(0)) : ((Object) new String(""));
+        try {
+            binding.objectToEntry(valWithWrongCls, buffer);
+        }
+        catch (ClassCastException expected) {}
+
+        /* Test nested tuple binding. */
+	forMoreCoverageTest(binding, val);
+    }
+
+    private void forMoreCoverageTest(TupleBinding val1,Object val2) {
+
+        TupleOutput output = new TupleOutput();
+        output.writeString("abc");
+        val1.objectToEntry(val2, output);
+        output.writeString("xyz");
+
+        TupleInput input = new TupleInput(output);
+        assertEquals("abc", input.readString());
+        Object val3 = val1.entryToObject(input);
+        assertEquals("xyz", input.readString());
+
+        assertEquals(0, input.available());
+        assertSame(val2.getClass(), val3.getClass());
+        assertEquals(val2, val3);
+    }
+
+    public void testPrimitiveBindings() {
+
+        primitiveBindingTest(String.class, String.class,
+                             "abc", 4);
+
+        primitiveBindingTest(Character.class, Character.class,
+                             new Character('a'), 2);
+        primitiveBindingTest(Boolean.class, Boolean.class,
+                             new Boolean(true), 1);
+        primitiveBindingTest(Byte.class, Byte.class,
+                             new Byte((byte) 123), 1);
+        primitiveBindingTest(Short.class, Short.class,
+                             new Short((short) 123), 2);
+        primitiveBindingTest(Integer.class, Integer.class,
+                             new Integer(123), 4);
+	primitiveBindingTest(Long.class, Long.class,
+                             new Long(123), 8);
+        primitiveBindingTest(Float.class, Float.class,
+                             new Float(123.123), 4);
+        primitiveBindingTest(Double.class, Double.class,
+                             new Double(123.123), 8);
+
+        primitiveBindingTest(Character.TYPE, Character.class,
+                             new Character('a'), 2);
+        primitiveBindingTest(Boolean.TYPE, Boolean.class,
+                             new Boolean(true), 1);
+        primitiveBindingTest(Byte.TYPE, Byte.class,
+                             new Byte((byte) 123), 1);
+        primitiveBindingTest(Short.TYPE, Short.class,
+                             new Short((short) 123), 2);
+        primitiveBindingTest(Integer.TYPE, Integer.class,
+                             new Integer(123), 4);
+        primitiveBindingTest(Long.TYPE, Long.class,
+                             new Long(123), 8);
+        primitiveBindingTest(Float.TYPE, Float.class,
+                             new Float(123.123), 4);
+        primitiveBindingTest(Double.TYPE, Double.class,
+                             new Double(123.123), 8);
+
+        DatabaseEntry entry = new DatabaseEntry();
+	
+        StringBinding.stringToEntry("abc", entry);
+	assertEquals(4, entry.getData().length);
+        assertEquals("abc", StringBinding.entryToString(entry));
+
+        new StringBinding().objectToEntry("abc", entry);
+	assertEquals(4, entry.getData().length);
+
+        StringBinding.stringToEntry(null, entry);
+	assertEquals(2, entry.getData().length);
+        assertEquals(null, StringBinding.entryToString(entry));
+
+        new StringBinding().objectToEntry(null, entry);
+	assertEquals(2, entry.getData().length);
+
+        CharacterBinding.charToEntry('a', entry);
+	assertEquals(2, entry.getData().length);
+        assertEquals('a', CharacterBinding.entryToChar(entry));
+
+        new CharacterBinding().objectToEntry(new Character('a'), entry);
+	assertEquals(2, entry.getData().length);
+
+        BooleanBinding.booleanToEntry(true, entry);
+	assertEquals(1, entry.getData().length);
+        assertEquals(true, BooleanBinding.entryToBoolean(entry));
+
+        new BooleanBinding().objectToEntry(Boolean.TRUE, entry);
+	assertEquals(1, entry.getData().length);
+
+        ByteBinding.byteToEntry((byte) 123, entry);
+	assertEquals(1, entry.getData().length);
+        assertEquals((byte) 123, ByteBinding.entryToByte(entry));
+
+        ShortBinding.shortToEntry((short) 123, entry);
+	assertEquals(2, entry.getData().length);
+        assertEquals((short) 123, ShortBinding.entryToShort(entry));
+
+        new ByteBinding().objectToEntry(new Byte((byte) 123), entry);
+	assertEquals(1, entry.getData().length);
+
+        IntegerBinding.intToEntry(123, entry);
+	assertEquals(4, entry.getData().length);
+        assertEquals(123, IntegerBinding.entryToInt(entry));
+
+        new IntegerBinding().objectToEntry(new Integer(123), entry);
+	assertEquals(4, entry.getData().length);
+
+        LongBinding.longToEntry(123, entry);
+	assertEquals(8, entry.getData().length);
+        assertEquals(123, LongBinding.entryToLong(entry));
+
+        new LongBinding().objectToEntry(new Long(123), entry);
+	assertEquals(8, entry.getData().length);
+
+        FloatBinding.floatToEntry((float) 123.123, entry);
+	assertEquals(4, entry.getData().length);
+        assertTrue(((float) 123.123) == FloatBinding.entryToFloat(entry));
+
+        new FloatBinding().objectToEntry(new Float((float) 123.123), entry);
+	assertEquals(4, entry.getData().length);
+
+        DoubleBinding.doubleToEntry(123.123, entry);
+	assertEquals(8, entry.getData().length);
+        assertTrue(123.123 == DoubleBinding.entryToDouble(entry));
+
+        new DoubleBinding().objectToEntry(new Double(123.123), entry);
+	assertEquals(8, entry.getData().length);
+
+        BigIntegerBinding.bigIntegerToEntry
+                (new BigInteger("1234567890123456"), entry);
+        assertEquals(9, entry.getData().length);
+        assertTrue((new BigInteger("1234567890123456")).equals
+		   (BigIntegerBinding.entryToBigInteger(entry)));
+
+        new BigIntegerBinding().objectToEntry
+                (new BigInteger("1234567890123456"), entry);
+        assertEquals(9, entry.getData().length);
+        forMoreCoverageTest(new BigIntegerBinding(),
+                            new BigInteger("1234567890123456"));
+	
+        SortedFloatBinding.floatToEntry((float) 123.123, entry);
+	assertEquals(4, entry.getData().length);
+        assertTrue(((float) 123.123) ==
+                   SortedFloatBinding.entryToFloat(entry));
+
+        new SortedFloatBinding().objectToEntry
+            (new Float((float) 123.123), entry);
+	assertEquals(4, entry.getData().length);
+        forMoreCoverageTest(new SortedFloatBinding(),
+                            new Float((float) 123.123));
+
+        SortedDoubleBinding.doubleToEntry(123.123, entry);
+	assertEquals(8, entry.getData().length);
+        assertTrue(123.123 == SortedDoubleBinding.entryToDouble(entry));
+
+        new SortedDoubleBinding().objectToEntry(new Double(123.123), entry);
+	assertEquals(8, entry.getData().length);
+        forMoreCoverageTest(new SortedDoubleBinding(),
+                            new Double(123.123));
+    }
+
+    public void testTupleInputBinding() {
+
+        EntryBinding binding = new TupleInputBinding();
+
+        TupleOutput out = new TupleOutput();
+        out.writeString("abc");
+        binding.objectToEntry(new TupleInput(out), buffer);
+        assertEquals(4, buffer.getSize());
+
+        Object result = binding.entryToObject(buffer);
+        assertTrue(result instanceof TupleInput);
+        TupleInput in = (TupleInput) result;
+        assertEquals("abc", in.readString());
+        assertEquals(0, in.available());
+    }
+
+    // also tests TupleBinding since TupleMarshalledBinding extends it
+    public void testTupleMarshalledBinding() {
+
+        EntryBinding binding =
+            new TupleMarshalledBinding(MarshalledObject.class);
+
+        MarshalledObject val = new MarshalledObject("abc", "", "", "");
+        binding.objectToEntry(val, buffer);
+        assertEquals(val.expectedDataLength(), buffer.getSize());
+
+        Object result = binding.entryToObject(buffer);
+        assertTrue(result instanceof MarshalledObject);
+        val = (MarshalledObject) result;
+        assertEquals("abc", val.getData());
+    }
+
+    // also tests TupleTupleBinding since TupleTupleMarshalledBinding extends
+    // it
+    public void testTupleTupleMarshalledBinding() {
+
+        EntityBinding binding =
+            new TupleTupleMarshalledBinding(MarshalledObject.class);
+
+        MarshalledObject val = new MarshalledObject("abc", "primary",
+                                                    "index1", "index2");
+        binding.objectToData(val, buffer);
+        assertEquals(val.expectedDataLength(), buffer.getSize());
+        binding.objectToKey(val, keyBuffer);
+        assertEquals(val.expectedKeyLength(), keyBuffer.getSize());
+
+        Object result = binding.entryToObject(keyBuffer, buffer);
+        assertTrue(result instanceof MarshalledObject);
+        val = (MarshalledObject) result;
+        assertEquals("abc", val.getData());
+        assertEquals("primary", val.getPrimaryKey());
+        assertEquals("index1", val.getIndexKey1());
+        assertEquals("index2", val.getIndexKey2());
+    }
+
+    public void testBufferSize() {
+
+        CaptureSizeBinding binding = new CaptureSizeBinding();
+
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize);
+
+        binding.setTupleBufferSize(1000);
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertEquals(1000, binding.bufSize);
+    }
+
+    private class CaptureSizeBinding extends TupleBinding {
+
+        int bufSize;
+
+        CaptureSizeBinding() {
+            super();
+        }
+
+        public TupleOutput getTupleOutput(Object object) {
+            TupleOutput out = super.getTupleOutput(object);
+            bufSize = out.getBufferBytes().length;
+            return out;
+        }
+
+        public Object entryToObject(TupleInput input) {
+            return input.readString();
+        }
+
+        public void objectToEntry(Object object, TupleOutput output) {
+            assertEquals(bufSize, output.getBufferBytes().length);
+            output.writeString((String) object);
+        }
+    }
+
+    public void testBufferOverride() {
+
+        TupleOutput out = new TupleOutput(new byte[10]);
+        CachedOutputBinding binding = new CachedOutputBinding(out);
+
+        binding.used = false;
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+
+        binding.used = false;
+        binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer);
+        assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+
+        binding.used = false;
+        binding.objectToEntry("x", buffer);
+        assertEquals("x", binding.entryToObject(buffer));
+        assertTrue(binding.used);
+    }
+
+    private class CachedOutputBinding extends TupleBinding {
+
+        TupleOutput out;
+        boolean used;
+
+        CachedOutputBinding(TupleOutput out) {
+            super();
+            this.out = out;
+        }
+
+        public TupleOutput getTupleOutput(Object object) {
+            out.reset();
+            used = true;
+            return out;
+        }
+
+        public Object entryToObject(TupleInput input) {
+            return input.readString();
+        }
+
+        public void objectToEntry(Object object, TupleOutput output) {
+            assertSame(out, output);
+            output.writeString((String) object);
+        }
+    }
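+
+    /*
+     * Note (added for clarity, not in the original test): both private
+     * bindings above rely on the same hook.  TupleBinding.getTupleOutput
+     * may be overridden either to observe the buffer that is about to be
+     * used (CaptureSizeBinding) or to substitute a caller-managed,
+     * reusable buffer (CachedOutputBinding), e.g. to avoid allocating a
+     * fresh buffer on every objectToEntry call.
+     */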
+}
diff --git a/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java b/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e7c5fff7346d7db9b94b2fbbce8fec8a4476da90
--- /dev/null
+++ b/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java
@@ -0,0 +1,929 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TupleFormatTest.java,v 1.29.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import java.util.Arrays;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleFormatTest extends TestCase {
+
+    private TupleInput in;
+    private TupleOutput out;
+    private DatabaseEntry buffer;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(TupleFormatTest.class);
+        return suite;
+    }
+
+    public TupleFormatTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("TupleFormatTest." + getName());
+        buffer = new DatabaseEntry();
+        out = new TupleOutput();
+    }
+
+    public void tearDown() {
+
+        /* Ensure that GC can cleanup. */
+        in = null;
+        out = null;
+        buffer = null;
+    }
+
+    private void copyOutputToInput() {
+
+        TupleBinding.outputToEntry(out, buffer);
+        assertEquals(out.size(), buffer.getSize());
+        in = TupleBinding.entryToInput(buffer);
+        assertEquals(in.available(), buffer.getSize());
+        assertEquals(in.getBufferLength(), buffer.getSize());
+    }
+
+    private void stringTest(String val) {
+
+        out.reset();
+        out.writeString(val);
+        assertEquals(val.length() + 1, out.size()); // assume 1-byte chars
+        copyOutputToInput();
+        assertEquals(val, in.readString());
+        assertEquals(0, in.available());
+    }
+
+    public void testString() {
+
+        stringTest("");
+        stringTest("a");
+        stringTest("abc");
+
+        out.reset();
+        out.writeString("abc");
+        out.writeString("defg");
+        assertEquals(9, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readString());
+        assertEquals("defg", in.readString());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeString("abc");
+        out.writeString("defg");
+        out.writeString("hijkl");
+        assertEquals(15, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readString());
+        assertEquals("defg", in.readString());
+        assertEquals("hijkl", in.readString());
+        assertEquals(0, in.available());
+    }
+
+    private void fixedStringTest(char[] val) {
+
+        out.reset();
+        out.writeString(val);
+        assertEquals(val.length, out.size()); // assume 1-byte chars
+        copyOutputToInput();
+        char[] val2 = new char[val.length];
+        in.readString(val2);
+        assertTrue(Arrays.equals(val, val2));
+        assertEquals(0, in.available());
+        in.reset();
+        String val3 = in.readString(val.length);
+        assertTrue(Arrays.equals(val, val3.toCharArray()));
+        assertEquals(0, in.available());
+    }
+
+    public void testFixedString() {
+
+        fixedStringTest(new char[0]);
+        fixedStringTest(new char[] {'a'});
+        fixedStringTest(new char[] {'a', 'b', 'c'});
+
+        out.reset();
+        out.writeString(new char[] {'a', 'b', 'c'});
+        out.writeString(new char[] {'d', 'e', 'f', 'g'});
+        assertEquals(7, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readString(3));
+        assertEquals("defg", in.readString(4));
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeString(new char[] {'a', 'b', 'c'});
+        out.writeString(new char[] {'d', 'e', 'f', 'g'});
+        out.writeString(new char[] {'h', 'i', 'j', 'k', 'l'});
+        assertEquals(12, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readString(3));
+        assertEquals("defg", in.readString(4));
+        assertEquals("hijkl", in.readString(5));
+        assertEquals(0, in.available());
+    }
+
+    public void testNullString() {
+
+        out.reset();
+        out.writeString((String) null);
+        assertEquals(2, out.size());
+        copyOutputToInput();
+        assertEquals(null, in.readString());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeString((String) null);
+        out.writeString("x");
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        assertEquals(null, in.readString());
+        assertEquals(2, in.available());
+        assertEquals("x", in.readString());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeString("x");
+        out.writeString((String) null);
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        assertEquals("x", in.readString());
+        assertEquals(2, in.available());
+        assertEquals(null, in.readString());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeString((String) null);
+        out.writeInt(123);
+        assertEquals(6, out.size());
+        copyOutputToInput();
+        assertEquals(null, in.readString());
+        assertEquals(4, in.available());
+        assertEquals(123, in.readInt());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeInt(123);
+        out.writeString((String) null);
+        assertEquals(6, out.size());
+        copyOutputToInput();
+        assertEquals(123, in.readInt());
+        assertEquals(2, in.available());
+        assertEquals(null, in.readString());
+        assertEquals(0, in.available());
+    }
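+
+    /*
+     * Illustrative helper (not part of the original test): the size
+     * arithmetic behind the assertions above, assuming 7-bit ASCII data
+     * (one UTF byte per char).  A null String occupies two bytes on its
+     * own, while a non-null String occupies its UTF bytes plus a one-byte
+     * terminator.
+     */
+    private static int expectedStringSize(String val) {
+        return (val == null) ? 2 : (val.length() + 1);
+    }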
+
+    private void charsTest(char[] val) {
+
+        for (int mode = 0; mode < 2; mode += 1) {
+            out.reset();
+            switch (mode) {
+                case 0: out.writeChars(val); break;
+                case 1: out.writeChars(new String(val)); break;
+                default: throw new IllegalStateException();
+            }
+            assertEquals(val.length * 2, out.size());
+            copyOutputToInput();
+            char[] val2 = new char[val.length];
+            in.readChars(val2);
+            assertTrue(Arrays.equals(val, val2));
+            assertEquals(0, in.available());
+            in.reset();
+            String val3 = in.readChars(val.length);
+            assertTrue(Arrays.equals(val, val3.toCharArray()));
+            assertEquals(0, in.available());
+        }
+    }
+
+    public void testChars() {
+
+        charsTest(new char[0]);
+        charsTest(new char[] {'a'});
+        charsTest(new char[] {'a', 'b', 'c'});
+
+        out.reset();
+        out.writeChars("abc");
+        out.writeChars("defg");
+        assertEquals(7 * 2, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readChars(3));
+        assertEquals("defg", in.readChars(4));
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeChars("abc");
+        out.writeChars("defg");
+        out.writeChars("hijkl");
+        assertEquals(12 * 2, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readChars(3));
+        assertEquals("defg", in.readChars(4));
+        assertEquals("hijkl", in.readChars(5));
+        assertEquals(0, in.available());
+    }
+
+    private void bytesTest(char[] val) {
+
+        char[] valBytes = new char[val.length];
+        for (int i = 0; i < val.length; i += 1)
+            valBytes[i] = (char) (val[i] & 0xFF);
+
+        for (int mode = 0; mode < 2; mode += 1) {
+            out.reset();
+            switch (mode) {
+                case 0: out.writeBytes(val); break;
+                case 1: out.writeBytes(new String(val)); break;
+                default: throw new IllegalStateException();
+            }
+            assertEquals(val.length, out.size());
+            copyOutputToInput();
+            char[] val2 = new char[val.length];
+            in.readBytes(val2);
+            assertTrue(Arrays.equals(valBytes, val2));
+            assertEquals(0, in.available());
+            in.reset();
+            String val3 = in.readBytes(val.length);
+            assertTrue(Arrays.equals(valBytes, val3.toCharArray()));
+            assertEquals(0, in.available());
+        }
+    }
+
+    public void testBytes() {
+
+        bytesTest(new char[0]);
+        bytesTest(new char[] {'a'});
+        bytesTest(new char[] {'a', 'b', 'c'});
+        bytesTest(new char[] {0x7F00, 0x7FFF, 0xFF00, 0xFFFF});
+
+        out.reset();
+        out.writeBytes("abc");
+        out.writeBytes("defg");
+        assertEquals(7, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readBytes(3));
+        assertEquals("defg", in.readBytes(4));
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeBytes("abc");
+        out.writeBytes("defg");
+        out.writeBytes("hijkl");
+        assertEquals(12, out.size());
+        copyOutputToInput();
+        assertEquals("abc", in.readBytes(3));
+        assertEquals("defg", in.readBytes(4));
+        assertEquals("hijkl", in.readBytes(5));
+        assertEquals(0, in.available());
+    }
+
+    private void booleanTest(boolean val) {
+
+        out.reset();
+        out.writeBoolean(val);
+        assertEquals(1, out.size());
+        copyOutputToInput();
+        assertEquals(val, in.readBoolean());
+        assertEquals(0, in.available());
+    }
+
+    public void testBoolean() {
+
+        booleanTest(true);
+        booleanTest(false);
+
+        out.reset();
+        out.writeBoolean(true);
+        out.writeBoolean(false);
+        assertEquals(2, out.size());
+        copyOutputToInput();
+        assertEquals(true, in.readBoolean());
+        assertEquals(false, in.readBoolean());
+        assertEquals(0, in.available());
+
+        out.reset();
+        out.writeBoolean(true);
+        out.writeBoolean(false);
+        out.writeBoolean(true);
+        assertEquals(3, out.size());
+        copyOutputToInput();
+        assertEquals(true, in.readBoolean());
+        assertEquals(false, in.readBoolean());
+        assertEquals(true, in.readBoolean());
+        assertEquals(0, in.available());
+    }
+
+    private void unsignedByteTest(int val) {
+
+        unsignedByteTest(val, val);
+    }
+
+    private void unsignedByteTest(int val, int expected) {
+
+        out.reset();
+        out.writeUnsignedByte(val);
+        assertEquals(1, out.size());
+        copyOutputToInput();
+        assertEquals(expected, in.readUnsignedByte());
+    }
+
+    public void testUnsignedByte() {
+
+        unsignedByteTest(0);
+        unsignedByteTest(1);
+        unsignedByteTest(254);
+        unsignedByteTest(255);
+        unsignedByteTest(256, 0);
+        unsignedByteTest(-1, 255);
+        unsignedByteTest(-2, 254);
+        unsignedByteTest(-255, 1);
+
+        out.reset();
+        out.writeUnsignedByte(0);
+        out.writeUnsignedByte(1);
+        out.writeUnsignedByte(255);
+        assertEquals(3, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readUnsignedByte());
+        assertEquals(1, in.readUnsignedByte());
+        assertEquals(255, in.readUnsignedByte());
+        assertEquals(0, in.available());
+    }
+
+    private void unsignedShortTest(int val) {
+
+        unsignedShortTest(val, val);
+    }
+
+    private void unsignedShortTest(int val, int expected) {
+
+        out.reset();
+        out.writeUnsignedShort(val);
+        assertEquals(2, out.size());
+        copyOutputToInput();
+        assertEquals(expected, in.readUnsignedShort());
+    }
+
+    public void testUnsignedShort() {
+
+        unsignedShortTest(0);
+        unsignedShortTest(1);
+        unsignedShortTest(255);
+        unsignedShortTest(256);
+        unsignedShortTest(257);
+        unsignedShortTest(Short.MAX_VALUE - 1);
+        unsignedShortTest(Short.MAX_VALUE);
+        unsignedShortTest(Short.MAX_VALUE + 1);
+        unsignedShortTest(0xFFFF - 1);
+        unsignedShortTest(0xFFFF);
+        unsignedShortTest(0xFFFF + 1, 0);
+        unsignedShortTest(0x7FFF0000, 0);
+        unsignedShortTest(0xFFFF0000, 0);
+        unsignedShortTest(-1, 0xFFFF);
+        unsignedShortTest(-2, 0xFFFF - 1);
+        unsignedShortTest(-0xFFFF, 1);
+
+        out.reset();
+        out.writeUnsignedShort(0);
+        out.writeUnsignedShort(1);
+        out.writeUnsignedShort(0xFFFF);
+        assertEquals(6, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readUnsignedShort());
+        assertEquals(1, in.readUnsignedShort());
+        assertEquals(0xFFFF, in.readUnsignedShort());
+        assertEquals(0, in.available());
+    }
+
+    private void unsignedIntTest(long val) {
+
+        unsignedIntTest(val, val);
+    }
+
+    private void unsignedIntTest(long val, long expected) {
+
+        out.reset();
+        out.writeUnsignedInt(val);
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        assertEquals(expected, in.readUnsignedInt());
+    }
+
+    public void testUnsignedInt() {
+
+        unsignedIntTest(0L);
+        unsignedIntTest(1L);
+        unsignedIntTest(255L);
+        unsignedIntTest(256L);
+        unsignedIntTest(257L);
+        unsignedIntTest(Short.MAX_VALUE - 1L);
+        unsignedIntTest(Short.MAX_VALUE);
+        unsignedIntTest(Short.MAX_VALUE + 1L);
+        unsignedIntTest(Integer.MAX_VALUE - 1L);
+        unsignedIntTest(Integer.MAX_VALUE);
+        unsignedIntTest(Integer.MAX_VALUE + 1L);
+        unsignedIntTest(0xFFFFFFFFL - 1L);
+        unsignedIntTest(0xFFFFFFFFL);
+        unsignedIntTest(0xFFFFFFFFL + 1L, 0L);
+        unsignedIntTest(0x7FFFFFFF00000000L, 0L);
+        unsignedIntTest(0xFFFFFFFF00000000L, 0L);
+        unsignedIntTest(-1, 0xFFFFFFFFL);
+        unsignedIntTest(-2, 0xFFFFFFFFL - 1L);
+        unsignedIntTest(-0xFFFFFFFFL, 1L);
+
+        out.reset();
+        out.writeUnsignedInt(0L);
+        out.writeUnsignedInt(1L);
+        out.writeUnsignedInt(0xFFFFFFFFL);
+        assertEquals(12, out.size());
+        copyOutputToInput();
+        assertEquals(0L, in.readUnsignedInt());
+        assertEquals(1L, in.readUnsignedInt());
+        assertEquals(0xFFFFFFFFL, in.readUnsignedInt());
+        assertEquals(0, in.available());
+    }
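+
+    /*
+     * Illustrative helper (an assumption inferred from the assertions in
+     * the three unsigned tests above, not from the TupleOutput source):
+     * out-of-range arguments behave as if truncated to the field width
+     * before being written.
+     */
+    private static long truncateUnsigned(long val, int bits) {
+        return val & ((1L << bits) - 1); // e.g. (-1, 8) -> 255, (256, 8) -> 0
+    }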
+
+    private void byteTest(int val) {
+
+        out.reset();
+        out.writeByte(val);
+        assertEquals(1, out.size());
+        copyOutputToInput();
+        assertEquals((byte) val, in.readByte());
+    }
+
+    public void testByte() {
+
+        byteTest(0);
+        byteTest(1);
+        byteTest(-1);
+        byteTest(Byte.MAX_VALUE - 1);
+        byteTest(Byte.MAX_VALUE);
+        byteTest(Byte.MAX_VALUE + 1);
+        byteTest(Byte.MIN_VALUE + 1);
+        byteTest(Byte.MIN_VALUE);
+        byteTest(Byte.MIN_VALUE - 1);
+        byteTest(0x7F);
+        byteTest(0xFF);
+        byteTest(0x7FFF);
+        byteTest(0xFFFF);
+        byteTest(0x7FFFFFFF);
+        byteTest(0xFFFFFFFF);
+
+        out.reset();
+        out.writeByte(0);
+        out.writeByte(1);
+        out.writeByte(-1);
+        assertEquals(3, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readByte());
+        assertEquals(1, in.readByte());
+        assertEquals(-1, in.readByte());
+        assertEquals(0, in.available());
+    }
+
+    private void shortTest(int val) {
+
+        out.reset();
+        out.writeShort(val);
+        assertEquals(2, out.size());
+        copyOutputToInput();
+        assertEquals((short) val, in.readShort());
+    }
+
+    public void testShort() {
+
+        shortTest(0);
+        shortTest(1);
+        shortTest(-1);
+        shortTest(Short.MAX_VALUE - 1);
+        shortTest(Short.MAX_VALUE);
+        shortTest(Short.MAX_VALUE + 1);
+        shortTest(Short.MIN_VALUE + 1);
+        shortTest(Short.MIN_VALUE);
+        shortTest(Short.MIN_VALUE - 1);
+        shortTest(0x7F);
+        shortTest(0xFF);
+        shortTest(0x7FFF);
+        shortTest(0xFFFF);
+        shortTest(0x7FFFFFFF);
+        shortTest(0xFFFFFFFF);
+
+        out.reset();
+        out.writeShort(0);
+        out.writeShort(1);
+        out.writeShort(-1);
+        assertEquals(3 * 2, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readShort());
+        assertEquals(1, in.readShort());
+        assertEquals(-1, in.readShort());
+        assertEquals(0, in.available());
+    }
+
+    private void intTest(int val) {
+
+        out.reset();
+        out.writeInt(val);
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        assertEquals(val, in.readInt());
+    }
+
+    public void testInt() {
+
+        intTest(0);
+        intTest(1);
+        intTest(-1);
+        intTest(Integer.MAX_VALUE - 1);
+        intTest(Integer.MAX_VALUE);
+        intTest(Integer.MAX_VALUE + 1);
+        intTest(Integer.MIN_VALUE + 1);
+        intTest(Integer.MIN_VALUE);
+        intTest(Integer.MIN_VALUE - 1);
+        intTest(0x7F);
+        intTest(0xFF);
+        intTest(0x7FFF);
+        intTest(0xFFFF);
+        intTest(0x7FFFFFFF);
+        intTest(0xFFFFFFFF);
+
+        out.reset();
+        out.writeInt(0);
+        out.writeInt(1);
+        out.writeInt(-1);
+        assertEquals(3 * 4, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readInt());
+        assertEquals(1, in.readInt());
+        assertEquals(-1, in.readInt());
+        assertEquals(0, in.available());
+    }
+
+    private void longTest(long val) {
+
+        out.reset();
+        out.writeLong(val);
+        assertEquals(8, out.size());
+        copyOutputToInput();
+        assertEquals(val, in.readLong());
+    }
+
+    public void testLong() {
+
+        longTest(0);
+        longTest(1);
+        longTest(-1);
+        longTest(Long.MAX_VALUE - 1);
+        longTest(Long.MAX_VALUE);
+        longTest(Long.MAX_VALUE + 1);
+        longTest(Long.MIN_VALUE + 1);
+        longTest(Long.MIN_VALUE);
+        longTest(Long.MIN_VALUE - 1);
+        longTest(0x7F);
+        longTest(0xFF);
+        longTest(0x7FFF);
+        longTest(0xFFFF);
+        longTest(0x7FFFFFFF);
+        longTest(0xFFFFFFFF);
+        longTest(0x7FFFFFFFFFFFFFFFL);
+        longTest(0xFFFFFFFFFFFFFFFFL);
+
+        out.reset();
+        out.writeLong(0);
+        out.writeLong(1);
+        out.writeLong(-1);
+        assertEquals(3 * 8, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readLong());
+        assertEquals(1, in.readLong());
+        assertEquals(-1, in.readLong());
+        assertEquals(0, in.available());
+    }
+
+    private void floatTest(double val) {
+
+        out.reset();
+        out.writeFloat((float) val);
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        if (Double.isNaN(val)) {
+            assertTrue(Float.isNaN(in.readFloat()));
+        } else {
+            assertEquals((float) val, in.readFloat(), 0);
+        }
+    }
+
+    public void testFloat() {
+
+        floatTest(0);
+        floatTest(1);
+        floatTest(-1);
+        floatTest(1.0);
+        floatTest(0.1);
+        floatTest(-1.0);
+        floatTest(-0.1);
+        floatTest(Float.NaN);
+        floatTest(Float.NEGATIVE_INFINITY);
+        floatTest(Float.POSITIVE_INFINITY);
+        floatTest(Short.MAX_VALUE);
+        floatTest(Short.MIN_VALUE);
+        floatTest(Integer.MAX_VALUE);
+        floatTest(Integer.MIN_VALUE);
+        floatTest(Long.MAX_VALUE);
+        floatTest(Long.MIN_VALUE);
+        floatTest(Float.MAX_VALUE);
+        floatTest(Float.MAX_VALUE + 1);
+        floatTest(Float.MIN_VALUE + 1);
+        floatTest(Float.MIN_VALUE);
+        floatTest(Float.MIN_VALUE - 1);
+        floatTest(0x7F);
+        floatTest(0xFF);
+        floatTest(0x7FFF);
+        floatTest(0xFFFF);
+        floatTest(0x7FFFFFFF);
+        floatTest(0xFFFFFFFF);
+        floatTest(0x7FFFFFFFFFFFFFFFL);
+        floatTest(0xFFFFFFFFFFFFFFFFL);
+
+        out.reset();
+        out.writeFloat(0);
+        out.writeFloat(1);
+        out.writeFloat(-1);
+        assertEquals(3 * 4, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readFloat(), 0);
+        assertEquals(1, in.readFloat(), 0);
+        assertEquals(-1, in.readFloat(), 0);
+        assertEquals(0, in.available());
+    }
+
+    private void doubleTest(double val) {
+
+        out.reset();
+        out.writeDouble(val);
+        assertEquals(8, out.size());
+        copyOutputToInput();
+        if (Double.isNaN(val)) {
+            assertTrue(Double.isNaN(in.readDouble()));
+        } else {
+            assertEquals(val, in.readDouble(), 0);
+        }
+    }
+
+    public void testDouble() {
+
+        doubleTest(0);
+        doubleTest(1);
+        doubleTest(-1);
+        doubleTest(1.0);
+        doubleTest(0.1);
+        doubleTest(-1.0);
+        doubleTest(-0.1);
+        doubleTest(Double.NaN);
+        doubleTest(Double.NEGATIVE_INFINITY);
+        doubleTest(Double.POSITIVE_INFINITY);
+        doubleTest(Short.MAX_VALUE);
+        doubleTest(Short.MIN_VALUE);
+        doubleTest(Integer.MAX_VALUE);
+        doubleTest(Integer.MIN_VALUE);
+        doubleTest(Long.MAX_VALUE);
+        doubleTest(Long.MIN_VALUE);
+        doubleTest(Float.MAX_VALUE);
+        doubleTest(Float.MIN_VALUE);
+        doubleTest(Double.MAX_VALUE - 1);
+        doubleTest(Double.MAX_VALUE);
+        doubleTest(Double.MAX_VALUE + 1);
+        doubleTest(Double.MIN_VALUE + 1);
+        doubleTest(Double.MIN_VALUE);
+        doubleTest(Double.MIN_VALUE - 1);
+        doubleTest(0x7F);
+        doubleTest(0xFF);
+        doubleTest(0x7FFF);
+        doubleTest(0xFFFF);
+        doubleTest(0x7FFFFFFF);
+        doubleTest(0xFFFFFFFF);
+        doubleTest(0x7FFFFFFFFFFFFFFFL);
+        doubleTest(0xFFFFFFFFFFFFFFFFL);
+
+        out.reset();
+        out.writeDouble(0);
+        out.writeDouble(1);
+        out.writeDouble(-1);
+        assertEquals(3 * 8, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readDouble(), 0);
+        assertEquals(1, in.readDouble(), 0);
+        assertEquals(-1, in.readDouble(), 0);
+        assertEquals(0, in.available());
+    }
+
+    private void sortedFloatTest(double val) {
+
+        out.reset();
+        out.writeSortedFloat((float) val);
+        assertEquals(4, out.size());
+        copyOutputToInput();
+        if (Double.isNaN(val)) {
+            assertTrue(Float.isNaN(in.readSortedFloat()));
+        } else {
+            assertEquals((float) val, in.readSortedFloat(), 0);
+        }
+    }
+
+    public void testSortedFloat() {
+
+        sortedFloatTest(0);
+        sortedFloatTest(1);
+        sortedFloatTest(-1);
+        sortedFloatTest(1.0);
+        sortedFloatTest(0.1);
+        sortedFloatTest(-1.0);
+        sortedFloatTest(-0.1);
+        sortedFloatTest(Float.NaN);
+        sortedFloatTest(Float.NEGATIVE_INFINITY);
+        sortedFloatTest(Float.POSITIVE_INFINITY);
+        sortedFloatTest(Short.MAX_VALUE);
+        sortedFloatTest(Short.MIN_VALUE);
+        sortedFloatTest(Integer.MAX_VALUE);
+        sortedFloatTest(Integer.MIN_VALUE);
+        sortedFloatTest(Long.MAX_VALUE);
+        sortedFloatTest(Long.MIN_VALUE);
+        sortedFloatTest(Float.MAX_VALUE);
+        sortedFloatTest(Float.MAX_VALUE + 1);
+        sortedFloatTest(Float.MIN_VALUE + 1);
+        sortedFloatTest(Float.MIN_VALUE);
+        sortedFloatTest(Float.MIN_VALUE - 1);
+        sortedFloatTest(0x7F);
+        sortedFloatTest(0xFF);
+        sortedFloatTest(0x7FFF);
+        sortedFloatTest(0xFFFF);
+        sortedFloatTest(0x7FFFFFFF);
+        sortedFloatTest(0xFFFFFFFF);
+        sortedFloatTest(0x7FFFFFFFFFFFFFFFL);
+        sortedFloatTest(0xFFFFFFFFFFFFFFFFL);
+
+        out.reset();
+        out.writeSortedFloat(0);
+        out.writeSortedFloat(1);
+        out.writeSortedFloat(-1);
+        assertEquals(3 * 4, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readSortedFloat(), 0);
+        assertEquals(1, in.readSortedFloat(), 0);
+        assertEquals(-1, in.readSortedFloat(), 0);
+        assertEquals(0, in.available());
+    }
+
+    private void sortedDoubleTest(double val) {
+
+        out.reset();
+        out.writeSortedDouble(val);
+        assertEquals(8, out.size());
+        copyOutputToInput();
+        if (Double.isNaN(val)) {
+            assertTrue(Double.isNaN(in.readSortedDouble()));
+        } else {
+            assertEquals(val, in.readSortedDouble(), 0);
+        }
+    }
+
+    public void testSortedDouble() {
+
+        sortedDoubleTest(0);
+        sortedDoubleTest(1);
+        sortedDoubleTest(-1);
+        sortedDoubleTest(1.0);
+        sortedDoubleTest(0.1);
+        sortedDoubleTest(-1.0);
+        sortedDoubleTest(-0.1);
+        sortedDoubleTest(Double.NaN);
+        sortedDoubleTest(Double.NEGATIVE_INFINITY);
+        sortedDoubleTest(Double.POSITIVE_INFINITY);
+        sortedDoubleTest(Short.MAX_VALUE);
+        sortedDoubleTest(Short.MIN_VALUE);
+        sortedDoubleTest(Integer.MAX_VALUE);
+        sortedDoubleTest(Integer.MIN_VALUE);
+        sortedDoubleTest(Long.MAX_VALUE);
+        sortedDoubleTest(Long.MIN_VALUE);
+        sortedDoubleTest(Float.MAX_VALUE);
+        sortedDoubleTest(Float.MIN_VALUE);
+        sortedDoubleTest(Double.MAX_VALUE - 1);
+        sortedDoubleTest(Double.MAX_VALUE);
+        sortedDoubleTest(Double.MAX_VALUE + 1);
+        sortedDoubleTest(Double.MIN_VALUE + 1);
+        sortedDoubleTest(Double.MIN_VALUE);
+        sortedDoubleTest(Double.MIN_VALUE - 1);
+        sortedDoubleTest(0x7F);
+        sortedDoubleTest(0xFF);
+        sortedDoubleTest(0x7FFF);
+        sortedDoubleTest(0xFFFF);
+        sortedDoubleTest(0x7FFFFFFF);
+        sortedDoubleTest(0xFFFFFFFF);
+        sortedDoubleTest(0x7FFFFFFFFFFFFFFFL);
+        sortedDoubleTest(0xFFFFFFFFFFFFFFFFL);
+
+        out.reset();
+        out.writeSortedDouble(0);
+        out.writeSortedDouble(1);
+        out.writeSortedDouble(-1);
+        assertEquals(3 * 8, out.size());
+        copyOutputToInput();
+        assertEquals(0, in.readSortedDouble(), 0);
+        assertEquals(1, in.readSortedDouble(), 0);
+        assertEquals(-1, in.readSortedDouble(), 0);
+        assertEquals(0, in.available());
+    }
+
+    private void packedIntTest(int val, int size) {
+
+        out.reset();
+        out.writePackedInt(val);
+        assertEquals(size, out.size());
+        copyOutputToInput();
+        assertEquals(size, in.getPackedIntByteLength());
+        assertEquals(val, in.readPackedInt());
+    }
+
+    public void testPackedInt() {
+
+        /* Exhaustive value testing is in PackedIntTest. */
+        packedIntTest(119, 1);
+        packedIntTest(0xFFFF + 119, 3);
+        packedIntTest(Integer.MAX_VALUE, 5);
+
+        out.reset();
+        out.writePackedInt(119);
+        out.writePackedInt(0xFFFF + 119);
+        out.writePackedInt(Integer.MAX_VALUE);
+        assertEquals(1 + 3 + 5, out.size());
+        copyOutputToInput();
+        assertEquals(119, in.readPackedInt());
+        assertEquals(0xFFFF + 119, in.readPackedInt());
+        assertEquals(Integer.MAX_VALUE, in.readPackedInt());
+        assertEquals(0, in.available());
+    }
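+
+    /*
+     * Illustrative helper (a sketch inferred from the sizes asserted above,
+     * not taken from the PackedInteger source): values in [-119, 119] fit
+     * in a single byte, and other values take one length byte plus the
+     * significant bytes of the magnitude beyond 119.
+     */
+    private static int expectedPackedIntSize(int val) {
+        if (val >= -119 && val <= 119) {
+            return 1;
+        }
+        long magnitude = Math.abs((long) val) - 119;
+        int dataBytes = 1;
+        while ((magnitude >>>= 8) != 0) {
+            dataBytes += 1;
+        }
+        return 1 + dataBytes; // e.g. 0xFFFF + 119 -> 3, Integer.MAX_VALUE -> 5
+    }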
+
+    private void packedLongTest(long val, int size) {
+
+        out.reset();
+        out.writePackedLong(val);
+        assertEquals(size, out.size());
+        copyOutputToInput();
+        assertEquals(size, in.getPackedLongByteLength());
+        assertEquals(val, in.readPackedLong());
+    }
+
+    public void testPackedLong() {
+
+        /* Exhaustive value testing is in PackedIntTest. */
+        packedLongTest(119, 1);
+        packedLongTest(0xFFFFFFFFL + 119, 5);
+        packedLongTest(Long.MAX_VALUE, 9);
+
+        out.reset();
+        out.writePackedLong(119);
+        out.writePackedLong(0xFFFFFFFFL + 119);
+        out.writePackedLong(Long.MAX_VALUE);
+        assertEquals(1 + 5 + 9, out.size());
+        copyOutputToInput();
+        assertEquals(119, in.readPackedLong());
+        assertEquals(0xFFFFFFFFL + 119, in.readPackedLong());
+        assertEquals(Long.MAX_VALUE, in.readPackedLong());
+        assertEquals(0, in.available());
+    }
+}
diff --git a/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java b/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..05787838e0cc86c360f7ae68ae70e2909815df84
--- /dev/null
+++ b/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
@@ -0,0 +1,464 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TupleOrderingTest.java,v 1.27.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleOrderingTest extends TestCase {
+
+    private TupleOutput out;
+    private byte[] prevBuf;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(TupleOrderingTest.class);
+        return suite;
+    }
+
+    public TupleOrderingTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("TupleOrderingTest." + getName());
+        out = new TupleOutput();
+        prevBuf = null;
+    }
+
+    public void tearDown() {
+
+        /* Ensure that GC can cleanup. */
+        out = null;
+        prevBuf = null;
+    }
+
+    /**
+     * Each tuple written must be strictly greater than (by unsigned
+     * comparison of bytes) the tuple written just before it.  The check()
+     * method compares the bytes written since the previous call to check()
+     * against the bytes it saved on that previous call.
+     */
+    private void check() {
+
+        check(-1);
+    }
+
+    private void check(int dataIndex) {
+
+        byte[] buf = new byte[out.size()];
+        System.arraycopy(out.getBufferBytes(), out.getBufferOffset(),
+                         buf, 0, buf.length);
+        if (prevBuf != null) {
+            int errOffset = -1;
+            int len = Math.min(prevBuf.length, buf.length);
+            boolean areEqual = true;
+            for (int i = 0; i < len; i += 1) {
+                int val1 = prevBuf[i] & 0xFF;
+                int val2 = buf[i] & 0xFF;
+                if (val1 < val2) {
+                    areEqual = false;
+                    break;
+                } else if (val1 > val2) {
+                    errOffset = i;
+                    break;
+                }
+            }
+            if (areEqual) {
+                if (prevBuf.length < buf.length) {
+                    areEqual = false;
+                } else if (prevBuf.length > buf.length) {
+                    areEqual = false;
+                    errOffset = buf.length + 1;
+                }
+            }
+            if (errOffset != -1 || areEqual) {
+                StringBuffer msg = new StringBuffer();
+                if (errOffset != -1) {
+                    msg.append("Left >= right at byte offset " + errOffset);
+                } else if (areEqual) {
+                    msg.append("Bytes are equal");
+                } else {
+                    throw new IllegalStateException();
+                }
+                msg.append("\nLeft hex bytes: ");
+                for (int i = 0; i < prevBuf.length; i += 1) {
+                    msg.append(' ');
+                    int val = prevBuf[i] & 0xFF;
+                    if ((val & 0xF0) == 0) {
+                        msg.append('0');
+                    }
+                    msg.append(Integer.toHexString(val));
+                }
+                msg.append("\nRight hex bytes:");
+                for (int i = 0; i < buf.length; i += 1) {
+                    msg.append(' ');
+                    int val = buf[i] & 0xFF;
+                    if ((val & 0xF0) == 0) {
+                        msg.append('0');
+                    }
+                    msg.append(Integer.toHexString(val));
+                }
+                if (dataIndex >= 0) {
+                    msg.append("\nData index: " + dataIndex);
+                }
+                fail(msg.toString());
+            }
+        }
+        prevBuf = buf;
+        out.reset();
+    }
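+
+    /*
+     * Note on the '& 0xFF' masking above: Java bytes are signed, so masking
+     * each byte to an int in [0, 255] reproduces the unsigned byte-by-byte
+     * comparison on which default btree key ordering is based.
+     */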
+
+    private void reset() {
+
+        prevBuf = null;
+        out.reset();
+    }
+
+    public void testString() {
+
+        final String[] DATA = {
+            "", "a", "ab", "b", "bb", "bba",
+            new String(new char[] { 0x7F }),
+            new String(new char[] { 0x7F, 0 }),
+            new String(new char[] { 0xFF }),
+            new String(new char[] { Character.MAX_VALUE }),
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeString(DATA[i]);
+            check(i);
+        }
+        reset();
+        out.writeString("a");
+        check();
+        out.writeString("a");
+        out.writeString("");
+        check();
+        out.writeString("a");
+        out.writeString("");
+        out.writeString("a");
+        check();
+        out.writeString("a");
+        out.writeString("b");
+        check();
+        out.writeString("aa");
+        check();
+        out.writeString("b");
+        check();
+    }
+
+    public void testFixedString() {
+
+        final char[][] DATA = {
+            {}, {'a'}, {'a', 'b'}, {'b'}, {'b', 'b'}, {0x7F}, {0xFF},
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeString(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testChars() {
+
+        final char[][] DATA = {
+            {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'},
+            {0x7F}, {0x7F, 0}, {0xFF}, {0xFF, 0},
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeChars(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testBytes() {
+
+        final char[][] DATA = {
+            {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'},
+            {0x7F}, {0xFF},
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeBytes(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testBoolean() {
+
+        final boolean[] DATA = {
+            false, true
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeBoolean(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testUnsignedByte() {
+
+        final int[] DATA = {
+            0, 1, 0x7F, 0xFF
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeUnsignedByte(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testUnsignedShort() {
+
+        final int[] DATA = {
+            0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeUnsignedShort(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testUnsignedInt() {
+
+        final long[] DATA = {
+            0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF, 0x80000,
+            0x7FFFFFFF, 0x80000000, 0xFFFFFFFF
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeUnsignedInt(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testByte() {
+
+        final byte[] DATA = {
+            Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+            -1, 0, 1,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeByte(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testShort() {
+
+        final short[] DATA = {
+            Short.MIN_VALUE, Short.MIN_VALUE + 1,
+            Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+            -1, 0, 1,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1, Short.MAX_VALUE,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeShort(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testInt() {
+
+        final int[] DATA = {
+            Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
+            Short.MIN_VALUE, Short.MIN_VALUE + 1,
+            Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+            -1, 0, 1,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1, Short.MAX_VALUE,
+            Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeInt(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testLong() {
+
+        final long[] DATA = {
+            Long.MIN_VALUE, Long.MIN_VALUE + 1,
+            Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
+            Short.MIN_VALUE, Short.MIN_VALUE + 1,
+            Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+            -1, 0, 1,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1, Short.MAX_VALUE,
+            Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+            Long.MAX_VALUE - 1, Long.MAX_VALUE,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeLong(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testFloat() {
+
+        // Only positive floats and doubles are ordered deterministically
+
+        final float[] DATA = {
+            0, Float.MIN_VALUE, 2 * Float.MIN_VALUE,
+            (float) 0.01, (float) 0.02, (float) 0.99,
+            1, (float) 1.01, (float) 1.02, (float) 1.99,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1, Short.MAX_VALUE,
+            Integer.MAX_VALUE,
+            Long.MAX_VALUE / 2, Long.MAX_VALUE,
+            Float.MAX_VALUE,
+            Float.POSITIVE_INFINITY,
+            Float.NaN,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeFloat(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testDouble() {
+
+        // Only positive floats and doubles are ordered deterministically
+
+        final double[] DATA = {
+            0, Double.MIN_VALUE, 2 * Double.MIN_VALUE,
+            0.001, 0.002, 0.999,
+            1, 1.001, 1.002, 1.999,
+            Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1, Short.MAX_VALUE,
+            Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+            Long.MAX_VALUE / 2, Long.MAX_VALUE,
+            Float.MAX_VALUE, Double.MAX_VALUE,
+            Double.POSITIVE_INFINITY,
+            Double.NaN,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeDouble(DATA[i]);
+            check(i);
+        }
+    }
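+
+    /*
+     * Why only positive values in the two tests above: for non-negative
+     * IEEE 754 values the raw bit pattern increases monotonically with the
+     * numeric value, so the bytes written by writeFloat and writeDouble
+     * already compare in numeric order; negative values would compare
+     * reversed.  A minimal illustration (hypothetical helper, not used by
+     * the tests):
+     */
+    private static boolean positiveFloatBitsOrdered(float smaller,
+                                                    float larger) {
+        /* Holds whenever 0 <= smaller <= larger. */
+        return Float.floatToIntBits(smaller) <= Float.floatToIntBits(larger);
+    }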
+
+    public void testSortedFloat() {
+
+        final float[] DATA = {
+            Float.NEGATIVE_INFINITY,
+            (- Float.MAX_VALUE),
+            Long.MIN_VALUE,
+            Long.MIN_VALUE / 2,
+            Integer.MIN_VALUE,
+            Short.MIN_VALUE,
+            Short.MIN_VALUE + 1,
+            Byte.MIN_VALUE,
+            Byte.MIN_VALUE + 1,
+            (float) -1.99,
+            (float) -1.02,
+            (float) -1.01,
+            -1,
+            (float) -0.99,
+            (float) -0.02,
+            (float) -0.01,
+            2 * (- Float.MIN_VALUE),
+            (- Float.MIN_VALUE),
+            0,
+            Float.MIN_VALUE,
+            2 * Float.MIN_VALUE,
+            (float) 0.01,
+            (float) 0.02,
+            (float) 0.99,
+            1,
+            (float) 1.01,
+            (float) 1.02,
+            (float) 1.99,
+            Byte.MAX_VALUE - 1,
+            Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1,
+            Short.MAX_VALUE,
+            Integer.MAX_VALUE,
+            Long.MAX_VALUE / 2,
+            Long.MAX_VALUE,
+            Float.MAX_VALUE,
+            Float.POSITIVE_INFINITY,
+            Float.NaN,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeSortedFloat(DATA[i]);
+            check(i);
+        }
+    }
+
+    public void testSortedDouble() {
+
+        final double[] DATA = {
+            Double.NEGATIVE_INFINITY,
+            (- Double.MAX_VALUE),
+            (- Float.MAX_VALUE),
+            Long.MIN_VALUE,
+            Long.MIN_VALUE / 2,
+            Integer.MIN_VALUE,
+            Short.MIN_VALUE,
+            Short.MIN_VALUE + 1,
+            Byte.MIN_VALUE,
+            Byte.MIN_VALUE + 1,
+            -1.999,
+            -1.002,
+            -1.001,
+            -1,
+            -0.999,
+            -0.002,
+            -0.001,
+            2 * (- Double.MIN_VALUE),
+            (- Double.MIN_VALUE),
+            0,
+            Double.MIN_VALUE,
+            2 * Double.MIN_VALUE,
+            0.001,
+            0.002,
+            0.999,
+            1,
+            1.001,
+            1.002,
+            1.999,
+            Byte.MAX_VALUE - 1,
+            Byte.MAX_VALUE,
+            Short.MAX_VALUE - 1,
+            Short.MAX_VALUE,
+            Integer.MAX_VALUE - 1,
+            Integer.MAX_VALUE,
+            Long.MAX_VALUE / 2,
+            Long.MAX_VALUE,
+            Float.MAX_VALUE,
+            Double.MAX_VALUE,
+            Double.POSITIVE_INFINITY,
+            Double.NaN,
+        };
+        for (int i = 0; i < DATA.length; i += 1) {
+            out.writeSortedDouble(DATA[i]);
+            check(i);
+        }
+    }
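+
+    /*
+     * A sketch of the standard order-preserving encoding that makes the
+     * full signed range above sort correctly as unsigned bytes: flip only
+     * the sign bit of non-negative values and all bits of negative values.
+     * This illustrates the technique the sorted writers follow; it is not
+     * the JE implementation itself.
+     */
+    private static long sortableDoubleBits(double val) {
+        long bits = Double.doubleToLongBits(val);
+        return bits ^ ((bits < 0) ? 0xFFFFFFFFFFFFFFFFL
+                                  : 0x8000000000000000L);
+    }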
+}
diff --git a/test/com/sleepycat/collections/KeyRangeTest.java b/test/com/sleepycat/collections/KeyRangeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd6a4d5990581c0b76bdde0df2f026ffa9fa9a
--- /dev/null
+++ b/test/com/sleepycat/collections/KeyRangeTest.java
@@ -0,0 +1,444 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyRangeTest.java,v 1.45.2.2 2010/01/04 15:30:41 cwl Exp $
+ */
+
+package com.sleepycat.collections;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.KeyRangeException;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class KeyRangeTest extends TestCase {
+
+    private static final boolean VERBOSE = false;
+
+    private static final byte FF = (byte) 0xFF;
+
+    private static final byte[][] KEYS = {
+        /* 0 */ {1},
+        /* 1 */ {FF},
+        /* 2 */ {FF, 0},
+        /* 3 */ {FF, 0x7F},
+        /* 4 */ {FF, FF},
+        /* 5 */ {FF, FF, 0},
+        /* 6 */ {FF, FF, 0x7F},
+        /* 7 */ {FF, FF, FF},
+    };
+    private static final byte[][] EXTREME_KEY_BYTES = {
+        /* 0 */ {0},
+        /* 1 */ {FF, FF, FF, FF},
+    };
+
+    private Environment env;
+    private Database store;
+    private DataView view;
+    private DataCursor cursor;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite() {
+
+        return new TestSuite(KeyRangeTest.class);
+    }
+
+    public KeyRangeTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+    }
+
+    private void openDb(Comparator<byte []> comparator)
+        throws Exception {
+
+        File dir = SharedTestUtils.getNewDir();
+        ByteArrayBinding dataBinding = new ByteArrayBinding();
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        DbCompat.setInitializeCache(envConfig, true);
+        env = new Environment(dir, envConfig);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        DbCompat.setTypeBtree(dbConfig);
+        dbConfig.setAllowCreate(true);
+        if (comparator != null) {
+            DbCompat.setBtreeComparator(dbConfig, comparator);
+        }
+        store = DbCompat.testOpenDatabase
+            (env, null, "test.db", null, dbConfig);
+        view = new DataView(store, dataBinding, dataBinding, null, true, null);
+    }
+
+    private void closeDb()
+        throws Exception {
+
+        store.close();
+        store = null;
+        env.close();
+        env = null;
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (store != null) {
+                store.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Exception ignored during close: " + e);
+        }
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Exception ignored during close: " + e);
+        }
+        /* Ensure that GC can cleanup. */
+        env = null;
+        store = null;
+        view = null;
+        cursor = null;
+    }
+
+    public void testScan() throws Exception {
+        openDb(null);
+        doScan(false);
+        closeDb();
+    }
+
+    public void testScanComparator() throws Exception {
+        openDb(new ReverseComparator());
+        doScan(true);
+        closeDb();
+    }
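+
+    /*
+     * With ReverseComparator installed, the btree stores keys in descending
+     * byte order, so a logical range [begin, end] must be opened as
+     * [end, begin] on the underlying cursor -- which is what newCursor()
+     * below does when 'reversed' is true.
+     */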
+
+    private void doScan(boolean reversed) throws Exception {
+
+        byte[][] keys = new byte[KEYS.length][];
+        final int end = KEYS.length - 1;
+        cursor = new DataCursor(view, true);
+        for (int i = 0; i <= end; i++) {
+            keys[i] = KEYS[i];
+            cursor.put(keys[i], KEYS[i], null, false);
+        }
+        cursor.close();
+        byte[][] extremeKeys = new byte[EXTREME_KEY_BYTES.length][];
+        for (int i = 0; i < extremeKeys.length; i++) {
+            extremeKeys[i] = EXTREME_KEY_BYTES[i];
+        }
+
+        // full range (no begin or end key)
+
+        cursor = new DataCursor(view, false);
+        expectRange(KEYS, 0, end, reversed);
+        cursor.close();
+
+        // begin key only, inclusive
+
+        for (int i = 0; i <= end; i++) {
+            cursor = newCursor(view, keys[i], true, null, false, reversed);
+            expectRange(KEYS, i, end, reversed);
+            cursor.close();
+        }
+
+        // begin key only, exclusive
+
+        for (int i = 0; i <= end; i++) {
+            cursor = newCursor(view, keys[i], false, null, false, reversed);
+            expectRange(KEYS, i + 1, end, reversed);
+            cursor.close();
+        }
+
+        // end key only, inclusive
+
+        for (int i = 0; i <= end; i++) {
+            cursor = newCursor(view, null, false, keys[i], true, reversed);
+            expectRange(KEYS, 0, i, reversed);
+            cursor.close();
+        }
+
+        // end key only, exclusive
+
+        for (int i = 0; i <= end; i++) {
+            cursor = newCursor(view, null, false, keys[i], false, reversed);
+            expectRange(KEYS, 0, i - 1, reversed);
+            cursor.close();
+        }
+
+        // begin and end keys, inclusive and exclusive
+
+        for (int i = 0; i <= end; i++) {
+            for (int j = i; j <= end; j++) {
+                // begin inclusive, end inclusive
+
+                cursor = newCursor(view, keys[i], true, keys[j],
+                                        true, reversed);
+                expectRange(KEYS, i, j, reversed);
+                cursor.close();
+
+                // begin inclusive, end exclusive
+
+                cursor = newCursor(view, keys[i], true, keys[j],
+                                        false, reversed);
+                expectRange(KEYS, i, j - 1, reversed);
+                cursor.close();
+
+                // begin exclusive, end inclusive
+
+                cursor = newCursor(view, keys[i], false, keys[j],
+                                        true, reversed);
+                expectRange(KEYS, i + 1, j, reversed);
+                cursor.close();
+
+                // begin exclusive, end exclusive
+
+                cursor = newCursor(view, keys[i], false, keys[j],
+                                        false, reversed);
+                expectRange(KEYS, i + 1, j - 1, reversed);
+                cursor.close();
+            }
+        }
+
+        // single key range
+
+        for (int i = 0; i <= end; i++) {
+            cursor = new DataCursor(view, false, keys[i]);
+            expectRange(KEYS, i, i, reversed);
+            cursor.close();
+        }
+
+        // start with lower extreme (before any existing key)
+
+        cursor = newCursor(view, extremeKeys[0], true, null, false, reversed);
+        expectRange(KEYS, 0, end, reversed);
+        cursor.close();
+
+        // start with higher extreme (after any existing key)
+
+        cursor = newCursor(view, null, false, extremeKeys[1], true, reversed);
+        expectRange(KEYS, 0, end, reversed);
+        cursor.close();
+    }
+
+    private DataCursor newCursor(DataView view,
+                                 Object beginKey, boolean beginInclusive,
+                                 Object endKey, boolean endInclusive,
+                                 boolean reversed)
+        throws Exception {
+
+        if (reversed) {
+            return new DataCursor(view, false,
+                                  endKey, endInclusive,
+                                  beginKey, beginInclusive);
+        } else {
+            return new DataCursor(view, false,
+                                  beginKey, beginInclusive,
+                                  endKey, endInclusive);
+        }
+    }
+
+    private void expectRange(byte[][] bytes, int first, int last,
+                             boolean reversed)
+        throws DatabaseException {
+
+        int i;
+        boolean init;
+        for (init = true, i = first;; i++, init = false) {
+            if (checkRange(bytes, first, last, i <= last,
+                           reversed, !reversed, init, i)) {
+                break;
+            }
+        }
+        for (init = true, i = last;; i--, init = false) {
+            if (checkRange(bytes, first, last, i >= first,
+                           reversed, reversed, init, i)) {
+                break;
+            }
+        }
+    }
+
+    private boolean checkRange(byte[][] bytes, int first, int last,
+                               boolean inRange, boolean reversed,
+                               boolean forward, boolean init,
+                               int i)
+        throws DatabaseException {
+
+        OperationStatus s;
+        if (forward) {
+            if (init) {
+                s = cursor.getFirst(false);
+            } else {
+                s = cursor.getNext(false);
+            }
+        } else {
+            if (init) {
+                s = cursor.getLast(false);
+            } else {
+                s = cursor.getPrev(false);
+            }
+        }
+
+        String msg = " " + (forward ? "next" : "prev") + " i=" + i +
+                     " first=" + first + " last=" + last +
+                     (reversed ? " reversed" : " not reversed");
+
+        // check that moving past ends doesn't move the cursor
+        if (s == OperationStatus.SUCCESS && i == first) {
+            OperationStatus s2 = reversed ? cursor.getNext(false)
+                                          : cursor.getPrev(false);
+            assertEquals(msg, OperationStatus.NOTFOUND, s2);
+        }
+        if (s == OperationStatus.SUCCESS && i == last) {
+            OperationStatus s2 = reversed ? cursor.getPrev(false)
+                                          : cursor.getNext(false);
+            assertEquals(msg, OperationStatus.NOTFOUND, s2);
+        }
+
+        byte[] val = (s == OperationStatus.SUCCESS)
+                        ?  ((byte[]) cursor.getCurrentValue())
+                        : null;
+
+        if (inRange) {
+            assertNotNull("RangeNotFound" + msg, val);
+
+            if (!Arrays.equals(val, bytes[i])){
+                printBytes(val);
+                printBytes(bytes[i]);
+                fail("RangeKeyNotEqual" + msg);
+            }
+            if (VERBOSE) {
+                System.out.println("GotRange" + msg);
+            }
+            return false;
+        } else {
+            assertEquals("RangeExceeded" + msg, OperationStatus.NOTFOUND, s);
+            return true;
+        }
+    }
+
+    private void printBytes(byte[] bytes) {
+
+        for (int i = 0; i < bytes.length; i += 1) {
+            System.out.print(Integer.toHexString(bytes[i] & 0xFF));
+            System.out.print(' ');
+        }
+        System.out.println();
+    }
+
+    public void testSubRanges() {
+
+        DatabaseEntry begin = new DatabaseEntry();
+        DatabaseEntry begin2 = new DatabaseEntry();
+        DatabaseEntry end = new DatabaseEntry();
+        DatabaseEntry end2 = new DatabaseEntry();
+        KeyRange range = new KeyRange(null);
+        KeyRange range2;
+
+        /* Base range [1, 2] */
+        begin.setData(new byte[] { 1 });
+        end.setData(new byte[] { 2 });
+        range = range.subRange(begin, true, end, true);
+
+        /* Subrange (0, 1] is invalid. */
+        begin2.setData(new byte[] { 0 });
+        end2.setData(new byte[] { 1 });
+        try {
+            range2 = range.subRange(begin2, false, end2, true);
+            fail();
+        } catch (KeyRangeException expected) {}
+
+        /* Subrange [1, 3) is invalid. */
+        begin2.setData(new byte[] { 1 });
+        end2.setData(new byte[] { 3 });
+        try {
+            range2 = range.subRange(begin2, true, end2, false);
+            fail();
+        } catch (KeyRangeException expected) {}
+
+        /* Subrange [2, 2] is valid. */
+        begin2.setData(new byte[] { 2 });
+        end2.setData(new byte[] { 2 });
+        range2 = range.subRange(begin2, true, end2, true);
+
+        /* Subrange [0, 1] is invalid. */
+        begin2.setData(new byte[] { 0 });
+        end2.setData(new byte[] { 1 });
+        try {
+            range2 = range.subRange(begin2, true, end2, true);
+            fail();
+        } catch (KeyRangeException expected) {}
+
+        /* Subrange (0, 3] is invalid. */
+        begin2.setData(new byte[] { 0 });
+        end2.setData(new byte[] { 3 });
+        try {
+            range2 = range.subRange(begin2, false, end2, true);
+            fail();
+        } catch (KeyRangeException expected) {}
+
+        /* Subrange [3, 3) is invalid. */
+        begin2.setData(new byte[] { 3 });
+        end2.setData(new byte[] { 3 });
+        try {
+            range2 = range.subRange(begin2, true, end2, false);
+            fail();
+        } catch (KeyRangeException expected) {}
+    }
+
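+    /**
+     * Comparator that reverses the default byte-by-byte key ordering, used
+     * for testing ranges over a descending key order.
+     */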
+    @SuppressWarnings("serial")
+    public static class ReverseComparator implements Comparator<byte[]>,
+                                                     Serializable {
+        public int compare(byte[] d1, byte[] d2) {
+            int cmp = KeyRange.compareBytes(d1, 0, d1.length,
+                                            d2, 0, d2.length);
+            if (cmp < 0) {
+                return 1;
+            } else if (cmp > 0) {
+                return -1;
+            } else {
+                return 0;
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/CollectionTest.java b/test/com/sleepycat/collections/test/CollectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..bfc0aedd88e5fcbfae10a255d8a5e311711a51b9
--- /dev/null
+++ b/test/com/sleepycat/collections/test/CollectionTest.java
@@ -0,0 +1,3066 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CollectionTest.java,v 1.63.2.3 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.concurrent.ConcurrentMap;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.MapEntryParameter;
+import com.sleepycat.collections.StoredCollection;
+import com.sleepycat.collections.StoredCollections;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredKeySet;
+import com.sleepycat.collections.StoredList;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.StoredSortedEntrySet;
+import com.sleepycat.collections.StoredSortedKeySet;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+import com.sleepycat.collections.StoredValueSet;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class CollectionTest extends TestCase {
+
+    private static final int NONE = 0;
+    private static final int SUB = 1;
+    private static final int HEAD = 2;
+    private static final int TAIL = 3;
+
+    /*
+     * For long tests we permute testStoredIterator to test both StoredIterator
+     * and BlockIterator.  When testing BlockIterator, we permute the maxKey
+     * over the array values below.  BlockIterator's block size is 10.  So we
+     * test below the block size (6), at the block size (10), and above it (14
+     * and 22).
+     */
+    private static final int DEFAULT_MAX_KEY = 6;
+    private static final int[] MAX_KEYS = {6, 10, 14, 22};
+
+    private boolean testStoredIterator;
+    private int maxKey; /* Must be a multiple of 2. */
+    private int beginKey = 1;
+    private int endKey;
+
+    private Environment env;
+    private Database store;
+    private Database index;
+    private boolean isEntityBinding;
+    private boolean isAutoCommit;
+    private TestStore testStore;
+    private String testName;
+    private EntryBinding keyBinding;
+    private EntryBinding valueBinding;
+    private EntityBinding entityBinding;
+    private TransactionRunner readRunner;
+    private TransactionRunner writeRunner;
+    private TransactionRunner writeIterRunner;
+    private TestEnv testEnv;
+
+    private StoredMap map;
+    private StoredMap imap; // insertable map (primary store for indexed map)
+    private StoredSortedMap smap; // sorted map (null or equal to map)
+    private StoredMap saveMap;
+    private StoredSortedMap saveSMap;
+    private int rangeType;
+    private StoredList list;
+    private StoredList ilist; // insertable list (primary store for indexed list)
+    private StoredList saveList;
+    private StoredKeySet keySet;
+    private StoredValueSet valueSet;
+
+    /**
+     * Runs a command line collection test.
+     * @see #usage
+     */
+    public static void main(String[] args)
+        throws Exception {
+
+        if (args.length == 1 &&
+            (args[0].equals("-h") || args[0].equals("-help"))) {
+            usage();
+        } else {
+            junit.framework.TestResult tr =
+                junit.textui.TestRunner.run(suite(args));
+            if (tr.errorCount() > 0 ||
+                tr.failureCount() > 0) {
+                System.exit(1);
+            } else {
+                System.exit(0);
+            }
+        }
+    }
+
+    private static void usage() {
+
+        System.out.println(
+            "Usage: java com.sleepycat.collections.test.CollectionTest\n" +
+            "              -h | -help\n" +
+            "              [testName]...\n" +
+            "  where testName has the format:\n" +
+            "    <env>-<store>-{entity|value}\n" +
+            "  <env> is:\n" +
+            "    bdb | cdb | txn\n" +
+            "  <store> is:\n" +
+            "    btree-uniq | btree-dup | btree-dupsort | btree-recnum |\n" +
+            "    hash-uniq | hash-dup | hash-dupsort |\n" +
+            "    queue | recno | recno-renum\n" +
+            "  For example:  bdb-btree-uniq-entity\n" +
+            "  If no arguments are given then all tests are run.");
+        System.exit(2);
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        return suite(null);
+    }
+
+    static Test suite(String[] args)
+        throws Exception {
+
+        if (SharedTestUtils.runLongTests()) {
+            TestSuite suite = new TestSuite();
+
+            /* StoredIterator tests. */
+            permuteTests(args, suite, true, DEFAULT_MAX_KEY);
+
+            /* BlockIterator tests with different maxKey values. */
+            for (int i = 0; i < MAX_KEYS.length; i += 1) {
+                permuteTests(args, suite, false, MAX_KEYS[i]);
+            }
+
+            return suite;
+        } else {
+            return baseSuite(args);
+        }
+    }
+
+    private static void permuteTests(String[] args,
+                                     TestSuite suite,
+                                     boolean storedIter,
+                                     int maxKey)
+        throws Exception {
+
+        TestSuite baseTests = baseSuite(args);
+        Enumeration e = baseTests.tests();
+        while (e.hasMoreElements()) {
+            CollectionTest t = (CollectionTest) e.nextElement();
+            t.setParams(storedIter, maxKey);
+            suite.addTest(t);
+        }
+    }
+
+    private static TestSuite baseSuite(String[] args)
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+            for (int j = 0; j < TestStore.ALL.length; j += 1) {
+                for (int k = 0; k < 2; k += 1) {
+                    boolean entityBinding = (k != 0);
+
+                    addTest(args, suite, new CollectionTest(
+                            TestEnv.ALL[i], TestStore.ALL[j],
+                            entityBinding, false));
+
+                    if (TestEnv.ALL[i].isTxnMode()) {
+                        addTest(args, suite, new CollectionTest(
+                                TestEnv.ALL[i], TestStore.ALL[j],
+                                entityBinding, true));
+                    }
+                }
+            }
+        }
+        return suite;
+    }
+
+    private static void addTest(String[] args, TestSuite suite,
+                                CollectionTest test) {
+
+        if (args == null || args.length == 0) {
+            suite.addTest(test);
+        } else {
+            for (int t = 0; t < args.length; t += 1) {
+                if (args[t].equals(test.testName)) {
+                    suite.addTest(test);
+                    break;
+                }
+            }
+        }
+    }
+
+    public CollectionTest(TestEnv testEnv, TestStore testStore,
+                          boolean isEntityBinding, boolean isAutoCommit) {
+
+        super(null);
+
+        this.testEnv = testEnv;
+        this.testStore = testStore;
+        this.isEntityBinding = isEntityBinding;
+        this.isAutoCommit = isAutoCommit;
+
+        keyBinding = testStore.getKeyBinding();
+        valueBinding = testStore.getValueBinding();
+        entityBinding = testStore.getEntityBinding();
+
+        setParams(false, DEFAULT_MAX_KEY);
+    }
+
+    private void setParams(boolean storedIter, int maxKey) {
+
+        this.testStoredIterator = storedIter;
+        this.maxKey = maxKey;
+        this.endKey = maxKey;
+
+        testName = testEnv.getName() + '-' + testStore.getName() +
+                    (isEntityBinding ? "-entity" : "-value") +
+                    (isAutoCommit ? "-autoCommit" : "") +
+                    (testStoredIterator ? "-storedIter" : "") +
+                    ((maxKey != DEFAULT_MAX_KEY) ? ("-maxKey-" + maxKey) : "");
+    }
+
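+    /**
+     * Sets the test name in tearDown so that JUnit reports show the full
+     * permuted name (env, store, binding, iterator type and maxKey).
+     */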
+    public void tearDown()
+        throws Exception {
+
+        setName(testName);
+    }
+
+    public void runTest()
+        throws Exception {
+
+        SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+        try {
+            env = testEnv.open(testName);
+
+            // For testing auto-commit, use a normal (transactional) runner for
+            // all reading and for writing via an iterator, and a do-nothing
+            // runner for writing via collections; if auto-commit is tested,
+            // the per-collection auto-commit property will be set elsewhere.
+            //
+            TransactionRunner normalRunner = newTransactionRunner(env);
+            normalRunner.setAllowNestedTransactions(
+                    DbCompat.NESTED_TRANSACTIONS);
+            TransactionRunner nullRunner = new NullTransactionRunner(env);
+            readRunner = nullRunner;
+            if (isAutoCommit) {
+                writeRunner = nullRunner;
+                writeIterRunner = testStoredIterator ? normalRunner
+                                                     : nullRunner;
+            } else {
+                writeRunner = normalRunner;
+                writeIterRunner = normalRunner;
+            }
+
+            store = testStore.open(env, "unindexed.db");
+            testUnindexed();
+            store.close();
+            store = null;
+
+            TestStore indexOf = testStore.getIndexOf();
+            if (indexOf != null) {
+                store = indexOf.open(env, "indexed.db");
+                index = testStore.openIndex(store, "index.db");
+                testIndexed();
+                index.close();
+                index = null;
+                store.close();
+                store = null;
+            }
+            env.close();
+            env = null;
+        } catch (Exception e) {
+            throw ExceptionUnwrapper.unwrap(e);
+        } finally {
+            if (index != null) {
+                try {
+                    index.close();
+                } catch (Exception e) {
+                    /* Ignore exceptions during best-effort cleanup. */
+                }
+            }
+            if (store != null) {
+                try {
+                    store.close();
+                } catch (Exception e) {
+                    /* Ignore exceptions during best-effort cleanup. */
+                }
+            }
+            if (env != null) {
+                try {
+                    env.close();
+                } catch (Exception e) {
+                    /* Ignore exceptions during best-effort cleanup. */
+                }
+            }
+            /* Ensure that GC can cleanup. */
+            index = null;
+            store = null;
+            env = null;
+            readRunner = null;
+            writeRunner = null;
+            writeIterRunner = null;
+            map = null;
+            imap = null;
+            smap = null;
+            saveMap = null;
+            saveSMap = null;
+            list = null;
+            ilist = null;
+            saveList = null;
+            keySet = null;
+            valueSet = null;
+            testEnv = null;
+            testStore = null;
+        }
+    }
+
+    /**
+     * Is overridden in XACollectionTest.
+     */
+    protected TransactionRunner newTransactionRunner(Environment env)
+        throws DatabaseException {
+
+        return new TransactionRunner(env);
+    }
+
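+    /**
+     * Checks that a newly created container reflects the properties of the
+     * underlying store and environment, and has the expected size.
+     */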
+    void testCreation(StoredContainer cont, int expectSize)
+        throws Exception {
+
+        assertEquals(index != null, cont.isSecondary());
+        assertEquals(testStore.isOrdered(), cont.isOrdered());
+        assertEquals(testStore.areKeyRangesAllowed(),
+                     cont.areKeyRangesAllowed());
+        assertEquals(testStore.areKeysRenumbered(), cont.areKeysRenumbered());
+        assertEquals(testStore.areDuplicatesAllowed(),
+                     cont.areDuplicatesAllowed());
+        assertEquals(testEnv.isTxnMode(), cont.isTransactional());
+        assertEquals(expectSize, cont.size());
+    }
+
+    void testMapCreation(ConcurrentMap map)
+        throws Exception {
+
+        assertTrue(map.values() instanceof Set);
+        assertEquals(testStore.areKeyRangesAllowed(),
+                     map.keySet() instanceof SortedSet);
+        assertEquals(testStore.areKeyRangesAllowed(),
+                     map.entrySet() instanceof SortedSet);
+        assertEquals(testStore.areKeyRangesAllowed() && isEntityBinding,
+                     map.values() instanceof SortedSet);
+    }
+
+    void testUnindexed()
+        throws Exception {
+
+        // create primary map
+        if (testStore.areKeyRangesAllowed()) {
+            if (isEntityBinding) {
+                smap = new StoredSortedMap(store, keyBinding,
+                                           entityBinding,
+                                           testStore.getKeyAssigner());
+                valueSet = new StoredSortedValueSet(store, entityBinding,
+                                                    true);
+            } else {
+                smap = new StoredSortedMap(store, keyBinding,
+                                           valueBinding,
+                                           testStore.getKeyAssigner());
+                // sorted value set is not possible since key cannot be derived
+                // for performing subSet, etc.
+            }
+            keySet = new StoredSortedKeySet(store, keyBinding, true);
+            map = smap;
+        } else {
+            if (isEntityBinding) {
+                map = new StoredMap(store, keyBinding, entityBinding,
+                                    testStore.getKeyAssigner());
+                valueSet = new StoredValueSet(store, entityBinding, true);
+            } else {
+                map = new StoredMap(store, keyBinding, valueBinding,
+                                    testStore.getKeyAssigner());
+                valueSet = new StoredValueSet(store, valueBinding, true);
+            }
+            smap = null;
+            keySet = new StoredKeySet(store, keyBinding, true);
+        }
+        imap = map;
+
+        // create primary list
+        if (testStore.hasRecNumAccess()) {
+            if (isEntityBinding) {
+                ilist = new StoredList(store, entityBinding,
+                                       testStore.getKeyAssigner());
+            } else {
+                ilist = new StoredList(store, valueBinding,
+                                       testStore.getKeyAssigner());
+            }
+            list = ilist;
+        } else {
+            try {
+                if (isEntityBinding) {
+                    ilist = new StoredList(store, entityBinding,
+                                           testStore.getKeyAssigner());
+                } else {
+                    ilist = new StoredList(store, valueBinding,
+                                           testStore.getKeyAssigner());
+                }
+                fail();
+            } catch (IllegalArgumentException expected) {}
+        }
+
+        testCreation(map, 0);
+        if (list != null) {
+            testCreation(list, 0);
+        }
+        testMapCreation(map);
+        addAll();
+        testAll();
+    }
+
+    void testIndexed()
+        throws Exception {
+
+        // create primary map
+        if (isEntityBinding) {
+            map = new StoredMap(store, keyBinding, entityBinding,
+                                testStore.getKeyAssigner());
+        } else {
+            map = new StoredMap(store, keyBinding, valueBinding,
+                                testStore.getKeyAssigner());
+        }
+        imap = map;
+        smap = null;
+        // create primary list
+        if (testStore.hasRecNumAccess()) {
+            if (isEntityBinding) {
+                list = new StoredList(store, entityBinding,
+                                      testStore.getKeyAssigner());
+            } else {
+                list = new StoredList(store, valueBinding,
+                                      testStore.getKeyAssigner());
+            }
+            ilist = list;
+        }
+
+        addAll();
+        readAll();
+
+        // create indexed map (keySet/valueSet)
+        if (testStore.areKeyRangesAllowed()) {
+            if (isEntityBinding) {
+                map = smap = new StoredSortedMap(index, keyBinding,
+                                                 entityBinding, true);
+                valueSet = new StoredSortedValueSet(index, entityBinding,
+                                                    true);
+            } else {
+                map = smap = new StoredSortedMap(index, keyBinding,
+                                                 valueBinding, true);
+                // sorted value set is not possible since key cannot be derived
+                // for performing subSet, etc.
+            }
+            keySet = new StoredSortedKeySet(index, keyBinding, true);
+        } else {
+            if (isEntityBinding) {
+                map = new StoredMap(index, keyBinding, entityBinding, true);
+                valueSet = new StoredValueSet(index, entityBinding, true);
+            } else {
+                map = new StoredMap(index, keyBinding, valueBinding, true);
+                valueSet = new StoredValueSet(index, valueBinding, true);
+            }
+            smap = null;
+            keySet = new StoredKeySet(index, keyBinding, true);
+        }
+
+        // create indexed list
+        if (testStore.hasRecNumAccess()) {
+            if (isEntityBinding) {
+                list = new StoredList(index, entityBinding, true);
+            } else {
+                list = new StoredList(index, valueBinding, true);
+            }
+        } else {
+            try {
+                if (isEntityBinding) {
+                    list = new StoredList(index, entityBinding, true);
+                } else {
+                    list = new StoredList(index, valueBinding, true);
+                }
+                fail();
+            } catch (IllegalArgumentException expected) {}
+        }
+
+        testCreation(map, maxKey);
+        testCreation((StoredContainer) map.values(), maxKey);
+        testCreation((StoredContainer) map.keySet(), maxKey);
+        testCreation((StoredContainer) map.entrySet(), maxKey);
+        if (list != null) {
+            testCreation(list, maxKey);
+        }
+        testMapCreation(map);
+        testAll();
+    }
+
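+    /**
+     * Runs the full battery of read, update, remove, range and duplicate
+     * tests, skipping those not supported by the current store
+     * configuration.
+     */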
+    void testAll()
+        throws Exception {
+
+        checkKeySetAndValueSet();
+        readAll();
+        updateAll();
+        readAll();
+        if (!map.areKeysRenumbered()) {
+            removeOdd();
+            readEven();
+            addOdd();
+            readAll();
+            removeOddIter();
+            readEven();
+            if (imap.areDuplicatesAllowed()) {
+                addOddDup();
+            } else {
+                addOdd();
+            }
+            readAll();
+            removeOddEntry();
+            readEven();
+            addOdd();
+            readAll();
+            if (isEntityBinding) {
+                removeOddEntity();
+                readEven();
+                addOddEntity();
+                readAll();
+            }
+            bulkOperations();
+        }
+        if (isListAddAllowed()) {
+            removeOddList();
+            readEvenList();
+            addOddList();
+            readAll();
+            if (!isEntityBinding) {
+                removeOddListValue();
+                readEvenList();
+                addOddList();
+                readAll();
+            }
+        }
+        if (list != null) {
+            bulkListOperations();
+        } else {
+            listOperationsNotAllowed();
+        }
+        if (smap != null) {
+            readWriteRange(SUB,  1, 1);
+            readWriteRange(HEAD, 1, 1);
+            readWriteRange(SUB,  1, maxKey);
+            readWriteRange(HEAD, 1, maxKey);
+            readWriteRange(TAIL, 1, maxKey);
+            readWriteRange(SUB,  1, 3);
+            readWriteRange(HEAD, 1, 3);
+            readWriteRange(SUB,  2, 2);
+            readWriteRange(SUB,  2, maxKey);
+            readWriteRange(TAIL, 2, maxKey);
+            readWriteRange(SUB,  maxKey, maxKey);
+            readWriteRange(TAIL, maxKey, maxKey);
+            readWriteRange(SUB,  maxKey + 1, maxKey + 1);
+            readWriteRange(TAIL, maxKey + 1, maxKey + 1);
+            readWriteRange(SUB,  0, 0);
+            readWriteRange(HEAD, 0, 0);
+        }
+        updateAll();
+        readAll();
+        if (map.areDuplicatesAllowed()) {
+            readWriteDuplicates();
+            readAll();
+        } else {
+            duplicatesNotAllowed();
+            readAll();
+        }
+        if (testEnv.isCdbMode()) {
+            testCdbLocking();
+        }
+        removeAll();
+        testConcurrentMap();
+        if (isListAddAllowed()) {
+            testIterAddList();
+            clearAll();
+        }
+        if (imap.areDuplicatesAllowed()) {
+            testIterAddDuplicates();
+            clearAll();
+        }
+        if (isListAddAllowed()) {
+            addAllList();
+            readAll();
+            removeAllList();
+        }
+        appendAll();
+    }
+
+    void checkKeySetAndValueSet() {
+
+        // use bulk operations to check that explicitly constructed
+        // keySet/valueSet are equivalent
+        assertEquals(keySet, imap.keySet());
+        if (valueSet != null) {
+            assertEquals(valueSet, imap.values());
+        }
+    }
+
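+    /**
+     * Returns a StoredIterator or a standard iterator according to the
+     * testStoredIterator permutation, so both implementations are covered.
+     */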
+    Iterator iterator(Collection storedCollection) {
+
+        if (testStoredIterator) {
+            return ((StoredCollection) storedCollection).storedIterator();
+        } else {
+            return storedCollection.iterator();
+        }
+    }
+
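+    /**
+     * Checks that the map starts out empty, then inserts keys
+     * beginKey..endKey, verifying each insertion via get, contains and
+     * duplicates.
+     */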
+    void addAll()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertTrue(imap.isEmpty());
+                Iterator iter = iterator(imap.entrySet());
+                try {
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+                assertEquals(0, imap.keySet().toArray().length);
+                assertEquals(0, imap.keySet().toArray(new Object[0]).length);
+                assertEquals(0, imap.entrySet().toArray().length);
+                assertEquals(0, imap.entrySet().toArray(new Object[0]).length);
+                assertEquals(0, imap.values().toArray().length);
+                assertEquals(0, imap.values().toArray(new Object[0]).length);
+
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertTrue(!imap.keySet().contains(key));
+                    assertTrue(!imap.values().contains(val));
+                    assertNull(imap.put(key, val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.keySet().contains(key));
+                    assertTrue(imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    if (!imap.areDuplicatesAllowed()) {
+                        assertEquals(val, imap.put(key, val));
+                    }
+                    checkDupsSize(1, imap.duplicates(key));
+                }
+                assertTrue(!imap.isEmpty());
+            }
+        });
+    }
+
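+    /**
+     * Inserts all records using append, alternating between the list and the
+     * map where possible, and verifies the keys that were assigned.
+     */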
+    void appendAll()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertTrue(imap.isEmpty());
+
+                TestKeyAssigner keyAssigner = testStore.getKeyAssigner();
+                if (keyAssigner != null) {
+                    keyAssigner.reset();
+                }
+
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    boolean useList = (i & 1) == 0;
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    if (keyAssigner != null) {
+                        if (useList && ilist != null) {
+                            assertEquals(i - 1, ilist.append(val));
+                        } else {
+                            assertEquals(key, imap.append(val));
+                        }
+                        assertEquals(val, imap.get(key));
+                    } else {
+                        Long recnoKey;
+                        if (useList && ilist != null) {
+                            recnoKey = new Long(ilist.append(val) + 1);
+                        } else {
+                            recnoKey = (Long) imap.append(val);
+                        }
+                        assertNotNull(recnoKey);
+                        Object recnoVal;
+                        if (isEntityBinding) {
+                            recnoVal = makeEntity(recnoKey.intValue(), i);
+                        } else {
+                            recnoVal = val;
+                        }
+                        assertEquals(recnoVal, imap.get(recnoKey));
+                    }
+                }
+            }
+        });
+    }
+
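+    /**
+     * Rewrites each record with its current value via put and set, then
+     * exercises iterator-based updates on the entry, value, key and list
+     * views.
+     */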
+    void updateAll()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    if (!imap.areDuplicatesAllowed()) {
+                        assertEquals(val, imap.put(key, val));
+                    }
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                    checkDupsSize(1, imap.duplicates(key));
+                    if (ilist != null) {
+                        int idx = i - 1;
+                        assertEquals(val, ilist.set(idx, val));
+                    }
+                }
+                updateIter(map.entrySet());
+                updateIter(map.values());
+                if (beginKey <= endKey) {
+                    ListIterator iter = (ListIterator) iterator(map.keySet());
+                    try {
+                        assertNotNull(iter.next());
+                        iter.set(makeKey(beginKey));
+                        fail();
+                    } catch (UnsupportedOperationException e) {
+                    } finally {
+                        StoredIterator.close(iter);
+                    }
+                }
+                if (list != null) {
+                    updateIter(list);
+                }
+            }
+        });
+    }
+
+    void updateIter(final Collection coll)
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                ListIterator iter = (ListIterator) iterator(coll);
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Object obj = iter.next();
+                        if (map.isOrdered()) {
+                            assertEquals(i, intIter(coll, obj));
+                        }
+                        if (index != null) {
+                            try {
+                                setValuePlusOne(iter, obj);
+                                fail();
+                            } catch (UnsupportedOperationException e) {}
+                        } else if
+                           (((StoredCollection) coll).areDuplicatesOrdered()) {
+                            try {
+                                setValuePlusOne(iter, obj);
+                                fail();
+                            } catch (RuntimeException e) {
+                                Exception e2 = ExceptionUnwrapper.unwrap(e);
+                                assertTrue(e2.getClass().getName(),
+                                      e2 instanceof IllegalArgumentException ||
+                                      e2 instanceof DatabaseException);
+                            }
+                        } else {
+                            setValuePlusOne(iter, obj);
+                            /* Ensure iterator position is correct. */
+                            if (map.isOrdered()) {
+                                assertTrue(iter.hasPrevious());
+                                obj = iter.previous();
+                                assertEquals(i, intIter(coll, obj));
+                                assertTrue(iter.hasNext());
+                                obj = iter.next();
+                                assertEquals(i, intIter(coll, obj));
+                            }
+                        }
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+            }
+        });
+    }
+
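+    /**
+     * Replaces the value at the iterator position with the value for key+1,
+     * verifies the change, and then restores the old value.  With an entity
+     * binding, the first attempt must fail because it would change the key.
+     */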
+    void setValuePlusOne(ListIterator iter, Object obj) {
+
+        if (obj instanceof Map.Entry) {
+            Map.Entry entry = (Map.Entry) obj;
+            Long key = (Long) entry.getKey();
+            Object oldVal = entry.getValue();
+            Object val = makeVal(key.intValue() + 1);
+            if (isEntityBinding) {
+                try {
+                    // must fail on attempt to change the key via an entity
+                    entry.setValue(val);
+                    fail();
+                } catch (IllegalArgumentException e) {}
+                val = makeEntity(key.intValue(), key.intValue() + 1);
+            }
+            entry.setValue(val);
+            assertEquals(val, entry.getValue());
+            assertEquals(val, map.get(key));
+            assertTrue(map.duplicates(key).contains(val));
+            checkDupsSize(1, map.duplicates(key));
+            entry.setValue(oldVal);
+            assertEquals(oldVal, entry.getValue());
+            assertEquals(oldVal, map.get(key));
+            assertTrue(map.duplicates(key).contains(oldVal));
+            checkDupsSize(1, map.duplicates(key));
+        } else {
+            Object oldVal = obj;
+            Long key = makeKey(intVal(obj));
+            Object val = makeVal(key.intValue() + 1);
+            if (isEntityBinding) {
+                try {
+                    // must fail on attempt to change the key via an entity
+                    iter.set(val);
+                    fail();
+                } catch (IllegalArgumentException e) {}
+                val = makeEntity(key.intValue(), key.intValue() + 1);
+            }
+            iter.set(val);
+            assertEquals(val, map.get(key));
+            assertTrue(map.duplicates(key).contains(val));
+            checkDupsSize(1, map.duplicates(key));
+            iter.set(oldVal);
+            assertEquals(oldVal, map.get(key));
+            assertTrue(map.duplicates(key).contains(oldVal));
+            checkDupsSize(1, map.duplicates(key));
+        }
+    }
+
+    void removeAll()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertTrue(!map.isEmpty());
+                ListIterator iter = null;
+                try {
+                    if (list != null) {
+                        iter = (ListIterator) iterator(list);
+                    } else {
+                        iter = (ListIterator) iterator(map.values());
+                    }
+                    iteratorSetAndRemoveNotAllowed(iter);
+
+                    Object val = iter.next();
+                    assertNotNull(val);
+                    iter.remove();
+                    iteratorSetAndRemoveNotAllowed(iter);
+
+                    if (index == null) {
+                        val = iter.next();
+                        assertNotNull(val);
+                        iter.set(val);
+
+                        if (map.areDuplicatesAllowed()) {
+                            iter.add(makeVal(intVal(val), intVal(val) + 1));
+                            iteratorSetAndRemoveNotAllowed(iter);
+                        }
+                    }
+                } finally {
+                    StoredIterator.close(iter);
+                }
+                map.clear();
+                assertTrue(map.isEmpty());
+                assertTrue(map.entrySet().isEmpty());
+                assertTrue(map.keySet().isEmpty());
+                assertTrue(map.values().isEmpty());
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(map.get(key));
+                    assertTrue(!map.duplicates(key).contains(val));
+                    checkDupsSize(0, map.duplicates(key));
+                }
+            }
+        });
+    }
+
+    void clearAll()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                map.clear();
+                assertTrue(map.isEmpty());
+            }
+        });
+    }
+
+    /**
+     * Tests that removing while iterating works properly, especially when
+     * removing everything in the key range or everything from some point to
+     * the end of the range. [#15858]
+     */
+    void removeIter()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                ListIterator iter;
+
+                /* Save contents. */
+                HashMap<Object,Object> savedMap =
+                    new HashMap<Object,Object>(map);
+                assertEquals(savedMap, map);
+
+                /* Remove all moving forward. */
+                iter = (ListIterator) iterator(map.keySet());
+                try {
+                    while (iter.hasNext()) {
+                        assertNotNull(iter.next());
+                        iter.remove();
+                    }
+                    assertTrue(!iter.hasNext());
+                    assertTrue(!iter.hasPrevious());
+                    assertTrue(map.isEmpty());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+
+                /* Restore contents. */
+                imap.putAll(savedMap);
+                assertEquals(savedMap, map);
+
+                /* Remove all moving backward. */
+                iter = (ListIterator) iterator(map.keySet());
+                try {
+                    while (iter.hasNext()) {
+                        assertNotNull(iter.next());
+                    }
+                    while (iter.hasPrevious()) {
+                        assertNotNull(iter.previous());
+                        iter.remove();
+                    }
+                    assertTrue(!iter.hasNext());
+                    assertTrue(!iter.hasPrevious());
+                    assertTrue(map.isEmpty());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+
+                /* Restore contents. */
+                imap.putAll(savedMap);
+                assertEquals(savedMap, map);
+
+                int first = Math.max(1, beginKey);
+                int last = Math.min(maxKey, endKey);
+
+                /* Skip N forward, remove all from that point forward. */
+                for (int readTo = first + 1; readTo <= last; readTo += 1) {
+                    iter = (ListIterator) iterator(map.keySet());
+                    try {
+                        for (int i = first; i < readTo; i += 1) {
+                            assertTrue(iter.hasNext());
+                            assertNotNull(iter.next());
+                        }
+                        for (int i = readTo; i <= last; i += 1) {
+                            assertTrue(iter.hasNext());
+                            assertNotNull(iter.next());
+                            iter.remove();
+                        }
+                        assertTrue(!iter.hasNext());
+                        assertTrue(iter.hasPrevious());
+                        assertEquals(readTo - first, map.size());
+                    } finally {
+                        StoredIterator.close(iter);
+                    }
+
+                    /* Restore contents. */
+                    for (Map.Entry entry : savedMap.entrySet()) {
+                        if (!imap.entrySet().contains(entry)) {
+                            imap.put(entry.getKey(), entry.getValue());
+                        }
+                    }
+                    assertEquals(savedMap, map);
+                }
+
+                /* Skip N backward, remove all from that point backward. */
+                for (int readTo = last - 1; readTo >= first; readTo -= 1) {
+                    iter = (ListIterator) iterator(map.keySet());
+                    try {
+                        while (iter.hasNext()) {
+                            assertNotNull(iter.next());
+                        }
+                        for (int i = last; i > readTo; i -= 1) {
+                            assertTrue(iter.hasPrevious());
+                            assertNotNull(iter.previous());
+                        }
+                        for (int i = readTo; i >= first; i -= 1) {
+                            assertTrue(iter.hasPrevious());
+                            assertNotNull(iter.previous());
+                            iter.remove();
+                        }
+                        assertTrue(!iter.hasPrevious());
+                        assertTrue(iter.hasNext());
+                        assertEquals(last - readTo, map.size());
+                    } finally {
+                        StoredIterator.close(iter);
+                    }
+
+                    /* Restore contents. */
+                    for (Map.Entry entry : savedMap.entrySet()) {
+                        if (!imap.entrySet().contains(entry)) {
+                            imap.put(entry.getKey(), entry.getValue());
+                        }
+                    }
+                    assertEquals(savedMap, map);
+                }
+            }
+        });
+    }
+
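+    /**
+     * Checks that remove and set throw IllegalStateException when the
+     * iterator has no current element.  set is only checked for primary
+     * containers; indexed containers reject it unconditionally.
+     */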
+    void iteratorSetAndRemoveNotAllowed(ListIterator i) {
+
+        try {
+            i.remove();
+            fail();
+        } catch (IllegalStateException e) {}
+
+        if (index == null) {
+            try {
+                Object val = makeVal(1);
+                i.set(val);
+                fail();
+            } catch (IllegalStateException e) {}
+        }
+    }
+
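+    /**
+     * Removes the odd-numbered keys, alternating between keySet().remove and
+     * Map.remove to cover both removal paths.
+     */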
+    void removeOdd()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                boolean toggle = false;
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    toggle = !toggle;
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    if (toggle) {
+                        assertTrue(map.keySet().contains(key));
+                        assertTrue(map.keySet().remove(key));
+                        assertTrue(!map.keySet().contains(key));
+                    } else {
+                        assertTrue(map.containsValue(val));
+                        Object oldVal = map.remove(key);
+                        assertEquals(oldVal, val);
+                        assertTrue(!map.containsKey(key));
+                        assertTrue(!map.containsValue(val));
+                    }
+                    assertNull(map.get(key));
+                    assertTrue(!map.duplicates(key).contains(val));
+                    checkDupsSize(0, map.duplicates(key));
+                }
+            }
+        });
+    }
+
+    void removeOddEntity()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertTrue(map.values().contains(val));
+                    assertTrue(map.values().remove(val));
+                    assertTrue(!map.values().contains(val));
+                    assertNull(map.get(key));
+                    assertTrue(!map.duplicates(key).contains(val));
+                    checkDupsSize(0, map.duplicates(key));
+                }
+            }
+        });
+    }
+
+    void removeOddEntry()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = mapEntry(i);
+                    assertTrue(map.entrySet().contains(val));
+                    assertTrue(map.entrySet().remove(val));
+                    assertTrue(!map.entrySet().contains(val));
+                    assertNull(map.get(key));
+                }
+            }
+        });
+    }
+
+    void removeOddIter()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                Iterator iter = iterator(map.keySet());
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Long key = (Long) iter.next();
+                        assertNotNull(key);
+                        if (map instanceof SortedMap) {
+                            assertEquals(makeKey(i), key);
+                        }
+                        if ((key.intValue() & 1) != 0) {
+                            iter.remove();
+                        }
+                    }
+                } finally {
+                    StoredIterator.close(iter);
+                }
+            }
+        });
+    }
+
+    void removeOddList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    // remove by index
+                    // (with entity binding, embedded keys in values are
+                    // being changed, so we can't use values for comparison)
+                    int idx = (i - beginKey) / 2;
+                    Object val = makeVal(i);
+                    if (!isEntityBinding) {
+                        assertTrue(list.contains(val));
+                        assertEquals(val, list.get(idx));
+                        assertEquals(idx, list.indexOf(val));
+                    }
+                    assertNotNull(list.get(idx));
+                    if (isEntityBinding) {
+                        assertNotNull(list.remove(idx));
+                    } else {
+                        assertTrue(list.contains(val));
+                        assertEquals(val, list.remove(idx));
+                    }
+                    assertTrue(!list.remove(val));
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                }
+            }
+        });
+    }
+
+    void removeOddListValue()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    // for non-entity case remove by value
+                    // (with entity binding, embedded keys in values are
+                    // being changed, so we can't use values for comparison)
+                    int idx = (i - beginKey) / 2;
+                    Object val = makeVal(i);
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                    assertEquals(idx, list.indexOf(val));
+                    assertTrue(list.remove(val));
+                    assertTrue(!list.remove(val));
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                }
+            }
+        });
+    }
+
+    void addOdd()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                // add using Map.put()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertNull(imap.put(key, val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                    if (!imap.areDuplicatesAllowed()) {
+                        assertEquals(val, imap.put(key, val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddEntity()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                // add using Map.values().add()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertTrue(!imap.values().contains(val));
+                    assertTrue(imap.values().add(val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddDup()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                // add using Map.duplicates().add()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertTrue(!imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).add(val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    assertTrue(!imap.duplicates(key).add(val));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    int idx = i - beginKey;
+                    Object val = makeVal(i);
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                    list.add(idx, val);
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                }
+            }
+        });
+    }
+
+    void addAllList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    int idx = i - beginKey;
+                    Object val = makeVal(i);
+                    assertTrue(!list.contains(val));
+                    assertTrue(list.add(val));
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                }
+            }
+        });
+    }
+
+    void removeAllList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertTrue(!list.isEmpty());
+                list.clear();
+                assertTrue(list.isEmpty());
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    int idx = i - beginKey;
+                    assertNull(list.get(idx));
+                }
+            }
+        });
+    }
+
+    /**
+     * Tests ConcurrentMap methods implemented by StoredMap.  Starts with an
+     * empty DB and ends with an empty DB.  [#16218]
+     */
+    void testConcurrentMap()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    Object valPlusOne = makeVal(i, i + 1);
+                    assertFalse(imap.containsKey(key));
+
+                    assertNull(imap.putIfAbsent(key, val));
+                    assertEquals(val, imap.get(key));
+
+                    assertEquals(val, imap.putIfAbsent(key, val));
+                    assertEquals(val, imap.get(key));
+
+                    if (!imap.areDuplicatesAllowed()) {
+                        assertEquals(val, imap.replace(key, valPlusOne));
+                        assertEquals(valPlusOne, imap.get(key));
+
+                        assertEquals(valPlusOne, imap.replace(key, val));
+                        assertEquals(val, imap.get(key));
+
+                        assertFalse(imap.replace(key, valPlusOne, val));
+                        assertEquals(val, imap.get(key));
+
+                        assertTrue(imap.replace(key, val, valPlusOne));
+                        assertEquals(valPlusOne, imap.get(key));
+
+                        assertTrue(imap.replace(key, valPlusOne, val));
+                        assertEquals(val, imap.get(key));
+                    }
+
+                    assertFalse(imap.remove(key, valPlusOne));
+                    assertTrue(imap.containsKey(key));
+
+                    assertTrue(imap.remove(key, val));
+                    assertFalse(imap.containsKey(key));
+
+                    assertNull(imap.replace(key, val));
+                    assertFalse(imap.containsKey(key));
+                }
+            }
+        });
+    }
+
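+    /**
+     * Checks ListIterator.add starting from an empty list: each element is
+     * inserted before the cursor, and the iterator position is preserved
+     * across the insertions.
+     */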
+    void testIterAddList()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                ListIterator i = (ListIterator) iterator(list);
+                try {
+                    assertTrue(!i.hasNext());
+                    i.add(makeVal(3));
+                    assertTrue(!i.hasNext());
+                    assertTrue(i.hasPrevious());
+                    assertEquals(3, intVal(i.previous()));
+
+                    i.add(makeVal(1));
+                    assertTrue(i.hasPrevious());
+                    assertTrue(i.hasNext());
+                    assertEquals(1, intVal(i.previous()));
+                    assertTrue(i.hasNext());
+                    assertEquals(1, intVal(i.next()));
+                    assertTrue(i.hasNext());
+                    assertEquals(3, intVal(i.next()));
+                    assertEquals(3, intVal(i.previous()));
+
+                    assertTrue(i.hasNext());
+                    i.add(makeVal(2));
+                    assertTrue(i.hasNext());
+                    assertTrue(i.hasPrevious());
+                    assertEquals(2, intVal(i.previous()));
+                    assertTrue(i.hasNext());
+                    assertEquals(2, intVal(i.next()));
+                    assertTrue(i.hasNext());
+                    assertEquals(3, intVal(i.next()));
+
+                    assertTrue(!i.hasNext());
+                    i.add(makeVal(4));
+                    i.add(makeVal(5));
+                    assertTrue(!i.hasNext());
+                    assertEquals(5, intVal(i.previous()));
+                    assertEquals(4, intVal(i.previous()));
+                    assertEquals(3, intVal(i.previous()));
+                    assertEquals(2, intVal(i.previous()));
+                    assertEquals(1, intVal(i.previous()));
+                    assertTrue(!i.hasPrevious());
+                } finally {
+                    StoredIterator.close(i);
+                }
+            }
+        });
+    }
+
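+    /**
+     * Checks ListIterator.add within a set of duplicates: with sorted
+     * duplicates the inserted values come back in sorted order; otherwise
+     * they appear at the position where they were inserted.
+     */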
+    void testIterAddDuplicates()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertNull(imap.put(makeKey(1), makeVal(1)));
+                ListIterator i =
+                    (ListIterator) iterator(imap.duplicates(makeKey(1)));
+                try {
+                    if (imap.areDuplicatesOrdered()) {
+                        i.add(makeVal(1, 4));
+                        i.add(makeVal(1, 2));
+                        i.add(makeVal(1, 3));
+                        while (i.hasPrevious()) i.previous();
+                        assertEquals(1, intVal(i.next()));
+                        assertEquals(2, intVal(i.next()));
+                        assertEquals(3, intVal(i.next()));
+                        assertEquals(4, intVal(i.next()));
+                        assertTrue(!i.hasNext());
+                    } else {
+                        assertEquals(1, intVal(i.next()));
+                        i.add(makeVal(1, 2));
+                        i.add(makeVal(1, 3));
+                        assertTrue(!i.hasNext());
+                        assertTrue(i.hasPrevious());
+                        assertEquals(3, intVal(i.previous()));
+                        assertEquals(2, intVal(i.previous()));
+                        assertEquals(1, intVal(i.previous()));
+                        assertTrue(!i.hasPrevious());
+                        i.add(makeVal(1, 4));
+                        i.add(makeVal(1, 5));
+                        assertTrue(i.hasNext());
+                        assertEquals(5, intVal(i.previous()));
+                        assertEquals(4, intVal(i.previous()));
+                        assertTrue(!i.hasPrevious());
+                        assertEquals(4, intVal(i.next()));
+                        assertEquals(5, intVal(i.next()));
+                        assertEquals(1, intVal(i.next()));
+                        assertEquals(2, intVal(i.next()));
+                        assertEquals(3, intVal(i.next()));
+                        assertTrue(!i.hasNext());
+                    }
+                } finally {
+                    StoredIterator.close(i);
+                }
+            }
+        });
+    }
+
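+    /**
+     * Reads every key in [beginKey, endKey] through the map, entrySet,
+     * keySet, values and list views, and checks the first/last elements of
+     * the sorted views.
+     */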
+    void readAll()
+        throws Exception {
+
+        readRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                // map
+
+                assertNotNull(map.toString());
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    Object val = map.get(key);
+                    assertEquals(makeVal(i), val);
+                    assertTrue(map.containsKey(key));
+                    assertTrue(map.containsValue(val));
+                    assertTrue(map.keySet().contains(key));
+                    assertTrue(map.values().contains(val));
+                    assertTrue(map.duplicates(key).contains(val));
+                    checkDupsSize(1, map.duplicates(key));
+                }
+                assertNull(map.get(makeKey(-1)));
+                assertNull(map.get(makeKey(0)));
+                assertNull(map.get(makeKey(beginKey - 1)));
+                assertNull(map.get(makeKey(endKey + 1)));
+                checkDupsSize(0, map.duplicates(makeKey(-1)));
+                checkDupsSize(0, map.duplicates(makeKey(0)));
+                checkDupsSize(0, map.duplicates(makeKey(beginKey - 1)));
+                checkDupsSize(0, map.duplicates(makeKey(endKey + 1)));
+
+                // entrySet
+
+                Set set = map.entrySet();
+                assertNotNull(set.toString());
+                assertEquals(beginKey > endKey, set.isEmpty());
+                Iterator iter = iterator(set);
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Map.Entry entry = (Map.Entry) iter.next();
+                        Long key = (Long) entry.getKey();
+                        Object val = entry.getValue();
+                        if (map instanceof SortedMap) {
+                            assertEquals(intKey(key), i);
+                        }
+                        assertEquals(intKey(key), intVal(val));
+                        assertTrue(set.contains(entry));
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+                Map.Entry[] entries =
+                    (Map.Entry[]) set.toArray(new Map.Entry[0]);
+                assertNotNull(entries);
+                assertEquals(endKey - beginKey + 1, entries.length);
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Map.Entry entry = entries[i - beginKey];
+                    assertNotNull(entry);
+                    if (map instanceof SortedMap) {
+                        assertEquals(makeKey(i), entry.getKey());
+                        assertEquals(makeVal(i), entry.getValue());
+                    }
+                }
+                readIterator(set, iterator(set), beginKey, endKey);
+                if (smap != null) {
+                    SortedSet sset = (SortedSet) set;
+                    if (beginKey == 1 && endKey >= 1) {
+                        readIterator(sset,
+                                     iterator(sset.subSet(mapEntry(1),
+                                                          mapEntry(2))),
+                                     1, 1);
+                    }
+                    if (beginKey <= 2 && endKey >= 2) {
+                        readIterator(sset,
+                                     iterator(sset.subSet(mapEntry(2),
+                                                          mapEntry(3))),
+                                     2, 2);
+                    }
+                    if (beginKey <= endKey) {
+                        readIterator(sset,
+                                     iterator(sset.subSet
+                                                (mapEntry(endKey),
+                                                 mapEntry(endKey + 1))),
+                                     endKey, endKey);
+                    }
+                    if (isSubMap()) {
+                        if (beginKey <= endKey) {
+                            if (rangeType != TAIL) {
+                                try {
+                                    sset.subSet(mapEntry(endKey + 1),
+                                                mapEntry(endKey + 2));
+                                    fail();
+                                } catch (IllegalArgumentException e) {}
+                            }
+                            if (rangeType != HEAD) {
+                                try {
+                                    sset.subSet(mapEntry(0),
+                                                mapEntry(1));
+                                    fail();
+                                } catch (IllegalArgumentException e) {}
+                            }
+                        }
+                    } else {
+                        readIterator(sset,
+                                     iterator(sset.subSet
+                                                (mapEntry(endKey + 1),
+                                                 mapEntry(endKey + 2))),
+                                     endKey, endKey - 1);
+                        readIterator(sset,
+                                     iterator(sset.subSet(mapEntry(0),
+                                                          mapEntry(1))),
+                                     0, -1);
+                    }
+                }
+
+                // keySet
+
+                set = map.keySet();
+                assertNotNull(set.toString());
+                assertEquals(beginKey > endKey, set.isEmpty());
+                iter = iterator(set);
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Long key = (Long) iter.next();
+                        assertTrue(set.contains(key));
+                        Object val = map.get(key);
+                        if (map instanceof SortedMap) {
+                            assertEquals(key, makeKey(i));
+                        }
+                        assertEquals(intKey(key), intVal(val));
+                    }
+                    assertTrue("" + beginKey + ' ' + endKey, !iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+                Long[] keys = (Long[]) set.toArray(new Long[0]);
+                assertNotNull(keys);
+                assertEquals(endKey - beginKey + 1, keys.length);
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = keys[i - beginKey];
+                    assertNotNull(key);
+                    if (map instanceof SortedMap) {
+                        assertEquals(makeKey(i), key);
+                    }
+                }
+                readIterator(set, iterator(set), beginKey, endKey);
+
+                // values
+
+                Collection coll = map.values();
+                assertNotNull(coll.toString());
+                assertEquals(beginKey > endKey, coll.isEmpty());
+                iter = iterator(coll);
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Object val = iter.next();
+                        if (map instanceof SortedMap) {
+                            assertEquals(makeVal(i), val);
+                        }
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+                Object[] values = coll.toArray();
+                assertNotNull(values);
+                assertEquals(endKey - beginKey + 1, values.length);
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Object val = values[i - beginKey];
+                    assertNotNull(val);
+                    if (map instanceof SortedMap) {
+                        assertEquals(makeVal(i), val);
+                    }
+                }
+                readIterator(coll, iterator(coll), beginKey, endKey);
+
+                // list
+
+                if (list != null) {
+                    assertNotNull(list.toString());
+                    assertEquals(beginKey > endKey, list.isEmpty());
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        int idx = i - beginKey;
+                        Object val = list.get(idx);
+                        assertEquals(makeVal(i), val);
+                        assertTrue(list.contains(val));
+                        assertEquals(idx, list.indexOf(val));
+                        assertEquals(idx, list.lastIndexOf(val));
+                    }
+                    ListIterator li = (ListIterator) iterator(list);
+                    try {
+                        for (int i = beginKey; i <= endKey; i += 1) {
+                            int idx = i - beginKey;
+                            assertTrue(li.hasNext());
+                            assertEquals(idx, li.nextIndex());
+                            Object val = li.next();
+                            assertEquals(makeVal(i), val);
+                            assertEquals(idx, li.previousIndex());
+                        }
+                        assertTrue(!li.hasNext());
+                    } finally {
+                        StoredIterator.close(li);
+                    }
+                    if (beginKey < endKey) {
+                        li = list.listIterator(1);
+                        try {
+                            for (int i = beginKey + 1; i <= endKey; i += 1) {
+                                int idx = i - beginKey;
+                                assertTrue(li.hasNext());
+                                assertEquals(idx, li.nextIndex());
+                                Object val = li.next();
+                                assertEquals(makeVal(i), val);
+                                assertEquals(idx, li.previousIndex());
+                            }
+                            assertTrue(!li.hasNext());
+                        } finally {
+                            StoredIterator.close(li);
+                        }
+                    }
+                    values = list.toArray();
+                    assertNotNull(values);
+                    assertEquals(endKey - beginKey + 1, values.length);
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        Object val = values[i - beginKey];
+                        assertNotNull(val);
+                        assertEquals(makeVal(i), val);
+                    }
+                    readIterator(list, iterator(list), beginKey, endKey);
+                }
+
+                // first/last
+
+                if (smap != null) {
+                    if (beginKey <= endKey &&
+                        beginKey >= 1 && beginKey <= maxKey) {
+                        assertEquals(makeKey(beginKey),
+                                     smap.firstKey());
+                        assertEquals(makeKey(beginKey),
+                                     ((SortedSet) smap.keySet()).first());
+                        Object entry = ((SortedSet) smap.entrySet()).first();
+                        assertEquals(makeKey(beginKey),
+                                     ((Map.Entry) entry).getKey());
+                        if (smap.values() instanceof SortedSet) {
+                            assertEquals(makeVal(beginKey),
+                                         ((SortedSet) smap.values()).first());
+                        }
+                    } else {
+                        assertNull(smap.firstKey());
+                        assertNull(((SortedSet) smap.keySet()).first());
+                        assertNull(((SortedSet) smap.entrySet()).first());
+                        if (smap.values() instanceof SortedSet) {
+                            assertNull(((SortedSet) smap.values()).first());
+                        }
+                    }
+                    if (beginKey <= endKey &&
+                        endKey >= 1 && endKey <= maxKey) {
+                        assertEquals(makeKey(endKey),
+                                     smap.lastKey());
+                        assertEquals(makeKey(endKey),
+                                     ((SortedSet) smap.keySet()).last());
+                        Object entry = ((SortedSet) smap.entrySet()).last();
+                        assertEquals(makeKey(endKey),
+                                     ((Map.Entry) entry).getKey());
+                        if (smap.values() instanceof SortedSet) {
+                            assertEquals(makeVal(endKey),
+                                         ((SortedSet) smap.values()).last());
+                        }
+                    } else {
+                        assertNull(smap.lastKey());
+                        assertNull(((SortedSet) smap.keySet()).last());
+                        assertNull(((SortedSet) smap.entrySet()).last());
+                        if (smap.values() instanceof SortedSet) {
+                            assertNull(((SortedSet) smap.values()).last());
+                        }
+                    }
+                }
+            }
+        });
+    }
+
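+    /**
+     * Like readAll(), but expects only the even keys to be present.
+     */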
+    void readEven()
+        throws Exception {
+
+        readRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                int readBegin = ((beginKey & 1) != 0) ?
+                                    (beginKey + 1) : beginKey;
+                int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey;
+                int readIncr = 2;
+
+                // map
+
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    Long key = makeKey(i);
+                    if ((i & 1) == 0) {
+                        Object val = map.get(key);
+                        assertEquals(makeVal(i), val);
+                        assertTrue(map.containsKey(key));
+                        assertTrue(map.containsValue(val));
+                        assertTrue(map.keySet().contains(key));
+                        assertTrue(map.values().contains(val));
+                        assertTrue(map.duplicates(key).contains(val));
+                        checkDupsSize(1, map.duplicates(key));
+                    } else {
+                        Object val = makeVal(i);
+                        assertTrue(!map.containsKey(key));
+                        assertTrue(!map.containsValue(val));
+                        assertTrue(!map.keySet().contains(key));
+                        assertTrue(!map.values().contains(val));
+                        assertTrue(!map.duplicates(key).contains(val));
+                        checkDupsSize(0, map.duplicates(key));
+                    }
+                }
+
+                // entrySet
+
+                Set set = map.entrySet();
+                assertEquals(beginKey > endKey, set.isEmpty());
+                Iterator iter = iterator(set);
+                try {
+                    for (int i = readBegin; i <= readEnd; i += readIncr) {
+                        assertTrue(iter.hasNext());
+                        Map.Entry entry = (Map.Entry) iter.next();
+                        Long key = (Long) entry.getKey();
+                        Object val = entry.getValue();
+                        if (map instanceof SortedMap) {
+                            assertEquals(intKey(key), i);
+                        }
+                        assertEquals(intKey(key), intVal(val));
+                        assertTrue(set.contains(entry));
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+
+                // keySet
+
+                set = map.keySet();
+                assertEquals(beginKey > endKey, set.isEmpty());
+                iter = iterator(set);
+                try {
+                    for (int i = readBegin; i <= readEnd; i += readIncr) {
+                        assertTrue(iter.hasNext());
+                        Long key = (Long) iter.next();
+                        assertTrue(set.contains(key));
+                        Object val = map.get(key);
+                        if (map instanceof SortedMap) {
+                            assertEquals(key, makeKey(i));
+                        }
+                        assertEquals(intKey(key), intVal(val));
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+
+                // values
+
+                Collection coll = map.values();
+                assertEquals(beginKey > endKey, coll.isEmpty());
+                iter = iterator(coll);
+                try {
+                    for (int i = readBegin; i <= readEnd; i += readIncr) {
+                        assertTrue(iter.hasNext());
+                        Object val = iter.next();
+                        if (map instanceof SortedMap) {
+                            assertEquals(makeVal(i), val);
+                        }
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+
+                // The list view is not checked here: after removals, list
+                // indices only match the remaining values if keys are
+                // renumbered, which does not hold in general.
+
+                // first/last
+
+                if (smap != null) {
+                    if (readBegin <= readEnd &&
+                        readBegin >= 1 && readBegin <= maxKey) {
+                        assertEquals(makeKey(readBegin),
+                                     smap.firstKey());
+                        assertEquals(makeKey(readBegin),
+                                     ((SortedSet) smap.keySet()).first());
+                        Object entry = ((SortedSet) smap.entrySet()).first();
+                        assertEquals(makeKey(readBegin),
+                                     ((Map.Entry) entry).getKey());
+                        if (smap.values() instanceof SortedSet) {
+                            assertEquals(makeVal(readBegin),
+                                         ((SortedSet) smap.values()).first());
+                        }
+                    } else {
+                        assertNull(smap.firstKey());
+                        assertNull(((SortedSet) smap.keySet()).first());
+                        assertNull(((SortedSet) smap.entrySet()).first());
+                        if (smap.values() instanceof SortedSet) {
+                            assertNull(((SortedSet) smap.values()).first());
+                        }
+                    }
+                    if (readBegin <= readEnd &&
+                        readEnd >= 1 && readEnd <= maxKey) {
+                        assertEquals(makeKey(readEnd),
+                                     smap.lastKey());
+                        assertEquals(makeKey(readEnd),
+                                     ((SortedSet) smap.keySet()).last());
+                        Object entry = ((SortedSet) smap.entrySet()).last();
+                        assertEquals(makeKey(readEnd),
+                                     ((Map.Entry) entry).getKey());
+                        if (smap.values() instanceof SortedSet) {
+                            assertEquals(makeVal(readEnd),
+                                         ((SortedSet) smap.values()).last());
+                        }
+                    } else {
+                        assertNull(smap.lastKey());
+                        assertNull(((SortedSet) smap.keySet()).last());
+                        assertNull(((SortedSet) smap.entrySet()).last());
+                        if (smap.values() instanceof SortedSet) {
+                            assertNull(((SortedSet) smap.values()).last());
+                        }
+                    }
+                }
+            }
+        });
+    }
+
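+    /**
+     * List-view variant of readEven(): iterates the list and expects only
+     * the even-keyed values, in order.
+     */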
+    void readEvenList()
+        throws Exception {
+
+        readRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                int readBegin = ((beginKey & 1) != 0) ?
+                                    (beginKey + 1) : beginKey;
+                int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey;
+                int readIncr = 2;
+
+                assertEquals(beginKey > endKey, list.isEmpty());
+                ListIterator iter = (ListIterator) iterator(list);
+                try {
+                    int idx = 0;
+                    for (int i = readBegin; i <= readEnd; i += readIncr) {
+                        assertTrue(iter.hasNext());
+                        assertEquals(idx, iter.nextIndex());
+                        Object val = iter.next();
+                        assertEquals(idx, iter.previousIndex());
+                        if (isEntityBinding) {
+                            assertEquals(i, intVal(val));
+                        } else {
+                            assertEquals(makeVal(i), val);
+                        }
+                        idx += 1;
+                    }
+                    assertTrue(!iter.hasNext());
+                } finally {
+                    StoredIterator.close(iter);
+                }
+            }
+        });
+    }
+
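+    /**
+     * Walks the given iterator forward, backward and back-and-forth over
+     * [beginValue, endValue], checking indices for list iterators and that a
+     * cloned iterator keeps its own position; closes both iterators.
+     */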
+    void readIterator(Collection coll, Iterator iter,
+                      int beginValue, int endValue) {
+
+        ListIterator li = (ListIterator) iter;
+        boolean isList = (coll instanceof List);
+        Iterator clone = null;
+        try {
+            // at beginning
+            assertTrue(!li.hasPrevious());
+            assertTrue(!li.hasPrevious());
+            try { li.previous(); } catch (NoSuchElementException e) {}
+            if (isList) {
+                assertEquals(-1, li.previousIndex());
+            }
+            if (endValue < beginValue) {
+                // is empty
+                assertTrue(!iter.hasNext());
+                try { iter.next(); } catch (NoSuchElementException e) {}
+                if (isList) {
+                    assertEquals(Integer.MAX_VALUE, li.nextIndex());
+                }
+            }
+            // loop thru all and collect in array
+            int[] values = new int[endValue - beginValue + 1];
+            for (int i = beginValue; i <= endValue; i += 1) {
+                assertTrue(iter.hasNext());
+                int idx = i - beginKey;
+                if (isList) {
+                    assertEquals(idx, li.nextIndex());
+                }
+                int value = intIter(coll, iter.next());
+                if (isList) {
+                    assertEquals(idx, li.previousIndex());
+                }
+                values[i - beginValue] = value;
+                if (((StoredCollection) coll).isOrdered()) {
+                    assertEquals(i, value);
+                } else {
+                    assertTrue(value >= beginValue);
+                    assertTrue(value <= endValue);
+                }
+            }
+            // at end
+            assertTrue(!iter.hasNext());
+            try { iter.next(); } catch (NoSuchElementException e) {}
+            if (isList) {
+                assertEquals(Integer.MAX_VALUE, li.nextIndex());
+            }
+            // clone at same position
+            clone = StoredCollections.iterator(iter);
+            assertTrue(!clone.hasNext());
+            // loop thru in reverse
+            for (int i = endValue; i >= beginValue; i -= 1) {
+                assertTrue(li.hasPrevious());
+                int idx = i - beginKey;
+                if (isList) {
+                    assertEquals(idx, li.previousIndex());
+                }
+                int value = intIter(coll, li.previous());
+                if (isList) {
+                    assertEquals(idx, li.nextIndex());
+                }
+                assertEquals(values[i - beginValue], value);
+            }
+            // clone should not have changed
+            assertTrue(!clone.hasNext());
+            // at beginning
+            assertTrue(!li.hasPrevious());
+            try { li.previous(); } catch (NoSuchElementException e) {}
+            if (isList) {
+                assertEquals(-1, li.previousIndex());
+            }
+            // loop thru with some back-and-forth
+            for (int i = beginValue; i <= endValue; i += 1) {
+                assertTrue(iter.hasNext());
+                int idx = i - beginKey;
+                if (isList) {
+                    assertEquals(idx, li.nextIndex());
+                }
+                Object obj = iter.next();
+                if (isList) {
+                    assertEquals(idx, li.previousIndex());
+                }
+                assertEquals(obj, li.previous());
+                if (isList) {
+                    assertEquals(idx, li.nextIndex());
+                }
+                assertEquals(obj, iter.next());
+                if (isList) {
+                    assertEquals(idx, li.previousIndex());
+                }
+                int value = intIter(coll, obj);
+                assertEquals(values[i - beginValue], value);
+            }
+            // at end
+            assertTrue(!iter.hasNext());
+            try { iter.next(); } catch (NoSuchElementException e) {}
+            if (isList) {
+                assertEquals(Integer.MAX_VALUE, li.nextIndex());
+            }
+        } finally {
+            StoredIterator.close(iter);
+            StoredIterator.close(clone);
+        }
+    }
+
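+    /**
+     * Exercises bulk operations (equals, containsAll, addAll, removeAll,
+     * retainAll, putAll, clear) on the entrySet, keySet and values views
+     * against an expected HashMap.
+     */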
+    void bulkOperations()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                HashMap hmap = new HashMap();
+                for (int i = Math.max(1, beginKey);
+                         i <= Math.min(maxKey, endKey);
+                         i += 1) {
+                    hmap.put(makeKey(i), makeVal(i));
+                }
+                assertEquals(hmap, map);
+                assertEquals(hmap.entrySet(), map.entrySet());
+                assertEquals(hmap.keySet(), map.keySet());
+                assertEquals(map.values(), hmap.values());
+
+                assertTrue(map.entrySet().containsAll(hmap.entrySet()));
+                assertTrue(map.keySet().containsAll(hmap.keySet()));
+                assertTrue(map.values().containsAll(hmap.values()));
+
+                map.clear();
+                assertTrue(map.isEmpty());
+                imap.putAll(hmap);
+                assertEquals(hmap, map);
+
+                assertTrue(map.entrySet().removeAll(hmap.entrySet()));
+                assertTrue(map.entrySet().isEmpty());
+                assertTrue(!map.entrySet().removeAll(hmap.entrySet()));
+                assertTrue(imap.entrySet().addAll(hmap.entrySet()));
+                assertTrue(map.entrySet().containsAll(hmap.entrySet()));
+                assertTrue(!imap.entrySet().addAll(hmap.entrySet()));
+                assertEquals(hmap, map);
+
+                assertTrue(!map.entrySet().retainAll(hmap.entrySet()));
+                assertEquals(hmap, map);
+                assertTrue(map.entrySet().retainAll(Collections.EMPTY_SET));
+                assertTrue(map.isEmpty());
+                imap.putAll(hmap);
+                assertEquals(hmap, map);
+
+                assertTrue(map.values().removeAll(hmap.values()));
+                assertTrue(map.values().isEmpty());
+                assertTrue(!map.values().removeAll(hmap.values()));
+                if (isEntityBinding) {
+                    assertTrue(imap.values().addAll(hmap.values()));
+                    assertTrue(map.values().containsAll(hmap.values()));
+                    assertTrue(!imap.values().addAll(hmap.values()));
+                } else {
+                    imap.putAll(hmap);
+                }
+                assertEquals(hmap, map);
+
+                assertTrue(!map.values().retainAll(hmap.values()));
+                assertEquals(hmap, map);
+                assertTrue(map.values().retainAll(Collections.EMPTY_SET));
+                assertTrue(map.isEmpty());
+                imap.putAll(hmap);
+                assertEquals(hmap, map);
+
+                assertTrue(map.keySet().removeAll(hmap.keySet()));
+                assertTrue(map.keySet().isEmpty());
+                assertTrue(!map.keySet().removeAll(hmap.keySet()));
+                assertTrue(imap.keySet().addAll(hmap.keySet()));
+                assertTrue(imap.keySet().containsAll(hmap.keySet()));
+                if (index != null) {
+                    assertTrue(map.keySet().isEmpty());
+                }
+                assertTrue(!imap.keySet().addAll(hmap.keySet()));
+                // restore values to non-null
+                imap.keySet().removeAll(hmap.keySet());
+                imap.putAll(hmap);
+                assertEquals(hmap, map);
+
+                assertTrue(!map.keySet().retainAll(hmap.keySet()));
+                assertEquals(hmap, map);
+                assertTrue(map.keySet().retainAll(Collections.EMPTY_SET));
+                assertTrue(map.isEmpty());
+                imap.putAll(hmap);
+                assertEquals(hmap, map);
+            }
+        });
+    }
+
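+    /**
+     * Exercises bulk operations on the list view against an expected
+     * ArrayList, including addAll at the first, last and middle index.
+     */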
+    void bulkListOperations()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                ArrayList alist = new ArrayList();
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    alist.add(makeVal(i));
+                }
+
+                assertEquals(alist, list);
+                assertTrue(list.containsAll(alist));
+
+                if (isListAddAllowed()) {
+                    list.clear();
+                    assertTrue(list.isEmpty());
+                    assertTrue(ilist.addAll(alist));
+                    assertEquals(alist, list);
+                }
+
+                assertTrue(!list.retainAll(alist));
+                assertEquals(alist, list);
+
+                if (isListAddAllowed()) {
+                    assertTrue(list.retainAll(Collections.EMPTY_SET));
+                    assertTrue(list.isEmpty());
+                    assertTrue(ilist.addAll(alist));
+                    assertEquals(alist, list);
+                }
+
+                if (isListAddAllowed() && !isEntityBinding) {
+                    // deleting in a renumbered list with entity binding will
+                    // change the values dynamically, making it very difficult
+                    // to test
+                    assertTrue(list.removeAll(alist));
+                    assertTrue(list.isEmpty());
+                    assertTrue(!list.removeAll(alist));
+                    assertTrue(ilist.addAll(alist));
+                    assertTrue(list.containsAll(alist));
+                    assertEquals(alist, list);
+                }
+
+                if (isListAddAllowed() && !isEntityBinding) {
+                    // addAll at an index is also very difficult to test with
+                    // an entity binding
+
+                    // addAll at first index
+                    ilist.addAll(beginKey, alist);
+                    assertTrue(list.containsAll(alist));
+                    assertEquals(2 * alist.size(), countElements(list));
+                    for (int i = beginKey; i <= endKey; i += 1)
+                        ilist.remove(beginKey);
+                    assertEquals(alist, list);
+
+                    // addAll at last index
+                    ilist.addAll(endKey, alist);
+                    assertTrue(list.containsAll(alist));
+                    assertEquals(2 * alist.size(), countElements(list));
+                    for (int i = beginKey; i <= endKey; i += 1)
+                        ilist.remove(endKey);
+                    assertEquals(alist, list);
+
+                    // addAll in the middle
+                    ilist.addAll(endKey - 1, alist);
+                    assertTrue(list.containsAll(alist));
+                    assertEquals(2 * alist.size(), countElements(list));
+                    for (int i = beginKey; i <= endKey; i += 1)
+                        ilist.remove(endKey - 1);
+                    assertEquals(alist, list);
+                }
+            }
+        });
+    }
+
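+    /**
+     * Restricts the views to the given range, verifies that out-of-range
+     * creation and writes fail, runs the update/bulk/read tests within the
+     * range, and then restores the full range.
+     */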
+    void readWriteRange(final int type, final int rangeBegin,
+                        final int rangeEnd)
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                setRange(type, rangeBegin, rangeEnd);
+                createOutOfRange(rangeBegin, rangeEnd);
+                if (rangeType != TAIL) {
+                    writeOutOfRange(new Long(rangeEnd + 1));
+                }
+                if (rangeType != HEAD) {
+                    writeOutOfRange(new Long(rangeBegin - 1));
+                }
+                if (rangeBegin <= rangeEnd) {
+                    updateAll();
+                }
+                if (rangeBegin < rangeEnd && !map.areKeysRenumbered()) {
+                    bulkOperations();
+                    removeIter();
+                }
+                readAll();
+                clearRange();
+            }
+        });
+    }
+
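+    /**
+     * Saves the current views and replaces map/smap/list with a SUB, HEAD or
+     * TAIL restriction, checking that the equivalent inclusive/exclusive
+     * range methods produce equal views; adjusts beginKey/endKey to match.
+     */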
+    void setRange(int type, int rangeBegin, int rangeEnd) {
+
+        rangeType = type;
+        saveMap = map;
+        saveSMap = smap;
+        saveList = list;
+        int listBegin = rangeBegin - beginKey;
+        boolean canMakeSubList = (list != null && listBegin >= 0);
+        if (!canMakeSubList) {
+            list = null;
+        }
+        if (list != null) {
+            try {
+                list.subList(-1, 0);
+                fail();
+            } catch (IndexOutOfBoundsException e) { }
+        }
+        switch (type) {
+
+        case SUB:
+            smap = (StoredSortedMap) smap.subMap(makeKey(rangeBegin),
+                                                 makeKey(rangeEnd + 1));
+            if (canMakeSubList) {
+                list = (StoredList) list.subList(listBegin,
+                                                 rangeEnd + 1 - beginKey);
+            }
+            // check for equivalent ranges
+            assertEquals(smap,
+                        ((StoredSortedMap) saveSMap).subMap(
+                            makeKey(rangeBegin), true,
+                            makeKey(rangeEnd + 1), false));
+            assertEquals(smap.entrySet(),
+                        ((StoredSortedEntrySet) saveSMap.entrySet()).subSet(
+                            mapEntry(rangeBegin), true,
+                            mapEntry(rangeEnd + 1), false));
+            assertEquals(smap.keySet(),
+                        ((StoredSortedKeySet) saveSMap.keySet()).subSet(
+                            makeKey(rangeBegin), true,
+                            makeKey(rangeEnd + 1), false));
+            if (smap.values() instanceof SortedSet) {
+                assertEquals(smap.values(),
+                            ((StoredSortedValueSet) saveSMap.values()).subSet(
+                                makeVal(rangeBegin), true,
+                                makeVal(rangeEnd + 1), false));
+            }
+            break;
+        case HEAD:
+            smap = (StoredSortedMap) smap.headMap(makeKey(rangeEnd + 1));
+            if (canMakeSubList) {
+                list = (StoredList) list.subList(0,
+                                                 rangeEnd + 1 - beginKey);
+            }
+            // check for equivalent ranges
+            assertEquals(smap,
+                        ((StoredSortedMap) saveSMap).headMap(
+                            makeKey(rangeEnd + 1), false));
+            assertEquals(smap.entrySet(),
+                        ((StoredSortedEntrySet) saveSMap.entrySet()).headSet(
+                            mapEntry(rangeEnd + 1), false));
+            assertEquals(smap.keySet(),
+                        ((StoredSortedKeySet) saveSMap.keySet()).headSet(
+                            makeKey(rangeEnd + 1), false));
+            if (smap.values() instanceof SortedSet) {
+                assertEquals(smap.values(),
+                            ((StoredSortedValueSet) saveSMap.values()).headSet(
+                                makeVal(rangeEnd + 1), false));
+            }
+            break;
+        case TAIL:
+            smap = (StoredSortedMap) smap.tailMap(makeKey(rangeBegin));
+            if (canMakeSubList) {
+                list = (StoredList) list.subList(listBegin,
+                                                 maxKey + 1 - beginKey);
+            }
+            // check for equivalent ranges
+            assertEquals(smap,
+                        ((StoredSortedMap) saveSMap).tailMap(
+                            makeKey(rangeBegin), true));
+            assertEquals(smap.entrySet(),
+                        ((StoredSortedEntrySet) saveSMap.entrySet()).tailSet(
+                            mapEntry(rangeBegin), true));
+            assertEquals(smap.keySet(),
+                        ((StoredSortedKeySet) saveSMap.keySet()).tailSet(
+                            makeKey(rangeBegin), true));
+            if (smap.values() instanceof SortedSet) {
+                assertEquals(smap.values(),
+                            ((StoredSortedValueSet) saveSMap.values()).tailSet(
+                                makeVal(rangeBegin), true));
+            }
+            break;
+        default: throw new RuntimeException();
+        }
+        map = smap;
+        beginKey = rangeBegin;
+        if (rangeBegin < 1 || rangeEnd > maxKey) {
+            endKey = rangeBegin - 1; // force empty range for readAll()
+        } else {
+            endKey = rangeEnd;
+        }
+    }
+
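+    /** Restores the views and key range saved by setRange(). */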
+    void clearRange() {
+
+        rangeType = NONE;
+        beginKey = 1;
+        endKey = maxKey;
+        map = saveMap;
+        smap = saveSMap;
+        list = saveList;
+    }
+
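+    /**
+     * Verifies that creating sub-ranges extending outside the current range
+     * fails with IllegalArgumentException (IndexOutOfBoundsException for the
+     * list view).
+     */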
+    void createOutOfRange(int rangeBegin, int rangeEnd)
+        throws Exception {
+
+        // map
+
+        if (rangeType != TAIL) {
+            try {
+                smap.subMap(makeKey(rangeBegin), makeKey(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                smap.headMap(makeKey(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            checkDupsSize(0, smap.duplicates(makeKey(rangeEnd + 2)));
+        }
+        if (rangeType != HEAD) {
+            try {
+                smap.subMap(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                smap.tailMap(makeKey(rangeBegin - 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            checkDupsSize(0, smap.duplicates(makeKey(rangeBegin - 1)));
+        }
+
+        // keySet
+
+        if (rangeType != TAIL) {
+            SortedSet sset = (SortedSet) map.keySet();
+            try {
+                sset.subSet(makeKey(rangeBegin), makeKey(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                sset.headSet(makeKey(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                iterator(sset.subSet(makeKey(rangeEnd + 1),
+                                     makeKey(rangeEnd + 2)));
+                fail();
+            } catch (IllegalArgumentException e) { }
+        }
+        if (rangeType != HEAD) {
+            SortedSet sset = (SortedSet) map.keySet();
+            try {
+                sset.subSet(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                sset.tailSet(makeKey(rangeBegin - 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                iterator(sset.subSet(makeKey(rangeBegin - 1),
+                                     makeKey(rangeBegin)));
+                fail();
+            } catch (IllegalArgumentException e) { }
+        }
+
+        // entrySet
+
+        if (rangeType != TAIL) {
+            SortedSet sset = (SortedSet) map.entrySet();
+            try {
+                sset.subSet(mapEntry(rangeBegin), mapEntry(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                sset.headSet(mapEntry(rangeEnd + 2));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                iterator(sset.subSet(mapEntry(rangeEnd + 1),
+                                     mapEntry(rangeEnd + 2)));
+                fail();
+            } catch (IllegalArgumentException e) { }
+        }
+        if (rangeType != HEAD) {
+            SortedSet sset = (SortedSet) map.entrySet();
+            try {
+                sset.subSet(mapEntry(rangeBegin - 1), mapEntry(rangeEnd + 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                sset.tailSet(mapEntry(rangeBegin - 1));
+                fail();
+            } catch (IllegalArgumentException e) { }
+            try {
+                iterator(sset.subSet(mapEntry(rangeBegin - 1),
+                                     mapEntry(rangeBegin)));
+                fail();
+            } catch (IllegalArgumentException e) { }
+        }
+
+        // values
+
+        if (map.values() instanceof SortedSet) {
+            SortedSet sset = (SortedSet) map.values();
+            if (rangeType != TAIL) {
+                try {
+                    sset.subSet(makeVal(rangeBegin),
+                                makeVal(rangeEnd + 2));
+                    fail();
+                } catch (IllegalArgumentException e) { }
+                try {
+                    sset.headSet(makeVal(rangeEnd + 2));
+                    fail();
+                } catch (IllegalArgumentException e) { }
+            }
+            if (rangeType != HEAD) {
+                try {
+                    sset.subSet(makeVal(rangeBegin - 1),
+                                makeVal(rangeEnd + 1));
+                    fail();
+                } catch (IllegalArgumentException e) { }
+                try {
+                    sset.tailSet(makeVal(rangeBegin - 1));
+                    fail();
+                } catch (IllegalArgumentException e) { }
+            }
+        }
+
+        // list
+
+        if (list != null) {
+            int size = rangeEnd - rangeBegin + 1;
+            try {
+                list.subList(0, size + 1);
+                fail();
+            } catch (IndexOutOfBoundsException e) { }
+            try {
+                list.subList(-1, size);
+                fail();
+            } catch (IndexOutOfBoundsException e) { }
+            try {
+                list.subList(2, 1);
+                fail();
+            } catch (IndexOutOfBoundsException e) { }
+            try {
+                list.subList(size, size);
+                fail();
+            } catch (IndexOutOfBoundsException e) { }
+        }
+    }
+
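+    /**
+     * Verifies that writing a key outside the current range fails, with
+     * IllegalArgumentException for primary views and
+     * UnsupportedOperationException for index (read-only) views; positional
+     * writes on the list view are also checked.
+     */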
+    void writeOutOfRange(Long badNewKey)
+        throws Exception {
+
+        try {
+            map.put(badNewKey, makeVal(badNewKey));
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.toString(), index == null);
+        } catch (UnsupportedOperationException e) {
+            assertTrue(index != null);
+        }
+        try {
+            map.keySet().add(badNewKey);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(index == null);
+        } catch (UnsupportedOperationException e) {
+            assertTrue(index != null);
+        }
+        try {
+            map.values().add(makeEntity(badNewKey));
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(isEntityBinding && index == null);
+        } catch (UnsupportedOperationException e) {
+            assertTrue(!(isEntityBinding && index == null));
+        }
+        if (list != null) {
+            int i = badNewKey.intValue() - beginKey;
+            try {
+                list.set(i, makeVal(i));
+                fail();
+            } catch (IndexOutOfBoundsException e) {
+                assertTrue(index == null);
+            } catch (UnsupportedOperationException e) {
+                assertTrue(index != null);
+            }
+            try {
+                list.add(i, makeVal(badNewKey));
+                fail();
+            } catch (UnsupportedOperationException e) {
+            }
+        }
+    }
+
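+    /**
+     * Runs the primary or indexed duplicate read/write tests for keys near
+     * the start and end of the range.
+     */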
+    void readWriteDuplicates()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                if (index == null) {
+                    readWritePrimaryDuplicates(beginKey);
+                    readWritePrimaryDuplicates(beginKey + 1);
+                    readWritePrimaryDuplicates(endKey);
+                    readWritePrimaryDuplicates(endKey - 1);
+                } else {
+                    readWriteIndexedDuplicates(beginKey);
+                    readWriteIndexedDuplicates(beginKey + 1);
+                    readWriteIndexedDuplicates(endKey);
+                    readWriteIndexedDuplicates(endKey - 1);
+                }
+            }
+        });
+    }
+
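+    /**
+     * Adds duplicates for one key using several write paths (Map.put,
+     * duplicates().add, a duplicates iterator, values().add), reads them
+     * back, then removes them using a matching removal path and checks that
+     * only the original value remains.
+     */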
+    void readWritePrimaryDuplicates(int i)
+        throws Exception {
+
+        Collection dups;
+        // make duplicate values
+        final Long key = makeKey(i);
+        final Object[] values = new Object[5];
+        for (int j = 0; j < values.length; j += 1) {
+            values[j] = isEntityBinding
+                        ? makeEntity(i, i + j)
+                        : makeVal(i + j);
+        }
+        // add duplicates
+        outerLoop: for (int writeMode = 0;; writeMode += 1) {
+            //System.out.println("write mode " + writeMode);
+            switch (writeMode) {
+                case 0:
+                case 1: {
+                    // write with Map.put()
+                    for (int j = 1; j < values.length; j += 1) {
+                        map.put(key, values[j]);
+                    }
+                    break;
+                }
+                case 2: {
+                    // write with Map.duplicates().add()
+                    dups = map.duplicates(key);
+                    for (int j = 1; j < values.length; j += 1) {
+                        dups.add(values[j]);
+                    }
+                    break;
+                }
+                case 3: {
+                    // write with Map.duplicates().iterator().add()
+                    writeIterRunner.run(new TransactionWorker() {
+                        public void doWork() throws Exception {
+                            Collection dups = map.duplicates(key);
+                            Iterator iter = iterator(dups);
+                            assertEquals(values[0], iter.next());
+                            assertTrue(!iter.hasNext());
+                            try {
+                                for (int j = 1; j < values.length; j += 1) {
+                                    ((ListIterator) iter).add(values[j]);
+                                }
+                            } finally {
+                                StoredIterator.close(iter);
+                            }
+                        }
+                    });
+                    break;
+                }
+                case 4: {
+                    // write with Map.values().add()
+                    if (!isEntityBinding) {
+                        continue;
+                    }
+                    Collection set = map.values();
+                    for (int j = 1; j < values.length; j += 1) {
+                        set.add(values[j]);
+                    }
+                    break;
+                }
+                default: {
+                    break outerLoop;
+                }
+            }
+            checkDupsSize(values.length, map.duplicates(key));
+            // read duplicates
+            readDuplicates(i, key, values);
+            // remove duplicates
+            switch (writeMode) {
+                case 0: {
+                    // remove with Map.remove()
+                    checkDupsSize(values.length, map.duplicates(key));
+                    map.remove(key); // remove all values
+                    checkDupsSize(0, map.duplicates(key));
+                    map.put(key, values[0]); // put back original value
+                    checkDupsSize(1, map.duplicates(key));
+                    break;
+                }
+                case 1: {
+                    // remove with Map.keySet().remove()
+                    map.keySet().remove(key); // remove all values
+                    map.put(key, values[0]); // put back original value
+                    break;
+                }
+                case 2: {
+                    // remove with Map.duplicates().clear()
+                    dups = map.duplicates(key);
+                    dups.clear(); // remove all values
+                    dups.add(values[0]); // put back original value
+                    break;
+                }
+                case 3: {
+                    // remove with Map.duplicates().iterator().remove()
+                    writeIterRunner.run(new TransactionWorker() {
+                        public void doWork() throws Exception {
+                            Collection dups = map.duplicates(key);
+                            Iterator iter = iterator(dups);
+                            try {
+                                for (int j = 0; j < values.length; j += 1) {
+                                    assertEquals(values[j], iter.next());
+                                    if (j != 0) {
+                                        iter.remove();
+                                    }
+                                }
+                            } finally {
+                                StoredIterator.close(iter);
+                            }
+                        }
+                    });
+                    break;
+                }
+                case 4: {
+                    // remove with Map.values().remove()
+                    if (!isEntityBinding) {
+                        throw new IllegalStateException();
+                    }
+                    Collection set = map.values();
+                    for (int j = 1; j < values.length; j += 1) {
+                        set.remove(values[j]);
+                    }
+                    break;
+                }
+                default: throw new IllegalStateException();
+            }
+            // verify that only original value is present
+            dups = map.duplicates(key);
+            assertTrue(dups.contains(values[0]));
+            for (int j = 1; j < values.length; j += 1) {
+                assertTrue(!dups.contains(values[j]));
+            }
+            checkDupsSize(1, dups);
+        }
+    }
+
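+    /**
+     * Creates duplicates in the index by inserting additional primary
+     * records that map to the same index key, reads them, then removes the
+     * extra records.
+     */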
+    void readWriteIndexedDuplicates(int i)
+        throws Exception {
+
+        Object key = makeKey(i);
+        Object[] values = new Object[3];
+        values[0] = makeVal(i);
+        for (int j = 1; j < values.length; j += 1) {
+            values[j] = isEntityBinding
+                        ? makeEntity(endKey + j, i)
+                        : makeVal(i);
+        }
+        // add duplicates
+        for (int j = 1; j < values.length; j += 1) {
+            imap.put(makeKey(endKey + j), values[j]);
+        }
+        // read duplicates
+        readDuplicates(i, key, values);
+        // remove duplicates
+        for (int j = 1; j < values.length; j += 1) {
+            imap.remove(makeKey(endKey + j));
+        }
+        checkDupsSize(1, map.duplicates(key));
+    }
+
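+    /**
+     * Reads the duplicates for one key through the duplicates(), values(),
+     * entrySet() and keySet() iterators, checking order when the map is
+     * ordered.
+     */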
+    void readDuplicates(int i, Object key, Object[] values) {
+
+        boolean isOrdered = map.isOrdered();
+        Collection dups;
+        Iterator iter;
+        // read with Map.duplicates().iterator()
+        dups = map.duplicates(key);
+        checkDupsSize(values.length, dups);
+        iter = iterator(dups);
+        try {
+            for (int j = 0; j < values.length; j += 1) {
+                assertTrue(iter.hasNext());
+                Object val = iter.next();
+                assertEquals(values[j], val);
+            }
+            assertTrue(!iter.hasNext());
+        } finally {
+            StoredIterator.close(iter);
+        }
+        // read with Map.values().iterator()
+        Collection clone = ((StoredCollection) map.values()).toList();
+        iter = iterator(map.values());
+        try {
+            for (int j = beginKey; j < i; j += 1) {
+                Object val = iter.next();
+                assertTrue(clone.remove(makeVal(j)));
+                if (isOrdered) {
+                    assertEquals(makeVal(j), val);
+                }
+            }
+            for (int j = 0; j < values.length; j += 1) {
+                Object val = iter.next();
+                assertTrue(clone.remove(values[j]));
+                if (isOrdered) {
+                    assertEquals(values[j], val);
+                }
+            }
+            for (int j = i + 1; j <= endKey; j += 1) {
+                Object val = iter.next();
+                assertTrue(clone.remove(makeVal(j)));
+                if (isOrdered) {
+                    assertEquals(makeVal(j), val);
+                }
+            }
+            assertTrue(!iter.hasNext());
+            assertTrue(clone.isEmpty());
+        } finally {
+            StoredIterator.close(iter);
+        }
+        // read with Map.entrySet().iterator()
+        clone = ((StoredCollection) map.entrySet()).toList();
+        iter = iterator(map.entrySet());
+        try {
+            for (int j = beginKey; j < i; j += 1) {
+                Map.Entry entry = (Map.Entry) iter.next();
+                assertTrue(clone.remove(mapEntry(j)));
+                if (isOrdered) {
+                    assertEquals(makeVal(j), entry.getValue());
+                    assertEquals(makeKey(j), entry.getKey());
+                }
+            }
+            for (int j = 0; j < values.length; j += 1) {
+                Map.Entry entry = (Map.Entry) iter.next();
+                assertTrue(clone.remove(mapEntry(makeKey(i), values[j])));
+                if (isOrdered) {
+                    assertEquals(values[j], entry.getValue());
+                    assertEquals(makeKey(i), entry.getKey());
+                }
+            }
+            for (int j = i + 1; j <= endKey; j += 1) {
+                Map.Entry entry = (Map.Entry) iter.next();
+                assertTrue(clone.remove(mapEntry(j)));
+                if (isOrdered) {
+                    assertEquals(makeVal(j), entry.getValue());
+                    assertEquals(makeKey(j), entry.getKey());
+                }
+            }
+            assertTrue(!iter.hasNext());
+            assertTrue(clone.isEmpty());
+        } finally {
+            StoredIterator.close(iter);
+        }
+        // read with Map.keySet().iterator()
+        clone = ((StoredCollection) map.keySet()).toList();
+        iter = iterator(map.keySet());
+        try {
+            for (int j = beginKey; j < i; j += 1) {
+                Object val = iter.next();
+                assertTrue(clone.remove(makeKey(j)));
+                if (isOrdered) {
+                    assertEquals(makeKey(j), val);
+                }
+            }
+            {
+                // only one key is iterated for all duplicates
+                Object val = iter.next();
+                assertTrue(clone.remove(makeKey(i)));
+                if (isOrdered) {
+                    assertEquals(makeKey(i), val);
+                }
+            }
+            for (int j = i + 1; j <= endKey; j += 1) {
+                Object val = iter.next();
+                assertTrue(clone.remove(makeKey(j)));
+                if (isOrdered) {
+                    assertEquals(makeKey(j), val);
+                }
+            }
+            assertTrue(!iter.hasNext());
+            assertTrue(clone.isEmpty());
+        } finally {
+            StoredIterator.close(iter);
+        }
+    }
+
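+    /* Verifies that adding a duplicate fails with
+     * UnsupportedOperationException when duplicates are not allowed. */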
+    void duplicatesNotAllowed() {
+
+        Collection dups = map.duplicates(makeKey(beginKey));
+        try {
+            dups.add(makeVal(beginKey));
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+        ListIterator iter = (ListIterator) iterator(dups);
+        try {
+            iter.add(makeVal(beginKey));
+            fail();
+        } catch (UnsupportedOperationException expected) {
+        } finally {
+            StoredIterator.close(iter);
+        }
+    }
+
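+    /* Verifies that index-based ListIterator methods are unsupported for
+     * non-list views. */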
+    void listOperationsNotAllowed() {
+
+        ListIterator iter = (ListIterator) iterator(map.values());
+        try {
+            try {
+                iter.nextIndex();
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+            try {
+                iter.previousIndex();
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+        } finally {
+            StoredIterator.close(iter);
+        }
+    }
+
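+    /* Verifies CDB locking: two read cursors or two write cursors may be
+     * open at once, but not a write cursor while a read cursor is open,
+     * and write operations are restricted while a cursor is open. */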
+    void testCdbLocking() {
+
+        Iterator readIterator;
+        Iterator writeIterator;
+        StoredKeySet set = (StoredKeySet) map.keySet();
+
+        // can open two CDB read cursors
+        readIterator = set.storedIterator(false);
+        try {
+            Iterator readIterator2 = set.storedIterator(false);
+            StoredIterator.close(readIterator2);
+        } finally {
+            StoredIterator.close(readIterator);
+        }
+
+        // can open two CDB write cursors
+        writeIterator = set.storedIterator(true);
+        try {
+            Iterator writeIterator2 = set.storedIterator(true);
+            StoredIterator.close(writeIterator2);
+        } finally {
+            StoredIterator.close(writeIterator);
+        }
+
+        // cannot open CDB write cursor when read cursor is open
+        readIterator = set.storedIterator(false);
+        try {
+            writeIterator = set.storedIterator(true);
+            /* Should have thrown; close the leaked cursor before failing. */
+            StoredIterator.close(writeIterator);
+            fail();
+        } catch (IllegalStateException e) {
+        } finally {
+            StoredIterator.close(readIterator);
+        }
+
+        if (index == null) {
+            // cannot put() with read cursor open
+            readIterator = set.storedIterator(false);
+            try {
+                map.put(makeKey(1), makeVal(1));
+                fail();
+            } catch (IllegalStateException e) {
+            } finally {
+                StoredIterator.close(readIterator);
+            }
+
+            // append() with a write cursor open fails for QUEUE/RECNO only
+            writeIterator = set.storedIterator(true);
+            try {
+                if (testStore.isQueueOrRecno()) {
+                    try {
+                        map.append(makeVal(1));
+                        fail();
+                    } catch (IllegalStateException e) {}
+                } else {
+                    map.append(makeVal(1));
+                }
+            } finally {
+                StoredIterator.close(writeIterator);
+            }
+        }
+    }
+
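+    /* Values are offset by 100 to distinguish them from key values. */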
+    Object makeVal(int key) {
+
+        if (isEntityBinding) {
+            return makeEntity(key);
+        } else {
+            return new Long(key + 100);
+        }
+    }
+
+    Object makeVal(int key, int val) {
+
+        if (isEntityBinding) {
+            return makeEntity(key, val);
+        } else {
+            return makeVal(val);
+        }
+    }
+
+    Object makeEntity(int key, int val) {
+
+        return new TestEntity(key, val + 100);
+    }
+
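+    /* Recovers the int value by undoing the +100 offset used by makeVal
+     * and makeEntity. */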
+    int intVal(Object val) {
+
+        if (isEntityBinding) {
+            return ((TestEntity) val).value - 100;
+        } else {
+            return ((Long) val).intValue() - 100;
+        }
+    }
+
+    int intKey(Object key) {
+
+        return ((Long) key).intValue();
+    }
+
+    Object makeVal(Long key) {
+
+        return makeVal(key.intValue());
+    }
+
+    Object makeEntity(int key) {
+
+        return makeEntity(key, key);
+    }
+
+    Object makeEntity(Long key) {
+
+        return makeEntity(key.intValue());
+    }
+
+    int intIter(Collection coll, Object value) {
+
+        if (coll instanceof StoredKeySet) {
+            return intKey(value);
+        } else {
+            if (coll instanceof StoredEntrySet) {
+                value = ((Map.Entry) value).getValue();
+            }
+            return intVal(value);
+        }
+    }
+
+    Map.Entry mapEntry(Object key, Object val) {
+
+        return new MapEntryParameter(key, val);
+    }
+
+    Map.Entry mapEntry(int key) {
+
+        return new MapEntryParameter(makeKey(key), makeVal(key));
+    }
+
+    Long makeKey(int key) {
+
+        return new Long(key);
+    }
+
+    boolean isSubMap() {
+
+        return rangeType != NONE;
+    }
+
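+    /* Checks the expected duplicate count both via Collection.size() and
+     * via the stored cursor's count() method. */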
+    void checkDupsSize(int expected, Collection coll) {
+
+        assertEquals(expected, coll.size());
+        if (coll instanceof StoredCollection) {
+            StoredIterator i = ((StoredCollection) coll).storedIterator(false);
+            try {
+                int actual = 0;
+                if (i.hasNext()) {
+                    i.next();
+                    actual = i.count();
+                }
+                assertEquals(expected, actual);
+            } finally {
+                StoredIterator.close(i);
+            }
+        }
+    }
+
+    private boolean isListAddAllowed() {
+
+        return list != null && testStore.isQueueOrRecno() &&
+               list.areKeysRenumbered();
+    }
+
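+    /* Counts elements by full iteration rather than calling size(). */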
+    private int countElements(Collection coll) {
+
+        int count = 0;
+        Iterator iter = iterator(coll);
+        try {
+            while (iter.hasNext()) {
+                iter.next();
+                count += 1;
+            }
+        } finally {
+            StoredIterator.close(iter);
+        }
+        return count;
+    }
+}
diff --git a/test/com/sleepycat/collections/test/ForeignKeyTest.java b/test/com/sleepycat/collections/test/ForeignKeyTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7ca31d0dbde098798e3242809e6c0cbd33fc3419
--- /dev/null
+++ b/test/com/sleepycat/collections/test/ForeignKeyTest.java
@@ -0,0 +1,343 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyTest.java,v 1.33 2008/02/05 23:28:25 mark Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class ForeignKeyTest extends TestCase {
+
+    private static final ForeignKeyDeleteAction[] ACTIONS = {
+        ForeignKeyDeleteAction.ABORT,
+        ForeignKeyDeleteAction.NULLIFY,
+        ForeignKeyDeleteAction.CASCADE,
+    };
+    private static final String[] ACTION_LABELS = {
+        "ABORT",
+        "NULLIFY",
+        "CASCADE",
+    };
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+            for (int j = 0; j < ACTIONS.length; j += 1) {
+                suite.addTest(new ForeignKeyTest(TestEnv.ALL[i],
+                                                 ACTIONS[j],
+                                                 ACTION_LABELS[j]));
+            }
+        }
+        return suite;
+    }
+
+    private TestEnv testEnv;
+    private Environment env;
+    private StoredClassCatalog catalog;
+    private TupleSerialFactory factory;
+    private Database store1;
+    private Database store2;
+    private SecondaryDatabase index1;
+    private SecondaryDatabase index2;
+    private Map storeMap1;
+    private Map storeMap2;
+    private Map indexMap1;
+    private Map indexMap2;
+    private ForeignKeyDeleteAction onDelete;
+
+    public ForeignKeyTest(TestEnv testEnv, ForeignKeyDeleteAction onDelete,
+                          String onDeleteLabel) {
+
+        super("ForeignKeyTest-" + testEnv.getName() + '-' + onDeleteLabel);
+
+        this.testEnv = testEnv;
+        this.onDelete = onDelete;
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = testEnv.open(getName());
+
+        createDatabase();
+    }
+
+    public void tearDown() {
+
+        try {
+            if (index1 != null) {
+                index1.close();
+            }
+            if (index2 != null) {
+                index2.close();
+            }
+            if (store1 != null) {
+                store1.close();
+            }
+            if (store2 != null) {
+                store2.close();
+            }
+            if (catalog != null) {
+                catalog.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            env = null;
+            testEnv = null;
+            catalog = null;
+            store1 = null;
+            store2 = null;
+            index1 = null;
+            index2 = null;
+            factory = null;
+            storeMap1 = null;
+            storeMap2 = null;
+            indexMap1 = null;
+            indexMap2 = null;
+        }
+    }
+
+    public void runTest()
+        throws Exception {
+
+        try {
+            createViews();
+            writeAndRead();
+        } catch (Exception e) {
+            throw ExceptionUnwrapper.unwrap(e);
+        }
+    }
+
+    private void createDatabase()
+        throws Exception {
+
+        catalog = new StoredClassCatalog(openDb("catalog.db"));
+        factory = new TupleSerialFactory(catalog);
+        assertSame(catalog, factory.getCatalog());
+
+        store1 = openDb("store1.db");
+        store2 = openDb("store2.db");
+        index1 = openSecondaryDb(factory, "1", store1, "index1.db", null);
+        index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1);
+    }
+
+    private Database openDb(String file)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        DbCompat.setTypeBtree(config);
+        config.setTransactional(testEnv.isTxnMode());
+        config.setAllowCreate(true);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory,
+                                              String keyName,
+                                              Database primary,
+                                              String file,
+                                              Database foreignStore)
+        throws Exception {
+
+        TupleSerialMarshalledKeyCreator keyCreator =
+                factory.getKeyCreator(MarshalledObject.class, keyName);
+
+        SecondaryConfig secConfig = new SecondaryConfig();
+        DbCompat.setTypeBtree(secConfig);
+        secConfig.setTransactional(testEnv.isTxnMode());
+        secConfig.setAllowCreate(true);
+        secConfig.setKeyCreator(keyCreator);
+        if (foreignStore != null) {
+            secConfig.setForeignKeyDatabase(foreignStore);
+            secConfig.setForeignKeyDeleteAction(onDelete);
+            if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+                secConfig.setForeignKeyNullifier(keyCreator);
+            }
+        }
+
+        return DbCompat.testOpenSecondaryDatabase
+            (env, null, file, null, primary, secConfig);
+    }
+
+    private void createViews()
+        throws Exception {
+
+        storeMap1 = factory.newMap(store1, String.class,
+                                   MarshalledObject.class, true);
+        storeMap2 = factory.newMap(store2, String.class,
+                                   MarshalledObject.class, true);
+        indexMap1 = factory.newMap(index1, String.class,
+                                   MarshalledObject.class, true);
+        indexMap2 = factory.newMap(index2, String.class,
+                                   MarshalledObject.class, true);
+    }
+
+    private void writeAndRead()
+        throws Exception {
+
+        CurrentTransaction txn = CurrentTransaction.getInstance(env);
+        if (txn != null) {
+            txn.beginTransaction(null);
+        }
+
+        MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", "");
+        assertNull(storeMap1.put(null, o1));
+
+        assertEquals(o1, storeMap1.get("pk1"));
+        assertEquals(o1, indexMap1.get("ik1"));
+
+        MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1");
+        assertNull(storeMap2.put(null, o2));
+
+        assertEquals(o2, storeMap2.get("pk2"));
+        assertEquals(o2, indexMap2.get("pk1"));
+
+        if (txn != null) {
+            txn.commitTransaction();
+            txn.beginTransaction(null);
+        }
+
+        /*
+         * store1 contains o1 with primary key "pk1" and index key "ik1".
+         *
+         * store2 contains o2 with primary key "pk2" and foreign key "pk1",
+         * which is the primary key of store1.
+         */
+
+        if (onDelete == ForeignKeyDeleteAction.ABORT) {
+
+            /* Test that we abort trying to delete a referenced key. */
+
+            try {
+                storeMap1.remove("pk1");
+                fail();
+            } catch (RuntimeExceptionWrapper expected) {
+                assertTrue(expected.getCause() instanceof DatabaseException);
+                if (txn != null) {
+                    txn.abortTransaction();
+                    txn.beginTransaction(null);
+                }
+            }
+
+            /* Test that we can put a record into store2 with a null foreign
+             * key value. */
+
+            o2 = new MarshalledObject("data2", "pk2", "", "");
+            assertNotNull(storeMap2.put(null, o2));
+            assertEquals(o2, storeMap2.get("pk2"));
+
+            /* The index2 record should have been deleted since the key was set
+             * to null above. */
+
+            assertNull(indexMap2.get("pk1"));
+
+            /* Test that now we can delete the record in store1, since it is no
+             * longer referenced. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+        } else if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+
+            /* Delete the referenced key. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+            /* The store2 record should still exist, but should have an empty
+             * secondary key since it was nullified. */
+
+            o2 = (MarshalledObject) storeMap2.get("pk2");
+            assertNotNull(o2);
+            assertEquals("data2", o2.getData());
+            assertEquals("pk2", o2.getPrimaryKey());
+            assertEquals("", o2.getIndexKey1());
+            assertEquals("", o2.getIndexKey2());
+
+        } else if (onDelete == ForeignKeyDeleteAction.CASCADE) {
+
+            /* Delete the referenced key. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+            /* The store2 record should have been deleted also. */
+
+            assertNull(storeMap2.get("pk2"));
+            assertNull(indexMap2.get("pk1"));
+
+        } else {
+            throw new IllegalStateException();
+        }
+
+        /*
+         * Test that a foreign key value that is not present in the foreign
+         * store may not be used. "pk2" is not in store1 in this case.
+         */
+        assertNull(storeMap1.get("pk2"));
+        MarshalledObject o3 = new MarshalledObject("data3", "pk3", "", "pk2");
+        try {
+            storeMap2.put(null, o3);
+            fail();
+        } catch (RuntimeExceptionWrapper expected) {
+            assertTrue(expected.getCause() instanceof DatabaseException);
+        }
+
+        if (txn != null) {
+            txn.commitTransaction();
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/IterDeadlockTest.java b/test/com/sleepycat/collections/test/IterDeadlockTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1d4c54c8b377ee1a4b8c97548262e491da788c51
--- /dev/null
+++ b/test/com/sleepycat/collections/test/IterDeadlockTest.java
@@ -0,0 +1,230 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IterDeadlockTest.java,v 1.10.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Iterator;
+import java.util.ListIterator;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Tests the fix for [#10516], where the StoredIterator constructor was not
+ * closing the cursor when an exception occurred. For example, a deadlock
+ * exception might occur if the constructor was unable to move the cursor to
+ * the first element.
+ * @author Mark Hayes
+ */
+public class IterDeadlockTest extends TestCase {
+
+    private static final byte[] ONE = { 1 };
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(IterDeadlockTest.class);
+        return suite;
+    }
+
+    private Environment env;
+    private Database store1;
+    private Database store2;
+    private StoredSortedMap map1;
+    private StoredSortedMap map2;
+    private ByteArrayBinding binding = new ByteArrayBinding();
+
+    public IterDeadlockTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        env = TestEnv.TXN.open("IterDeadlockTest");
+        store1 = openDb("store1.db");
+        store2 = openDb("store2.db");
+        map1 = new StoredSortedMap(store1, binding, binding, true);
+        map2 = new StoredSortedMap(store2, binding, binding, true);
+    }
+
+    public void tearDown() {
+
+        if (store1 != null) {
+            try {
+                store1.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        if (store2 != null) {
+            try {
+                store2.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        /* Allow GC of DB objects in the test case. */
+        env = null;
+        store1 = null;
+        store2 = null;
+        map1 = null;
+        map2 = null;
+    }
+
+    private Database openDb(String file)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        DbCompat.setTypeBtree(config);
+        config.setTransactional(true);
+        config.setAllowCreate(true);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    public void testIterDeadlock()
+        throws Exception {
+
+        final Object parent = new Object();
+        final Object child1 = new Object();
+        final Object child2 = new Object();
+        final TransactionRunner runner = new TransactionRunner(env);
+        runner.setMaxRetries(0);
+
+        /* Write a record in each db. */
+        runner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertNull(map1.put(ONE, ONE));
+                assertNull(map2.put(ONE, ONE));
+            }
+        });
+
+        /*
+         * A thread to open iterator 1, then wait to be notified, then open
+         * iterator 2.
+         */
+        final Thread thread1 = new Thread(new Runnable() {
+            public void run() {
+                try {
+                    runner.run(new TransactionWorker() {
+                        public void doWork() throws Exception {
+                            synchronized (child1) {
+                                ListIterator i1 =
+                                    (ListIterator) map1.values().iterator();
+                                i1.next();
+                                i1.set(ONE); /* Write lock. */
+                                StoredIterator.close(i1);
+                                synchronized (parent) { parent.notify(); }
+                                child1.wait();
+                                Iterator i2 = map2.values().iterator();
+                                assertTrue(i2.hasNext());
+                                StoredIterator.close(i2);
+                            }
+                        }
+                    });
+                } catch (DeadlockException expected) {
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    fail(e.toString());
+                }
+            }
+        });
+
+        /*
+         * A thread to open iterator 2, then wait to be notified, then open
+         * iterator 1.
+         */
+        final Thread thread2 = new Thread(new Runnable() {
+            public void run() {
+                try {
+                    runner.run(new TransactionWorker() {
+                        public void doWork() throws Exception {
+                            synchronized (child2) {
+                                ListIterator i2 =
+                                    (ListIterator) map2.values().iterator();
+                                i2.next();
+                                i2.set(ONE); /* Write lock. */
+                                StoredIterator.close(i2);
+                                synchronized (parent) { parent.notify(); }
+                                child2.wait();
+                                Iterator i1 = map1.values().iterator();
+                                assertTrue(i1.hasNext());
+                                StoredIterator.close(i1);
+                            }
+                        }
+                    });
+                } catch (DeadlockException expected) {
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    fail(e.toString());
+                }
+            }
+        });
+
+        /*
+         * Open iterator 1 in thread 1, then iterator 2 in thread 2, then let
+         * the threads run to open the other iterators and cause a deadlock.
+         */
+        synchronized (parent) {
+            thread1.start();
+            parent.wait();
+            thread2.start();
+            parent.wait();
+            synchronized (child1) { child1.notify(); }
+            synchronized (child2) { child2.notify(); }
+            thread1.join();
+            thread2.join();
+        }
+
+        /*
+         * Before the fix for [#10516] we would get an exception indicating
+         * that cursors were not closed, when closing the stores below.
+         */
+        store1.close();
+        store1 = null;
+        store2.close();
+        store2 = null;
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/collections/test/JoinTest.java b/test/com/sleepycat/collections/test/JoinTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c324d6945e88541dff6b5d24937307f85fa713b3
--- /dev/null
+++ b/test/com/sleepycat/collections/test/JoinTest.java
@@ -0,0 +1,233 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: JoinTest.java,v 1.33 2008/02/05 23:28:25 mark Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.StoredCollection;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class JoinTest extends TestCase
+    implements TransactionWorker {
+
+    private static final String MATCH_DATA = "d4"; // matches both keys = "yes"
+    private static final String MATCH_KEY  = "k4"; // matches both keys = "yes"
+    private static final String[] VALUES = {"yes", "yes"};
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        return new JoinTest();
+    }
+
+    private Environment env;
+    private TransactionRunner runner;
+    private StoredClassCatalog catalog;
+    private TupleSerialFactory factory;
+    private Database store;
+    private SecondaryDatabase index1;
+    private SecondaryDatabase index2;
+    private StoredMap storeMap;
+    private StoredMap indexMap1;
+    private StoredMap indexMap2;
+
+    public JoinTest() {
+
+        super("JoinTest");
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = TestEnv.TXN.open(getName());
+        runner = new TransactionRunner(env);
+        createDatabase();
+    }
+
+    public void tearDown() {
+
+        try {
+            if (index1 != null) {
+                index1.close();
+            }
+            if (index2 != null) {
+                index2.close();
+            }
+            if (store != null) {
+                store.close();
+            }
+            if (catalog != null) {
+                catalog.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            index1 = null;
+            index2 = null;
+            store = null;
+            catalog = null;
+            env = null;
+            runner = null;
+            factory = null;
+            storeMap = null;
+            indexMap1 = null;
+            indexMap2 = null;
+        }
+    }
+
+    public void runTest()
+        throws Exception {
+
+        runner.run(this);
+    }
+
+    public void doWork()
+        throws Exception {
+
+        createViews();
+        writeAndRead();
+    }
+
+    private void createDatabase()
+        throws Exception {
+
+        catalog = new StoredClassCatalog(openDb("catalog.db"));
+        factory = new TupleSerialFactory(catalog);
+        assertSame(catalog, factory.getCatalog());
+
+        store = openDb("store.db");
+        index1 = openSecondaryDb(store, "index1.db", "1");
+        index2 = openSecondaryDb(store, "index2.db", "2");
+    }
+
+    private Database openDb(String file)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        DbCompat.setTypeBtree(config);
+        config.setTransactional(true);
+        config.setAllowCreate(true);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    private SecondaryDatabase openSecondaryDb(Database primary,
+                                              String file,
+                                              String keyName)
+        throws Exception {
+
+        SecondaryConfig secConfig = new SecondaryConfig();
+        DbCompat.setTypeBtree(secConfig);
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        DbCompat.setSortedDuplicates(secConfig, true);
+        secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class,
+                                                      keyName));
+
+        return DbCompat.testOpenSecondaryDatabase
+            (env, null, file, null, primary, secConfig);
+    }
+
+    private void createViews()
+        throws Exception {
+
+        storeMap = factory.newMap(store, String.class,
+                                         MarshalledObject.class, true);
+        indexMap1 = factory.newMap(index1, String.class,
+                                           MarshalledObject.class, true);
+        indexMap2 = factory.newMap(index2, String.class,
+                                           MarshalledObject.class, true);
+    }
+
+    private void writeAndRead()
+        throws Exception {
+
+        // write records: Data, PrimaryKey, IndexKey1, IndexKey2
+        assertNull(storeMap.put(null,
+            new MarshalledObject("d1", "k1", "no",  "yes")));
+        assertNull(storeMap.put(null,
+            new MarshalledObject("d2", "k2", "no",  "no")));
+        assertNull(storeMap.put(null,
+            new MarshalledObject("d3", "k3", "no",  "yes")));
+        assertNull(storeMap.put(null,
+            new MarshalledObject("d4", "k4", "yes", "yes")));
+        assertNull(storeMap.put(null,
+            new MarshalledObject("d5", "k5", "yes", "no")));
+
+        Object o;
+        Map.Entry e;
+
+        // join values with index maps
+        o = doJoin((StoredCollection) storeMap.values());
+        assertEquals(MATCH_DATA, ((MarshalledObject) o).getData());
+
+        // join keySet with index maps
+        o = doJoin((StoredCollection) storeMap.keySet());
+        assertEquals(MATCH_KEY, o);
+
+        // join entrySet with index maps
+        o = doJoin((StoredCollection) storeMap.entrySet());
+        e = (Map.Entry) o;
+        assertEquals(MATCH_KEY, e.getKey());
+        assertEquals(MATCH_DATA, ((MarshalledObject) e.getValue()).getData());
+    }
+
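+    /* Joins the given collection with both index maps on VALUES and
+     * verifies that exactly one element matches. */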
+    private Object doJoin(StoredCollection coll) {
+
+        StoredContainer[] indices = { indexMap1, indexMap2 };
+        StoredIterator i = coll.join(indices, VALUES, null);
+        try {
+            assertTrue(i.hasNext());
+            Object result = i.next();
+            assertNotNull(result);
+            assertFalse(i.hasNext());
+            return result;
+        } finally { i.close(); }
+    }
+}
+
diff --git a/test/com/sleepycat/collections/test/NullTransactionRunner.java b/test/com/sleepycat/collections/test/NullTransactionRunner.java
new file mode 100644
index 0000000000000000000000000000000000000000..e915349bec8e8671677eff3e0d55727325031b74
--- /dev/null
+++ b/test/com/sleepycat/collections/test/NullTransactionRunner.java
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NullTransactionRunner.java,v 1.18.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.ExceptionUnwrapper;
+
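+/**
+ * A TransactionRunner that runs the worker directly, without beginning a
+ * transaction, and unwraps any exception thrown.
+ */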
+class NullTransactionRunner extends TransactionRunner {
+
+    NullTransactionRunner(Environment env) {
+
+        super(env);
+    }
+
+    public void run(TransactionWorker worker)
+        throws Exception {
+
+        try {
+            worker.doWork();
+        } catch (Exception e) {
+            throw ExceptionUnwrapper.unwrap(e);
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java b/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..66e4a3ed5a1b125f526db873de50a269bce2bae8
--- /dev/null
+++ b/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java
@@ -0,0 +1,208 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryDeadlockTest.java,v 1.13.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Tests whether secondary access can cause a self-deadlock when reading via a
+ * secondary because the collections API secondary implementation in DB 4.2
+ * opens two cursors.  Part of the problem in [#10516] was that the
+ * secondary get() was not done in a txn.  This problem should not occur in DB
+ * 4.3 and JE -- an ordinary deadlock occurs instead and is detected.
+ *
+ * @author Mark Hayes
+ */
+public class SecondaryDeadlockTest extends TestCase {
+
+    private static final Long N_ONE = new Long(1);
+    private static final Long N_101 = new Long(101);
+    private static final int N_ITERS = 20;
+    private static final int MAX_RETRIES = 1000;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(SecondaryDeadlockTest.class);
+        return suite;
+    }
+
+    private Environment env;
+    private Database store;
+    private Database index;
+    private StoredSortedMap storeMap;
+    private StoredSortedMap indexMap;
+    private Exception exception;
+
+    public SecondaryDeadlockTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        env = TestEnv.TXN.open("SecondaryDeadlockTest");
+        store = TestStore.BTREE_UNIQ.open(env, "store.db");
+        index = TestStore.BTREE_UNIQ.openIndex(store, "index.db");
+        storeMap = new StoredSortedMap(store,
+                                       TestStore.BTREE_UNIQ.getKeyBinding(),
+                                       TestStore.BTREE_UNIQ.getValueBinding(),
+                                       true);
+        indexMap = new StoredSortedMap(index,
+                                       TestStore.BTREE_UNIQ.getKeyBinding(),
+                                       TestStore.BTREE_UNIQ.getValueBinding(),
+                                       true);
+    }
+
+    public void tearDown() {
+
+        if (index != null) {
+            try {
+                index.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        if (store != null) {
+            try {
+                store.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("Ignored exception during tearDown: " + e);
+            }
+        }
+        /* Allow GC of DB objects in the test case. */
+        env = null;
+        store = null;
+        index = null;
+        storeMap = null;
+        indexMap = null;
+    }
+
+    public void testSecondaryDeadlock()
+        throws Exception {
+
+        final TransactionRunner runner = new TransactionRunner(env);
+        runner.setMaxRetries(MAX_RETRIES);
+
+        /*
+         * This test deadlocks a lot at degree 3 serialization.  In debugging
+         * this I discovered it was not due to phantom prevention per se but
+         * just to a change in timing.
+         */
+        TransactionConfig txnConfig = new TransactionConfig();
+        runner.setTransactionConfig(txnConfig);
+
+        /*
+         * A thread to do put() and delete() via the primary, which will lock
+         * the primary first then the secondary.  Uses transactions.
+         */
+        final Thread thread1 = new Thread(new Runnable() {
+            public void run() {
+                try {
+                    /* The TransactionRunner performs retries. */
+                    for (int i = 0; i < N_ITERS; i += 1) {
+                        runner.run(new TransactionWorker() {
+                            public void doWork() throws Exception {
+                                assertEquals(null, storeMap.put(N_ONE, N_101));
+                            }
+                        });
+                        runner.run(new TransactionWorker() {
+                            public void doWork() throws Exception {
+                                assertEquals(N_101, storeMap.remove(N_ONE));
+                            }
+                        });
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    exception = e;
+                }
+            }
+        }, "ThreadOne");
+
+        /*
+         * A thread to get() via the secondary, which will lock the secondary
+         * first then the primary.  Does not use a transaction.
+         */
+        final Thread thread2 = new Thread(new Runnable() {
+            public void run() {
+                try {
+                    for (int i = 0; i < N_ITERS; i += 1) {
+                        for (int j = 0; j < MAX_RETRIES; j += 1) {
+                            try {
+                                Object value = indexMap.get(N_ONE);
+                                assertTrue(value == null ||
+                                           N_101.equals(value));
+                                break;
+                            } catch (Exception e) {
+                                e = ExceptionUnwrapper.unwrap(e);
+                                if (e instanceof DeadlockException) {
+                                    continue; /* Retry on deadlock. */
+                                } else {
+                                    throw e;
+                                }
+                            }
+                        }
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    exception = e;
+                }
+            }
+        }, "ThreadTwo");
+
+        thread1.start();
+        thread2.start();
+        thread1.join();
+        thread2.join();
+
+        index.close();
+        index = null;
+        store.close();
+        store = null;
+        env.close();
+        env = null;
+
+        if (exception != null) {
+            fail(exception.toString());
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestDataBinding.java b/test/com/sleepycat/collections/test/TestDataBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..432cd7b2197981fb7b5513583880677763a482c7
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestDataBinding.java
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestDataBinding.java,v 1.26.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
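+ * A trivial binding between a Long object and a single-byte entry.
+ *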
+ * @author Mark Hayes
+ */
+class TestDataBinding implements EntryBinding {
+
+    public Object entryToObject(DatabaseEntry data) {
+
+        if (data.getSize() != 1) {
+            throw new IllegalStateException("size=" + data.getSize());
+        }
+        byte val = data.getData()[data.getOffset()];
+        return new Long(val);
+    }
+
+    public void objectToEntry(Object object, DatabaseEntry data) {
+
+        byte val = ((Number) object).byteValue();
+        data.setData(new byte[] { val }, 0, 1);
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestEntity.java b/test/com/sleepycat/collections/test/TestEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..96c34b239f6d4576a371e56f6cbf411d1319f004
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestEntity.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestEntity.java,v 1.15.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+/**
+ * @author Mark Hayes
+ */
+class TestEntity {
+
+    int key;
+    int value;
+
+    TestEntity(int key, int value) {
+
+        this.key = key;
+        this.value = value;
+    }
+
+    public boolean equals(Object o) {
+
+        try {
+            TestEntity e = (TestEntity) o;
+            return e.key == key && e.value == value;
+        } catch (ClassCastException e) {
+            return false;
+        }
+    }
+
+    public int hashCode() {
+
+        return key;
+    }
+
+    public String toString() {
+
+        return "[key " + key + " value " + value + ']';
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestEntityBinding.java b/test/com/sleepycat/collections/test/TestEntityBinding.java
new file mode 100644
index 0000000000000000000000000000000000000000..ebe31fd8eb0b967950c86e92c8051a183935be2a
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestEntityBinding.java
@@ -0,0 +1,63 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestEntityBinding.java,v 1.25.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
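+ * Binds a TestEntity to a single-byte value entry and a key entry that is
+ * either a record number or a single byte.
+ *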
+ * @author Mark Hayes
+ */
+class TestEntityBinding implements EntityBinding {
+
+    private boolean isRecNum;
+
+    TestEntityBinding(boolean isRecNum) {
+
+        this.isRecNum = isRecNum;
+    }
+
+    public Object entryToObject(DatabaseEntry key, DatabaseEntry value) {
+
+        byte keyByte;
+        if (isRecNum) {
+            if (key.getSize() != 4) {
+                throw new IllegalStateException();
+            }
+            keyByte = (byte) RecordNumberBinding.entryToRecordNumber(key);
+        } else {
+            if (key.getSize() != 1) {
+                throw new IllegalStateException();
+            }
+            keyByte = key.getData()[key.getOffset()];
+        }
+        if (value.getSize() != 1) {
+            throw new IllegalStateException();
+        }
+        byte valByte = value.getData()[value.getOffset()];
+        return new TestEntity(keyByte, valByte);
+    }
+
+    public void objectToKey(Object object, DatabaseEntry key) {
+
+        byte val = (byte) ((TestEntity) object).key;
+        if (isRecNum) {
+            RecordNumberBinding.recordNumberToEntry(val, key);
+        } else {
+            key.setData(new byte[] { val }, 0, 1);
+        }
+    }
+
+    public void objectToData(Object object, DatabaseEntry value) {
+
+        byte val = (byte) ((TestEntity) object).value;
+        value.setData(new byte[] { val }, 0, 1);
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestKeyAssigner.java b/test/com/sleepycat/collections/test/TestKeyAssigner.java
new file mode 100644
index 0000000000000000000000000000000000000000..1dcfeec0b54f7aae88fbf321784306132c6eeed5
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestKeyAssigner.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestKeyAssigner.java,v 1.22.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.collections.PrimaryKeyAssigner;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+
+/**
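+ * Assigns sequential primary keys starting at 1, as record numbers or
+ * single bytes.
+ *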
+ * @author Mark Hayes
+ */
+class TestKeyAssigner implements PrimaryKeyAssigner {
+
+    private byte next = 1;
+    private boolean isRecNum;
+
+    TestKeyAssigner(boolean isRecNum) {
+
+        this.isRecNum = isRecNum;
+    }
+
+    public void assignKey(DatabaseEntry keyData)
+        throws DatabaseException {
+
+        if (isRecNum) {
+            RecordNumberBinding.recordNumberToEntry(next, keyData);
+        } else {
+            keyData.setData(new byte[] { next }, 0, 1);
+        }
+        next += 1;
+    }
+
+    void reset() {
+
+        next = 1;
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestKeyCreator.java b/test/com/sleepycat/collections/test/TestKeyCreator.java
new file mode 100644
index 0000000000000000000000000000000000000000..7f15a043fdd7a8597fe3229e9b928f52b0ca68d6
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestKeyCreator.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestKeyCreator.java,v 1.26.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * Unused until secondaries are available.
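+ * Derives the index key by subtracting 100 from the single-byte value;
+ * empty and zero-pad values yield no index key.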
+ * @author Mark Hayes
+ */
+class TestKeyCreator implements SecondaryKeyCreator {
+
+    private boolean isRecNum;
+
+    TestKeyCreator(boolean isRecNum) {
+
+        this.isRecNum = isRecNum;
+    }
+
+    public boolean createSecondaryKey(SecondaryDatabase db,
+                                      DatabaseEntry primaryKeyData,
+                                      DatabaseEntry valueData,
+                                      DatabaseEntry indexKeyData)
+        throws DatabaseException {
+
+        if (valueData.getSize() == 0) {
+            return false;
+        }
+        if (valueData.getSize() != 1) {
+            throw new IllegalStateException();
+        }
+        byte val = valueData.getData()[valueData.getOffset()];
+        if (val == 0) {
+            return false; // fixed-len pad value
+        }
+        val -= 100;
+        if (isRecNum) {
+            RecordNumberBinding.recordNumberToEntry(val, indexKeyData);
+        } else {
+            indexKeyData.setData(new byte[] { val }, 0, 1);
+        }
+        return true;
+    }
+
+    public void clearIndexKey(DatabaseEntry valueData) {
+
+        throw new RuntimeException("not supported");
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestSR15721.java b/test/com/sleepycat/collections/test/TestSR15721.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2ee4057260c3df4dfbae7b2e43691c40890b85e
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestSR15721.java
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestSR15721.java,v 1.8.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Chao Huang
+ */
+public class TestSR15721 extends TestCase {
+
+    /**
+     * Runs a command line collection test.
+     * @see #usage
+     */
+    public static void main(String[] args)
+        throws Exception {
+
+        if (args.length == 1 &&
+            (args[0].equals("-h") || args[0].equals("-help"))) {
+            usage();
+        } else {
+            junit.framework.TestResult tr =
+                junit.textui.TestRunner.run(suite());
+            if (tr.errorCount() > 0 ||
+                tr.failureCount() > 0) {
+                System.exit(1);
+            } else {
+                System.exit(0);
+            }
+        }
+    }
+
+    private static void usage() {
+
+        System.out.println(
+              "Usage: java com.sleepycat.collections.test.TestSR15721"
+            + " [-h | -help]\n");
+        System.exit(2);
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(TestSR15721.class);
+        return suite;
+    }
+
+    private Environment env;
+    private CurrentTransaction currentTxn;
+
+    public void setUp()
+        throws Exception {
+
+        env = TestEnv.TXN.open("TestSR15721");
+        currentTxn = CurrentTransaction.getInstance(env);
+    }
+
+    public void tearDown() {
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            env = null;
+            currentTxn = null;
+        }
+    }
+
+    /**
+     * Tests that the CurrentTransaction instance is not reclaimed by GC
+     * while the environment it is attached to remains open. [#15721]
+     */
+    public void testSR15721Fix()
+        throws Exception {
+
+        int hash = currentTxn.hashCode();
+        int hash2 = -1;
+
+        currentTxn = CurrentTransaction.getInstance(env);
+        hash2 = currentTxn.hashCode();
+        assertTrue(hash == hash2);
+
+        currentTxn.beginTransaction(null);
+        currentTxn = null;
+        hash2 = -1;
+
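+        /* Pressure the heap with allocations that fail with OutOfMemoryError
+         * and request GC, so that an incorrectly referenced
+         * CurrentTransaction instance would be reclaimed. */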
+        for (int i = 0; i < 10; i += 1) {
+            byte[] x = null;
+            try {
+                x = new byte[Integer.MAX_VALUE - 1];
+                fail();
+            } catch (OutOfMemoryError expected) {
+            }
+            assertNull(x);
+
+            System.gc();
+        }
+
+        currentTxn = CurrentTransaction.getInstance(env);
+        hash2 = currentTxn.hashCode();
+        currentTxn.commitTransaction();
+
+        assertTrue(hash == hash2);
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestStore.java b/test/com/sleepycat/collections/test/TestStore.java
new file mode 100644
index 0000000000000000000000000000000000000000..aea64c19604ce9d93b3f4b5086997ea68f3d58ae
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestStore.java
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestStore.java,v 1.44.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.SecondaryConfig;
+
+/**
+ * @author Mark Hayes
+ */
+class TestStore {
+
+    static final TestKeyCreator BYTE_EXTRACTOR = new TestKeyCreator(false);
+    static final TestKeyCreator RECNO_EXTRACTOR = new TestKeyCreator(true);
+    static final EntryBinding VALUE_BINDING = new TestDataBinding();
+    static final EntryBinding BYTE_KEY_BINDING = VALUE_BINDING;
+    static final EntryBinding RECNO_KEY_BINDING = new RecordNumberBinding();
+    static final EntityBinding BYTE_ENTITY_BINDING =
+            new TestEntityBinding(false);
+    static final EntityBinding RECNO_ENTITY_BINDING =
+            new TestEntityBinding(true);
+    static final TestKeyAssigner BYTE_KEY_ASSIGNER =
+            new TestKeyAssigner(false);
+    static final TestKeyAssigner RECNO_KEY_ASSIGNER =
+            new TestKeyAssigner(true);
+
+    static final TestStore BTREE_UNIQ;
+    static final TestStore BTREE_DUP;
+    static final TestStore BTREE_DUPSORT;
+    static final TestStore BTREE_RECNUM;
+    static final TestStore HASH_UNIQ;
+    static final TestStore HASH_DUP;
+    static final TestStore HASH_DUPSORT;
+    static final TestStore QUEUE;
+    static final TestStore RECNO;
+    static final TestStore RECNO_RENUM;
+
+    static final TestStore[] ALL;
+    static {
+        List list = new ArrayList();
+        SecondaryConfig config;
+
+        config = new SecondaryConfig();
+        DbCompat.setTypeBtree(config);
+        BTREE_UNIQ = new TestStore("btree-uniq", config);
+        BTREE_UNIQ.indexOf = BTREE_UNIQ;
+        list.add(BTREE_UNIQ);
+
+        if (DbCompat.INSERTION_ORDERED_DUPLICATES) {
+            config = new SecondaryConfig();
+            DbCompat.setTypeBtree(config);
+            DbCompat.setUnsortedDuplicates(config, true);
+            BTREE_DUP = new TestStore("btree-dup", config);
+            BTREE_DUP.indexOf = null; // indexes must use sorted dups
+            list.add(BTREE_DUP);
+        } else {
+            BTREE_DUP = null;
+        }
+
+        config = new SecondaryConfig();
+        DbCompat.setTypeBtree(config);
+        DbCompat.setSortedDuplicates(config, true);
+        BTREE_DUPSORT = new TestStore("btree-dupsort", config);
+        BTREE_DUPSORT.indexOf = BTREE_UNIQ;
+        list.add(BTREE_DUPSORT);
+
+        if (DbCompat.BTREE_RECNUM_METHOD) {
+            config = new SecondaryConfig();
+            DbCompat.setTypeBtree(config);
+            DbCompat.setBtreeRecordNumbers(config, true);
+            BTREE_RECNUM = new TestStore("btree-recnum", config);
+            BTREE_RECNUM.indexOf = BTREE_RECNUM;
+            list.add(BTREE_RECNUM);
+        } else {
+            BTREE_RECNUM = null;
+        }
+
+        if (DbCompat.HASH_METHOD) {
+            config = new SecondaryConfig();
+            DbCompat.setTypeHash(config);
+            HASH_UNIQ = new TestStore("hash-uniq", config);
+            HASH_UNIQ.indexOf = HASH_UNIQ;
+            list.add(HASH_UNIQ);
+
+            if (DbCompat.INSERTION_ORDERED_DUPLICATES) {
+                config = new SecondaryConfig();
+                DbCompat.setTypeHash(config);
+                DbCompat.setUnsortedDuplicates(config, true);
+                HASH_DUP = new TestStore("hash-dup", config);
+                HASH_DUP.indexOf = null; // indexes must use sorted dups
+                list.add(HASH_DUP);
+            } else {
+                HASH_DUP = null;
+            }
+
+            config = new SecondaryConfig();
+            DbCompat.setTypeHash(config);
+            DbCompat.setSortedDuplicates(config, true);
+            HASH_DUPSORT = new TestStore("hash-dupsort", config);
+            HASH_DUPSORT.indexOf = HASH_UNIQ;
+            list.add(HASH_DUPSORT);
+        } else {
+            HASH_UNIQ = null;
+            HASH_DUP = null;
+            HASH_DUPSORT = null;
+        }
+
+        if (DbCompat.QUEUE_METHOD) {
+            config = new SecondaryConfig();
+            DbCompat.setTypeQueue(config);
+            QUEUE = new TestStore("queue", config);
+            QUEUE.indexOf = QUEUE;
+            list.add(QUEUE);
+        } else {
+            QUEUE = null;
+        }
+
+        if (DbCompat.RECNO_METHOD) {
+            config = new SecondaryConfig();
+            DbCompat.setTypeRecno(config);
+            RECNO = new TestStore("recno", config);
+            RECNO.indexOf = RECNO;
+            list.add(RECNO);
+
+            config = new SecondaryConfig();
+            DbCompat.setTypeRecno(config);
+            DbCompat.setRenumbering(config, true);
+            RECNO_RENUM = new TestStore("recno-renum", config);
+            RECNO_RENUM.indexOf = null; // indexes must have stable keys
+            list.add(RECNO_RENUM);
+        } else {
+            RECNO = null;
+            RECNO_RENUM = null;
+        }
+
+        ALL = new TestStore[list.size()];
+        list.toArray(ALL);
+    }
+
+    private String name;
+    private SecondaryConfig config;
+    private TestStore indexOf;
+    private boolean isRecNumFormat;
+
+    private TestStore(String name, SecondaryConfig config) {
+
+        this.name = name;
+        this.config = config;
+
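+        /*
+         * Record-number keys apply to queue/recno databases and to btree
+         * databases opened with record-number access.
+         */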
+        isRecNumFormat = isQueueOrRecno() ||
+                            (DbCompat.isTypeBtree(config) &&
+                             DbCompat.getBtreeRecordNumbers(config));
+    }
+
+    EntryBinding getValueBinding() {
+
+        return VALUE_BINDING;
+    }
+
+    EntryBinding getKeyBinding() {
+
+        return isRecNumFormat ? RECNO_KEY_BINDING : BYTE_KEY_BINDING;
+    }
+
+    EntityBinding getEntityBinding() {
+
+        return isRecNumFormat ? RECNO_ENTITY_BINDING : BYTE_ENTITY_BINDING;
+    }
+
+    TestKeyAssigner getKeyAssigner() {
+
+        if (isQueueOrRecno()) {
+            return null;
+        } else {
+            if (isRecNumFormat) {
+                return RECNO_KEY_ASSIGNER;
+            } else {
+                return BYTE_KEY_ASSIGNER;
+            }
+        }
+    }
+
+    String getName() {
+
+        return name;
+    }
+
+    boolean isOrdered() {
+
+        return !DbCompat.isTypeHash(config);
+    }
+
+    boolean isQueueOrRecno() {
+
+        return DbCompat.isTypeQueue(config) || DbCompat.isTypeRecno(config);
+    }
+
+    boolean areKeyRangesAllowed() {
+        return isOrdered() && !isQueueOrRecno();
+    }
+
+    boolean areDuplicatesAllowed() {
+
+        return DbCompat.getSortedDuplicates(config) ||
+               DbCompat.getUnsortedDuplicates(config);
+    }
+
+    boolean hasRecNumAccess() {
+
+        return isRecNumFormat;
+    }
+
+    boolean areKeysRenumbered() {
+
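+        /*
+         * Btree record numbers shift as records are inserted and deleted,
+         * while recno databases renumber only when the renumbering flag is
+         * set.
+         */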
+        return hasRecNumAccess() &&
+                (DbCompat.isTypeBtree(config) ||
+                 DbCompat.getRenumbering(config));
+    }
+
+    TestStore getIndexOf() {
+
+        return DbCompat.SECONDARIES ? indexOf : null;
+    }
+
+    Database open(Environment env, String fileName)
+        throws IOException, DatabaseException {
+
+        int fixedLen = (isQueueOrRecno() ? 1 : 0);
+        return openDb(env, fileName, fixedLen, null);
+    }
+
+    Database openIndex(Database primary, String fileName)
+        throws IOException, DatabaseException {
+
+        int fixedLen = (isQueueOrRecno() ? 4 : 0);
+        config.setKeyCreator(isRecNumFormat ? RECNO_EXTRACTOR
+                                            : BYTE_EXTRACTOR);
+        Environment env = primary.getEnvironment();
+        return openDb(env, fileName, fixedLen, primary);
+    }
+
+    private Database openDb(Environment env, String fileName, int fixedLen,
+                            Database primary)
+        throws IOException, DatabaseException {
+
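+        /*
+         * Queue databases require fixed-length records, so a record length
+         * and pad byte are set whenever a fixed length is requested.
+         */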
+        if (fixedLen > 0) {
+            DbCompat.setRecordLength(config, fixedLen);
+            DbCompat.setRecordPad(config, 0);
+        } else {
+            DbCompat.setRecordLength(config, 0);
+        }
+        config.setAllowCreate(true);
+        DbCompat.setReadUncommitted(config, true);
+        config.setTransactional(CurrentTransaction.getInstance(env) != null);
+        if (primary != null) {
+            return DbCompat.testOpenSecondaryDatabase
+                (env, null, fileName, null, primary, config);
+        } else {
+            return DbCompat.testOpenDatabase
+                (env, null, fileName, null, config);
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TransactionTest.java b/test/com/sleepycat/collections/test/TransactionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c855290c70bda5af1fce61b3f4692d65c61b15d1
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TransactionTest.java
@@ -0,0 +1,816 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TransactionTest.java,v 1.53.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.SortedSet;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.collections.StoredCollections;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredList;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class TransactionTest extends TestCase {
+
+    private static final Long ONE = new Long(1);
+    private static final Long TWO = new Long(2);
+    private static final Long THREE = new Long(3);
+
+    /**
+     * Runs a command line collection test.
+     * @see #usage
+     */
+    public static void main(String[] args)
+        throws Exception {
+
+        if (args.length == 1 &&
+            (args[0].equals("-h") || args[0].equals("-help"))) {
+            usage();
+        } else {
+            junit.framework.TestResult tr =
+                junit.textui.TestRunner.run(suite());
+            if (tr.errorCount() > 0 ||
+                tr.failureCount() > 0) {
+                System.exit(1);
+            } else {
+                System.exit(0);
+            }
+        }
+    }
+
+    private static void usage() {
+
+        System.out.println(
+              "Usage: java com.sleepycat.collections.test.TransactionTest"
+            + " [-h | -help]\n");
+        System.exit(2);
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(TransactionTest.class);
+        return suite;
+    }
+
+    private Environment env;
+    private CurrentTransaction currentTxn;
+    private Database store;
+    private StoredSortedMap map;
+    private TestStore testStore = TestStore.BTREE_UNIQ;
+
+    public TransactionTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+        env = TestEnv.TXN.open("TransactionTests");
+        currentTxn = CurrentTransaction.getInstance(env);
+        store = testStore.open(env, dbName(0));
+        map = new StoredSortedMap(store, testStore.getKeyBinding(),
+                                  testStore.getValueBinding(), true);
+    }
+
+    public void tearDown() {
+
+        try {
+            if (store != null) {
+                store.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            store = null;
+            env = null;
+            currentTxn = null;
+            map = null;
+            testStore = null;
+        }
+    }
+
+    private String dbName(int i) {
+
+        return "txn-test-" + getName() + '-' + i;
+    }
+
+    public void testGetters()
+        throws Exception {
+
+        assertNotNull(env);
+        assertNotNull(currentTxn);
+        assertNull(currentTxn.getTransaction());
+
+        currentTxn.beginTransaction(null);
+        assertNotNull(currentTxn.getTransaction());
+        currentTxn.commitTransaction();
+        assertNull(currentTxn.getTransaction());
+
+        currentTxn.beginTransaction(null);
+        assertNotNull(currentTxn.getTransaction());
+        currentTxn.abortTransaction();
+        assertNull(currentTxn.getTransaction());
+
+        // read-uncommitted property should be inherited
+
+        assertTrue(!isReadUncommitted(map));
+        assertTrue(!isReadUncommitted(map.values()));
+        assertTrue(!isReadUncommitted(map.keySet()));
+        assertTrue(!isReadUncommitted(map.entrySet()));
+
+        StoredSortedMap other = (StoredSortedMap)
+            StoredCollections.configuredMap
+                (map, CursorConfig.READ_UNCOMMITTED);
+        assertTrue(isReadUncommitted(other));
+        assertTrue(isReadUncommitted(other.values()));
+        assertTrue(isReadUncommitted(other.keySet()));
+        assertTrue(isReadUncommitted(other.entrySet()));
+        assertTrue(!isReadUncommitted(map));
+        assertTrue(!isReadUncommitted(map.values()));
+        assertTrue(!isReadUncommitted(map.keySet()));
+        assertTrue(!isReadUncommitted(map.entrySet()));
+
+        // read-committed property should be inherited
+
+        assertTrue(!isReadCommitted(map));
+        assertTrue(!isReadCommitted(map.values()));
+        assertTrue(!isReadCommitted(map.keySet()));
+        assertTrue(!isReadCommitted(map.entrySet()));
+
+        other = (StoredSortedMap)
+            StoredCollections.configuredMap
+                (map, CursorConfig.READ_COMMITTED);
+        assertTrue(isReadCommitted(other));
+        assertTrue(isReadCommitted(other.values()));
+        assertTrue(isReadCommitted(other.keySet()));
+        assertTrue(isReadCommitted(other.entrySet()));
+        assertTrue(!isReadCommitted(map));
+        assertTrue(!isReadCommitted(map.values()));
+        assertTrue(!isReadCommitted(map.keySet()));
+        assertTrue(!isReadCommitted(map.entrySet()));
+    }
+
+    public void testTransactional()
+        throws Exception {
+
+        // is transactional because DB_AUTO_COMMIT was passed to
+        // Database.open()
+        //
+        assertTrue(map.isTransactional());
+        store.close();
+        store = null;
+
+        // is not transactional
+        //
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        DbCompat.setTypeBtree(dbConfig);
+        dbConfig.setAllowCreate(true);
+        Database db = DbCompat.testOpenDatabase
+            (env, null, dbName(1), null, dbConfig);
+        map = new StoredSortedMap(db, testStore.getKeyBinding(),
+                                  testStore.getValueBinding(), true);
+        assertTrue(!map.isTransactional());
+        map.put(ONE, ONE);
+        readCheck(map, ONE, ONE);
+        db.close();
+
+        // is transactional
+        //
+        dbConfig.setTransactional(true);
+        currentTxn.beginTransaction(null);
+        db = DbCompat.testOpenDatabase
+            (env, currentTxn.getTransaction(), dbName(2), null, dbConfig);
+        currentTxn.commitTransaction();
+        map = new StoredSortedMap(db, testStore.getKeyBinding(),
+                                  testStore.getValueBinding(), true);
+        assertTrue(map.isTransactional());
+        currentTxn.beginTransaction(null);
+        map.put(ONE, ONE);
+        readCheck(map, ONE, ONE);
+        currentTxn.commitTransaction();
+        db.close();
+    }
+
+    public void testExceptions()
+        throws Exception {
+
+        try {
+            currentTxn.commitTransaction();
+            fail();
+        } catch (IllegalStateException expected) {}
+
+        try {
+            currentTxn.abortTransaction();
+            fail();
+        } catch (IllegalStateException expected) {}
+    }
+
+    public void testNested()
+        throws Exception {
+
+        if (!DbCompat.NESTED_TRANSACTIONS) {
+            return;
+        }
+        assertNull(currentTxn.getTransaction());
+
+        Transaction txn1 = currentTxn.beginTransaction(null);
+        assertNotNull(txn1);
+        assertTrue(txn1 == currentTxn.getTransaction());
+
+        assertNull(map.get(ONE));
+        assertNull(map.put(ONE, ONE));
+        assertEquals(ONE, map.get(ONE));
+
+        Transaction txn2 = currentTxn.beginTransaction(null);
+        assertNotNull(txn2);
+        assertTrue(txn2 == currentTxn.getTransaction());
+        assertTrue(txn1 != txn2);
+
+        assertNull(map.put(TWO, TWO));
+        assertEquals(TWO, map.get(TWO));
+
+        Transaction txn3 = currentTxn.beginTransaction(null);
+        assertNotNull(txn3);
+        assertTrue(txn3 == currentTxn.getTransaction());
+        assertTrue(txn1 != txn2);
+        assertTrue(txn1 != txn3);
+        assertTrue(txn2 != txn3);
+
+        assertNull(map.put(THREE, THREE));
+        assertEquals(THREE, map.get(THREE));
+
+        Transaction txn = currentTxn.abortTransaction();
+        assertTrue(txn == txn2);
+        assertTrue(txn == currentTxn.getTransaction());
+        assertNull(map.get(THREE));
+        assertEquals(TWO, map.get(TWO));
+
+        txn3 = currentTxn.beginTransaction(null);
+        assertNotNull(txn3);
+        assertTrue(txn3 == currentTxn.getTransaction());
+        assertTrue(txn1 != txn2);
+        assertTrue(txn1 != txn3);
+        assertTrue(txn2 != txn3);
+
+        assertNull(map.put(THREE, THREE));
+        assertEquals(THREE, map.get(THREE));
+
+        txn = currentTxn.commitTransaction();
+        assertTrue(txn == txn2);
+        assertTrue(txn == currentTxn.getTransaction());
+        assertEquals(THREE, map.get(THREE));
+        assertEquals(TWO, map.get(TWO));
+
+        txn = currentTxn.commitTransaction();
+        assertTrue(txn == txn1);
+        assertTrue(txn == currentTxn.getTransaction());
+        assertEquals(THREE, map.get(THREE));
+        assertEquals(TWO, map.get(TWO));
+        assertEquals(ONE, map.get(ONE));
+
+        txn = currentTxn.commitTransaction();
+        assertNull(txn);
+        assertNull(currentTxn.getTransaction());
+        assertEquals(THREE, map.get(THREE));
+        assertEquals(TWO, map.get(TWO));
+        assertEquals(ONE, map.get(ONE));
+    }
+
+    public void testRunnerCommit()
+        throws Exception {
+
+        commitTest(false);
+    }
+
+    public void testExplicitCommit()
+        throws Exception {
+
+        commitTest(true);
+    }
+
+    private void commitTest(final boolean explicit)
+        throws Exception {
+
+        final TransactionRunner runner = new TransactionRunner(env);
+        runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS);
+
+        assertNull(currentTxn.getTransaction());
+
+        runner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                final Transaction txn1 = currentTxn.getTransaction();
+                assertNotNull(txn1);
+                assertNull(map.put(ONE, ONE));
+                assertEquals(ONE, map.get(ONE));
+
+                runner.run(new TransactionWorker() {
+                    public void doWork() throws Exception {
+                        final Transaction txn2 = currentTxn.getTransaction();
+                        assertNotNull(txn2);
+                        if (DbCompat.NESTED_TRANSACTIONS) {
+                            assertTrue(txn1 != txn2);
+                        } else {
+                            assertTrue(txn1 == txn2);
+                        }
+                        assertNull(map.put(TWO, TWO));
+                        assertEquals(TWO, map.get(TWO));
+                        assertEquals(ONE, map.get(ONE));
+                        if (DbCompat.NESTED_TRANSACTIONS && explicit) {
+                            currentTxn.commitTransaction();
+                        }
+                    }
+                });
+
+                Transaction txn3 = currentTxn.getTransaction();
+                assertSame(txn1, txn3);
+
+                assertEquals(TWO, map.get(TWO));
+                assertEquals(ONE, map.get(ONE));
+            }
+        });
+
+        assertNull(currentTxn.getTransaction());
+    }
+
+    public void testRunnerAbort()
+        throws Exception {
+
+        abortTest(false);
+    }
+
+    public void testExplicitAbort()
+        throws Exception {
+
+        abortTest(true);
+    }
+
+    private void abortTest(final boolean explicit)
+        throws Exception {
+
+        final TransactionRunner runner = new TransactionRunner(env);
+        runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS);
+
+        assertNull(currentTxn.getTransaction());
+
+        runner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                final Transaction txn1 = currentTxn.getTransaction();
+                assertNotNull(txn1);
+                assertNull(map.put(ONE, ONE));
+                assertEquals(ONE, map.get(ONE));
+
+                if (DbCompat.NESTED_TRANSACTIONS) {
+                    try {
+                        runner.run(new TransactionWorker() {
+                            public void doWork() throws Exception {
+                                final Transaction txn2 =
+                                        currentTxn.getTransaction();
+                                assertNotNull(txn2);
+                                assertTrue(txn1 != txn2);
+                                assertNull(map.put(TWO, TWO));
+                                assertEquals(TWO, map.get(TWO));
+                                if (explicit) {
+                                    currentTxn.abortTransaction();
+                                } else {
+                                    throw new IllegalArgumentException(
+                                                                "test-abort");
+                                }
+                            }
+                        });
+                        assertTrue(explicit);
+                    } catch (IllegalArgumentException e) {
+                        assertTrue(!explicit);
+                        assertEquals("test-abort", e.getMessage());
+                    }
+                }
+
+                Transaction txn3 = currentTxn.getTransaction();
+                assertSame(txn1, txn3);
+
+                assertEquals(ONE, map.get(ONE));
+                assertNull(map.get(TWO));
+            }
+        });
+
+        assertNull(currentTxn.getTransaction());
+    }
+
+    public void testReadCommittedCollection()
+        throws Exception {
+
+        StoredSortedMap degree2Map = (StoredSortedMap)
+            StoredCollections.configuredSortedMap
+                (map, CursorConfig.READ_COMMITTED);
+
+        // original map is not read-committed
+        assertTrue(!isReadCommitted(map));
+
+        // all read-committed containers are read-committed
+        assertTrue(isReadCommitted(degree2Map));
+        assertTrue(isReadCommitted
+            (StoredCollections.configuredMap
+                (map, CursorConfig.READ_COMMITTED)));
+        assertTrue(isReadCommitted
+            (StoredCollections.configuredCollection
+                (map.values(), CursorConfig.READ_COMMITTED)));
+        assertTrue(isReadCommitted
+            (StoredCollections.configuredSet
+                (map.keySet(), CursorConfig.READ_COMMITTED)));
+        assertTrue(isReadCommitted
+            (StoredCollections.configuredSortedSet
+                ((SortedSet) map.keySet(),
+                 CursorConfig.READ_COMMITTED)));
+
+        if (DbCompat.RECNO_METHOD) {
+            // create a list just so we can call configuredList()
+            Database listStore = TestStore.RECNO_RENUM.open(env, null);
+            List list = new StoredList(listStore, TestStore.VALUE_BINDING,
+                                       true);
+            assertTrue(isReadCommitted
+                (StoredCollections.configuredList
+                    (list, CursorConfig.READ_COMMITTED)));
+            listStore.close();
+        }
+
+        map.put(ONE, ONE);
+        doReadCommitted(degree2Map, null);
+    }
+
+    private static boolean isReadCommitted(Object container) {
+        StoredContainer storedContainer = (StoredContainer) container;
+        /* We can't use getReadCommitted until it is added to DB core. */
+        return storedContainer.getCursorConfig() != null &&
+               storedContainer.getCursorConfig().getReadCommitted();
+    }
+
+    public void testReadCommittedTransaction()
+        throws Exception {
+
+        TransactionConfig config = new TransactionConfig();
+        config.setReadCommitted(true);
+        doReadCommitted(map, config);
+    }
+
+    private void doReadCommitted(final StoredSortedMap degree2Map,
+                                 TransactionConfig txnConfig)
+        throws Exception {
+
+        map.put(ONE, ONE);
+        TransactionRunner runner = new TransactionRunner(env);
+        runner.setTransactionConfig(txnConfig);
+        assertNull(currentTxn.getTransaction());
+        runner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertNotNull(currentTxn.getTransaction());
+
+                /* Do a read-committed get(), the lock is not retained. */
+                assertEquals(ONE, degree2Map.get(ONE));
+
+                /*
+                 * If we were not using read-committed, the following write of
+                 * key ONE with an auto-commit transaction would self-deadlock
+                 * since two transactions in the same thread would be
+                 * attempting to lock the same key, one for write and one for
+                 * read.  This test passes if we do not deadlock.
+                 */
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry value = new DatabaseEntry();
+                testStore.getKeyBinding().objectToEntry(ONE, key);
+                testStore.getValueBinding().objectToEntry(TWO, value);
+                store.put(null, key, value);
+            }
+        });
+        assertNull(currentTxn.getTransaction());
+    }
+
+    public void testReadUncommittedCollection()
+        throws Exception {
+
+        StoredSortedMap dirtyMap = (StoredSortedMap)
+            StoredCollections.configuredSortedMap
+                (map, CursorConfig.READ_UNCOMMITTED);
+
+        // original map is not read-uncommitted
+        assertTrue(!isReadUncommitted(map));
+
+        // all read-uncommitted containers are read-uncommitted
+        assertTrue(isReadUncommitted(dirtyMap));
+        assertTrue(isReadUncommitted
+            (StoredCollections.configuredMap
+                (map, CursorConfig.READ_UNCOMMITTED)));
+        assertTrue(isReadUncommitted
+            (StoredCollections.configuredCollection
+                (map.values(), CursorConfig.READ_UNCOMMITTED)));
+        assertTrue(isReadUncommitted
+            (StoredCollections.configuredSet
+                (map.keySet(), CursorConfig.READ_UNCOMMITTED)));
+        assertTrue(isReadUncommitted
+            (StoredCollections.configuredSortedSet
+                ((SortedSet) map.keySet(), CursorConfig.READ_UNCOMMITTED)));
+
+        if (DbCompat.RECNO_METHOD) {
+            // create a list just so we can call configuredList()
+            Database listStore = TestStore.RECNO_RENUM.open(env, null);
+            List list = new StoredList(listStore, TestStore.VALUE_BINDING,
+                                       true);
+            assertTrue(isReadUncommitted
+                (StoredCollections.configuredList
+                    (list, CursorConfig.READ_UNCOMMITTED)));
+            listStore.close();
+        }
+
+        doReadUncommitted(dirtyMap);
+    }
+
+    private static boolean isReadUncommitted(Object container) {
+        StoredContainer storedContainer = (StoredContainer) container;
+        return storedContainer.getCursorConfig() != null &&
+               storedContainer.getCursorConfig().getReadUncommitted();
+    }
+
+    public void testReadUncommittedTransaction()
+        throws Exception {
+
+        TransactionRunner runner = new TransactionRunner(env);
+        TransactionConfig config = new TransactionConfig();
+        config.setReadUncommitted(true);
+        runner.setTransactionConfig(config);
+        assertNull(currentTxn.getTransaction());
+        runner.run(new TransactionWorker() {
+            public void doWork() throws Exception {
+                assertNotNull(currentTxn.getTransaction());
+                doReadUncommitted(map);
+            }
+        });
+        assertNull(currentTxn.getTransaction());
+    }
+
+    /**
+     * Tests that the CurrentTransaction static WeakHashMap does indeed allow
+     * GC to reclaim the environment when it is closed.  At one point this was
+     * not working because the value object in the map has a reference to the
+     * environment.  This was fixed by wrapping the Environment in a
+     * WeakReference.  [#15444]
+     *
+     * This test only succeeds intermittently, probably due to its reliance
+     * on the GC call.
+     */
+    public void testCurrentTransactionGC()
+        throws Exception {
+
+        /*
+         * This test can have indeterminate results because it depends on
+         * a finalize count, so it's not part of the default run.
+         */
+        if (!SharedTestUtils.runLongTests()) {
+            return;
+        }
+
+        final StringBuffer finalizedFlag = new StringBuffer();
+
+        class MyEnv extends Environment {
+
+            MyEnv(File home, EnvironmentConfig config)
+                throws IOException, DatabaseException {
+
+                super(home, config);
+            }
+
+            protected void finalize() {
+                finalizedFlag.append('.');
+            }
+        }
+
+        MyEnv myEnv = new MyEnv(env.getHome(), env.getConfig());
+        CurrentTransaction myCurrTxn = CurrentTransaction.getInstance(myEnv);
+
+        store.close();
+        store = null;
+        map = null;
+
+        env.close();
+        env = null;
+
+        myEnv.close();
+        myEnv = null;
+
+        myCurrTxn = null;
+        currentTxn = null;
+
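+        /*
+         * Provoke an OutOfMemoryError on each iteration to push the JVM
+         * into clearing weak references and finalizing unreachable objects,
+         * including (if the fix works) the weakly-referenced MyEnv.
+         */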
+        for (int i = 0; i < 10; i += 1) {
+            byte[] x = null;
+            try {
+                x = new byte[Integer.MAX_VALUE - 1];
+            } catch (OutOfMemoryError expected) {
+            }
+            assertNull(x);
+            System.gc();
+        }
+
+        for (int i = 0; i < 10; i += 1) {
+            System.gc();
+        }
+
+        assertTrue(finalizedFlag.length() > 0);
+    }
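+
+    /*
+     * A minimal sketch (not part of the original test) of the pattern the
+     * [#15444] fix uses, as described above: the WeakHashMap value wraps
+     * the Environment in a WeakReference so the value holds no strong
+     * reference back to its key, which would otherwise prevent the key
+     * from ever being reclaimed.
+     */
+    private static void weakValueSketch(Environment env) {
+        java.util.Map map = new java.util.WeakHashMap();
+        /* The value refers to the key only weakly. */
+        map.put(env, new java.lang.ref.WeakReference(env));
+    }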
+
+    private synchronized void doReadUncommitted(StoredSortedMap dirtyMap)
+        throws Exception {
+
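+        /*
+         * The main thread and each worker thread alternate via wait/notify:
+         * one side performs a step, notifies its peer, and blocks until
+         * notified again, so uncommitted data is observed at well-defined
+         * points.
+         */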
+        // start thread one
+        ReadUncommittedThreadOne t1 = new ReadUncommittedThreadOne(env, this);
+        t1.start();
+        wait();
+
+        // put ONE
+        synchronized (t1) { t1.notify(); }
+        wait();
+        readCheck(dirtyMap, ONE, ONE);
+        assertTrue(!dirtyMap.isEmpty());
+
+        // abort ONE
+        synchronized (t1) { t1.notify(); }
+        t1.join();
+        readCheck(dirtyMap, ONE, null);
+        assertTrue(dirtyMap.isEmpty());
+
+        // start thread two
+        ReadUncommittedThreadTwo t2 = new ReadUncommittedThreadTwo(env, this);
+        t2.start();
+        wait();
+
+        // put TWO
+        synchronized (t2) { t2.notify(); }
+        wait();
+        readCheck(dirtyMap, TWO, TWO);
+        assertTrue(!dirtyMap.isEmpty());
+
+        // commit TWO
+        synchronized (t2) { t2.notify(); }
+        t2.join();
+        readCheck(dirtyMap, TWO, TWO);
+        assertTrue(!dirtyMap.isEmpty());
+    }
+
+    private static class ReadUncommittedThreadOne extends Thread {
+
+        private CurrentTransaction currentTxn;
+        private TransactionTest parent;
+        private StoredSortedMap map;
+
+        private ReadUncommittedThreadOne(Environment env,
+                                         TransactionTest parent) {
+
+            this.currentTxn = CurrentTransaction.getInstance(env);
+            this.parent = parent;
+            this.map = parent.map;
+        }
+
+        public synchronized void run() {
+
+            try {
+                assertNull(currentTxn.getTransaction());
+                assertNotNull(currentTxn.beginTransaction(null));
+                assertNotNull(currentTxn.getTransaction());
+                readCheck(map, ONE, null);
+                synchronized (parent) { parent.notify(); }
+                wait();
+
+                // put ONE
+                assertNull(map.put(ONE, ONE));
+                readCheck(map, ONE, ONE);
+                synchronized (parent) { parent.notify(); }
+                wait();
+
+                // abort ONE
+                assertNull(currentTxn.abortTransaction());
+                assertNull(currentTxn.getTransaction());
+            } catch (Exception e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+        }
+    }
+
+    private static class ReadUncommittedThreadTwo extends Thread {
+
+        private Environment env;
+        private CurrentTransaction currentTxn;
+        private TransactionTest parent;
+        private StoredSortedMap map;
+
+        private ReadUncommittedThreadTwo(Environment env,
+                                         TransactionTest parent) {
+
+            this.env = env;
+            this.currentTxn = CurrentTransaction.getInstance(env);
+            this.parent = parent;
+            this.map = parent.map;
+        }
+
+        public synchronized void run() {
+
+            try {
+                final TransactionRunner runner = new TransactionRunner(env);
+                final Object thread = this;
+                assertNull(currentTxn.getTransaction());
+
+                runner.run(new TransactionWorker() {
+                    public void doWork() throws Exception {
+                        assertNotNull(currentTxn.getTransaction());
+                        readCheck(map, TWO, null);
+                        synchronized (parent) { parent.notify(); }
+                        thread.wait();
+
+                        // put TWO
+                        assertNull(map.put(TWO, TWO));
+                        readCheck(map, TWO, TWO);
+                        synchronized (parent) { parent.notify(); }
+                        thread.wait();
+
+                        // commit TWO
+                    }
+                });
+                assertNull(currentTxn.getTransaction());
+            } catch (Exception e) {
+                throw new RuntimeExceptionWrapper(e);
+            }
+        }
+    }
+
+    private static void readCheck(StoredSortedMap checkMap, Object key,
+                                  Object expect) {
+        if (expect == null) {
+            assertNull(checkMap.get(key));
+            assertTrue(checkMap.tailMap(key).isEmpty());
+            assertTrue(!checkMap.tailMap(key).containsKey(key));
+            assertTrue(!checkMap.keySet().contains(key));
+            assertTrue(checkMap.duplicates(key).isEmpty());
+            Iterator i = checkMap.keySet().iterator();
+            try {
+                while (i.hasNext()) {
+                    assertTrue(!key.equals(i.next()));
+                }
+            } finally { StoredIterator.close(i); }
+        } else {
+            assertEquals(expect, checkMap.get(key));
+            assertEquals(expect, checkMap.tailMap(key).get(key));
+            assertTrue(!checkMap.tailMap(key).isEmpty());
+            assertTrue(checkMap.tailMap(key).containsKey(key));
+            assertTrue(checkMap.keySet().contains(key));
+            assertTrue(checkMap.values().contains(expect));
+            assertTrue(!checkMap.duplicates(key).isEmpty());
+            assertTrue(checkMap.duplicates(key).contains(expect));
+            Iterator i = checkMap.keySet().iterator();
+            try {
+                boolean found = false;
+                while (i.hasNext()) {
+                    if (key.equals(i.next())) {
+                        found = true;
+                    }
+                }
+                assertTrue(found);
+            }
+            finally { StoredIterator.close(i); }
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/XACollectionTest.java b/test/com/sleepycat/collections/test/XACollectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..dfb590bbdb02b5516e39c584746dd2af0b1a2e84
--- /dev/null
+++ b/test/com/sleepycat/collections/test/XACollectionTest.java
@@ -0,0 +1,136 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: XACollectionTest.java,v 1.9.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.File;
+
+import javax.transaction.xa.XAResource;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.log.LogUtils.XidImpl;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Runs CollectionTest with special TestEnv and TransactionRunner objects to
+ * simulate XA transactions.
+ *
+ * <p>This test is currently JE-only and will not compile on DB core.</p>
+ */
+public class XACollectionTest extends CollectionTest {
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setTransactional(true);
+        TestEnv xaTestEnv = new XATestEnv(config);
+
+        for (int j = 0; j < TestStore.ALL.length; j += 1) {
+            for (int k = 0; k < 2; k += 1) {
+                boolean entityBinding = (k != 0);
+
+                suite.addTest(new XACollectionTest
+                    (xaTestEnv, TestStore.ALL[j], entityBinding));
+            }
+        }
+
+        return suite;
+    }
+
+    public XACollectionTest(TestEnv testEnv,
+                            TestStore testStore,
+                            boolean isEntityBinding) {
+
+        super(testEnv, testStore, isEntityBinding, false /*isAutoCommit*/);
+    }
+
+    protected TransactionRunner newTransactionRunner(Environment env)
+        throws DatabaseException {
+
+        return new XARunner((XAEnvironment) env);
+    }
+
+    private static class XATestEnv extends TestEnv {
+
+        private XATestEnv(EnvironmentConfig config) {
+            super("XA", config);
+        }
+
+        protected Environment newEnvironment(File dir,
+                                             EnvironmentConfig config)
+            throws DatabaseException {
+
+            return new XAEnvironment(dir, config);
+        }
+    }
+
+    private static class XARunner extends TransactionRunner {
+
+        private XAEnvironment xaEnv;
+        private static int sequence;
+
+        private XARunner(XAEnvironment env) {
+            super(env);
+            xaEnv = env;
+        }
+
+        public void run(TransactionWorker worker)
+            throws Exception {
+
+            if (xaEnv.getThreadTransaction() == null) {
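+                /*
+                 * The XA lifecycle used here: associate this thread with a
+                 * fresh xid (start), do the work, prepare, dissociate the
+                 * thread (end), then commit unless the prepare vote was
+                 * read-only.  On failure, end and roll back the branch,
+                 * retrying if the cause was a deadlock.
+                 */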
+                for (int i = 0;; i += 1) {
+                    sequence += 1;
+                    XidImpl xid = new XidImpl
+                        (1, String.valueOf(sequence).getBytes(), null);
+                    try {
+                        xaEnv.start(xid, 0);
+                        worker.doWork();
+                        int ret = xaEnv.prepare(xid);
+                        xaEnv.end(xid, 0);
+                        if (ret != XAResource.XA_RDONLY) {
+                            xaEnv.commit(xid, false);
+                        }
+                        return;
+                    } catch (Exception e) {
+                        e = ExceptionUnwrapper.unwrap(e);
+                        try {
+                            xaEnv.end(xid, 0);
+                            xaEnv.rollback(xid);
+                        } catch (Exception e2) {
+                            e2.printStackTrace();
+                            throw e;
+                        }
+                        if (i >= getMaxRetries() ||
+                            !(e instanceof DeadlockException)) {
+                            throw e;
+                        }
+                    }
+                }
+            } else { /* Nested */
+                try {
+                    worker.doWork();
+                } catch (Exception e) {
+                    throw ExceptionUnwrapper.unwrap(e);
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java b/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..630442c99f4b8aed2f50f81e70dd99ea333a8cdc
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CatalogCornerCaseTest.java,v 1.9 2008/02/05 23:28:26 mark Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class CatalogCornerCaseTest extends TestCase {
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        return new TestSuite(CatalogCornerCaseTest.class);
+    }
+
+    private Environment env;
+
+    public CatalogCornerCaseTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = TestEnv.BDB.open(getName());
+    }
+
+    public void tearDown() {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            env = null;
+        }
+    }
+
+    public void testReadOnlyEmptyCatalog()
+        throws Exception {
+
+        String file = "catalog.db";
+
+        /* Create an empty database. */
+        DatabaseConfig config = new DatabaseConfig();
+        config.setAllowCreate(true);
+        DbCompat.setTypeBtree(config);
+        Database db =
+            DbCompat.testOpenDatabase(env, null, file, null, config);
+        db.close();
+
+        /* Open the empty database read-only. */
+        config.setAllowCreate(false);
+        config.setReadOnly(true);
+        db = DbCompat.testOpenDatabase(env, null, file, null, config);
+
+        /* Expect exception when creating the catalog. */
+        try {
+            new StoredClassCatalog(db);
+            fail();
+        } catch (IllegalStateException e) { }
+        db.close();
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d1b0b41f170ac0fd6ab0ee266b85626e3e25765c
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredClassCatalogTest.java,v 1.38 2008/02/05 23:28:26 mark Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.io.ObjectStreamClass;
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Runs part two of the StoredClassCatalogTest.  This part is run with the
+ * new/updated version of TestSerial in the classpath.  It uses the
+ * environment and databases created by StoredClassCatalogTestInit.  It
+ * verifies that it can read objects serialized using the old class format,
+ * and that it can create new objects with the new class format.
+ *
+ * @author Mark Hayes
+ */
+public class StoredClassCatalogTest extends TestCase
+    implements TransactionWorker {
+
+    static final String CATALOG_FILE = "catalogtest-catalog.db";
+    static final String STORE_FILE = "catalogtest-store.db";
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+            suite.addTest(new StoredClassCatalogTest(TestEnv.ALL[i]));
+        }
+        return suite;
+    }
+
+    private TestEnv testEnv;
+    private Environment env;
+    private StoredClassCatalog catalog;
+    private StoredClassCatalog catalog2;
+    private Database store;
+    private Map map;
+    private TransactionRunner runner;
+
+    public StoredClassCatalogTest(TestEnv testEnv) {
+
+        super(makeTestName(testEnv));
+        this.testEnv = testEnv;
+    }
+
+    static String makeTestName(TestEnv testEnv) {
+        return "StoredClassCatalogTest-" + testEnv.getName();
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = testEnv.open(makeTestName(testEnv), false);
+        runner = new TransactionRunner(env);
+
+        catalog = new StoredClassCatalog(openDb(CATALOG_FILE, false));
+        catalog2 = new StoredClassCatalog(openDb("catalog2.db", true));
+
+        SerialBinding keyBinding =
+            new SerialBinding(catalog, String.class);
+        SerialBinding valueBinding =
+            new SerialBinding(catalog, TestSerial.class);
+        store = openDb(STORE_FILE, false);
+
+        map = new StoredMap(store, keyBinding, valueBinding, true);
+    }
+
+    private Database openDb(String file, boolean create)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        DbCompat.setTypeBtree(config);
+        config.setTransactional(testEnv.isTxnMode());
+        config.setAllowCreate(create);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    public void tearDown() {
+
+        try {
+            if (catalog != null) {
+                catalog.close();
+                catalog.close(); // should have no effect
+            }
+            if (catalog2 != null) {
+                catalog2.close();
+            }
+            if (store != null) {
+                store.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.err.println("Ignored exception during tearDown: ");
+            e.printStackTrace();
+        } finally {
+            /* Ensure that GC can cleanup. */
+            catalog = null;
+            catalog2 = null;
+            store = null;
+            env = null;
+            testEnv = null;
+            map = null;
+            runner = null;
+        }
+    }
+
+    public void runTest()
+        throws Exception {
+
+        runner.run(this);
+    }
+
+    public void doWork()
+        throws Exception {
+
+        TestSerial one = (TestSerial) map.get("one");
+        TestSerial two = (TestSerial) map.get("two");
+        assertNotNull(one);
+        assertNotNull(two);
+        assertEquals(one, two.getOther());
+        assertNull(one.getStringField());
+        assertNull(two.getStringField());
+
+        TestSerial three = new TestSerial(two);
+        assertNotNull(three.getStringField());
+        map.put("three", three);
+        three = (TestSerial) map.get("three");
+        assertEquals(two, three.getOther());
+
+        ObjectStreamClass desc = ObjectStreamClass.lookup(TestSerial.class);
+
+        assertNotNull(catalog.getClassID(desc));
+        assertNotNull(catalog.getClassID(desc));
+
+        // test with empty catalog
+        assertNotNull(catalog2.getClassID(desc));
+        assertNotNull(catalog2.getClassID(desc));
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc95d5a55f09771dfb050e7645e42c4155e08f84
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
@@ -0,0 +1,157 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: StoredClassCatalogTestInit.java,v 1.28 2008/02/05 23:28:26 mark Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Runs part one of the StoredClassCatalogTest.  This part is run with the
+ * old/original version of TestSerial in the classpath.  It creates a fresh
+ * environment and databases containing serialized versions of the old class.
+ * When StoredClassCatalogTest is run, it will read these objects from the
+ * database created here.
+ *
+ * @author Mark Hayes
+ */
+public class StoredClassCatalogTestInit extends TestCase
+    implements TransactionWorker {
+
+    static final String CATALOG_FILE = StoredClassCatalogTest.CATALOG_FILE;
+    static final String STORE_FILE = StoredClassCatalogTest.STORE_FILE;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+            suite.addTest(new StoredClassCatalogTestInit(TestEnv.ALL[i]));
+        }
+        return suite;
+    }
+
+    private TestEnv testEnv;
+    private Environment env;
+    private StoredClassCatalog catalog;
+    private Database store;
+    private Map map;
+    private TransactionRunner runner;
+
+    public StoredClassCatalogTestInit(TestEnv testEnv) {
+
+        super("StoredClassCatalogTestInit-" + testEnv.getName());
+        this.testEnv = testEnv;
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = testEnv.open(StoredClassCatalogTest.makeTestName(testEnv));
+        runner = new TransactionRunner(env);
+
+        catalog = new StoredClassCatalog(openDb(CATALOG_FILE));
+
+        SerialBinding keyBinding = new SerialBinding(catalog, String.class);
+        SerialBinding valueBinding =
+            new SerialBinding(catalog, TestSerial.class);
+        store = openDb(STORE_FILE);
+
+        map = new StoredMap(store, keyBinding, valueBinding, true);
+    }
+
+    private Database openDb(String file)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        DbCompat.setTypeBtree(config);
+        config.setTransactional(testEnv.isTxnMode());
+        config.setAllowCreate(true);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    public void tearDown() {
+
+        try {
+            if (catalog != null) {
+                catalog.close();
+                catalog.close(); // should have no effect
+            }
+            if (store != null) {
+                store.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.err.println("Ignored exception during tearDown: ");
+            e.printStackTrace();
+        } finally {
+            /* Ensure that GC can cleanup. */
+            catalog = null;
+            store = null;
+            env = null;
+            testEnv = null;
+            map = null;
+            runner = null;
+        }
+    }
+
+    public void runTest()
+        throws Exception {
+
+        runner.run(this);
+    }
+
+    public void doWork()
+        throws Exception {
+
+        TestSerial one = new TestSerial(null);
+        TestSerial two = new TestSerial(one);
+        assertNull("Likely the classpath contains the wrong version of the" +
+                   " TestSerial class; the 'original' version is required",
+                   one.getStringField());
+        assertNull(two.getStringField());
+        map.put("one", one);
+        map.put("two", two);
+        one = (TestSerial) map.get("one");
+        two = (TestSerial) map.get("two");
+        assertEquals(one, two.getOther());
+        assertNull(one.getStringField());
+        assertNull(two.getStringField());
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/TestSerial.java b/test/com/sleepycat/collections/test/serial/TestSerial.java
new file mode 100644
index 0000000000000000000000000000000000000000..f74587d6145827306aa302b653cda717cbf827d6
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/TestSerial.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TestSerial.java,v 1.19 2008/01/07 14:29:04 cwl Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+/**
+ * @see StoredClassCatalogTest
+ * @author Mark Hayes
+ */
+class TestSerial implements java.io.Serializable {
+
+    static final long serialVersionUID = -3738980000390384920L;
+
+    private int i = 123;
+    private TestSerial other;
+
+    // The following field 's' was added after this class was compiled and
+    // serialized instances were saved in resource files.  This allows testing
+    // that the original stored instances can be deserialized after changing
+    // the class.  The serialVersionUID is needed for this according to Java
+    // serialization rules, and was generated with the serialver tool.
+    //
+    private String s = "string";
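+
+    // For reference, the UID above can be regenerated with the JDK
+    // serialver tool, e.g. (hypothetical classpath):
+    //
+    //   serialver -classpath build/test/classes \
+    //       com.sleepycat.collections.test.serial.TestSerial
+    //
+    // Both versions of the class must declare the same serialVersionUID
+    // for previously serialized instances to remain readable.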
+
+    TestSerial(TestSerial other) {
+
+        this.other = other;
+    }
+
+    TestSerial getOther() {
+
+        return other;
+    }
+
+    int getIntField() {
+
+        return i;
+    }
+
+    String getStringField() {
+
+        return s; // this returned null before field 's' was added.
+    }
+
+    public boolean equals(Object object) {
+
+        try {
+            TestSerial o = (TestSerial) object;
+            if ((o.other == null) ? (this.other != null)
+                                  : (!o.other.equals(this.other))) {
+                return false;
+            }
+            if (this.i != o.i) {
+                return false;
+            }
+            // the following test was not done before field 's' was added
+            if ((o.s == null) ? (this.s != null)
+                              : (!o.s.equals(this.s))) {
+                return false;
+            }
+            return true;
+        } catch (ClassCastException e) {
+            return false;
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/TestSerial.java.original b/test/com/sleepycat/collections/test/serial/TestSerial.java.original
new file mode 100644
index 0000000000000000000000000000000000000000..f5512e6535613709b7b30f119d543a646ba9271d
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/TestSerial.java.original
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TestSerial.java.original,v 1.15 2008/01/07 14:29:04 cwl Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+/**
+ * @see StoredClassCatalogTest
+ * @author Mark Hayes
+ */
+class TestSerial implements java.io.Serializable
+{
+    static final long serialVersionUID = -3738980000390384920L;
+
+    private int i = 123;
+    private TestSerial other;
+
+    // The following field 's' was added after this class was compiled and
+    // serialized instances were saved in resource files.  This allows testing
+    // that the original stored instances can be deserialized after changing
+    // the class.  The serialVersionUID is needed for this according to Java
+    // serialization rules, and was generated with the serialver tool.
+    //
+    //private String s = "string";
+
+    TestSerial(TestSerial other)
+    {
+        this.other = other;
+    }
+
+    TestSerial getOther()
+    {
+        return other;
+    }
+
+    int getIntField()
+    {
+        return i;
+    }
+
+    String getStringField()
+    {
+        return null; // this returned null before field 's' was added.
+    }
+
+    public boolean equals(Object object)
+    {
+        try
+        {
+            TestSerial o = (TestSerial) object;
+            if ((o.other == null) ? (this.other != null)
+                                  : (!o.other.equals(this.other)))
+                return false;
+            if (this.i != o.i)
+                return false;
+            // the following test was not done before field 's' was added
+            /*
+            if ((o.s == null) ? (this.s != null)
+                              : (!o.s.equals(this.s)))
+                return false;
+            */
+            return true;
+        }
+        catch (ClassCastException e)
+        {
+            return false;
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java b/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a87844a630c0d84949871074907ed00e03c53705
--- /dev/null
+++ b/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
@@ -0,0 +1,252 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TupleSerialFactoryTest.java,v 1.42 2008/02/05 23:28:26 mark Exp $
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleSerialFactoryTest extends TestCase
+    implements TransactionWorker {
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
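+        /* Run each TestEnv in both sorted and unsorted map modes. */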
+        for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+            for (int sorted = 0; sorted < 2; sorted += 1) {
+                suite.addTest(new TupleSerialFactoryTest(TestEnv.ALL[i],
+                                                           sorted != 0));
+            }
+        }
+        return suite;
+    }
+
+    private TestEnv testEnv;
+    private Environment env;
+    private StoredClassCatalog catalog;
+    private TransactionRunner runner;
+    private TupleSerialFactory factory;
+    private Database store1;
+    private Database store2;
+    private SecondaryDatabase index1;
+    private SecondaryDatabase index2;
+    private boolean isSorted;
+    private Map storeMap1;
+    private Map storeMap2;
+    private Map indexMap1;
+    private Map indexMap2;
+
+    public TupleSerialFactoryTest(TestEnv testEnv, boolean isSorted) {
+
+        super(null);
+
+        this.testEnv = testEnv;
+        this.isSorted = isSorted;
+
+        String name = "TupleSerialFactoryTest-" + testEnv.getName();
+        name += isSorted ? "-sorted" : "-unsorted";
+        setName(name);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        SharedTestUtils.printTestName(getName());
+        env = testEnv.open(getName());
+        runner = new TransactionRunner(env);
+
+        createDatabase();
+    }
+
+    public void tearDown() {
+
+        try {
+            if (index1 != null) {
+                index1.close();
+            }
+            if (index2 != null) {
+                index2.close();
+            }
+            if (store1 != null) {
+                store1.close();
+            }
+            if (store2 != null) {
+                store2.close();
+            }
+            if (catalog != null) {
+                catalog.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            index1 = null;
+            index2 = null;
+            store1 = null;
+            store2 = null;
+            catalog = null;
+            env = null;
+            testEnv = null;
+            runner = null;
+            factory = null;
+            storeMap1 = null;
+            storeMap2 = null;
+            indexMap1 = null;
+            indexMap2 = null;
+        }
+    }
+
+    public void runTest()
+        throws Exception {
+
+        runner.run(this);
+    }
+
+    public void doWork()
+        throws Exception {
+
+        createViews();
+        writeAndRead();
+    }
+
+    private void createDatabase()
+        throws Exception {
+
+        catalog = new StoredClassCatalog(openDb("catalog.db"));
+        factory = new TupleSerialFactory(catalog);
+        assertSame(catalog, factory.getCatalog());
+
+        store1 = openDb("store1.db");
+        store2 = openDb("store2.db");
+        index1 = openSecondaryDb(factory, "1", store1, "index1.db", null);
+        index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1);
+    }
+
+    private Database openDb(String file)
+        throws Exception {
+
+        DatabaseConfig config = new DatabaseConfig();
+        config.setTransactional(testEnv.isTxnMode());
+        config.setAllowCreate(true);
+        DbCompat.setTypeBtree(config);
+
+        return DbCompat.testOpenDatabase(env, null, file, null, config);
+    }
+
+    private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory,
+                                              String keyName,
+                                              Database primary,
+                                              String file,
+                                              Database foreignStore)
+        throws Exception {
+
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(testEnv.isTxnMode());
+        secConfig.setAllowCreate(true);
+        DbCompat.setTypeBtree(secConfig);
+        secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class,
+                                                      keyName));
+        if (foreignStore != null) {
+            secConfig.setForeignKeyDatabase(foreignStore);
+            secConfig.setForeignKeyDeleteAction(
+                    ForeignKeyDeleteAction.CASCADE);
+        }
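+
+        /*
+         * With CASCADE, deleting a record from foreignStore also deletes
+         * any records in this secondary's primary that reference it; this
+         * is exercised by writeAndRead() below.
+         */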
+
+        return DbCompat.testOpenSecondaryDatabase
+            (env, null, file, null, primary, secConfig);
+    }
+
+    private void createViews()
+        throws Exception {
+
+        if (isSorted) {
+            storeMap1 = factory.newSortedMap(store1, String.class,
+                                             MarshalledObject.class, true);
+            storeMap2 = factory.newSortedMap(store2, String.class,
+                                             MarshalledObject.class, true);
+            indexMap1 = factory.newSortedMap(index1, String.class,
+                                             MarshalledObject.class, true);
+            indexMap2 = factory.newSortedMap(index2, String.class,
+                                             MarshalledObject.class, true);
+        } else {
+            storeMap1 = factory.newMap(store1, String.class,
+                                       MarshalledObject.class, true);
+            storeMap2 = factory.newMap(store2, String.class,
+                                       MarshalledObject.class, true);
+            indexMap1 = factory.newMap(index1, String.class,
+                                       MarshalledObject.class, true);
+            indexMap2 = factory.newMap(index2, String.class,
+                                       MarshalledObject.class, true);
+        }
+    }
+
+    private void writeAndRead()
+        throws Exception {
+
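+        /*
+         * The factory's map views extract the primary key from the
+         * marshalled entity itself, which is why put() is called with a
+         * null map key below.
+         */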
+        MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", "");
+        assertNull(storeMap1.put(null, o1));
+
+        assertEquals(o1, storeMap1.get("pk1"));
+        assertEquals(o1, indexMap1.get("ik1"));
+
+        MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1");
+        assertNull(storeMap2.put(null, o2));
+
+        assertEquals(o2, storeMap2.get("pk2"));
+        assertEquals(o2, indexMap2.get("pk1"));
+
+        /*
+         * store1 contains o1 with primary key "pk1" and index key "ik1".
+         * store2 contains o2 with primary key "pk2" and foreign key "pk1",
+         * which is the primary key of store1.  Removing "pk1" should
+         * therefore cascade-delete o2 as well.
+         */
+
+        storeMap1.remove("pk1");
+        assertNull(storeMap1.get("pk1"));
+        assertNull(indexMap1.get("ik1"));
+        assertNull(storeMap2.get("pk2"));
+        assertNull(indexMap2.get("pk1"));
+    }
+}
diff --git a/test/com/sleepycat/je/ApiTest.java b/test/com/sleepycat/je/ApiTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..9226a898ee3a553be148ce9fc37618379f7d7067
--- /dev/null
+++ b/test/com/sleepycat/je/ApiTest.java
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ApiTest.java,v 1.19.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import junit.framework.TestCase;
+
+/**
+ * Test parameter handling for api methods.
+ */
+public class ApiTest extends TestCase {
+
+    public void testBasic()
+        throws Exception {
+
+        try {
+            new Environment(null, null);
+            fail("Should get exception");
+        } catch (NullPointerException e) {
+            // expected exception
+        } catch (Exception e) {
+            fail("Shouldn't get other exception");
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/CursorEdgeTest.java b/test/com/sleepycat/je/CursorEdgeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..478bd9682f63b3fdb750ea916526ec0b3ba45e76
--- /dev/null
+++ b/test/com/sleepycat/je/CursorEdgeTest.java
@@ -0,0 +1,668 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorEdgeTest.java,v 1.39.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.LockNotGrantedException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Test edge cases in cursor traversals. In particular, look at duplicates and
+ * sets of keys interspersed with deletions.
+ */
+public class CursorEdgeTest extends TestCase {
+
+    private static final boolean DEBUG = false;
+    private Environment env;
+    private File envHome;
+    private boolean operationStarted;
+
+    public CursorEdgeTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+
+        /*
+         * Create an environment with transactions and a max node size of 6.
+         * Be sure to disable the compressor; we want some holes in the
+         * tree.
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    public void tearDown()
+	throws Exception {
+
+        try {
+            env.close();
+        } catch (Throwable e) {
+            System.out.println("Exception during tearDown");
+            e.printStackTrace();
+        }
+	env = null;
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Insert a number of duplicates, making sure that the duplicate tree
+     * has multiple bins. Make sure that we can skip over the duplicates and
+     * find the right value.
+     */
+    public void testSearchOnDuplicatesWithDeletions()
+	throws Throwable {
+
+        Database myDb = null;
+        Cursor cursor = null;
+	try {
+            /* Set up a db */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setAllowCreate(true);
+            myDb = env.openDatabase(null, "foo", dbConfig);
+
+            /*
+             * Insert k1/d1, then a duplicate range of k2/d1 -> k2/d15, then
+             * k3/d1. Now delete the beginning part of the duplicate
+             * range, trying to get more than a whole bin's worth
+             * (k2/d1 -> k2/d7). Because the compressor is not
+             * enabled, there will be a hole in the k2 range. While
+             * we're at it, delete k2/d10 - k2/d12 too, to make sure we
+             * can traverse a hole in the middle of the duplicate
+             * range.
+             */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            key.setData(TestUtils.getTestArray(1));
+            data.setData(TestUtils.getTestArray(1));
+            myDb.put(null, key, data);          // k1/d1
+            key.setData(TestUtils.getTestArray(3));
+            myDb.put(null, key, data);          // k3/d1
+
+            /* insert k2 range */
+            key.setData(TestUtils.getTestArray(2));
+            for (int i = 1; i <= 15; i++) {
+                data.setData(TestUtils.getTestArray(i));
+                myDb.put(null, key, data);
+            }
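+
+            /*
+             * With NODE_MAX set to 6 in setUp(), these 15 duplicates force
+             * the k2 duplicate tree to span more than one bin.
+             */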
+
+            /* Now delete k2/d1 -> k2/d7 */
+            Transaction txn =
+		env.beginTransaction(null, TransactionConfig.DEFAULT);
+            cursor = myDb.openCursor(txn, CursorConfig.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS,
+			 cursor.getSearchKey(key, data, LockMode.DEFAULT));
+            for (int i = 0; i < 7; i ++) {
+                assertEquals(OperationStatus.SUCCESS, cursor.delete());
+                assertEquals(OperationStatus.SUCCESS,
+			     cursor.getNext(key, data, LockMode.DEFAULT));
+            }
+
+            /* Also delete k2/d10 - k2/d12 */
+            data.setData(TestUtils.getTestArray(10));
+            assertEquals(OperationStatus.SUCCESS,
+			 cursor.getSearchBoth(key, data, LockMode.DEFAULT));
+            for (int i = 0; i < 3; i ++) {
+                assertEquals(OperationStatus.SUCCESS, cursor.delete());
+                assertEquals(OperationStatus.SUCCESS,
+			     cursor.getNext(key, data, LockMode.DEFAULT));
+            }
+
+            /* Double check what's in the tree */
+            if (DEBUG) {
+                Cursor checkCursor = myDb.openCursor(txn,
+						     CursorConfig.DEFAULT);
+                while (checkCursor.getNext(key, data, LockMode.DEFAULT) ==
+		       OperationStatus.SUCCESS) {
+                    System.out.println("key=" +
+                                       TestUtils.getTestVal(key.getData()) +
+                                       " data=" +
+                                       TestUtils.getTestVal(data.getData()));
+                }
+                checkCursor.close();
+            }
+            cursor.close();
+            cursor = null;
+            txn.commit();
+
+            /*
+             * Now make sure we can find k2/d8
+             */
+            Cursor readCursor = myDb.openCursor(null, CursorConfig.DEFAULT);
+            key.setData(TestUtils.getTestArray(2));
+
+            /* Use key search */
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchKey(key, data, LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use range search */
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchKeyRange(key, data,
+						      LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both */
+            data.setData(TestUtils.getTestArray(8));
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchBoth(key, data,
+						  LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both range, starting data at 8 */
+            data.setData(TestUtils.getTestArray(8));
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchBothRange(key, data,
+						       LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both range, starting at 1 */
+            data.setData(TestUtils.getTestArray(1));
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchBothRange(key, data,
+						       LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /*
+             * Make sure we can find k2/d13 with a range search.  After the
+             * deletions above, d13 is the first duplicate at or after d10.
+             */
+            key.setData(TestUtils.getTestArray(2));
+            data.setData(TestUtils.getTestArray(10));
+            assertEquals(OperationStatus.SUCCESS,
+			 readCursor.getSearchBothRange(key, data,
+						       LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(13, TestUtils.getTestVal(data.getData()));
+
+            /*
+             * Insert a set of duplicates, k5/d0 -> k5/d9, then delete
+             * all of them (but don't compress). Make sure no form of
+             * search ever finds them.
+             */
+            key.setData(TestUtils.getTestArray(5));
+            for (int i = 0; i < 10; i++) {
+                data.setData(TestUtils.getTestArray(i));
+                myDb.put(null, key, data);
+            }
+            myDb.delete(null, key);  // delete all k5's
+
+            /* All searches on key 5 should fail */
+            assertFalse(readCursor.getSearchKey(key, data, LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchKeyRange(key, data,
+						     LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+            data.setData(TestUtils.getTestArray(0));
+            assertFalse(readCursor.getSearchBoth(key, data,
+						 LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchBothRange(key, data,
+						      LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+
+            /* All ranges on key 4 should also fail. */
+            key.setData(TestUtils.getTestArray(4));
+            assertFalse(readCursor.getSearchKeyRange(key, data,
+						     LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchBothRange(key, data,
+                                                      LockMode.DEFAULT) ==
+			OperationStatus.SUCCESS);
+
+            readCursor.close();
+	} catch (Throwable t) {
+
+	    t.printStackTrace();
+	    throw t;
+	} finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            myDb.close();
+        }
+    }
+
+    /**
+     * Test the case where we allow duplicates in the database, but
+     * don't actually insert a duplicate.  So we have a key/value pair
+     * and do a getSearchBothRange using the key and data-1 (i.e. we land
+     * on the key, but just before the data in the dup set, which isn't
+     * really a dup set since there's only one entry).  getSearchBothRange
+     * should land on the key/value pair in this case.  See SR #9248.
+     */
+    public void testSearchBothWithOneDuplicate()
+	throws Throwable {
+
+        Database myDb = null;
+        Cursor cursor = null;
+	try {
+            /* Set up a db */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setAllowCreate(true);
+            myDb = env.openDatabase(null, "foo", dbConfig);
+
+            /* Put one record */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            key.setData(TestUtils.getTestArray(1));
+            data.setData(TestUtils.getTestArray(1));
+            myDb.put(null, key, data);
+
+            key.setData(TestUtils.getTestArray(1));
+            data.setData(TestUtils.getTestArray(0));
+            cursor = myDb.openCursor(null, CursorConfig.DEFAULT);
+            OperationStatus status =
+		cursor.getSearchBothRange(key, data, LockMode.DEFAULT);
+            assertSame(status, OperationStatus.SUCCESS);
+            assertEquals(1, TestUtils.getTestVal(key.getData()));
+            assertEquals(1, TestUtils.getTestVal(data.getData()));
+	} finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            if (myDb != null) {
+                myDb.close();
+            }
+        }
+    }
+
+    /**
+     * Tests a bug fix to CursorImpl.fetchCurrent [#11195].
+     *
+     * T1 inserts K1-D1 and holds WRITE on K1-D1 (no dup tree yet)
+     * T2 calls getFirst and waits for READ on K1-D1
+     * T1 inserts K1-D2 which creates the dup tree
+     * T1 commits, allowing T2 to proceed
+     *
+     * T2 is in the middle of CursorImpl.fetchCurrent, and assumes incorrectly
+     * that it has a lock on an LN in the BIN; actually the LN has been
+     * replaced by a DIN, and a ClassCastException occurs.
+     */
+    public void testGetCurrentDuringDupTreeCreation()
+	throws Throwable {
+
+        /* Set up a db */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setAllowCreate(true);
+        final Database myDb = env.openDatabase(null, "foo", dbConfig);
+
+        /* T1 inserts K1-D1. */
+        Transaction t1 = env.beginTransaction(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        myDb.put(t1, key, data);
+
+        /* T2 calls getFirst. */
+        JUnitThread thread = new JUnitThread("getFirst") {
+            public void testBody()
+                throws DatabaseException {
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                Transaction t2 = env.beginTransaction(null, null);
+                operationStarted = true;
+                Cursor cursor = myDb.openCursor(t2, null);
+                OperationStatus status = cursor.getFirst(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(1, TestUtils.getTestVal(key.getData()));
+                assertEquals(1, TestUtils.getTestVal(data.getData()));
+                cursor.close();
+                t2.commitNoSync();
+            }
+        };
+        thread.start();
+        while (!operationStarted) {
+            Thread.yield();
+        }
+        Thread.sleep(10);
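+        /* By now T2 should be blocked waiting for its READ lock on K1-D1. */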
+
+        /* T1 inserts K1-D2. */
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(2));
+        myDb.put(t1, key, data);
+        t1.commitNoSync();
+
+        try {
+            thread.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+        myDb.close();
+    }
+
+    /**
+     * Tests a bug fix to CursorImpl.fetchCurrent [#11700] that caused
+     * ArrayIndexOutOfBoundsException.
+     */
+    public void testGetPrevNoDupWithEmptyTree()
+	throws Throwable {
+
+        OperationStatus status;
+
+        /*
+         * Set up a db
+         */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setAllowCreate(true);
+        Database myDb = env.openDatabase(null, "foo", dbConfig);
+
+        /*
+         * Insert two sets of duplicates.
+         */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        myDb.put(null, key, data);
+        data.setData(TestUtils.getTestArray(2));
+        myDb.put(null, key, data);
+
+        key.setData(TestUtils.getTestArray(2));
+        data.setData(TestUtils.getTestArray(1));
+        myDb.put(null, key, data);
+        data.setData(TestUtils.getTestArray(2));
+        myDb.put(null, key, data);
+
+        /*
+         * Delete all duplicates with a cursor.
+         */
+        Cursor cursor = myDb.openCursor(null, null);
+        while ((status = cursor.getNext(key, data, null)) ==
+                OperationStatus.SUCCESS) {
+            cursor.delete();
+        }
+
+        /*
+         * Compress to empty the two DBINs.  The BIN will not be deleted
+         * because a cursor is attached to it.  This causes a cursor to be
+         * positioned on an empty DBIN, which brings out the bug.
+         */
+        env.compress();
+
+        /*
+         * Before the bug fix, getPrevNoDup caused
+         * ArrayIndexOutOfBoundsException.
+         */
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        cursor.close();
+        myDb.close();
+    }
+
+    /*
+     * Check that non-transactional cursors can't do update operations
+     * against a transactional database.
+     */
+    public void testNonTxnalCursorNoUpdates()
+        throws Throwable {
+
+        Database myDb = null;
+        SecondaryDatabase mySecDb = null;
+        Cursor cursor = null;
+        SecondaryCursor secCursor = null;
+	try {
+            /* Set up a db with a secondary, insert something. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            myDb = env.openDatabase(null, "foo", dbConfig);
+
+            SecondaryConfig secConfig = new SecondaryConfig();
+            secConfig.setTransactional(true);
+            secConfig.setAllowCreate(true);
+            secConfig.setKeyCreator(new KeyCreator());
+            mySecDb = env.openSecondaryDatabase(null, "fooSecDb", myDb,
+                                                secConfig);
+
+            /* Insert something. */
+            DatabaseEntry key = new DatabaseEntry(new byte[1]);
+            assertEquals(OperationStatus.SUCCESS, myDb.put(null, key, key));
+
+            /* Open a non-txnal cursor on the primary database. */
+            cursor = myDb.openCursor(null, null);
+            DatabaseEntry data = new DatabaseEntry();
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getNext(key, data, LockMode.DEFAULT));
+
+            /* All updates should be prohibited. */
+            updatesShouldBeProhibited(cursor);
+
+            /* Open a secondary non-txnal cursor */
+            secCursor = mySecDb.openSecondaryCursor(null, null);
+            assertEquals(OperationStatus.SUCCESS,
+                         secCursor.getNext(key, data, LockMode.DEFAULT));
+
+            /* All updates should be prohibited. */
+            updatesShouldBeProhibited(secCursor);
+
+	} catch (Throwable t) {
+	    t.printStackTrace();
+	    throw t;
+	} finally {
+            if (secCursor != null) {
+                secCursor.close();
+            }
+
+            if (cursor != null) {
+                cursor.close();
+            }
+
+            if (mySecDb != null) {
+                mySecDb.close();
+            }
+
+            myDb.close();
+        }
+    }
+
+    /* Updates should not be possible with this cursor. */
+    private void updatesShouldBeProhibited(Cursor cursor)
+        throws Exception {
+
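+        /*
+         * This is called with both a primary Cursor and a SecondaryCursor;
+         * a SecondaryCursor rejects put operations with
+         * UnsupportedOperationException, which is tolerated below.
+         */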
+        try {
+            cursor.delete();
+            fail("Should not be able to do a delete");
+        } catch (DatabaseException e) {
+            checkForTransactionException(e);
+        }
+
+        DatabaseEntry key = new DatabaseEntry(new byte[0]);
+        DatabaseEntry data = new DatabaseEntry(new byte[0]);
+
+        try {
+            cursor.put(key, data);
+            fail("Should not be able to do a put");
+        } catch (UnsupportedOperationException e) {
+            /* disregard for secondary cursors */
+        } catch (DatabaseException e) {
+            checkForTransactionException(e);
+        }
+
+        try {
+            cursor.putCurrent(data);
+            fail("Should not be able to do a putCurrent");
+        } catch (UnsupportedOperationException e) {
+            /* disregard for secondary cursors */
+        } catch (DatabaseException e) {
+            checkForTransactionException(e);
+        }
+
+        try {
+            cursor.putNoDupData(key, data);
+            fail("Should not be able to do a putNoDupData");
+        } catch (UnsupportedOperationException e) {
+            /* disregard for secondary cursors */
+        } catch (DatabaseException e) {
+            checkForTransactionException(e);
+        }
+
+        try {
+            cursor.putNoOverwrite(key, data);
+            fail("Should not be able to do a putNoOverwrite");
+        } catch (UnsupportedOperationException e) {
+            /* disregard for secondary cursors */
+        } catch (DatabaseException e) {
+            checkForTransactionException(e);
+        }
+    }
+
+    private void checkForTransactionException(DatabaseException e) {
+        /*
+         * Check that it's a transaction problem.  This is crude, but
+         * necessary since we don't want to add exception types.
+         */
+        assertTrue(TestUtils.skipVersion(e).
+                   startsWith("A transaction was not supplied"));
+    }
+
+    private static class KeyCreator implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondaryDb,
+                                          DatabaseEntry keyEntry,
+                                          DatabaseEntry dataEntry,
+                                          DatabaseEntry resultEntry) {
+            resultEntry.setData(dataEntry.getData());
+            return true;
+        }
+    }
+
+    /**
+     * Tests that when a LockNotGrantedException is thrown as the result of a
+     * cursor operation, all latches are released properly.  There are two
+     * cases corresponding to the two methods in CursorImpl --
+     * lockLNDeletedAllowed and lockDupCountLN, which lock leaf LNs and dup
+     * count LNs, respectively -- that handle locking and latching.  These
+     * methods optimize by not releasing latches while obtaining a non-blocking
+     * lock.  Prior to the fix for [#15142], these methods did not release
+     * latches when LockNotGrantedException was thrown, which can occur when
+     * a transaction is configured for "no wait".
+     */
+    public void testNoWaitLatchRelease()
+	throws Throwable {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Open the database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        /* Insert record 1. */
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        db.put(null, key, data);
+
+        /* Open cursor1 with txn1 and lock record 1. */
+        Transaction txn1 = env.beginTransaction(null, null);
+        Cursor cursor1 = db.openCursor(txn1, null);
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        OperationStatus status = cursor1.getSearchBoth(key, data, null);
+        assertSame(status, OperationStatus.SUCCESS);
+        assertEquals(1, TestUtils.getTestVal(key.getData()));
+        assertEquals(1, TestUtils.getTestVal(data.getData()));
+
+        /* Open cursor2 with no-wait txn2 and try to delete record 1. */
+        TransactionConfig noWaitConfig = new TransactionConfig();
+        noWaitConfig.setNoWait(true);
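+
+        /*
+         * With no-wait, a lock conflict throws LockNotGrantedException
+         * immediately instead of blocking until the lock timeout.
+         */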
+        Transaction txn2 = env.beginTransaction(null, noWaitConfig);
+        Cursor cursor2 = db.openCursor(txn2, null);
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        status = cursor2.getSearchBoth(key, data, null);
+        assertSame(status, OperationStatus.SUCCESS);
+        assertEquals(1, TestUtils.getTestVal(key.getData()));
+        assertEquals(1, TestUtils.getTestVal(data.getData()));
+        try {
+            cursor2.delete();
+            fail("Expected LockNotGrantedException");
+        } catch (LockNotGrantedException expected) {
+        }
+
+        /*
+         * Before the [#15142] bug fix, this could have failed.  However, that
+         * failure was not reproducible because all callers of
+         * lockLNDeletedAllowed redundantly release the BIN latches.  So this is
+         * just an extra check to ensure such a bug is never introduced.
+         */
+        assertEquals(0, LatchSupport.countLatchesHeld());
+
+        /* Close cursors and txns to release locks. */
+        cursor1.close();
+        cursor2.close();
+        txn1.commit();
+        txn2.commit();
+
+        /* Insert duplicate record 2 to create a DupCountLN. */
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(2));
+        db.put(null, key, data);
+
+        /* Get the cursor count with cursor1/txn1 to lock the DupCountLN. */
+        txn1 = env.beginTransaction(null, null);
+        cursor1 = db.openCursor(txn1, null);
+        key.setData(TestUtils.getTestArray(1));
+        status = cursor1.getSearchKey(key, data, null);
+        assertSame(status, OperationStatus.SUCCESS);
+        assertEquals(1, TestUtils.getTestVal(key.getData()));
+        assertEquals(1, TestUtils.getTestVal(data.getData()));
+        assertEquals(2, cursor1.count());
+
+        /* Try to write lock the DupCountLN with txn2 by deleting record 2. */
+        txn2 = env.beginTransaction(null, noWaitConfig);
+        cursor2 = db.openCursor(txn2, null);
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(2));
+        status = cursor2.getSearchBoth(key, data, null);
+        assertSame(status, OperationStatus.SUCCESS);
+        assertEquals(1, TestUtils.getTestVal(key.getData()));
+        assertEquals(2, TestUtils.getTestVal(data.getData()));
+        try {
+            cursor2.delete();
+            fail("Expected LockNotGrantedException");
+        } catch (LockNotGrantedException expected) {
+        }
+
+        /* Before the [#15142] bug fix, this would fail. */
+        assertEquals(0, LatchSupport.countLatchesHeld());
+
+        /* Close all. */
+        cursor1.close();
+        cursor2.close();
+        txn1.commit();
+        txn2.commit();
+        db.close();
+    }
+}
diff --git a/test/com/sleepycat/je/CursorTest.java b/test/com/sleepycat/je/CursorTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..07fbacc02a995ae3a74e9f416d2bc046390969fd
--- /dev/null
+++ b/test/com/sleepycat/je/CursorTest.java
@@ -0,0 +1,975 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorTest.java,v 1.82.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+
+public class CursorTest extends TestCase {
+    private static final boolean DEBUG = false;
+    private static final int NUM_RECS = 257;
+
+    /*
+     * Use a ridiculous value because we've seen extreme slowness on ocicat
+     * where dbperf is often running.
+     */
+    private static final long LOCK_TIMEOUT = 50000000L;
+
+    private static final String DUPKEY = "DUPKEY";
+
+    private Environment env;
+    private Database db;
+    private PhantomTestConfiguration config;
+
+    private File envHome;
+
+    private volatile int sequence;
+
+    public CursorTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+	throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+	db = null;
+	env = null;
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testGetConfig()
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        env = new Environment(envHome, envConfig);
+        Transaction txn = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(txn, "testDB", dbConfig);
+	txn.commit();
+	Cursor cursor = null;
+	Transaction txn1 =
+	    env.beginTransaction(null, TransactionConfig.DEFAULT);
+	try {
+	    cursor = db.openCursor(txn1, CursorConfig.DEFAULT);
+	    CursorConfig config = cursor.getConfig();
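+	    /* getConfig() should return a clone, not the shared instance. */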
+	    if (config == CursorConfig.DEFAULT) {
+		fail("didn't clone");
+	    }
+	} catch (DatabaseException DBE) {
+	    DBE.printStackTrace();
+	    fail("caught DatabaseException " + DBE);
+	} finally {
+	    if (cursor != null) {
+		cursor.close();
+	    }
+	    txn1.abort();
+	    db.close();
+	    env.close();
+            env = null;
+	}
+    }
+
+    /**
+     * Put some data in a database, take it out. Yank the file size down so we
+     * have many files.
+     */
+    public void testBasic()
+	throws Throwable {
+
+	try {
+	    insertMultiDb(1);
+	} catch (Throwable t) {
+	    t.printStackTrace();
+	    throw t;
+	}
+    }
+
+    public void testMulti()
+	throws Throwable {
+
+	try {
+	    insertMultiDb(4);
+	} catch (Throwable t) {
+	    t.printStackTrace();
+	    throw t;
+	}
+    }
+
+    /**
+     * Specifies a test configuration.  This is just a struct for holding
+     * parameters to be passed down to threads in inner classes.
+     */
+    class PhantomTestConfiguration {
+	String testName;
+	String thread1EntryToLock;
+	String thread1OpArg;
+	String thread2Start;
+	String expectedResult;
+	boolean doInsert;
+	boolean doGetNext;
+	boolean doCommit;
+
+	PhantomTestConfiguration(String testName,
+				 String thread1EntryToLock,
+				 String thread1OpArg,
+				 String thread2Start,
+				 String expectedResult,
+				 boolean doInsert,
+				 boolean doGetNext,
+				 boolean doCommit) {
+	    this.testName = testName;
+	    this.thread1EntryToLock = thread1EntryToLock;
+	    this.thread1OpArg = thread1OpArg;
+	    this.thread2Start = thread2Start;
+	    this.expectedResult = expectedResult;
+	    this.doInsert = doInsert;
+	    this.doGetNext = doGetNext;
+	    this.doCommit = doCommit;
+	}
+    }
+
+    /**
+     * This series of tests sets up a simple 2 BIN tree with a specific set of
+     * elements (see setupDatabaseAndEnv()).  It creates two threads.
+     *
+     * Thread 1 positions a cursor on an element on the edge of a BIN (either
+     * the last element on the left BIN or the first element on the right BIN).
+     * This locks that element.  It throws control to thread 2.
+     *
+     * Thread 2 positions a cursor on the adjacent element on the other BIN
+     * (either the first element on the right BIN or the last element on the
+     * left BIN, resp.).  It throws control to thread 1.  After it signals
+     * thread 1 to continue, thread 2 does either a getNext or getPrev.  This
+     * should block because thread 1 has the next/prev element locked.
+     *
+     * Thread 1 then waits a short time (250ms) so that thread 2 can execute
+     * the getNext/getPrev.  Thread 1 then inserts or deletes the "phantom
+     * element" right in between the cursors that were set up in the previous
+     * two steps, sleeps a second, and either commits or aborts.
+     *
+     * Thread 2 will then return from the getNext/getPrev.  The returned key
+     * from the getNext/getPrev is then verified.
+     *
+     * The Serializable isolation level is not used for either thread so as to
+     * allow phantoms; otherwise, this test would deadlock.
+     *
+     * These parameters are all configured through a PhantomTestConfiguration
+     * instance passed to phantomWorker which has the template for the steps
+     * described above.
+     */
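+
+    /*
+     * For example, ("F", "D", "C", "D", insert, getNext, commit) means:
+     * thread 1 locks F and inserts phantom D, thread 2 starts at C, and
+     * after thread 1 commits, thread 2's getNext should return D.
+     */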
+
+    /**
+     * Phantom test inserting and committing a phantom while doing a getNext.
+     */
+    public void testPhantomInsertGetNextCommit()
+	throws Throwable {
+
+        try {
+            phantomWorker
+                (new PhantomTestConfiguration
+                 ("testPhantomInsertGetNextCommit",
+                  "F", "D", "C", "D",
+                  true, true, true));
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /**
+     * Phantom test inserting and aborting a phantom while doing a getNext.
+     */
+    public void testPhantomInsertGetNextAbort()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomInsertGetNextAbort",
+	      "F", "D", "C", "F",
+	      true, true, false));
+    }
+
+    /**
+     * Phantom test inserting and committing a phantom while doing a getPrev.
+     */
+    public void testPhantomInsertGetPrevCommit()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomInsertGetPrevCommit",
+	      "C", "F", "G", "F",
+	      true, false, true));
+    }
+
+    /**
+     * Phantom test inserting and aborting a phantom while doing a getPrev.
+     */
+    public void testPhantomInsertGetPrevAbort()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomInsertGetPrevAbort",
+	      "C", "F", "G", "C",
+	      true, false, false));
+    }
+
+    /**
+     * Phantom test deleting and committing an edge element while doing a
+     * getNext.
+     */
+    public void testPhantomDeleteGetNextCommit()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDeleteGetNextCommit",
+	      "F", "F", "C", "G",
+	      false, true, true));
+    }
+
+    /**
+     * Phantom test deleting and aborting an edge element while doing a
+     * getNext.
+     */
+    public void testPhantomDeleteGetNextAbort()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDeleteGetNextAbort",
+	      "F", "F", "C", "F",
+	      false, true, false));
+    }
+
+    /**
+     * Phantom test deleting and committing an edge element while doing a
+     * getPrev.
+     */
+    public void testPhantomDeleteGetPrevCommit()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDeleteGetPrevCommit",
+	      "F", "F", "G", "C",
+	      false, false, true));
+    }
+
+    /**
+     * Phantom test deleting and aborting an edge element while doing a
+     * getPrev.
+     */
+    public void testPhantomDeleteGetPrevAbort()
+	throws Throwable {
+
+	phantomWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDeleteGetPrevAbort",
+	      "F", "F", "G", "F",
+	      false, false, false));
+    }
+
+    /**
+     * Phantom Dup test inserting and committing a phantom while doing a
+     * getNext.
+     */
+    public void testPhantomDupInsertGetNextCommit()
+	throws Throwable {
+
+        try {
+            phantomDupWorker
+                (new PhantomTestConfiguration
+                 ("testPhantomDupInsertGetNextCommit",
+                  "F", "D", "C", "D",
+                  true, true, true));
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /**
+     * Phantom Dup test inserting and aborting a phantom while doing a getNext.
+     */
+    public void testPhantomDupInsertGetNextAbort()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupInsertGetNextAbort",
+	      "F", "D", "C", "F",
+	      true, true, false));
+    }
+
+    /**
+     * Phantom Dup test inserting and committing a phantom while doing a
+     * getPrev.
+     */
+    public void testPhantomDupInsertGetPrevCommit()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupInsertGetPrevCommit",
+	      "C", "F", "G", "F",
+	      true, false, true));
+    }
+
+    /**
+     * Phantom Dup test inserting and aborting a phantom while doing a getPrev.
+     */
+    public void testPhantomDupInsertGetPrevAbort()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupInsertGetPrevAbort",
+	      "C", "F", "G", "C",
+	      true, false, false));
+    }
+
+    /**
+     * Phantom Dup test deleting and committing an edge element while doing a
+     * getNext.
+     */
+    public void testPhantomDupDeleteGetNextCommit()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupDeleteGetNextCommit",
+	      "F", "F", "C", "G",
+	      false, true, true));
+    }
+
+    /**
+     * Phantom Dup test deleting and aborting an edge element while doing a
+     * getNext.
+     */
+    public void testPhantomDupDeleteGetNextAbort()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupDeleteGetNextAbort",
+	      "F", "F", "C", "F",
+	      false, true, false));
+    }
+
+    /**
+     * Phantom Dup test deleting and committing an edge element while doing a
+     * getPrev.
+     */
+    public void testPhantomDupDeleteGetPrevCommit()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupDeleteGetPrevCommit",
+	      "F", "F", "G", "C",
+	      false, false, true));
+    }
+
+    /**
+     * Phantom Dup test deleting and aborting an edge element while doing a
+     * getPrev.
+     */
+    public void testPhantomDupDeleteGetPrevAbort()
+	throws Throwable {
+
+	phantomDupWorker
+	    (new PhantomTestConfiguration
+	     ("testPhantomDupDeleteGetPrevAbort",
+	      "F", "F", "G", "F",
+	      false, false, false));
+    }
+
+    private void phantomWorker(PhantomTestConfiguration c)
+	throws Throwable {
+
+	try {
+	    this.config = c;
+	    setupDatabaseAndEnv(false);
+
+	    if (config.doInsert &&
+		!config.doGetNext) {
+
+		Transaction txnDel =
+		    env.beginTransaction(null, TransactionConfig.DEFAULT);
+
+		/*
+		 * Delete the first entry in the second bin so that we can
+		 * reinsert it in tester1 and have it be the first entry in
+		 * that bin.  If we left F and then tried to insert something
+		 * to the left of F, it would end up in the first bin.
+		 */
+		assertEquals(OperationStatus.SUCCESS,
+			     db.delete(txnDel,
+				       new DatabaseEntry("F".getBytes())));
+		txnDel.commit();
+	    }
+
+	    JUnitThread tester1 =
+		new JUnitThread(config.testName + "1") {
+		    public void testBody()
+			throws Throwable {
+
+			Cursor cursor = null;
+			try {
+			    Transaction txn1 =
+				env.beginTransaction(null, null);
+			    cursor = db.openCursor(txn1, CursorConfig.DEFAULT);
+			    OperationStatus status =
+				cursor.getSearchKey
+				(new DatabaseEntry
+				 (config.thread1EntryToLock.getBytes()),
+				 new DatabaseEntry(),
+				 LockMode.RMW);
+			    assertEquals(OperationStatus.SUCCESS, status);
+			    sequence++;  // 0 -> 1
+
+			    while (sequence < 2) {
+				Thread.yield();
+			    }
+
+			    /*
+			     * Since we can't increment sequence when tester2
+			     * blocks on the getNext call, all we can do is
+			     * bump sequence right before the getNext, and then
+			     * wait a little in this thread for tester2 to
+			     * block.
+			     */
+			    try {
+				Thread.sleep(250);
+			    } catch (InterruptedException IE) {
+			    }
+
+			    if (config.doInsert) {
+				status = db.put
+				    (txn1,
+				     new DatabaseEntry
+				     (config.thread1OpArg.getBytes()),
+				     new DatabaseEntry(new byte[10]));
+			    } else {
+				status = db.delete
+				    (txn1,
+				     new DatabaseEntry
+				     (config.thread1OpArg.getBytes()));
+			    }
+			    assertEquals(OperationStatus.SUCCESS, status);
+			    sequence++;     // 2 -> 3
+
+			    try {
+				Thread.sleep(1000);
+			    } catch (InterruptedException IE) {
+			    }
+
+			    cursor.close();
+			    cursor = null;
+			    if (config.doCommit) {
+				txn1.commit();
+			    } else {
+				txn1.abort();
+			    }
+			} catch (DatabaseException DBE) {
+			    if (cursor != null) {
+				cursor.close();
+			    }
+			    DBE.printStackTrace();
+			    fail("caught DatabaseException " + DBE);
+			}
+		    }
+		};
+
+	    JUnitThread tester2 =
+		new JUnitThread(config.testName + "2") {
+		    public void testBody()
+			throws Throwable {
+
+			Cursor cursor = null;
+			try {
+			    Transaction txn2 =
+				env.beginTransaction(null, null);
+			    txn2.setLockTimeout(LOCK_TIMEOUT);
+			    cursor = db.openCursor(txn2, CursorConfig.DEFAULT);
+
+			    while (sequence < 1) {
+				Thread.yield();
+			    }
+
+			    OperationStatus status =
+				cursor.getSearchKey
+				(new DatabaseEntry
+				 (config.thread2Start.getBytes()),
+				 new DatabaseEntry(),
+				 LockMode.DEFAULT);
+			    assertEquals(OperationStatus.SUCCESS, status);
+
+			    sequence++;           // 1 -> 2
+			    DatabaseEntry nextKey = new DatabaseEntry();
+			    try {
+
+				/*
+				 * This will block until tester1 above commits.
+				 */
+				if (config.doGetNext) {
+				    status =
+					cursor.getNext(nextKey,
+						       new DatabaseEntry(),
+						       LockMode.DEFAULT);
+				} else {
+				    status =
+					cursor.getPrev(nextKey,
+						       new DatabaseEntry(),
+						       LockMode.DEFAULT);
+				}
+			    } catch (DatabaseException DBE) {
+				System.out.println("t2 caught " + DBE);
+			    }
+			    assertEquals(3, sequence);
+			    assertEquals(config.expectedResult,
+					 new String(nextKey.getData()));
+			    cursor.close();
+			    cursor = null;
+			    txn2.commit();
+			} catch (DatabaseException DBE) {
+			    if (cursor != null) {
+				cursor.close();
+			    }
+			    DBE.printStackTrace();
+			    fail("caught DatabaseException " + DBE);
+			}
+		    }
+		};
+
+	    tester1.start();
+	    tester2.start();
+
+	    tester1.finishTest();
+	    tester2.finishTest();
+	} finally {
+	    db.close();
+	    env.close();
+            env = null;
+	}
+    }
+
+    private void phantomDupWorker(PhantomTestConfiguration c)
+	throws Throwable {
+
+	Cursor cursor = null;
+	try {
+	    this.config = c;
+	    setupDatabaseAndEnv(true);
+
+	    if (config.doInsert &&
+		!config.doGetNext) {
+
+		Transaction txnDel =
+		    env.beginTransaction(null, TransactionConfig.DEFAULT);
+		cursor = db.openCursor(txnDel, CursorConfig.DEFAULT);
+
+		/*
+		 * Delete the first entry in the second bin so that we can
+		 * reinsert it in tester1 and have it be the first entry in
+		 * that bin.  If we left F and then tried to insert something
+		 * to the left of F, it would end up in the first bin.
+		 */
+		assertEquals(OperationStatus.SUCCESS, cursor.getSearchBoth
+			     (new DatabaseEntry(DUPKEY.getBytes()),
+			      new DatabaseEntry("F".getBytes()),
+			      LockMode.DEFAULT));
+		assertEquals(OperationStatus.SUCCESS, cursor.delete());
+		cursor.close();
+		cursor = null;
+		txnDel.commit();
+	    }
+
+	    JUnitThread tester1 =
+		new JUnitThread(config.testName + "1") {
+		    public void testBody()
+			throws Throwable {
+
+			Cursor cursor = null;
+			Cursor c = null;
+			try {
+			    Transaction txn1 =
+				env.beginTransaction(null, null);
+			    cursor = db.openCursor(txn1, CursorConfig.DEFAULT);
+			    OperationStatus status =
+				cursor.getSearchBoth
+				(new DatabaseEntry(DUPKEY.getBytes()),
+				 new DatabaseEntry
+				 (config.thread1EntryToLock.getBytes()),
+				 LockMode.RMW);
+			    assertEquals(OperationStatus.SUCCESS, status);
+			    cursor.close();
+			    cursor = null;
+			    sequence++;  // 0 -> 1
+
+			    while (sequence < 2) {
+				Thread.yield();
+			    }
+
+			    /*
+			     * Since we can't increment sequence when tester2
+			     * blocks on the getNext call, all we can do is
+			     * bump sequence right before the getNext, and then
+			     * wait a little in this thread for tester2 to
+			     * block.
+			     */
+			    try {
+				Thread.sleep(250);
+			    } catch (InterruptedException IE) {
+			    }
+
+			    if (config.doInsert) {
+				status = db.put
+				    (txn1,
+				     new DatabaseEntry(DUPKEY.getBytes()),
+				     new DatabaseEntry
+				     (config.thread1OpArg.getBytes()));
+			    } else {
+				c = db.openCursor(txn1, CursorConfig.DEFAULT);
+				assertEquals(OperationStatus.SUCCESS,
+					     c.getSearchBoth
+					     (new DatabaseEntry
+					      (DUPKEY.getBytes()),
+					      new DatabaseEntry
+					      (config.thread1OpArg.getBytes()),
+					      LockMode.DEFAULT));
+				assertEquals(OperationStatus.SUCCESS,
+					     c.delete());
+				c.close();
+				c = null;
+			    }
+			    assertEquals(OperationStatus.SUCCESS, status);
+			    sequence++;     // 2 -> 3
+
+			    try {
+				Thread.sleep(1000);
+			    } catch (InterruptedException IE) {
+			    }
+
+			    if (config.doCommit) {
+				txn1.commit();
+			    } else {
+				txn1.abort();
+			    }
+			} catch (DatabaseException DBE) {
+			    if (cursor != null) {
+				cursor.close();
+			    }
+			    if (c != null) {
+				c.close();
+			    }
+			    DBE.printStackTrace();
+			    fail("caught DatabaseException " + DBE);
+			}
+		    }
+		};
+
+	    JUnitThread tester2 =
+		new JUnitThread("testPhantomInsert2") {
+		    public void testBody()
+			throws Throwable {
+
+			Cursor cursor = null;
+			try {
+			    Transaction txn2 =
+				env.beginTransaction(null, null);
+			    txn2.setLockTimeout(LOCK_TIMEOUT);
+			    cursor = db.openCursor(txn2, CursorConfig.DEFAULT);
+
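+			    /* Wait for tester1 to lock its entry (sequence 0 -> 1). */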
+			    while (sequence < 1) {
+				Thread.yield();
+			    }
+
+			    OperationStatus status =
+				cursor.getSearchBoth
+				(new DatabaseEntry(DUPKEY.getBytes()),
+				 new DatabaseEntry
+				 (config.thread2Start.getBytes()),
+				 LockMode.DEFAULT);
+			    assertEquals(OperationStatus.SUCCESS, status);
+
+			    sequence++;           // 1 -> 2
+			    DatabaseEntry nextKey = new DatabaseEntry();
+			    DatabaseEntry nextData = new DatabaseEntry();
+			    try {
+
+				/*
+				 * This will block until tester1 above commits
+				 * or aborts.
+				 */
+				if (config.doGetNext) {
+				    status =
+					cursor.getNextDup(nextKey, nextData,
+							  LockMode.DEFAULT);
+				} else {
+				    status =
+					cursor.getPrevDup(nextKey, nextData,
+							  LockMode.DEFAULT);
+				}
+			    } catch (DatabaseException DBE) {
+				System.out.println("t2 caught " + DBE);
+			    }
+			    assertEquals(3, sequence);
+			    byte[] data = nextData.getData();
+			    assertEquals(config.expectedResult,
+					 new String(data));
+			    cursor.close();
+			    cursor = null;
+			    txn2.commit();
+			} catch (DatabaseException DBE) {
+			    if (cursor != null) {
+				cursor.close();
+			    }
+			    DBE.printStackTrace();
+			    fail("caught DatabaseException " + DBE);
+			}
+		    }
+		};
+
+	    tester1.start();
+	    tester2.start();
+
+	    tester1.finishTest();
+	    tester2.finishTest();
+	} finally {
+	    if (cursor != null) {
+		cursor.close();
+	    }
+	    db.close();
+	    env.close();
+            env = null;
+	}
+    }
+
+    /**
+     * Sets up a small database with a tree containing 2 bins, one with A, B,
+     * and C, and the other with F, G, H, and I.
+     */
+    private void setupDatabaseAndEnv(boolean writeAsDuplicateData)
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+        /* RepeatableRead isolation is required by this test. */
+        TestUtils.clearIsolationLevel(envConfig);
+
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
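+        /*
+         * NODE_MAX and NODE_MAX_DUPTREE of 6 keep nodes small so that the 7
+         * records written below split across the two bins described in the
+         * method comment.
+         */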
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "6");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX_DUPTREE.getName(),
+                                 "6");
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 "1024");
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "true");
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        env = new Environment(envHome, envConfig);
+        Transaction txn = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(txn, "testDB", dbConfig);
+
+	if (writeAsDuplicateData) {
+	    writeDuplicateData(db, txn);
+	} else {
+	    writeData(db, txn);
+	}
+
+	txn.commit();
+    }
+
+    String[] dataStrings = {
+	"A", "B", "C", "F", "G", "H", "I"
+    };
+
+    private void writeData(Database db, Transaction txn)
+	throws DatabaseException {
+
+	for (int i = 0; i < dataStrings.length; i++) {
+	    db.put(txn, new DatabaseEntry(dataStrings[i].getBytes()),
+		   new DatabaseEntry(new byte[10]));
+	}
+    }
+
+    private void writeDuplicateData(Database db, Transaction txn)
+	throws DatabaseException {
+
+	for (int i = 0; i < dataStrings.length; i++) {
+	    db.put(txn, new DatabaseEntry(DUPKEY.getBytes()),
+		   new DatabaseEntry(dataStrings[i].getBytes()));
+	}
+    }
+
+    /**
+     * Insert data over many databases.
+     */
+    private void insertMultiDb(int numDbs)
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+        /* RepeatableRead isolation is required by this test. */
+        TestUtils.clearIsolationLevel(envConfig);
+
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+	    (EnvironmentParams.LOG_FILE_MAX.getName(), "1024");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "true");
+	envConfig.setConfigParam
+	    (EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam
+	    (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "6");
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setAllowCreate(true);
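+        /*
+         * Note: this local handle shadows the instance field; this method
+         * opens and closes its own environment.
+         */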
+        Environment env = new Environment(envHome, envConfig);
+
+        Database[] myDb = new Database[numDbs];
+        Cursor[] cursor = new Cursor[numDbs];
+        Transaction txn =
+	    env.beginTransaction(null, TransactionConfig.DEFAULT);
+
+        /* In a non-replicated environment, the txn id should be positive. */
+        assertTrue(txn.getId() > 0);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        for (int i = 0; i < numDbs; i++) {
+            myDb[i] = env.openDatabase(txn, "testDB" + i, dbConfig);
+            cursor[i] = myDb[i].openCursor(txn, CursorConfig.DEFAULT);
+
+            /*
+             * In a non-replicated environment, the db id should be
+             * positive.
+             */
+            DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(myDb[i]);
+            assertTrue(dbImpl.getId().getId() > 0);
+        }
+
+        /* Insert data in a round robin fashion to spread over log. */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = NUM_RECS; i > 0; i--) {
+            for (int c = 0; c < numDbs; c++) {
+                key.setData(TestUtils.getTestArray(i + c));
+                data.setData(TestUtils.getTestArray(i + c));
+                if (DEBUG) {
+                    System.out.println("i = " + i +
+                                       TestUtils.dumpByteArray(key.getData()));
+                }
+                cursor[c].put(key, data);
+            }
+        }
+
+        for (int i = 0; i < numDbs; i++) {
+            cursor[i].close();
+            myDb[i].close();
+        }
+        txn.commit();
+
+        assertTrue(env.verify(null, System.err));
+        env.close();
+        env = null;
+
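+        /* Reopen the existing environment (running recovery) and re-verify. */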
+        envConfig.setAllowCreate(false);
+        env = new Environment(envHome, envConfig);
+
+        /*
+         * Before running the verifier, run the cleaner to make sure it has
+         * completed.  Otherwise, the cleaner will be running when we call
+         * verify, and open txns will be reported.
+         */
+        env.cleanLog();
+
+        env.verify(null, System.err);
+
+        /* Check each db in turn, using null transactions. */
+        dbConfig.setTransactional(false);
+        dbConfig.setAllowCreate(false);
+        for (int d = 0; d < numDbs; d++) {
+            Database checkDb = env.openDatabase(null, "testDB" + d,
+						dbConfig);
+            Cursor myCursor = checkDb.openCursor(null, CursorConfig.DEFAULT);
+
+            OperationStatus status =
+		myCursor.getFirst(key, data, LockMode.DEFAULT);
+
+            int i = 1;
+            while (status == OperationStatus.SUCCESS) {
+                byte[] expectedKey = TestUtils.getTestArray(i + d);
+                byte[] expectedData = TestUtils.getTestArray(i + d);
+
+                if (DEBUG) {
+                    System.out.println("Database " + d + " Key " + i +
+                                       " expected = " +
+                                       TestUtils.dumpByteArray(expectedKey) +
+                                       " seen = " +
+                                       TestUtils.dumpByteArray(key.getData()));
+                }
+
+                assertTrue("Database " + d + " Key " + i + " expected = " +
+                           TestUtils.dumpByteArray(expectedKey) +
+                           " seen = " +
+                           TestUtils.dumpByteArray(key.getData()),
+                           Arrays.equals(expectedKey, key.getData()));
+                assertTrue("Data " + i, Arrays.equals(expectedData,
+                                                      data.getData()));
+                i++;
+
+                status = myCursor.getNext(key, data, LockMode.DEFAULT);
+            }
+	    myCursor.close();
+            assertEquals("Number recs seen", NUM_RECS, i-1);
+            checkDb.close();
+        }
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseComparatorsTest.java b/test/com/sleepycat/je/DatabaseComparatorsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4add59db04ec510cc9f4aa1ee55d40407177b4b3
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseComparatorsTest.java
@@ -0,0 +1,502 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseComparatorsTest.java,v 1.11.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Comparator;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class DatabaseComparatorsTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private static final boolean DEBUG = false;
+
+    public DatabaseComparatorsTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        env = null;
+        envHome = null;
+    }
+
+    private void openEnv()
+        throws DatabaseException {
+
+        openEnv(false);
+    }
+
+    private void openEnv(boolean transactional)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(transactional);
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "true");
+        /* Prevent compression. */
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runEvictor", "false");
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        env = new Environment(envHome, envConfig);
+    }
+
+    private Database openDb(boolean transactional,
+                            boolean dups,
+                            Class<? extends Comparator<byte[]>> btreeComparator,
+                            Class<? extends Comparator<byte[]>> dupComparator)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setTransactional(transactional);
+        dbConfig.setBtreeComparator(btreeComparator);
+        dbConfig.setDuplicateComparator(dupComparator);
+        return env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    public void testSR12517()
+        throws Exception {
+
+        openEnv();
+        Database db = openDb(false /*transactional*/, false /*dups*/,
+                             ReverseComparator.class, ReverseComparator.class);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Insert 5 items. */
+        for (int i = 0; i < 5; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+	    /* Overwrite with the same key; duplicates are not configured. */
+            IntegerBinding.intToEntry(i * 2, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+        read(db);
+
+        db.close();
+        env.close();
+
+        openEnv();
+        db = openDb(false /*transactional*/, false /*dups*/,
+                    ReverseComparator.class, ReverseComparator.class);
+
+        read(db);
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    private void read(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Iterate */
+        Cursor c = db.openCursor(null, null);
+        int expected = 4;
+        while (c.getNext(key, data, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+            assertEquals(expected, IntegerBinding.entryToInt(key));
+            expected--;
+	    if (DEBUG) {
+		System.out.println("cursor: k=" +
+				   IntegerBinding.entryToInt(key) +
+				   " d=" +
+				   IntegerBinding.entryToInt(data));
+	    }
+        }
+	assertEquals(-1, expected);
+
+        c.close();
+
+        /* Retrieve 5 items */
+        for (int i = 0; i < 5; i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, key, data, LockMode.DEFAULT));
+            assertEquals(i, IntegerBinding.entryToInt(key));
+            assertEquals(i * 2, IntegerBinding.entryToInt(data));
+	    if (DEBUG) {
+		System.out.println("k=" +
+				   IntegerBinding.entryToInt(key) +
+				   " d=" +
+				   IntegerBinding.entryToInt(data));
+	    }
+        }
+    }
+
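+    /**
+     * Orders integer keys in descending order.  testSR12517 relies on this
+     * to check that cursor iteration follows the custom ordering both before
+     * and after the environment is reopened.
+     */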
+    public static class ReverseComparator implements Comparator<byte[]> {
+
+	public ReverseComparator() {
+	}
+
+	public int compare(byte[] o1, byte[] o2) {
+
+            DatabaseEntry arg1 = new DatabaseEntry(o1);
+            DatabaseEntry arg2 = new DatabaseEntry(o2);
+            int val1 = IntegerBinding.entryToInt(arg1);
+            int val2 = IntegerBinding.entryToInt(arg2);
+
+            if (val1 < val2) {
+                return 1;
+            } else if (val1 > val2) {
+                return -1;
+            } else {
+                return 0;
+            }
+	}
+    }
+
+    /**
+     * Checks that when reusing a slot and then aborting the transaction, the
+     * original data is restored when a custom btree comparator is used.
+     * [#15704]
+     *
+     * When using partial keys to reuse a slot with a different--but equal
+     * according to a custom comparator--key, a bug caused corruption of an
+     * existing record after an abort.  The sequence for a non-duplicate
+     * database and a btree comparator that compares only the first integer in
+     * a two integer key is:
+     *
+     * 100 Insert LN key={0,0} txn 1
+     * 110 Commit txn 1
+     * 120 Delete LN key={0,0} txn 2
+     * 130 Insert LN key={0,1} txn 2
+     * 140 Abort txn 2
+     *
+     * When key {0,1} is inserted at LSN 130, it reuses the slot for {0,0}
+     * because these two keys are considered equal by the comparator.  When txn
+     * 2 is aborted, it restores LSN 100 in the slot, but the key in the BIN
+     * stays {0,1}.  Fetching the record after the abort gives key {0,1}.
+     */
+    public void testReuseSlotAbortPartialKey()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialKey(false /*runRecovery*/);
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialKey but runs recovery after the abort.
+     */
+    public void testReuseSlotRecoverPartialKey()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialKey(true /*runRecovery*/);
+    }
+
+    private void doTestReuseSlotPartialKey(boolean runRecovery)
+        throws DatabaseException {
+
+        openEnv(true /*transactional*/);
+        Database db = openDb
+            (true /*transactional*/, false /*dups*/,
+             Partial2PartComparator.class /*btreeComparator*/,
+             null /*dupComparator*/);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key={0,0}/data={0} using auto-commit. */
+        status = db.put(null, entry(0, 0), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0, 1);
+        data = entry(0);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 0);
+        check(data, 0);
+
+        /* Delete, insert key={0,1}/data={1}, abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        status = db.delete(txn, entry(0, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = db.get(txn, entry(0, 0), data, null);
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = db.put(txn, entry(0, 1), entry(1));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0, 0);
+        data = entry(1);
+        status = db.getSearchBoth(txn, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 1);
+        check(data, 1);
+        txn.abort();
+
+        if (runRecovery) {
+            db.close();
+            env.close();
+            env = null;
+            openEnv(true /*transactional*/);
+            db = openDb
+                (true /*transactional*/, false /*dups*/,
+                 Partial2PartComparator.class /*btreeComparator*/,
+                 null /*dupComparator*/);
+        }
+
+        /* Check that we rolled back to key={0,0}/data={0}. */
+        key = entry(0, 1);
+        data = entry(0);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 0);
+        check(data, 0);
+
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialKey but for reuse of duplicate data
+     * slots.  [#15704]
+     *
+     * The sequence for a duplicate database and a duplicate comparator that
+     * compares only the first integer in a two integer data value is:
+     *
+     * 100 Insert LN key={0}/data={0,0} txn 1
+     * 110 Insert LN key={0}/data={1,1} txn 1
+     * 120 Commit txn 1
+     * 130 Delete LN key={0}/data={0,0} txn 2
+     * 140 Insert LN key={0}/data={0,1} txn 2
+     * 150 Abort txn 2
+     *
+     * When data {0,1} is inserted at LSN 140, it reuses the slot for {0,0}
+     * because these two data values are considered equal by the comparator.
+     * When txn 2 is aborted, it restores LSN 100 in the slot, but the data in
+     * the DBIN stays {0,1}.  Fetching the record after the abort gives data
+     * {0,1}.
+     */
+    public void testReuseSlotAbortPartialDup()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialDup(false /*runRecovery*/);
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialDup but runs recovery after the abort.
+     */
+    public void testReuseSlotRecoverPartialDup()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialDup(true /*runRecovery*/);
+    }
+
+    private void doTestReuseSlotPartialDup(boolean runRecovery)
+        throws DatabaseException {
+
+        openEnv(true /*transactional*/);
+        Database db = openDb
+            (true /*transactional*/, true /*dups*/,
+             null /*btreeComparator*/,
+             Partial2PartComparator.class /*dupComparator*/);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key={0}/data={0,0} and data={1,1} in a single transaction. */
+        Transaction txn = env.beginTransaction(null, null);
+        status = db.put(txn, entry(0), entry(0, 0));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = db.put(txn, entry(0), entry(1, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        txn.commit();
+        key = entry(0);
+        data = entry(0, 1);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+
+        /* Delete, insert key={0}/data={0,1}, abort. */
+        txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        key = entry(0);
+        data = entry(0, 1);
+        status = cursor.getSearchBoth(key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+        status = cursor.delete();
+        assertSame(OperationStatus.SUCCESS, status);
+        status = cursor.put(entry(0), entry(0, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0);
+        data = entry(0, 1);
+        status = cursor.getSearchBoth(key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 1);
+        cursor.close();
+        txn.abort();
+
+        if (runRecovery) {
+            db.close();
+            env.close();
+            env = null;
+            openEnv(true /*transactional*/);
+            db = openDb
+                (true /*transactional*/, true /*dups*/,
+                 null /*btreeComparator*/,
+                 Partial2PartComparator.class /*dupComparator*/);
+        }
+
+        /* Check that we rolled back to key={0}/data={0,0}. */
+        key = entry(0);
+        data = entry(0, 1);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Check that we prohibit the case where dups are configured and the btree
+     * comparator does not compare all bytes of the key.  To support this would
+     * require keeping the BIN slot and DIN/DBIN.dupKey fields transactionally
+     * correct.  This is impractical since INs are non-transactional by
+     * design.  [#15704]
+     */
+    public void testDupsWithPartialComparatorNotAllowed()
+        throws DatabaseException {
+
+        openEnv(false /*transactional*/);
+        Database db = openDb
+            (false /*transactional*/, true /*dups*/,
+             Partial2PartComparator.class /*btreeComparator*/,
+             null /*dupComparator*/);
+
+        OperationStatus status;
+
+        /* Insert key={0,0}/data={0} and data={1}. */
+        status = db.put(null, entry(0, 0), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+        try {
+            status = db.put(null, entry(0, 1), entry(1));
+            fail(status.toString());
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().indexOf
+                ("Custom Btree comparator matches two non-identical keys " +
+                 "in a Database with duplicates configured") >= 0);
+        }
+
+        db.close();
+        env.close();
+        env = null;
+    }
+
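+    /** Asserts the entry holds one tuple-encoded int equal to p1. */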
+    private void check(DatabaseEntry entry, int p1) {
+        assertEquals(4, entry.getSize());
+        TupleInput input = TupleBase.entryToInput(entry);
+        assertEquals(p1, input.readInt());
+    }
+
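+    /** Asserts the entry holds two tuple-encoded ints equal to p1 and p2. */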
+    private void check(DatabaseEntry entry, int p1, int p2) {
+        assertEquals(8, entry.getSize());
+        TupleInput input = TupleBase.entryToInput(entry);
+        assertEquals(p1, input.readInt());
+        assertEquals(p2, input.readInt());
+    }
+
+    /*
+    private void dump(Database db, Transaction txn)
+        throws DatabaseException {
+
+        System.out.println("-- dump --");
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+        Cursor c = db.openCursor(txn, null);
+        while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            TupleInput keyInput = TupleBase.entryToInput(key);
+            int keyP1 = keyInput.readInt();
+            int keyP2 = keyInput.readInt();
+            int dataVal = IntegerBinding.entryToInt(data);
+            System.out.println("keyP1=" + keyP1 +
+                               " keyP2=" + keyP2 +
+                               " dataVal=" + dataVal);
+        }
+        c.close();
+    }
+    */
+
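+    /** Builds an entry containing a single tuple-encoded int. */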
+    private DatabaseEntry entry(int p1) {
+        DatabaseEntry entry = new DatabaseEntry();
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        TupleBase.outputToEntry(output, entry);
+        return entry;
+    }
+
+    private DatabaseEntry entry(int p1, int p2) {
+        DatabaseEntry entry = new DatabaseEntry();
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        output.writeInt(p2);
+        TupleBase.outputToEntry(output, entry);
+        return entry;
+    }
+
+    /**
+     * Writes two integers to the byte array.
+     */
+    private void make2PartEntry(int p1, int p2, DatabaseEntry entry) {
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        output.writeInt(p2);
+        TupleBase.outputToEntry(output, entry);
+    }
+
+    /**
+     * Compares only the first integer in the byte arrays.
+     */
+    public static class Partial2PartComparator implements Comparator<byte[]> {
+	public int compare(byte[] o1, byte[] o2) {
+            int val1 = new TupleInput(o1).readInt();
+            int val2 = new TupleInput(o2).readInt();
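+            /*
+             * Subtraction cannot overflow here; the test uses only small
+             * non-negative values.
+             */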
+            return val1 - val2;
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseConfigTest.java b/test/com/sleepycat/je/DatabaseConfigTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4946ed81878e0679da27f04b27a95690d014cd90
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseConfigTest.java
@@ -0,0 +1,1020 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseConfigTest.java,v 1.30.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Basic database configuration testing.
+ */
+public class DatabaseConfigTest extends TestCase {
+    private static final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+
+    public DatabaseConfigTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            /* Close in case we hit an exception and didn't close. */
+            if (env != null) {
+            	env.close();
+            }
+        } catch (DatabaseException e) {
+            /* Ok if already closed */
+        }
+        env = null; // for JUnit, to reduce memory usage when run in a suite.
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Test that we can retrieve a database configuration and that it clones
+     * its configuration appropriately.
+     */
+    public void testConfig()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Make sure that the database keeps its own copy of the
+             * configuration object.
+             */
+            DatabaseConfig dbConfigA = new DatabaseConfig();
+            dbConfigA.setAllowCreate(true);
+            Database dbA = env.openDatabase(null, "foo", dbConfigA);
+
+            /* Change the original dbConfig */
+            dbConfigA.setAllowCreate(false);
+            DatabaseConfig getConfig1 = dbA.getConfig();
+            assertEquals(true, getConfig1.getAllowCreate());
+            assertEquals(false, getConfig1.getSortedDuplicates());
+
+            /*
+             * Change the retrieved config, ought to have no effect on what the
+             * Database is storing.
+             */
+            getConfig1.setSortedDuplicates(true);
+            DatabaseConfig getConfig2 = dbA.getConfig();
+            assertEquals(false, getConfig2.getSortedDuplicates());
+
+            dbA.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testConfigMatching()
+        throws Throwable {
+
+        try {
+	    /* DatabaseConfig matching. */
+
+            DatabaseConfig confA = new DatabaseConfig();
+            DatabaseConfig confB = new DatabaseConfig();
+
+	    try {
+		confA.validate(confB);
+	    } catch (Exception E) {
+		fail("expected valid match");
+	    }
+
+	    try {
+		confB.validate(confA);
+	    } catch (Exception E) {
+		fail("expected valid match");
+	    }
+
+	    try {
+		confA.validate(null); // uses the DEFAULT config
+	    } catch (Exception E) {
+		fail("expected valid match");
+	    }
+
+            confA.setReadOnly(true);
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+
+            confA.setReadOnly(false);
+	    confA.setSortedDuplicates(true);
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confA.setSortedDuplicates(false);
+
+            confA.setOverrideBtreeComparator(true);
+	    confA.setBtreeComparator(TestComparator.class);
+            confB.setOverrideBtreeComparator(true);
+	    confB.setBtreeComparator(TestComparator2.class);
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confA.setBtreeComparator((Class) null);
+            confA.setOverrideBtreeComparator(false);
+	    confB.setBtreeComparator((Class) null);
+            confB.setOverrideBtreeComparator(false);
+
+            confA.setOverrideDuplicateComparator(true);
+	    confA.setDuplicateComparator(TestComparator.class);
+            confB.setOverrideDuplicateComparator(true);
+	    confB.setDuplicateComparator(TestComparator2.class);
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+
+            /* Same tests as above but for serialized comparators. */
+
+            confA.setOverrideBtreeComparator(true);
+	    confA.setBtreeComparator(new TestSerialComparator());
+            confB.setOverrideBtreeComparator(true);
+	    confB.setBtreeComparator(new TestSerialComparator2());
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confA.setBtreeComparator((Comparator) null);
+            confA.setOverrideBtreeComparator(false);
+	    confB.setBtreeComparator((Comparator) null);
+            confB.setOverrideBtreeComparator(false);
+
+            confA.setOverrideDuplicateComparator(true);
+	    confA.setDuplicateComparator(new TestSerialComparator());
+            confB.setOverrideDuplicateComparator(true);
+	    confB.setDuplicateComparator(new TestSerialComparator2());
+	    try {
+		confA.validate(confB);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+
+	    /* SecondaryConfig matching. */
+
+            SecondaryConfig confC = new SecondaryConfig();
+            SecondaryConfig confD = new SecondaryConfig();
+	    confC.setKeyCreator(new SecKeyCreator1());
+	    confD.setKeyCreator(new SecKeyCreator1());
+
+	    try {
+		confC.validate(confD);
+	    } catch (Exception E) {
+		E.printStackTrace();
+		fail("expected valid match");
+	    }
+
+	    try {
+		confD.validate(confC);
+	    } catch (Exception E) {
+		fail("expected valid match");
+	    }
+
+	    try {
+		confC.validate(null);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+
+	    confD.setKeyCreator(new SecKeyCreator2());
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confD.setKeyCreator(new SecKeyCreator1());
+
+	    confD.setMultiKeyCreator(new SecMultiKeyCreator1());
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confD.setMultiKeyCreator(null);
+
+	    confC.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confC.setForeignKeyDeleteAction(ForeignKeyDeleteAction.ABORT);
+
+	    confC.setForeignKeyNullifier(new ForeignKeyNullifier1());
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confC.setForeignKeyNullifier(null);
+
+	    confC.setForeignMultiKeyNullifier(new ForeignMultiKeyNullifier1());
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confC.setForeignMultiKeyNullifier(null);
+
+	    confC.setImmutableSecondaryKey(true);
+	    try {
+		confC.validate(confD);
+		fail("expected exception");
+	    } catch (DatabaseException E) {
+		// ok
+	    }
+	    confC.setImmutableSecondaryKey(false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Make sure we can instantiate a comparator at the time it's set.
+     */
+    public void testComparator()
+        throws Throwable {
+
+        try {
+            /* Can't be instantiated, a nested class */
+            try {
+                DatabaseConfig config = new DatabaseConfig();
+                config.setBtreeComparator(BadComparator1.class);
+                fail("Comparator shouldn't be instantiated");
+            } catch (IllegalArgumentException e) {
+                /* Expected. */
+                if (DEBUG) {
+                    System.out.println(e);
+                }
+            }
+
+            /* No zero-parameter constructor */
+            try {
+                DatabaseConfig config = new DatabaseConfig();
+                config.setBtreeComparator(BadComparator2.class);
+                fail("Comparator shouldn't be instantiated");
+            } catch (IllegalArgumentException e) {
+                /* Expected. */
+                if (DEBUG) {
+                    System.out.println(e);
+                }
+            }
+
+            /* Can't be serialized, not serializable */
+            try {
+                DatabaseConfig config = new DatabaseConfig();
+                config.setBtreeComparator(new BadSerialComparator1());
+                fail("Comparator shouldn't be instantiated");
+            } catch (IllegalArgumentException e) {
+                /* Expected. */
+                if (DEBUG) {
+                    System.out.println(e);
+                }
+            }
+
+            /* Can't be serialized, contains non-serializable field */
+            try {
+                DatabaseConfig config = new DatabaseConfig();
+                config.setBtreeComparator(new BadSerialComparator2());
+                fail("Comparator shouldn't be instantiated");
+            } catch (IllegalArgumentException e) {
+                /* Expected. */
+                if (DEBUG) {
+                    System.out.println(e);
+                }
+            }
+
+            /* Valid comparators */
+            DatabaseConfig config = new DatabaseConfig();
+            config.setBtreeComparator(TestComparator.class);
+            config.setBtreeComparator(TestComparator2.class);
+            config.setBtreeComparator(new TestSerialComparator());
+            config.setBtreeComparator(new TestSerialComparator2());
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test that any conflicts between configuration object settings and the
+     * underlying impl object are detected.
+     */
+    public void testConfigConflict()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Test conflicts of duplicate allowed configuration.
+             */
+
+            /* 1a. Create allowing duplicates. */
+            DatabaseConfig firstConfig = new DatabaseConfig();
+            firstConfig.setAllowCreate(true);
+            firstConfig.setSortedDuplicates(true);
+            Database firstHandle = env.openDatabase(null, "fooDups",
+                                                    firstConfig);
+            /* 1b. Try to open w/no duplicates. */
+            DatabaseConfig secondConfig = new DatabaseConfig();
+            secondConfig.setSortedDuplicates(false);
+            try {
+                env.openDatabase(null, "fooDups", secondConfig);
+                fail("Conflict in duplicates allowed should be detected.");
+            } catch (IllegalArgumentException expected) {
+            }
+
+            firstHandle.close();
+            env.removeDatabase(null, "fooDups");
+
+            /* 2a. Create dis-allowing duplicates. */
+            firstConfig.setSortedDuplicates(false);
+            firstHandle = env.openDatabase(null, "fooDups", firstConfig);
+            /* 2b. Try to open w/duplicates. */
+            secondConfig.setSortedDuplicates(true);
+            try {
+                env.openDatabase(null, "fooDups", secondConfig);
+                fail("Conflict in duplicates allowed should be detected.");
+            } catch (IllegalArgumentException expected) {
+            }
+            firstHandle.close();
+
+            /*
+             * Test conflicts of read only. If the environment is read/write
+             * we should be able to open handles in read only or read/write
+             * mode. If the environment is readonly, the database handles
+             * must also be read only.
+             */
+            DatabaseConfig readOnlyConfig = new DatabaseConfig();
+            readOnlyConfig.setReadOnly(true);
+            Database roHandle = env.openDatabase(null, "fooDups",
+                                                 readOnlyConfig);
+            roHandle.close();
+
+            /* Open the environment in read only mode. */
+            env.close();
+            envConfig = TestUtils.initEnvConfig();
+            envConfig.setReadOnly(true);
+            env = new Environment(envHome, envConfig);
+
+            /* Open a readOnly database handle, should succeed */
+            roHandle = env.openDatabase(null, "fooDups",
+                                        readOnlyConfig);
+            roHandle.close();
+
+            /* Open a read/write database handle, should not succeed. */
+            try {
+                env.openDatabase(null, "fooDups", null);
+                fail("Should not be able to open read/write");
+            } catch (IllegalArgumentException expected) {
+            }
+            env.close();
+
+            /*
+             * Check comparator changes.
+             */
+            /* 1a. Open w/a null comparator */
+            env = new Environment(envHome, null);
+            firstConfig = new DatabaseConfig();
+            firstConfig.setAllowCreate(true);
+            firstHandle = env.openDatabase(null,
+                                           "fooComparator",
+                                           firstConfig);
+            DatabaseConfig firstRetrievedConfig = firstHandle.getConfig();
+            assertEquals(null, firstRetrievedConfig.getBtreeComparator());
+            assertEquals(null, firstRetrievedConfig.getDuplicateComparator());
+
+            /*
+             * 1b. Open a db w/a different comparator, shouldn't take effect
+             * because override is not set.
+             */
+            secondConfig = new DatabaseConfig();
+            Comparator btreeComparator = new TestComparator();
+            Comparator dupComparator = new TestComparator();
+            secondConfig.setBtreeComparator
+                ((Class<Comparator<byte[]>>)btreeComparator.getClass());
+            secondConfig.setDuplicateComparator
+                ((Class<Comparator<byte[]>>)dupComparator.getClass());
+            Database secondHandle =
+		env.openDatabase(null, "fooComparator", secondConfig);
+            DatabaseConfig retrievedConfig = secondHandle.getConfig();
+            assertEquals(null, retrievedConfig.getBtreeComparator());
+            assertEquals(null, retrievedConfig.getDuplicateComparator());
+            secondHandle.close();
+
+            /* Same as above but with a serialized comparator. */
+            secondConfig = new DatabaseConfig();
+            btreeComparator = new TestSerialComparator();
+            dupComparator = new TestSerialComparator();
+            secondConfig.setBtreeComparator(btreeComparator);
+            secondConfig.setDuplicateComparator(dupComparator);
+            secondHandle =
+		env.openDatabase(null, "fooComparator", secondConfig);
+            retrievedConfig = secondHandle.getConfig();
+            assertEquals(null, retrievedConfig.getBtreeComparator());
+            assertEquals(null, retrievedConfig.getDuplicateComparator());
+            secondHandle.close();
+
+            /* 1c. Allow override */
+            secondConfig.setOverrideBtreeComparator(true);
+            secondConfig.setOverrideDuplicateComparator(true);
+            btreeComparator = new TestComparator();
+            dupComparator = new TestComparator();
+            secondConfig.setBtreeComparator
+                ((Class<Comparator<byte[]>>)btreeComparator.getClass());
+
+            secondConfig.setDuplicateComparator
+                ((Class<Comparator<byte[]>>)dupComparator.getClass());
+            secondHandle = env.openDatabase(null,
+                                            "fooComparator",
+                                            secondConfig);
+
+            retrievedConfig = secondHandle.getConfig();
+            assertEquals(btreeComparator.getClass(),
+                         retrievedConfig.getBtreeComparator().getClass());
+            assertEquals(dupComparator.getClass(),
+                         retrievedConfig.getDuplicateComparator().getClass());
+            secondHandle.close();
+
+            /* Same as above but with a serialized comparator. */
+            secondConfig.setOverrideBtreeComparator(true);
+            secondConfig.setOverrideDuplicateComparator(true);
+            btreeComparator = new TestSerialComparator();
+            dupComparator = new TestSerialComparator();
+            secondConfig.setBtreeComparator(btreeComparator);
+            secondConfig.setDuplicateComparator(dupComparator);
+            secondHandle = env.openDatabase(null,
+                                            "fooComparator",
+                                            secondConfig);
+
+            retrievedConfig = secondHandle.getConfig();
+            assertEquals(btreeComparator,
+                         retrievedConfig.getBtreeComparator());
+            assertEquals(dupComparator,
+                         retrievedConfig.getDuplicateComparator());
+            secondHandle.close();
+
+            firstHandle.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            env.close();
+            throw t;
+        }
+    }
+
+    public void testIsTransactional()
+        throws Throwable {
+
+        try {
+            /* Open environment in transactional mode.*/
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /* Create a db, open transactionally with implied auto-commit. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database myDb = env.openDatabase(null, "testDB", dbConfig);
+            assertTrue(myDb.isTransactional());
+            assertTrue(myDb.getConfig().getTransactional());
+            myDb.close();
+
+            /* Open an existing db, can open it non-transactionally. */
+            dbConfig.setTransactional(false);
+            myDb = env.openDatabase(null, "testDB", null);
+            assertFalse(myDb.isTransactional());
+            assertFalse(myDb.getConfig().getTransactional());
+            myDb.close();
+
+            /* Open another db, pass an explicit transaction. */
+            dbConfig.setTransactional(true);
+            Transaction txn = env.beginTransaction(null, null);
+            myDb = env.openDatabase(txn, "testDB2", dbConfig);
+            assertTrue(myDb.isTransactional());
+            assertTrue(myDb.getConfig().getTransactional());
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            key.setData(TestUtils.getTestArray(0));
+            data.setData(TestUtils.getTestArray(0));
+            try {
+                myDb.put(null, key, data);
+            } catch (DatabaseException DBE) {
+                fail("didn't expect DatabaseException, implied autocommit");
+            }
+
+            key.setData(TestUtils.getTestArray(1));
+            data.setData(TestUtils.getTestArray(1));
+            try {
+                myDb.put(txn, key, data);
+            } catch (DatabaseException DBE) {
+                fail("didn't expect DatabaseException with txn passed");
+            }
+
+            try {
+                myDb.get(txn, key, data, LockMode.DEFAULT);
+            } catch (DatabaseException DBE) {
+                fail("didn't expect DatabaseException with txn passed");
+            }
+
+            txn.commit();
+
+            try {
+                myDb.get(null, key, data, LockMode.DEFAULT);
+            } catch (DatabaseException DBE) {
+                fail("didn't expect DatabaseException because no txn passed");
+            }
+
+            myDb.close();
+
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testOpenReadOnly()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            Transaction txn = env.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database myDb = env.openDatabase(txn, "testDB2", dbConfig);
+
+            key.setData(TestUtils.getTestArray(0));
+            data.setData(TestUtils.getTestArray(0));
+            try {
+                myDb.put(txn, key, data);
+            } catch (DatabaseException DBE) {
+                fail("unexpected DatabaseException during put");
+            }
+
+            txn.commit();
+            myDb.close();
+
+            dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setReadOnly(true);
+            txn = env.beginTransaction(null, null);
+            myDb = env.openDatabase(txn, "testDB2", dbConfig);
+            assertTrue(myDb.isTransactional());
+            assertTrue(myDb.getConfig().getTransactional());
+
+            key.setData(TestUtils.getTestArray(0));
+            data.setData(TestUtils.getTestArray(0));
+            try {
+                myDb.put(txn, key, data);
+                fail
+                ("expected UnsupportedOperationException because open RDONLY");
+            } catch (UnsupportedOperationException expected) {
+            }
+
+            key.setData(TestUtils.getTestArray(0));
+            data.setData(TestUtils.getTestArray(0));
+            assertEquals(OperationStatus.SUCCESS,
+                         myDb.get(txn, key, data, LockMode.DEFAULT));
+
+            Cursor cursor = myDb.openCursor(txn, null);
+
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getFirst(key, data, LockMode.DEFAULT));
+
+            try {
+                cursor.delete();
+                fail("expected Exception from delete on RD_ONLY db");
+            } catch (DatabaseException DBE) {
+            }
+
+            key.setData(TestUtils.getTestArray(1));
+            data.setData(TestUtils.getTestArray(1));
+            try {
+                myDb.put(txn, key, data);
+                fail
+              ("expected UnsupportedOperationException because open RDONLY");
+            } catch (UnsupportedOperationException expected) {
+            }
+
+	    cursor.close();
+            txn.commit();
+            myDb.close();
+
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test exclusive creation.
+     */
+    public void testExclusive()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+            /*
+             * Open the database with exclusiveCreate set; a second attempt
+             * to create the same database must then fail.
+             */
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setExclusiveCreate(true);
+
+            /* Should succeed and create the database. */
+            Database dbA = env.openDatabase(null, "foo", dbConfig);
+            dbA.close();
+
+            /* Should not succeed, because the database exists. */
+            try {
+                env.openDatabase(null, "foo", dbConfig);
+                fail("Database already exists");
+            } catch (DatabaseException e) {
+            }
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * Test that changing the Btree comparator really writes it to disk.
+     */
+    public void testConfigOverrideUpdateSR15743()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Make sure that the database keeps its own copy of the
+             * configuration object.
+             */
+            DatabaseConfig dbConfigA = new DatabaseConfig();
+            dbConfigA.setOverrideBtreeComparator(false);
+	    dbConfigA.setBtreeComparator(TestComparator.class);
+            dbConfigA.setAllowCreate(true);
+            Database dbA = env.openDatabase(null, "foo", dbConfigA);
+
+            /* Change the original dbConfig */
+            dbConfigA.setBtreeComparator(TestComparator2.class);
+            DatabaseConfig getConfig1 = dbA.getConfig();
+            assertEquals(TestComparator.class,
+			 getConfig1.getBtreeComparator().getClass());
+
+            /*
+             * Change the retrieved config, ought to have no effect on what the
+             * Database is storing.
+             */
+            getConfig1.setBtreeComparator(TestComparator2.class);
+            DatabaseConfig getConfig2 = dbA.getConfig();
+            assertEquals(TestComparator.class,
+			 getConfig2.getBtreeComparator().getClass());
+
+            dbA.close();
+            env.close();
+
+	    /* Ensure new comparator is written to disk. */
+            envConfig = TestUtils.initEnvConfig();
+            env = new Environment(envHome, envConfig);
+
+            dbConfigA = new DatabaseConfig();
+	    /* Change the comparator. */
+            dbConfigA.setOverrideBtreeComparator(true);
+	    dbConfigA.setBtreeComparator(TestComparator2.class);
+            dbA = env.openDatabase(null, "foo", dbConfigA);
+
+            getConfig2 = dbA.getConfig();
+            assertEquals(TestComparator2.class,
+			 getConfig2.getBtreeComparator().getClass());
+
+            dbA.close();
+            env.close();
+
+	    /* Read it back during recovery to ensure it was written. */
+            envConfig = TestUtils.initEnvConfig();
+            env = new Environment(envHome, envConfig);
+
+            dbConfigA = new DatabaseConfig();
+            dbA = env.openDatabase(null, "foo", dbConfigA);
+            getConfig2 = dbA.getConfig();
+            assertEquals(TestComparator2.class,
+			 getConfig2.getBtreeComparator().getClass());
+
+	    /* Create a root for the tree. */
+	    dbA.put(null,
+		    new DatabaseEntry(new byte[1]),
+		    new DatabaseEntry(new byte[1]));
+
+            dbA.close();
+            env.close();
+
+	    /* Change it to a third one when there is a root present. */
+            envConfig = TestUtils.initEnvConfig();
+            env = new Environment(envHome, envConfig);
+
+            dbConfigA = new DatabaseConfig();
+	    /* Change the comparator. */
+            dbConfigA.setOverrideBtreeComparator(true);
+	    dbConfigA.setBtreeComparator(TestComparator3.class);
+            dbA = env.openDatabase(null, "foo", dbConfigA);
+            getConfig2 = dbA.getConfig();
+            assertEquals(TestComparator3.class,
+			 getConfig2.getBtreeComparator().getClass());
+	    dbA.close();
+	    env.close();
+
+	    /* Read it back during recovery to ensure it was written. */
+            envConfig = TestUtils.initEnvConfig();
+            env = new Environment(envHome, envConfig);
+
+            dbConfigA = new DatabaseConfig();
+            dbA = env.openDatabase(null, "foo", dbConfigA);
+            getConfig2 = dbA.getConfig();
+            assertEquals(TestComparator3.class,
+			 getConfig2.getBtreeComparator().getClass());
+	    dbA.close();
+	    env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * This Comparator can't be instantiated because it is a private,
+     * non-static inner class.
+     */
+    private class BadComparator1 implements Comparator<byte[]> {
+        public BadComparator1(int foo) {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * This Comparator can't be instantiated because it doesn't have a
+     * zero-parameter constructor.
+     */
+    public static class BadComparator2 implements Comparator<byte[]> {
+        public BadComparator2(int i) {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * OK comparator for setting comparators.
+     */
+    public static class TestComparator implements Comparator<byte[]> {
+        public TestComparator() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * OK comparator for setting comparators.
+     */
+    public static class TestComparator2 implements Comparator<byte[]> {
+        public TestComparator2() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * OK comparator for setting comparators.
+     */
+    public static class TestComparator3 implements Comparator<byte[]> {
+        public TestComparator3() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * This Comparator can't be serialized because it does not implement
+     * Serializable.
+     */
+    public class BadSerialComparator1 implements Comparator<byte[]> {
+
+        public BadSerialComparator1() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * This Comparator can't be serialized because it contains a reference to
+     * an object that's not serializable.
+     */
+    @SuppressWarnings("serial")
+    public class BadSerialComparator2 implements Comparator<byte[]>,
+                                                 Serializable {
+
+        private BadSerialComparator1 o = new BadSerialComparator1();
+
+        public BadSerialComparator2() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    /*
+     * OK comparator for setting comparators -- private class, private
+     * constructor, and serializable fields are allowed.
+     */
+    @SuppressWarnings("serial")
+    private static class TestSerialComparator
+        implements Comparator<byte[]>, Serializable {
+
+        private String s = "sss";
+
+        private TestSerialComparator() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+
+        public boolean equals(Object other) {
+            TestSerialComparator o = (TestSerialComparator) other;
+            return s.equals(o.s);
+        }
+    }
+
+    /*
+     * OK comparator for setting comparators.
+     */
+    @SuppressWarnings("serial")
+    public static class TestSerialComparator2
+        implements Comparator<byte[]>, Serializable {
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    public static class SecKeyCreator1 implements SecondaryKeyCreator {
+	public boolean createSecondaryKey(SecondaryDatabase secondary,
+					  DatabaseEntry key,
+					  DatabaseEntry data,
+					  DatabaseEntry result)
+	    throws DatabaseException {
+
+	    return true;
+	}
+
+	public boolean equals(Object o) {
+	    if (o == null) {
+		return false;
+	    }
+	    return (o.getClass() == getClass());
+	}
+    }
+
+    public static class SecKeyCreator2 implements SecondaryKeyCreator {
+	public boolean createSecondaryKey(SecondaryDatabase secondary,
+					  DatabaseEntry key,
+					  DatabaseEntry data,
+					  DatabaseEntry result)
+	    throws DatabaseException {
+
+	    return true;
+	}
+
+	public boolean equals(Object o) {
+	    if (o == null) {
+		return false;
+	    }
+	    return (o.getClass() == getClass());
+	}
+    }
+
+    public static class SecMultiKeyCreator1
+        implements SecondaryMultiKeyCreator {
+	public void createSecondaryKeys(SecondaryDatabase secondary,
+					DatabaseEntry key,
+					DatabaseEntry data,
+					Set results)
+	    throws DatabaseException {
+	}
+
+	public boolean equals(Object o) {
+	    if (o == null) {
+		return false;
+	    }
+	    return (o.getClass() == getClass());
+	}
+    }
+
+    public static class ForeignKeyNullifier1 implements ForeignKeyNullifier {
+	public boolean nullifyForeignKey(SecondaryDatabase secondary,
+					 DatabaseEntry data)
+	    throws DatabaseException {
+
+	    return true;
+	}
+    }
+
+    public static class ForeignMultiKeyNullifier1
+        implements ForeignMultiKeyNullifier {
+	public boolean nullifyForeignKey(SecondaryDatabase secondary,
+					 DatabaseEntry key,
+					 DatabaseEntry data,
+					 DatabaseEntry secKey)
+	    throws DatabaseException {
+
+	    return true;
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseEntryTest.java b/test/com/sleepycat/je/DatabaseEntryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..2901f45bb5c88d83064c02dc4f23676d56a8767d
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseEntryTest.java
@@ -0,0 +1,334 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseEntryTest.java,v 1.34.2.3 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class DatabaseEntryTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public DatabaseEntryTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+	throws IOException {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
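+    /**
+     * Exercise the DatabaseEntry constructors, setData(), and the
+     * offset/size reset behavior when the data array is replaced.
+     */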
+    public void testBasic()
+        throws Exception {
+
+        /* Constructor that takes a byte array. */
+        int size = 10;
+        byte[] foo = new byte[size];
+        byte val = 1;
+        Arrays.fill(foo, val);
+
+        DatabaseEntry dbtA = new DatabaseEntry(foo);
+        assertEquals(foo.length, dbtA.getSize());
+        assertTrue(Arrays.equals(foo, dbtA.getData()));
+
+        /* Set the data to null */
+        dbtA.setData(null);
+        assertEquals(0, dbtA.getSize());
+        assertFalse(Arrays.equals(foo, dbtA.getData()));
+
+        /* Constructor that sets the data later */
+        DatabaseEntry dbtLater = new DatabaseEntry();
+        assertTrue(dbtLater.getData() == null);
+        assertEquals(0, dbtLater.getSize());
+        dbtLater.setData(foo);
+        assertTrue(Arrays.equals(foo, dbtLater.getData()));
+
+        /* Set offset, then reset data and offset should be reset. */
+        DatabaseEntry dbtOffset = new DatabaseEntry(foo, 1, 1);
+        assertEquals(1, dbtOffset.getOffset());
+        assertEquals(1, dbtOffset.getSize());
+        dbtOffset.setData(foo);
+        assertEquals(0, dbtOffset.getOffset());
+        assertEquals(foo.length, dbtOffset.getSize());
+    }
+
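+    /**
+     * Write an entry using a non-zero offset/size window and verify that the
+     * data read back is rebased at offset 0 with the windowed contents.
+     */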
+    public void testOffset()
+	throws DatabaseException {
+
+	final int N_BYTES = 30;
+
+        openDb(false);
+
+	DatabaseEntry originalKey = new DatabaseEntry(new byte[N_BYTES]);
+	DatabaseEntry originalData = new DatabaseEntry(new byte[N_BYTES]);
+	for (int i = 0; i < N_BYTES; i++) {
+	    originalKey.getData()[i] = (byte) i;
+	    originalData.getData()[i] = (byte) i;
+	}
+
+	originalKey.setSize(10);
+	originalKey.setOffset(10);
+	originalData.setSize(10);
+	originalData.setOffset(10);
+
+	db.put(null, originalKey, originalData);
+
+	Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT);
+
+	DatabaseEntry foundKey = new DatabaseEntry();
+	DatabaseEntry foundData = new DatabaseEntry();
+
+	assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+
+	assertEquals(0, foundKey.getOffset());
+	assertEquals(0, foundData.getOffset());
+	assertEquals(10, foundKey.getSize());
+	assertEquals(10, foundData.getSize());
+	for (int i = 0; i < 10; i++) {
+	    assertEquals(i + 10, foundKey.getData()[i]);
+	    assertEquals(i + 10, foundData.getData()[i]);
+	}
+
+	cursor.close();
+        closeDb();
+    }
+
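+    /**
+     * Verify partial-data semantics: a partial put replaces the partial
+     * window of the (possibly empty) record with the supplied data, partial
+     * reads return only the requested window, and partial keys are rejected
+     * by all put variants.
+     */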
+    public void testPartial()
+	throws DatabaseException {
+
+        openDb(false);
+
+	DatabaseEntry originalKey = new DatabaseEntry(new byte[20]);
+	DatabaseEntry originalData = new DatabaseEntry(new byte[20]);
+	for (int i = 0; i < 20; i++) {
+	    originalKey.getData()[i] = (byte) i;
+	    originalData.getData()[i] = (byte) i;
+	}
+
+	originalData.setPartial(true);
+	originalData.setPartialLength(10);
+	originalData.setPartialOffset(10);
+
+	db.put(null, originalKey, originalData);
+
+	Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT);
+
+	DatabaseEntry foundKey = new DatabaseEntry();
+	DatabaseEntry foundData = new DatabaseEntry();
+
+	assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+
+	assertEquals(0, foundKey.getOffset());
+	assertEquals(20, foundKey.getSize());
+	for (int i = 0; i < 20; i++) {
+	    assertEquals(i, foundKey.getData()[i]);
+	}
+
+	assertEquals(0, foundData.getOffset());
+	assertEquals(30, foundData.getSize());
+	for (int i = 0; i < 10; i++) {
+	    assertEquals(0, foundData.getData()[i]);
+	}
+	for (int i = 0; i < 20; i++) {
+	    assertEquals(i, foundData.getData()[i + 10]);
+	}
+
+        foundKey.setPartial(5, 10, true);
+        foundData.setPartial(5, 20, true);
+
+	assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+	assertEquals(0, foundKey.getOffset());
+	assertEquals(10, foundKey.getSize());
+	for (int i = 0; i < 10; i++) {
+	    assertEquals(i + 5, foundKey.getData()[i]);
+	}
+
+	assertEquals(0, foundData.getOffset());
+	assertEquals(20, foundData.getSize());
+	for (int i = 0; i < 5; i++) {
+	    assertEquals(0, foundData.getData()[i]);
+	}
+	for (int i = 0; i < 15; i++) {
+	    assertEquals(i, foundData.getData()[i + 5]);
+	}
+
+        /* Check that partial keys are not allowed on put(). */
+
+	originalKey.setPartial(true);
+	originalKey.setPartialLength(10);
+	originalKey.setPartialOffset(10);
+
+        try {
+            db.put(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            db.putNoOverwrite(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            db.putNoDupData(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        try {
+            cursor.put(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            cursor.putNoOverwrite(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            cursor.putNoDupData(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+	cursor.close();
+        closeDb();
+    }
+
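+    /**
+     * Verify that Cursor.putCurrent() with a partial data entry replaces the
+     * partial window of the existing record with the supplied data.
+     */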
+    public void testPartialCursorPuts()
+	throws DatabaseException {
+
+        openDb(false);
+
+	DatabaseEntry originalKey = new DatabaseEntry(new byte[20]);
+	DatabaseEntry originalData = new DatabaseEntry(new byte[20]);
+	for (int i = 0; i < 20; i++) {
+	    originalKey.getData()[i] = (byte) i;
+	    originalData.getData()[i] = (byte) i;
+	}
+
+	/* Put 20 bytes of key and data. */
+	db.put(null, originalKey, originalData);
+
+	Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT);
+
+	DatabaseEntry foundKey = new DatabaseEntry();
+	DatabaseEntry foundData = new DatabaseEntry();
+
+	assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+
+	assertEquals(0, foundKey.getOffset());
+	assertEquals(20, foundKey.getSize());
+	for (int i = 0; i < 20; i++) {
+	    assertEquals(i, foundKey.getData()[i]);
+	}
+
+	assertEquals(0, foundData.getOffset());
+	assertEquals(20, foundData.getSize());
+
+	for (int i = 0; i < 20; i++) {
+	    assertEquals(i, foundData.getData()[i]);
+	}
+
+	for (int i = 0; i < 10; i++) {
+	    foundData.getData()[i] = (byte) (i + 50);
+	}
+
+	foundData.setPartial(true);
+	foundData.setPartialLength(10);
+	foundData.setPartialOffset(10);
+
+	cursor.putCurrent(foundData);
+
+	foundData = new DatabaseEntry();
+
+	assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+	assertEquals(0, foundKey.getOffset());
+	assertEquals(20, foundKey.getSize());
+	assertEquals(0, foundData.getOffset());
+	assertEquals(30, foundData.getSize());
+	for (int i = 0; i < 10; i++) {
+	    assertEquals(foundData.getData()[i], i);
+	    assertEquals(foundData.getData()[i + 10], (i + 50));
+	    assertEquals(foundData.getData()[i + 20], (i + 10));
+	}
+
+	cursor.close();
+        closeDb();
+    }
+
+    public void testToString() {
+        DatabaseEntry entry = new DatabaseEntry(new byte[] {1, 2, 3}, 1, 2);
+        String s1 = entry.toString();
+	entry.setPartial(3, 4, true);
+        String s2 = entry.toString();
+
+        /*
+         * Normally leave this disabled. Enable it to manually look at the
+         * toString output and ensure it is valid XML.
+         */
+        if (false) {
+            System.out.println(s1);
+            System.out.println(s2);
+        }
+    }
+
+    private void openDb(boolean dups)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 "1024");
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "true");
+	envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "6");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    private void closeDb()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseTest.java b/test/com/sleepycat/je/DatabaseTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..42ba349bd36aabeb67941ee4cd000a4e2d12f100
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseTest.java
@@ -0,0 +1,1451 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DatabaseTest.java,v 1.111.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Basic database operations, excluding configuration testing.
+ */
+public class DatabaseTest extends TestCase {
+    private static final boolean DEBUG = false;
+    private static final int NUM_RECS = 257;
+    private static final int NUM_DUPS = 10;
+
+    private File envHome;
+    private Environment env;
+
+    public DatabaseTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            /* Close in case we hit an exception and didn't close */
+            if (env != null) {
+		env.close();
+            }
+        } catch (DatabaseException e) {
+            /* Ok if already closed */
+        }
+        env = null; // for JUnit, to reduce memory usage when run in a suite.
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Make sure we can't create a transactional cursor on a non-transactional
+     * database.
+     */
+    public void testCursor()
+        throws Exception {
+
+        Environment txnalEnv = null;
+        Database nonTxnalDb = null;
+        Cursor txnalCursor = null;
+        Transaction txn = null;
+
+        try {
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            txnalEnv = new Environment(envHome, envConfig);
+
+            // Make a db and open it
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(false);
+            nonTxnalDb = txnalEnv.openDatabase(null, "testDB", dbConfig);
+
+            // We should not be able to open a txnal cursor.
+            txn = txnalEnv.beginTransaction(null, null);
+            try {
+                txnalCursor = nonTxnalDb.openCursor(txn, null);
+                fail("Opening a txnal cursor on a non-txnal db is invalid.");
+            } catch (DatabaseException e) {
+                // expected
+            }
+        } finally {
+            if (txn != null) {
+                txn.abort();
+            }
+            if (txnalCursor != null) {
+                txnalCursor.close();
+            }
+            if (nonTxnalDb != null) {
+                nonTxnalDb.close();
+            }
+            if (txnalEnv != null) {
+                txnalEnv.close();
+            }
+
+        }
+    }
+
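+    /**
+     * Overwrite existing records with put() and verify that both get() and
+     * getSearchBoth() return the data most recently written.
+     */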
+    public void testPutExisting()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.get(txn, key, getData, LockMode.DEFAULT));
+                assertEquals(0, Key.compareKeys(data.getData(),
+						getData.getData(), null));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                assertEquals(OperationStatus.SUCCESS, myDb.getSearchBoth
+                             (txn, key, getData, LockMode.DEFAULT));
+                assertEquals(0, Key.compareKeys(data.getData(),
+						getData.getData(), null));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * Test that zero-length data always returns the same (static) byte[].
+     */
+    public void testZeroLengthData()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+	    byte[] appZLBA = new byte[0];
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(appZLBA);
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.get(txn, key, getData, LockMode.DEFAULT));
+		assertFalse(getData.getData() == appZLBA);
+		assertTrue(getData.getData() ==
+			   LogUtils.ZERO_LENGTH_BYTE_ARRAY);
+                assertEquals(0, Key.compareKeys(data.getData(),
+						getData.getData(), null));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+
+	    /*
+	     * Read back from the log.
+	     */
+
+            myDb = initEnvAndDb(true, false, true, false, null);
+            key = new DatabaseEntry();
+            data = new DatabaseEntry();
+            getData = new DatabaseEntry();
+	    txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.get(txn, key, getData, LockMode.DEFAULT));
+		assertTrue(getData.getData() ==
+			   LogUtils.ZERO_LENGTH_BYTE_ARRAY);
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
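+    /**
+     * Delete records in a non-duplicates database; a subsequent get() must
+     * return KEYEMPTY or NOTFOUND, and a second delete() must return
+     * NOTFOUND.
+     */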
+    public void testDeleteNonDup()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(txn, key));
+                OperationStatus status =
+		    myDb.get(txn, key, getData, LockMode.DEFAULT);
+                if (status != OperationStatus.KEYEMPTY &&
+                    status != OperationStatus.NOTFOUND) {
+                    fail("invalid Database.get return: " + status);
+                }
+                assertEquals(OperationStatus.NOTFOUND,
+			     myDb.delete(txn, key));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
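+    /**
+     * Delete records in a duplicates database; a single delete(txn, key)
+     * removes all duplicates for that key.
+     */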
+    public void testDeleteDup()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                for (int j = 0; j < NUM_DUPS; j++) {
+                    data.setData(TestUtils.getTestArray(i + j));
+                    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(txn, key, data));
+                }
+            }
+	    txn.commit();
+
+	    txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(txn, key));
+                OperationStatus status =
+		    myDb.get(txn, key, getData, LockMode.DEFAULT);
+                if (status != OperationStatus.KEYEMPTY &&
+                    status != OperationStatus.NOTFOUND) {
+                    fail("invalid Database.get return");
+                }
+                assertEquals(OperationStatus.NOTFOUND,
+			     myDb.delete(txn, key));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /* Remove until 14264 is resolved.
+    public void testDeleteDupWithData()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                for (int j = 0; j < NUM_DUPS; j++) {
+                    data.setData(TestUtils.getTestArray(i + j));
+                    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(txn, key, data));
+                }
+            }
+	    txn.commit();
+
+	    txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+		for (int j = 0; j < NUM_DUPS; j++) {
+		    data.setData(TestUtils.getTestArray(i + j));
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.delete(txn, key, data));
+		    OperationStatus status =
+			myDb.getSearchBoth(txn, key, data, LockMode.DEFAULT);
+		    if (status != OperationStatus.KEYEMPTY &&
+			status != OperationStatus.NOTFOUND) {
+			fail("invalid Database.get return");
+		    }
+		    assertEquals(OperationStatus.NOTFOUND,
+				 myDb.delete(txn, key, data));
+		}
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDeleteDupWithSingleRecord()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+	    txn.commit();
+
+	    txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+		data.setData(TestUtils.getTestArray(i));
+		assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(txn, key, data));
+		OperationStatus status =
+		    myDb.getSearchBoth(txn, key, data, LockMode.DEFAULT);
+		if (status != OperationStatus.KEYEMPTY &&
+		    status != OperationStatus.NOTFOUND) {
+		    fail("invalid Database.get return");
+		}
+		assertEquals(OperationStatus.NOTFOUND,
+			     myDb.delete(txn, key, data));
+	    }
+	    txn.commit();
+            myDb.close();
+            env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+    */
+
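+    /**
+     * Insert two different data values under the same key in a duplicates
+     * database; both puts must succeed.
+     */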
+    public void testPutDuplicate()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+                data.setData(TestUtils.getTestArray(i * 2));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
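+    /**
+     * putNoDupData() must return KEYEXIST for an exact key/data duplicate
+     * and SUCCESS for a new data value under the same key.
+     */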
+    public void testPutNoDupData()
+        throws Throwable {
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoDupData(txn, key, data));
+                assertEquals(OperationStatus.KEYEXIST,
+                             myDb.putNoDupData(txn, key, data));
+                data.setData(TestUtils.getTestArray(i+1));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoDupData(txn, key, data));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
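+    /**
+     * putNoOverwrite() must return KEYEXIST once a key exists in a
+     * non-duplicates database.
+     */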
+    public void testPutNoOverwriteInANoDupDb()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoOverwrite(txn, key, data));
+                assertEquals(OperationStatus.KEYEXIST,
+			     myDb.putNoOverwrite(txn, key, data));
+            }
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testPutNoOverwriteInADupDbTxn()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = NUM_RECS; i > 0; i--) {
+		Transaction txn1 = env.beginTransaction(null, null);
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoOverwrite(txn1, key, data));
+                assertEquals(OperationStatus.KEYEXIST,
+			     myDb.putNoOverwrite(txn1, key, data));
+                data.setData(TestUtils.getTestArray(i << 1));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn1, key, data));
+                data.setData(TestUtils.getTestArray(i << 2));
+                assertEquals(OperationStatus.KEYEXIST,
+			     myDb.putNoOverwrite(txn1, key, data));
+		assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(txn1, key));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoOverwrite(txn1, key, data));
+		txn1.commit();
+            }
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testPutNoOverwriteInADupDbNoTxn()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, false, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoOverwrite(null, key, data));
+                assertEquals(OperationStatus.KEYEXIST,
+			     myDb.putNoOverwrite(null, key, data));
+                data.setData(TestUtils.getTestArray(i << 1));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(null, key, data));
+                data.setData(TestUtils.getTestArray(i << 2));
+                assertEquals(OperationStatus.KEYEXIST,
+			     myDb.putNoOverwrite(null, key, data));
+		assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(null, key));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.putNoOverwrite(null, key, data));
+            }
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
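+    /**
+     * Database.count() must report the number of records inserted.
+     */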
+    public void testDatabaseCount()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+
+            long count = myDb.count();
+            assertEquals(NUM_RECS, count);
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDeferredWriteDatabaseCount()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, true, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(null, key, data));
+            }
+
+            long count = myDb.count();
+            assertEquals(NUM_RECS, count);
+
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
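+    /**
+     * Exercise getStats(): fast stats return the last computed node counts
+     * (initially zero) without walking the tree, while a full getStats(null)
+     * recomputes them.  The exact IN/BIN counts asserted below depend on the
+     * tree fan-out configured by initEnvAndDb.  Also exercises the
+     * BtreeStats setters and getters directly.
+     */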
+    public void testStat()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+
+            BtreeStats stat = (BtreeStats)
+		myDb.getStats(TestUtils.FAST_STATS);
+
+	    assertEquals(0, stat.getInternalNodeCount());
+            assertEquals(0, stat.getDuplicateInternalNodeCount());
+            assertEquals(0, stat.getBottomInternalNodeCount());
+            assertEquals(0, stat.getDuplicateBottomInternalNodeCount());
+            assertEquals(0, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(0, stat.getDupCountLeafNodeCount());
+            assertEquals(0, stat.getMainTreeMaxDepth());
+            assertEquals(0, stat.getDuplicateTreeMaxDepth());
+
+            stat = (BtreeStats) myDb.getStats(null);
+	
+            assertEquals(15, stat.getInternalNodeCount());
+            assertEquals(0, stat.getDuplicateInternalNodeCount());
+            assertEquals(52, stat.getBottomInternalNodeCount());
+            assertEquals(0, stat.getDuplicateBottomInternalNodeCount());
+            assertEquals(NUM_RECS, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(0, stat.getDupCountLeafNodeCount());
+            assertEquals(4, stat.getMainTreeMaxDepth());
+            assertEquals(0, stat.getDuplicateTreeMaxDepth());
+
+            stat = (BtreeStats) myDb.getStats(TestUtils.FAST_STATS);
+
+            assertEquals(15, stat.getInternalNodeCount());
+            assertEquals(52, stat.getBottomInternalNodeCount());
+            assertEquals(NUM_RECS, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(0, stat.getDupCountLeafNodeCount());
+            assertEquals(4, stat.getMainTreeMaxDepth());
+            assertEquals(0, stat.getDuplicateTreeMaxDepth());
+
+	    long[] levelsTest = new long[]{ 12, 23, 34, 45, 56,
+		                            67, 78, 89, 90, 0 };
+	    BtreeStats bts = new BtreeStats();
+
+	    bts.setBottomInternalNodeCount(20);
+	    bts.setDuplicateBottomInternalNodeCount(30);
+	    bts.setDeletedLeafNodeCount(40);
+	    bts.setDupCountLeafNodeCount(50);
+	    bts.setInternalNodeCount(60);
+	    bts.setDuplicateInternalNodeCount(70);
+	    bts.setLeafNodeCount(80);
+	    bts.setMainTreeMaxDepth(5);
+	    bts.setDuplicateTreeMaxDepth(2);
+	    bts.setINsByLevel(levelsTest);
+	    bts.setBINsByLevel(levelsTest);
+	    bts.setDINsByLevel(levelsTest);
+	    bts.setDBINsByLevel(levelsTest);
+
+	    assertEquals(20, bts.getBottomInternalNodeCount());
+	    assertEquals(30, bts.getDuplicateBottomInternalNodeCount());
+	    assertEquals(40, bts.getDeletedLeafNodeCount());
+	    assertEquals(50, bts.getDupCountLeafNodeCount());
+	    assertEquals(60, bts.getInternalNodeCount());
+	    assertEquals(70, bts.getDuplicateInternalNodeCount());
+	    assertEquals(80, bts.getLeafNodeCount());
+	    assertEquals(5, bts.getMainTreeMaxDepth());
+	    assertEquals(2, bts.getDuplicateTreeMaxDepth());
+
+	    for(int i = 0; i < levelsTest.length; i++) {
+		assertEquals(levelsTest[i], bts.getINsByLevel()[i]);
+	    }
+
+	    for(int i = 0; i < levelsTest.length; i++) {
+		assertEquals(levelsTest[i], bts.getBINsByLevel()[i]);
+	    }
+
+	    for(int i = 0; i < levelsTest.length; i++) {
+	        assertEquals(levelsTest[i], bts.getDINsByLevel()[i]);
+	    }
+
+	    for(int i = 0; i < levelsTest.length; i++) {
+	        assertEquals(levelsTest[i], bts.getDBINsByLevel()[i]);
+	    }
+	
+	    bts.toString();
+	    bts.setBottomInternalNodeCount(0);
+	    bts.setDuplicateBottomInternalNodeCount(0);
+	    bts.setInternalNodeCount(0);
+	    bts.setDuplicateInternalNodeCount(0);
+
+	    assertEquals(0, bts.getBottomInternalNodeCount());
+	    assertEquals(0, bts.getDuplicateBottomInternalNodeCount());
+	    assertEquals(0, bts.getInternalNodeCount());
+	    assertEquals(0, bts.getDuplicateInternalNodeCount());
+	    bts.toString();
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDatabaseCountEmptyDB()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+
+            long count = myDb.count();
+            assertEquals(0, count);
+
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDatabaseCountWithDeletedEntries()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, false, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+	    int deletedCount = 0;
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+		if ((i % 5) == 0) {
+		    myDb.delete(txn, key);
+		    deletedCount++;
+		}
+            }
+
+            long count = myDb.count();
+            assertEquals(NUM_RECS - deletedCount, count);
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
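+    /**
+     * Same as testStat, but for a duplicates database, so the duplicate
+     * IN/BIN and DupCountLN counts are non-zero in the full stats.
+     */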
+    public void testStatDups()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+		for (int j = 0; j < 10; j++) {
+		    data.setData(TestUtils.getTestArray(i + j));
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(txn, key, data));
+		}
+            }
+
+            BtreeStats stat = (BtreeStats)
+		myDb.getStats(TestUtils.FAST_STATS);
+
+            assertEquals(0, stat.getInternalNodeCount());
+            assertEquals(0, stat.getDuplicateInternalNodeCount());
+            assertEquals(0, stat.getBottomInternalNodeCount());
+            assertEquals(0, stat.getDuplicateBottomInternalNodeCount());
+            assertEquals(0, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(0, stat.getDupCountLeafNodeCount());
+            assertEquals(0, stat.getMainTreeMaxDepth());
+            assertEquals(0, stat.getDuplicateTreeMaxDepth());
+
+            stat = (BtreeStats) myDb.getStats(null);
+
+            assertEquals(23, stat.getInternalNodeCount());
+            assertEquals(NUM_RECS, stat.getDuplicateInternalNodeCount());
+            assertEquals(85, stat.getBottomInternalNodeCount());
+            assertEquals(771, stat.getDuplicateBottomInternalNodeCount());
+            assertEquals(2570, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(NUM_RECS, stat.getDupCountLeafNodeCount());
+            assertEquals(4, stat.getMainTreeMaxDepth());
+            assertEquals(2, stat.getDuplicateTreeMaxDepth());
+
+            stat = (BtreeStats) myDb.getStats(TestUtils.FAST_STATS);
+
+            assertEquals(23, stat.getInternalNodeCount());
+            assertEquals(NUM_RECS, stat.getDuplicateInternalNodeCount());
+            assertEquals(85, stat.getBottomInternalNodeCount());
+            assertEquals(771, stat.getDuplicateBottomInternalNodeCount());
+            assertEquals(2570, stat.getLeafNodeCount());
+            assertEquals(0, stat.getDeletedLeafNodeCount());
+            assertEquals(NUM_RECS, stat.getDupCountLeafNodeCount());
+            assertEquals(4, stat.getMainTreeMaxDepth());
+            assertEquals(2, stat.getDuplicateTreeMaxDepth());
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
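+    /**
+     * Database.count() in a duplicates database counts every duplicate: 257
+     * keys with 10 distinct data values each gives the expected 2570.
+     */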
+    public void testDatabaseCountDups()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+		for (int j = 0; j < 10; j++) {
+		    data.setData(TestUtils.getTestArray(i + j));
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(txn, key, data));
+		}
+            }
+
+            long count = myDb.count();
+
+            assertEquals(2570, count);
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDeferredWriteDatabaseCountDups()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, true, true, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(null, key, data));
+		for (int j = 0; j < 10; j++) {
+		    data.setData(TestUtils.getTestArray(i + j));
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(null, key, data));
+		}
+            }
+
+            long count = myDb.count();
+
+            assertEquals(2570, count);
+
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
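+    /**
+     * Verify the leaf-node and deleted-leaf-node counts reported by
+     * getStats() after deleting every other key; deleteTestInternal() takes
+     * the record and duplicate counts along with the expected live and
+     * deleted LN counts.
+     */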
+    public void testStatDeletes()
+        throws Throwable {
+
+	deleteTestInternal(1, 2, 0, 2);
+	deleteTestInternal(2, 2, 2, 2);
+	deleteTestInternal(10, 2, 10, 10);
+	deleteTestInternal(11, 2, 10, 12);
+    }
+
+    private void deleteTestInternal(int numRecs,
+				    int numDupRecs,
+				    int expectedLNs,
+				    int expectedDeletedLNs)
+	throws Throwable {
+
+        try {
+	    TestUtils.removeLogFiles("Setup", envHome, false);
+            Database myDb = initEnvAndDb(true, true, true, false, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+            for (int i = numRecs; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+		for (int j = 0; j < numDupRecs; j++) {
+		    data.setData(TestUtils.getTestArray(i + j));
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.put(txn, key, data));
+		}
+            }
+
+            for (int i = numRecs; i > 0; i -= 2) {
+                key.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.delete(txn, key));
+            }
+
+            BtreeStats stat = (BtreeStats) myDb.getStats(null);
+
+            assertEquals(expectedLNs, stat.getLeafNodeCount());
+            assertEquals(expectedDeletedLNs, stat.getDeletedLeafNodeCount());
+            assertEquals(numRecs, stat.getDupCountLeafNodeCount());
+
+	    txn.commit();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Exercise the preload method, which warms up the cache.
+     */
+    public void testPreloadByteLimit()
+        throws Throwable {
+
+        /* Set up a test db */
+        Database myDb = initEnvAndDb(false, false, true, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+	Transaction txn = env.beginTransaction(null, null);
+        for (int i = 2500; i > 0; i--) {
+            key.setData(TestUtils.getTestArray(i));
+            data.setData(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+			 myDb.put(txn, key, data));
+        }
+
+        /* Recover the database, restart w/no evictor. */
+        long postCreateMemUsage = env.getMemoryUsage();
+        INList inlist = env.getEnvironmentImpl().getInMemoryINs();
+        long postCreateResidentNodes = inlist.getSize();
+	txn.commit();
+        myDb.close();
+        env.close();
+        myDb = initEnvAndDb
+	    (true, false, true, false,
+	     MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING);
+
+        /*
+         * Do two evictions, because the first eviction will only strip
+         * LNs. We need to actually evict BINs because preload only pulls in
+         * INs/BINs.
+         */
+        env.evictMemory(); // first eviction strips LNs
+        env.evictMemory(); // second one will evict BINs
+
+        long postEvictMemUsage = env.getMemoryUsage();
+        inlist = env.getEnvironmentImpl().getInMemoryINs(); // re-get inList
+        long postEvictResidentNodes = inlist.getSize();
+
+        /* Now preload, but not up to the full size of the db */
+	PreloadConfig conf = new PreloadConfig();
+	conf.setMaxBytes(92000);
+        PreloadStats stats =
+	    myDb.preload(conf); /* Cache size is currently 92160. */
+
+	assertEquals(PreloadStatus.FILLED_CACHE, stats.getStatus());
+
+        long postPreloadMemUsage = env.getMemoryUsage();
+        long postPreloadResidentNodes = inlist.getSize();
+
+        /* Now iterate to get everything back into memory */
+        Cursor cursor = myDb.openCursor(null, null);
+        int count = 0;
+        OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT);
+        while (status == OperationStatus.SUCCESS) {
+            count++;
+            status = cursor.getNext(key, data, LockMode.DEFAULT);
+        }
+	cursor.close();
+
+        long postIterationMemUsage = env.getMemoryUsage();
+        long postIterationResidentNodes = inlist.getSize();
+
+        if (DEBUG) {
+            System.out.println("postCreateMemUsage: " + postCreateMemUsage);
+            System.out.println("postEvictMemUsage: " + postEvictMemUsage);
+            System.out.println("postPreloadMemUsage: " + postPreloadMemUsage);
+            System.out.println("postIterationMemUsage: " +
+                               postIterationMemUsage);
+            System.out.println("postEvictResidentNodes: " +
+                               postEvictResidentNodes);
+            System.out.println("postPreloadResidentNodes: " +
+                               postPreloadResidentNodes);
+            System.out.println("postIterationResidentNodes: " +
+                               postIterationResidentNodes);
+            System.out.println("postCreateResidentNodes: " +
+                               postCreateResidentNodes);
+        }
+
+        assertTrue(postEvictMemUsage < postCreateMemUsage);
+        assertTrue(postEvictMemUsage < postPreloadMemUsage);
+        assertTrue("postPreloadMemUsage=" + postPreloadMemUsage +
+                   " postIterationMemUsage=" + postIterationMemUsage,
+                   postPreloadMemUsage < postIterationMemUsage);
+        assertTrue(postIterationMemUsage <= postCreateMemUsage);
+        assertTrue(postEvictResidentNodes < postPreloadResidentNodes);
+        //assertEquals(postCreateResidentNodes, postIterationResidentNodes);
+        assertTrue(postCreateResidentNodes >= postIterationResidentNodes);
+	
+	stats = new PreloadStats(10, // nINs
+                                 30, // nBINs,
+                                 60, // nLNs
+                                 12, // nDINs
+                                 20, // nDBINs
+                                 30, // nDupcountLNs
+                                 PreloadStatus.EXCEEDED_TIME);
+
+	assertEquals(10, stats.getNINsLoaded());
+	assertEquals(30, stats.getNBINsLoaded());
+	assertEquals(60, stats.getNLNsLoaded());
+	assertEquals(12, stats.getNDINsLoaded());
+	assertEquals(20, stats.getNDBINsLoaded());
+	assertEquals(30, stats.getNDupCountLNsLoaded());
+	assertEquals(PreloadStatus.EXCEEDED_TIME, stats.getStatus());
+	stats.toString();	
+
+	VerifyConfig vcfg = new VerifyConfig();
+
+	vcfg.setPropagateExceptions(true);
+	vcfg.setAggressive(false);
+	vcfg.setPrintInfo(true);
+	vcfg.setShowProgressStream(System.out);
+	vcfg.setShowProgressInterval(5);
+
+	assertEquals(true, vcfg.getPropagateExceptions());
+	assertEquals(false, vcfg.getAggressive());
+	assertEquals(true, vcfg.getPrintInfo());
+	assertEquals(System.out.getClass(),
+		     vcfg.getShowProgressStream().getClass());
+	assertEquals(5, vcfg.getShowProgressInterval());
+	vcfg.toString();
+
+        myDb.close();
+        env.close();
+    }
+
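+    /**
+     * Same as testPreloadByteLimit, but bound the preload with
+     * setMaxMillisecs() and expect PreloadStatus.EXCEEDED_TIME.
+     */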
+    public void testPreloadTimeLimit()
+        throws Throwable {
+
+        /* Set up a test db */
+        Database myDb = initEnvAndDb(false, false, true, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+	Transaction txn = env.beginTransaction(null, null);
+        for (int i = 25000; i > 0; i--) {
+            key.setData(TestUtils.getTestArray(i));
+            data.setData(new byte[1]);
+            assertEquals(OperationStatus.SUCCESS,
+			 myDb.put(txn, key, data));
+        }
+
+        /* Recover the database, restart w/no evictor. */
+        long postCreateMemUsage = env.getMemoryUsage();
+        INList inlist = env.getEnvironmentImpl().getInMemoryINs();
+        long postCreateResidentNodes = inlist.getSize();
+	txn.commit();
+        myDb.close();
+        env.close();
+        myDb = initEnvAndDb(true, false, true, false, null);
+
+        /*
+         * Do two evictions, because the first eviction will only strip
+         * LNs. We need to actually evict BINs because preload only pulls in
+         * INs/BINs.
+         */
+        env.evictMemory(); // first eviction strips LNs
+        env.evictMemory(); // second one will evict BINs
+
+        long postEvictMemUsage = env.getMemoryUsage();
+        inlist = env.getEnvironmentImpl().getInMemoryINs(); // re-get inList
+        long postEvictResidentNodes = inlist.getSize();
+
+        /* Now preload, but not up to the full size of the db */
+	PreloadConfig conf = new PreloadConfig();
+	conf.setMaxMillisecs(50);
+        PreloadStats stats = myDb.preload(conf);
+	assertEquals(PreloadStatus.EXCEEDED_TIME, stats.getStatus());
+
+        long postPreloadMemUsage = env.getMemoryUsage();
+        long postPreloadResidentNodes = inlist.getSize();
+
+        /* Now iterate to get everything back into memory */
+        Cursor cursor = myDb.openCursor(null, null);
+        int count = 0;
+        OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT);
+        while (status == OperationStatus.SUCCESS) {
+            count++;
+            status = cursor.getNext(key, data, LockMode.DEFAULT);
+        }
+	cursor.close();
+
+        long postIterationMemUsage = env.getMemoryUsage();
+        long postIterationResidentNodes = inlist.getSize();
+
+        if (DEBUG) {
+            System.out.println("postCreateMemUsage: " + postCreateMemUsage);
+            System.out.println("postEvictMemUsage: " + postEvictMemUsage);
+            System.out.println("postPreloadMemUsage: " + postPreloadMemUsage);
+            System.out.println("postIterationMemUsage: " +
+                               postIterationMemUsage);
+            System.out.println("postEvictResidentNodes: " +
+                               postEvictResidentNodes);
+            System.out.println("postPreloadResidentNodes: " +
+                               postPreloadResidentNodes);
+            System.out.println("postIterationResidentNodes: " +
+                               postIterationResidentNodes);
+            System.out.println("postCreateResidentNodes: " +
+                               postCreateResidentNodes);
+        }
+
+        assertTrue(postEvictMemUsage < postCreateMemUsage);
+        assertTrue(postEvictMemUsage < postPreloadMemUsage);
+        assertTrue("postPreloadMemUsage=" + postPreloadMemUsage +
+                   " postIterationMemUsage=" + postIterationMemUsage,
+                   postPreloadMemUsage < postIterationMemUsage);
+        assertTrue(postIterationMemUsage <= postCreateMemUsage);
+        assertTrue(postEvictResidentNodes < postPreloadResidentNodes);
+        //assertEquals(postCreateResidentNodes, postIterationResidentNodes);
+        assertTrue(postCreateResidentNodes >= postIterationResidentNodes);
+
+        myDb.close();
+        env.close();
+    }
+
+    /**
+     * Load the entire database with preload.
+     */
+    public void testPreloadEntireDatabase()
+        throws Throwable {
+
+        /* Create a test db with one record */
+        Database myDb = initEnvAndDb(false, false, false, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        key.setData(TestUtils.getTestArray(0));
+        data.setData(TestUtils.getTestArray(0));
+        assertEquals(OperationStatus.SUCCESS, myDb.put(null, key, data));
+
+        /* Close and reopen. */
+        myDb.close();
+        env.close();
+        myDb = initEnvAndDb(false, false, false, false, null);
+
+        /*
+         * Preload the entire database.  In JE 2.0.54 this would cause a
+         * NullPointerException.
+         */
+	PreloadConfig conf = new PreloadConfig();
+	conf.setMaxBytes(100000);
+        myDb.preload(conf);
+
+        myDb.close();
+        env.close();
+    }
+
+    /**
+     * Test preload(N, 0) where N > cache size (throws
+     * IllegalArgumentException).
+     */
+    public void testPreloadBytesExceedsCache()
+        throws Throwable {
+
+        /* Create a test db with one record */
+        Database myDb = initEnvAndDb(false, false, false, false, "100000");
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        key.setData(TestUtils.getTestArray(0));
+        data.setData(TestUtils.getTestArray(0));
+        assertEquals(OperationStatus.SUCCESS, myDb.put(null, key, data));
+
+        /* Close and reopen. */
+        myDb.close();
+        env.close();
+        myDb = initEnvAndDb(false, false, false, false, "100000");
+
+	/* maxBytes > cache size.  Should throw IllegalArgumentException. */
+	try {
+	    PreloadConfig conf = new PreloadConfig();
+	    conf.setMaxBytes(100001);
+	    myDb.preload(conf);
+	    fail("should have thrown IAE");
+	} catch (IllegalArgumentException IAE) {
+	}
+
+        myDb.close();
+        env.close();
+    }
+
+    public void testDbClose()
+        throws Throwable {
+
+        /* Set up a test db */
+        Database myDb = initEnvAndDb(false, false, true, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+	Transaction txn = env.beginTransaction(null, null);
+        for (int i = 2500; i > 0; i--) {
+            key.setData(TestUtils.getTestArray(i));
+            data.setData(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+			 myDb.put(txn, key, data));
+        }
+
+	/* Create a cursor, use it, then close db without closing cursor. */
+        Cursor cursor = myDb.openCursor(txn, null);
+        assertEquals(OperationStatus.SUCCESS,
+        	     cursor.getFirst(key, data, LockMode.DEFAULT));
+
+	try {
+	    myDb.close();
+	    fail("didn't throw DatabaseException for unclosed cursor");
+	} catch (DatabaseException DBE) {
+	}
+	txn.commit();
+        env.close();
+    }
+
+    public void testDbCloseUnopenedDb()
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+	Database myDb = new Database(env);
+	try {
+	    myDb.close();
+	} catch (DatabaseException DBE) {
+	    fail("shouldn't catch DatabaseException for closing unopened db");
+	}
+	env.close();
+    }
+
+    /**
+     * Test that open cursor isn't possible on a closed database.
+     */
+    public void testOpenCursor()
+        throws DatabaseException {
+        Database db = initEnvAndDb(true, false, true, false, null);
+        Cursor cursor = db.openCursor(null, null);
+        cursor.close();
+        db.close();
+        try {
+            db.openCursor(null, null);
+            fail("Should throw exception because databse is closed");
+        } catch (DatabaseException e) {
+        }
+    }
+
+    public void testBufferOverflowingPut()
+        throws Throwable {
+
+        try {
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            //envConfig.setConfigParam("je.log.totalBufferBytes", "5000");
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database myDb = env.openDatabase(null, "testDB", dbConfig);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry(new byte[10000000]);
+	    try {
+		key.setData(TestUtils.getTestArray(10));
+		myDb.put(null, key, data);
+	    } catch (DatabaseException DE) {
+		fail("unexpected DatabaseException");
+	    }
+            myDb.close();
+            env.close();
+	    env = null;
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Check that the handle lock is not left behind when a non-transactional
+     * open of a primary DB fails while populating the secondary. [#15558]
+     */
+    public void testFailedNonTxnDbOpen()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig priConfig = new DatabaseConfig();
+        priConfig.setAllowCreate(true);
+        Database priDb = env.openDatabase(null, "testDB", priConfig);
+
+        priDb.put(null, new DatabaseEntry(new byte[1]),
+                        new DatabaseEntry(new byte[2]));
+
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setAllowCreate(true);
+        secConfig.setAllowPopulate(true);
+        /* Use priDb as foreign key DB for ease of testing. */
+        secConfig.setForeignKeyDatabase(priDb);
+        secConfig.setKeyCreator(new SecondaryKeyCreator() {
+            public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                              DatabaseEntry key,
+                                              DatabaseEntry data,
+                                              DatabaseEntry result)
+                throws DatabaseException {
+                result.setData
+                    (data.getData(), data.getOffset(), data.getSize());
+                return true;
+            }
+        });
+        try {
+            env.openSecondaryDatabase(null, "testDB2", priDb, secConfig);
+            fail();
+        } catch (DatabaseException e) {
+            /* Fails because [0,0] does not exist as a key in priDb. */
+            assertTrue(e.toString(),
+                       e.toString().indexOf("foreign key not allowed") > 0);
+        }
+
+        priDb.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Set up the environment and db.
+     */
+    private Database initEnvAndDb(boolean dontRunEvictor,
+                                  boolean allowDuplicates,
+				  boolean transactional,
+				  boolean deferredWrite,
+                                  String memSize)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(transactional);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX_DUPTREE.getName(),
+				 "6");
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        if (dontRunEvictor) {
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_EVICTOR.getName(),
+                                     "false");
+
+            /*
+             * Don't let critical eviction run or it will interfere with the
+             * preload test.
+             */
+            envConfig.setConfigParam(EnvironmentParams.
+                                     EVICTOR_CRITICAL_PERCENTAGE.getName(),
+                                     "500");
+        }
+
+        if (memSize != null) {
+            envConfig.setConfigParam(EnvironmentParams.
+                                     MAX_MEMORY.getName(),
+                                     memSize);
+        }
+
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        dbConfig.setAllowCreate(true);
+	if (!deferredWrite) {
+	    dbConfig.setTransactional(transactional);
+	}
+        dbConfig.setDeferredWrite(deferredWrite);
+        Database myDb = env.openDatabase(null, "testDB", dbConfig);
+        return myDb;
+    }
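+
+    /*
+     * Illustrative usage of the helper above (for reference; mirrors the
+     * calls made by the preload tests):
+     *
+     *   Database db = initEnvAndDb(true,   // don't run the evictor
+     *                              false,  // no sorted duplicates
+     *                              true,   // transactional
+     *                              false,  // not deferred-write
+     *                              null);  // default cache size
+     */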
+
+    /**
+     * Disabled (renamed with an "xx" prefix) because this test is meant to
+     * be run in the debugger, setting specific breakpoints and stepping
+     * through in a synchronous manner.
+     */
+    private Database pNOCDb;
+
+    public void xxtestPutNoOverwriteConcurrently()
+	throws Throwable {
+
+	pNOCDb = initEnvAndDb(true, true, true, false, null);
+	JUnitThread tester1 =
+	    new JUnitThread("testNonBlocking1") {
+		public void testBody() {
+		    try {
+			Transaction txn1 = env.beginTransaction(null, null);
+			DatabaseEntry key = new DatabaseEntry();
+			DatabaseEntry data = new DatabaseEntry();
+			key.setData(TestUtils.getTestArray(1));
+			data.setData(TestUtils.getTestArray(1));
+			OperationStatus status =
+			    pNOCDb.putNoOverwrite(txn1, key, data);
+			txn1.commit();
+			System.out.println("thread1: " + status);
+		    } catch (DatabaseException DBE) {
+			DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testNonBlocking2") {
+		public void testBody() {
+		    try {
+			Transaction txn2 = env.beginTransaction(null, null);
+			DatabaseEntry key = new DatabaseEntry();
+			DatabaseEntry data = new DatabaseEntry();
+			key.setData(TestUtils.getTestArray(1));
+			data.setData(TestUtils.getTestArray(2));
+			OperationStatus status =
+			    pNOCDb.putNoOverwrite(txn2, key, data);
+			txn2.commit();
+			System.out.println("thread2: " + status);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+}
diff --git a/test/com/sleepycat/je/DbHandleLockTest.java b/test/com/sleepycat/je/DbHandleLockTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..392d8dcf2fa66371c109a63a844680825abc0b11
--- /dev/null
+++ b/test/com/sleepycat/je/DbHandleLockTest.java
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbHandleLockTest.java,v 1.27.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * BDB's transactional DDL operations (database creation, truncation,
+ * removal, and renaming) need special support through what we call
+ * "handle" locks.
+ */
+public class DbHandleLockTest extends TestCase {
+    private File envHome;
+    private Environment env;
+
+    public DbHandleLockTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            /* Close in case we hit an exception and didn't close */
+            env.close();
+        } catch (DatabaseException e) {
+            /* Ok if already closed */
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testOpenHandle()
+        throws Throwable {
+
+        try {
+            Transaction txnA =
+		env.beginTransaction(null, TransactionConfig.DEFAULT);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database db = env.openDatabase(txnA, "foo", dbConfig);
+
+            /*
+	     * At this point, we expect a write lock on the NameLN (the handle
+	     * lock).
+             */
+            LockStats lockStat = env.getLockStats(null);
+            assertEquals(1, lockStat.getNTotalLocks());
+            assertEquals(1, lockStat.getNWriteLocks());
+            assertEquals(0, lockStat.getNReadLocks());
+
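+            /*
+             * After the commit the open handle still holds the NameLN lock,
+             * but as a read lock rather than a write lock, as the stats
+             * below show.
+             */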
+            txnA.commit();
+            lockStat = env.getLockStats(null);
+            assertEquals(1, lockStat.getNTotalLocks());
+            assertEquals(0, lockStat.getNWriteLocks());
+            assertEquals(1, lockStat.getNReadLocks());
+
+            /* Updating the root from another txn should be possible. */
+            insertData(10, db);
+            db.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR12068()
+	throws Throwable {
+
+	try {
+            Transaction txnA =
+		env.beginTransaction(null, TransactionConfig.DEFAULT);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database db = env.openDatabase(txnA, "foo", dbConfig);
+	    db.close();
+
+            dbConfig.setExclusiveCreate(true);
+	    try {
+		db = env.openDatabase(txnA, "foo", dbConfig);
+		fail("should throw DatabaseException");
+	    } catch (DatabaseException DE) {
+		/* Expected: database already exists. */
+	    }
+            dbConfig.setAllowCreate(false);
+            dbConfig.setExclusiveCreate(false);
+	    db = env.openDatabase(txnA, "foo", dbConfig);
+	    db.close();
+	    txnA.commit();
+	    txnA = env.beginTransaction(null, TransactionConfig.DEFAULT);
+	    env.removeDatabase(txnA, "foo");
+	    txnA.commit();
+	} catch (Throwable T) {
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    private void insertData(int numRecs, Database db)
+        throws Throwable {
+
+        for (int i = 0; i < numRecs; i++) {
+            DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(i));
+            DatabaseEntry data = new DatabaseEntry(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+			 db.put(null, key, data));
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DbTestProxy.java b/test/com/sleepycat/je/DbTestProxy.java
new file mode 100644
index 0000000000000000000000000000000000000000..59984df26babc16bc1f9899ef1364749112d5a05
--- /dev/null
+++ b/test/com/sleepycat/je/DbTestProxy.java
@@ -0,0 +1,26 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbTestProxy.java,v 1.13.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.dbi.CursorImpl;
+
+/**
+ * DbTestProxy is for internal use only. It serves to shelter methods that must
+ * be public to be used by JE unit tests but are not part of the
+ * public API available to applications.
+ */
+public class DbTestProxy {
+    /**
+     * Proxy to Cursor.getCursorImpl
+     */
+    public static CursorImpl dbcGetCursorImpl(Cursor dbc) {
+        return dbc.getCursorImpl();
+    }
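+
+    /*
+     * Typical test usage (illustrative only):
+     *
+     *   CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor);
+     */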
+}
+
diff --git a/test/com/sleepycat/je/DirtyReadTest.java b/test/com/sleepycat/je/DirtyReadTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..46814851c92c865efc85b6bce6eb60b2193656ec
--- /dev/null
+++ b/test/com/sleepycat/je/DirtyReadTest.java
@@ -0,0 +1,166 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DirtyReadTest.java,v 1.20.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Check that the Database and Cursor classes properly use read-uncommitted
+ * when specified.
+ */
+public class DirtyReadTest extends TestCase {
+    private File envHome;
+    private Environment env;
+
+    public DirtyReadTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testReadUncommitted()
+        throws Throwable {
+
+        Database db = null;
+        Transaction txnA = null;
+        Cursor cursor = null;
+        try {
+            /* Make an environment, a db, insert a few records */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /* Now open for real, insert a record */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = env.openDatabase(null, "foo", dbConfig);
+
+            StringDbt key = new StringDbt("key1");
+            StringDbt data = new StringDbt("data1");
+            txnA = env.beginTransaction(null, TransactionConfig.DEFAULT);
+            OperationStatus status = db.put(txnA, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            /*
+             * txnA should have a write lock on this record. Now try
+             * to read-uncommitted it.
+             */
+            DatabaseEntry foundKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+
+            /*
+             * Make sure we get a deadlock exception without read-uncommitted.
+             */
+            try {
+                db.get(null, key, foundData, LockMode.DEFAULT);
+                fail("Should deadlock");
+            } catch (DeadlockException e) {
+            }
+
+            /*
+             * Specify read-uncommitted as a lock mode.
+             */
+            status = db.get(null, key, foundData, LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = db.getSearchBoth
+                (null, key, data, LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            cursor = db.openCursor(null, CursorConfig.DEFAULT);
+            status = cursor.getFirst(foundKey, foundData,
+                                     LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+            cursor.close();
+
+            /*
+             * Specify read-uncommitted through a read-uncommitted txn.
+             */
+            TransactionConfig txnConfig = new TransactionConfig();
+            txnConfig.setReadUncommitted(true);
+            Transaction readUncommittedTxn =
+                env.beginTransaction(null, txnConfig);
+
+            status = db.get
+                (readUncommittedTxn, key, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = db.getSearchBoth
+                (readUncommittedTxn, key, data,LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            cursor = db.openCursor(readUncommittedTxn, CursorConfig.DEFAULT);
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+            cursor.close();
+            readUncommittedTxn.abort();
+
+            /*
+             * Specify read-uncommitted through a read-uncommitted cursor
+             */
+            CursorConfig cursorConfig = new CursorConfig();
+            cursorConfig.setReadUncommitted(true);
+            cursor = db.openCursor(null, cursorConfig);
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            /*
+             * Open through the compatibility method; it should accept dirty
+             * read (but ignores it).
+             */
+	    // Database compatDb = new Database(env);
+	    // compatDb.open(null, null, "foo", DbConstants.DB_BTREE,
+	    //             DbConstants.DB_DIRTY_READ, DbConstants.DB_UNKNOWN);
+	    // compatDb.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+
+            if (txnA != null) {
+                txnA.abort();
+            }
+
+            if (db != null) {
+                db.close();
+            }
+            env.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/EnvironmentConfigTest.java b/test/com/sleepycat/je/EnvironmentConfigTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..5fd7c6f72b1ed3664e4053aa84201899a102c737
--- /dev/null
+++ b/test/com/sleepycat/je/EnvironmentConfigTest.java
@@ -0,0 +1,107 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentConfigTest.java,v 1.14.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.util.Properties;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class EnvironmentConfigTest extends TestCase {
+
+    /**
+     * Try out the validation in EnvironmentConfig.
+     */
+    public void testValidation()
+	throws DatabaseException {
+
+        /*
+         * This validation should be successful.
+         */
+        Properties props = new Properties();
+        props.setProperty("java.util.logging.FileHandler.limit", "2000");
+        props.setProperty("java.util.logging.FileHandler.on", "false");
+        new EnvironmentConfig(props); // Just instantiate a config object.
+
+        /*
+         * Should fail: we should throw because leftover.param is not
+         * a valid parameter.
+         */
+        props.clear();
+        props.setProperty("leftover.param", "foo");
+        checkEnvironmentConfigValidation(props);
+
+        /*
+         * Should fail: we should throw because FileHandler.limit
+         * is less than its minimum.
+         */
+        props.clear();
+        props.setProperty("java.util.logging.FileHandler.limit", "1");
+        checkEnvironmentConfigValidation(props);
+
+        /*
+         * Should fail: we should throw because FileHandler.on is not
+         * a valid value.
+         */
+        props.clear();
+        props.setProperty("java.util.logging.FileHandler.on", "xxx");
+        checkEnvironmentConfigValidation(props);
+    }
+
+    /**
+     * Test single parameter setting.
+     */
+    public void testSingleParam()
+        throws Exception {
+
+        try {
+            EnvironmentConfig config = new EnvironmentConfig();
+            config.setConfigParam("foo", "7");
+            fail("Should fail because of invalid param name");
+        } catch (IllegalArgumentException e) {
+            // expected.
+        }
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setConfigParam(EnvironmentParams.MAX_MEMORY_PERCENT.getName(),
+                              "81");
+        assertEquals(81, config.getCachePercent());
+    }
+
+    public void testInconsistentParams()
+	throws Exception {
+
+	try {
+            EnvironmentConfig config = new EnvironmentConfig();
+	    config.setAllowCreate(true);
+	    config.setLocking(false);
+	    config.setTransactional(true);
+	    File envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+	    new Environment(envHome, config);
+            fail("Should fail because of inconsistent param values");
+        } catch (IllegalArgumentException e) {
+            // expected.
+        }
+    }
+
+    /* Helper to catch expected exceptions. */
+    private void checkEnvironmentConfigValidation(Properties props) {
+        try {
+            new EnvironmentConfig(props);
+            fail("Should fail because of a parameter validation problem");
+        } catch (IllegalArgumentException e) {
+            // expected.
+        }
+    }
+}
+
diff --git a/test/com/sleepycat/je/EnvironmentStatTest.java b/test/com/sleepycat/je/EnvironmentStatTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4ab14a47778c9f578b4432e26b2a998d55cac3f6
--- /dev/null
+++ b/test/com/sleepycat/je/EnvironmentStatTest.java
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentStatTest.java,v 1.23.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.util.TestUtils;
+
+public class EnvironmentStatTest extends TestCase {
+
+    private Environment env;
+    private File envHome;
+
+    public EnvironmentStatTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        /* Close down environments in case the unit test failed so that
+         * the log files can be removed.
+         */
+        try {
+            if (env != null) {
+                env.close();
+                env = null;
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Test open and close of an environment.
+     */
+    public void testCacheStats()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+        EnvironmentStats stat = env.getStats(TestUtils.FAST_STATS);
+        env.close();
+        env = null;
+        assertEquals(0, stat.getNCacheMiss());
+        assertEquals(0, stat.getNNotResident());
+
+        // Try to open and close again, now that the environment exists
+        envConfig.setAllowCreate(false);
+        envConfig.setConfigParam
+            (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+        env = new Environment(envHome, envConfig);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+        db.put(null, new DatabaseEntry(new byte[0]),
+                     new DatabaseEntry(new byte[0]));
+        Transaction txn = env.beginTransaction(null, null);
+        db.put(txn, new DatabaseEntry(new byte[0]),
+                    new DatabaseEntry(new byte[0]));
+        stat = env.getStats(TestUtils.FAST_STATS);
+        MemoryBudget mb =
+            DbInternal.envGetEnvironmentImpl(env).getMemoryBudget();
+
+        assertEquals(mb.getCacheMemoryUsage(), stat.getCacheTotalBytes());
+        assertEquals(mb.getLogBufferBudget(), stat.getBufferBytes());
+        assertEquals(mb.getTreeMemoryUsage() + mb.getTreeAdminMemoryUsage(),
+                     stat.getDataBytes());
+        assertEquals(mb.getLockMemoryUsage(), stat.getLockBytes());
+        assertEquals(mb.getAdminMemoryUsage(), stat.getAdminBytes());
+
+        assertTrue(stat.getBufferBytes() > 0);
+        assertTrue(stat.getDataBytes() > 0);
+        assertTrue(stat.getLockBytes() > 0);
+        assertTrue(stat.getAdminBytes() > 0);
+
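+        /* The cache total should equal the sum of its component budgets. */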
+        assertEquals(stat.getCacheTotalBytes(),
+                     stat.getBufferBytes() +
+                     stat.getDataBytes() +
+                     stat.getLockBytes() +
+                     stat.getAdminBytes());
+
+        assertEquals(12, stat.getNCacheMiss());
+        assertEquals(12, stat.getNNotResident());
+
+        /* Test deprecated getCacheDataBytes method. */
+        final EnvironmentStats finalStat = stat;
+        final long expectCacheDataBytes = mb.getCacheMemoryUsage() -
+                                          mb.getLogBufferBudget();
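+        /*
+         * Wrap the call in a @Deprecated Runnable so that using the
+         * deprecated getter does not trigger a compiler deprecation warning.
+         */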
+        (new Runnable() {
+            @Deprecated
+            public void run() {
+                assertEquals(expectCacheDataBytes,
+                             finalStat.getCacheDataBytes());
+            }
+        }).run();
+
+        txn.abort();
+        db.close();
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/EnvironmentTest.java b/test/com/sleepycat/je/EnvironmentTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..3804cbdaf1b2144a9c9a492ed46cd576bccd78ea
--- /dev/null
+++ b/test/com/sleepycat/je/EnvironmentTest.java
@@ -0,0 +1,1561 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentTest.java,v 1.213.2.33 2010/03/23 15:02:23 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.txn.LockInfo;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DaemonRunner;
+
+public class EnvironmentTest extends TestCase {
+
+    private Environment env1;
+    private Environment env2;
+    private Environment env3;
+    private File envHome;
+
+    public EnvironmentTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        /*
+         * Close down environments in case the unit test failed so that the log
+         * files can be removed.
+         */
+        try {
+            if (env1 != null) {
+                env1.close();
+                env1 = null;
+            }
+        } catch (DatabaseException e) {
+            /* ok, the test closed it */
+        }
+        try {
+            if (env2 != null) {
+                env2.close();
+                env2 = null;
+            }
+        } catch (DatabaseException e) {
+            /* ok, the test closed it */
+        }
+        try {
+            if (env3 != null) {
+                env3.close();
+                env3 = null;
+            }
+        } catch (DatabaseException e) {
+            /* ok, the test closed it */
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Test open and close of an environment.
+     */
+    public void testBasic()
+        throws Throwable {
+
+        try {
+            assertEquals("Checking version", "3.3.98",
+                         JEVersion.CURRENT_VERSION.getVersionString());
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+            /* Don't track detail with a tiny cache size. */
+            envConfig.setConfigParam
+                (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+            envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setConfigParam
+            (EnvironmentParams.LOG_MEM_SIZE.getName(),
+                    EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+            envConfig.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            env1.close();
+
+            /* Try to open and close again, now that the environment exists. */
+            envConfig.setAllowCreate(false);
+            env1 = new Environment(envHome, envConfig);
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test creation of a reserved name fails.
+     */
+    public void testNoCreateReservedNameDB()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            try {
+                env1.openDatabase(null, DbTree.VLSN_MAP_DB_NAME, dbConfig);
+                fail("expected DatabaseException since Environment not " +
+                     "transactional");
+            } catch (IllegalArgumentException IAE) {
+            }
+
+            env1.close();
+
+            /* Try to open and close again, now that the environment exists. */
+            envConfig.setAllowCreate(false);
+            env1 = new Environment(envHome, envConfig);
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test environment reference counting.
+     */
+    public void testReferenceCounting()
+        throws Throwable {
+
+        try {
+
+            /* Create two environment handles on the same environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+            /* Don't track detail with a tiny cache size. */
+            envConfig.setConfigParam
+                (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "6");
+            envConfig.setConfigParam
+            (EnvironmentParams.LOG_MEM_SIZE.getName(),
+                    EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+            envConfig.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+            envConfig.setAllowCreate(false);
+            env2 = new Environment(envHome, envConfig);
+
+            assertEquals("DbEnvironments should be equal",
+                         env1.getEnvironmentImpl(),
+                         env2.getEnvironmentImpl());
+
+            /* Try to close one of them twice */
+            env1.close();
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException");
+            } catch (DatabaseException DENOE) {
+            }
+
+            /*
+             * Close both, open a third handle, should get a new
+             * EnvironmentImpl.
+             */
+            EnvironmentImpl dbenv1 = env1.getEnvironmentImpl();
+            env2.close();
+            env1 = new Environment(envHome, envConfig);
+            assertTrue("EnvironmentImpl did not change",
+                       dbenv1 != env1.getEnvironmentImpl());
+            try {
+                env2.close();
+                fail("Didn't catch DatabaseException");
+            } catch (DatabaseException DENOE) {
+            }
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testTransactional()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            try {
+                env1.beginTransaction(null, null);
+                fail("should have thrown exception for non transactional "+
+                     " environment");
+            } catch (UnsupportedOperationException expected) {
+            }
+
+            String databaseName = "simpleDb";
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            try {
+                env1.openDatabase(null, databaseName, dbConfig);
+                fail("expected IllegalArgumentException since Environment " +
+                     " not transactional");
+            } catch (IllegalArgumentException expected) {
+            }
+
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testReadOnly()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setReadOnly(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            String databaseName = "simpleDb";
+            try {
+                env1.openDatabase(null, databaseName, dbConfig);
+                fail("expected DatabaseException since Environment is " +
+                     "readonly");
+            } catch (IllegalArgumentException expected) {
+            }
+
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * Tests memOnly mode with a home dir that does not exist. [#15255]
+     */
+    public void testMemOnly()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true");
+
+        File noHome = new File("fileDoesNotExist");
+        assertTrue(!noHome.exists());
+        env1 = new Environment(noHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Database db = env1.openDatabase(null, "foo", dbConfig);
+
+        Transaction txn = env1.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        doSimpleCursorPutAndDelete(cursor, false);
+        cursor.close();
+        txn.commit();
+        db.close();
+
+        env1.close();
+        assertTrue(!noHome.exists());
+    }
+
+    /**
+     * Tests that opening an environment after a clean close does not add to
+     * the log.
+     */
+    public void testOpenWithoutCheckpoint()
+        throws Throwable {
+
+        /* Open, close, open. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        env1 = new Environment(envHome, envConfig);
+        env1.close();
+        env1 = new Environment(envHome, null);
+
+        /* Check that no checkpoint was performed. */
+        EnvironmentStats stats = env1.getStats(null);
+        assertEquals(0, stats.getNCheckpoints());
+
+        env1.close();
+        env1 = null;
+    }
+
+    /**
+     * Test environment configuration.
+     */
+    public void testConfig()
+        throws Throwable {
+
+        /* This test assumes these props are immutable. */
+        assertTrue(!isMutableConfig("je.lock.timeout"));
+        assertTrue(!isMutableConfig("je.env.isReadOnly"));
+
+        try {
+
+            /*
+             * Make sure that the environment keeps its own copy of the
+             * configuration object.
+             */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setReadOnly(true);
+            envConfig.setAllowCreate(true);
+            envConfig.setLockTimeout(7777);
+            env1 = new Environment(envHome, envConfig);
+
+            /*
+             * Change the environment config object, make sure the
+             * environment cloned a copy when it was opened.
+             */
+            envConfig.setReadOnly(false);
+            EnvironmentConfig retrievedConfig1 = env1.getConfig();
+            assertTrue(envConfig != retrievedConfig1);
+            assertEquals(true, retrievedConfig1.getReadOnly());
+            assertEquals(true, retrievedConfig1.getAllowCreate());
+            assertEquals(7777, retrievedConfig1.getLockTimeout());
+
+            /*
+             * Make sure that the environment returns a cloned config
+             * object when you call Environment.getConfig.
+             */
+            retrievedConfig1.setReadOnly(false);
+            EnvironmentConfig retrievedConfig2 = env1.getConfig();
+            assertEquals(true, retrievedConfig2.getReadOnly());
+            assertTrue(retrievedConfig1 != retrievedConfig2);
+
+            /*
+             * Open a second environment handle, check that its attributes
+             * are available.
+             */
+            env2 = new Environment(envHome, null);
+            EnvironmentConfig retrievedConfig3 = env2.getConfig();
+            assertEquals(true, retrievedConfig3.getReadOnly());
+            assertEquals(7777, retrievedConfig3.getLockTimeout());
+
+            /*
+             * Open an environment handle on an existing environment with
+             * mismatching config params.
+             */
+            try {
+                new Environment(envHome, TestUtils.initEnvConfig());
+                fail("Shouldn't open, config param has wrong number of params");
+            } catch (IllegalArgumentException e) {
+                /* expected */
+            }
+
+            try {
+                envConfig.setLockTimeout(8888);
+                new Environment(envHome, envConfig);
+                fail("Shouldn't open, cache size doesn't match");
+            } catch (IllegalArgumentException e) {
+                /* expected */
+            }
+
+            /*
+             * Ditto for the mutable attributes.
+             */
+            EnvironmentMutableConfig mutableConfig =
+                new EnvironmentMutableConfig();
+            mutableConfig.setTxnNoSync(true);
+            env1.setMutableConfig(mutableConfig);
+            EnvironmentMutableConfig retrievedMutableConfig1 =
+                env1.getMutableConfig();
+            assertTrue(mutableConfig != retrievedMutableConfig1);
+            retrievedMutableConfig1.setTxnNoSync(false);
+            EnvironmentMutableConfig retrievedMutableConfig2 =
+                env1.getMutableConfig();
+            assertEquals(true, retrievedMutableConfig2.getTxnNoSync());
+            assertTrue(retrievedMutableConfig1 != retrievedMutableConfig2);
+
+            /*
+             * Plus check that mutables can be retrieved via the main config.
+             */
+            EnvironmentConfig retrievedConfig4 = env1.getConfig();
+            assertEquals(true, retrievedConfig4.getTxnNoSync());
+            retrievedConfig4 = env2.getConfig();
+            assertEquals(false, retrievedConfig4.getTxnNoSync());
+
+            /*
+             * Check that mutables can be passed to the ctor.
+             */
+            EnvironmentConfig envConfig3 = env2.getConfig();
+            assertEquals(false, envConfig3.getTxnNoSync());
+            envConfig3.setTxnNoSync(true);
+            env3 = new Environment(envHome, envConfig3);
+            EnvironmentMutableConfig retrievedMutableConfig3 =
+                env3.getMutableConfig();
+            assertNotSame(envConfig3, retrievedMutableConfig3);
+            assertEquals(true, retrievedMutableConfig3.getTxnNoSync());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test the semantics of env-wide mutable config properties.
+     */
+    public void testMutableConfig()
+        throws DatabaseException {
+
+        /*
+         * Note that during unit testing the shared je.properties is expected
+         * to be empty, so we don't test the application of je.properties here.
+         */
+        final String P1 = EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName();
+        final String P2 = EnvironmentParams.ENV_RUN_CLEANER.getName();
+        final String P3 = EnvironmentParams.ENV_RUN_CHECKPOINTER.getName();
+
+        assertTrue(isMutableConfig(P1));
+        assertTrue(isMutableConfig(P2));
+        assertTrue(isMutableConfig(P3));
+
+        EnvironmentConfig config;
+        EnvironmentMutableConfig mconfig;
+
+        /*
+         * Create env1, first handle.
+         * P1 defaults to true.
+         * P2 is set to true (the default).
+         * P3 is set to false (not the default).
+         */
+        config = TestUtils.initEnvConfig();
+        config.setAllowCreate(true);
+        config.setConfigParam(P2, "true");
+        config.setConfigParam(P3, "false");
+        env1 = new Environment(envHome, config);
+        check3Params(env1, P1, "true", P2, "true", P3, "false");
+
+        MyObserver observer = new MyObserver();
+        env1.getEnvironmentImpl().addConfigObserver(observer);
+        assertEquals(0, observer.testAndReset());
+
+        /*
+         * Open env2, second handle, test that no mutable params can be
+         * overridden.
+         * P1 is set to false.
+         * P2 is set to false.
+         * P3 is set to true.
+         */
+        config = TestUtils.initEnvConfig();
+        config.setConfigParam(P1, "false");
+        config.setConfigParam(P2, "false");
+        config.setConfigParam(P3, "true");
+        env2 = new Environment(envHome, config);
+        assertEquals(0, observer.testAndReset());
+        check3Params(env1, P1, "true", P2, "true", P3, "false");
+
+        /*
+         * Set mutable config explicitly.
+         */
+        mconfig = env2.getMutableConfig();
+        mconfig.setConfigParam(P1, "false");
+        mconfig.setConfigParam(P2, "false");
+        mconfig.setConfigParam(P3, "true");
+        env2.setMutableConfig(mconfig);
+        assertEquals(1, observer.testAndReset());
+        check3Params(env2, P1, "false", P2, "false", P3, "true");
+
+        env1.close();
+        env1 = null;
+        env2.close();
+        env2 = null;
+    }
+
+    /**
+     * Checks that je.txn.deadlockStackTrace is mutable and takes effect.
+     */
+    public void testTxnDeadlockStackTrace()
+        throws DatabaseException {
+
+        String name = EnvironmentParams.TXN_DEADLOCK_STACK_TRACE.getName();
+        assertTrue(isMutableConfig(name));
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        config.setAllowCreate(true);
+        config.setConfigParam(name, "true");
+        env1 = new Environment(envHome, config);
+        assertTrue(LockInfo.getDeadlockStackTrace());
+
+        EnvironmentMutableConfig mconfig = env1.getMutableConfig();
+        mconfig.setConfigParam(name, "false");
+        env1.setMutableConfig(mconfig);
+        assertTrue(!LockInfo.getDeadlockStackTrace());
+
+        mconfig = env1.getMutableConfig();
+        mconfig.setConfigParam(name, "true");
+        env1.setMutableConfig(mconfig);
+        assertTrue(LockInfo.getDeadlockStackTrace());
+
+        env1.close();
+        env1 = null;
+    }
+
+    /**
+     * Checks three config parameter values.
+     */
+    private void check3Params(Environment env,
+                              String p1, String v1,
+                              String p2, String v2,
+                              String p3, String v3)
+        throws DatabaseException {
+
+        EnvironmentConfig config = env.getConfig();
+
+        assertEquals(v1, config.getConfigParam(p1));
+        assertEquals(v2, config.getConfigParam(p2));
+        assertEquals(v3, config.getConfigParam(p3));
+
+        EnvironmentMutableConfig mconfig = env.getMutableConfig();
+
+        assertEquals(v1, mconfig.getConfigParam(p1));
+        assertEquals(v2, mconfig.getConfigParam(p2));
+        assertEquals(v3, mconfig.getConfigParam(p3));
+    }
+
+    /**
+     * Returns whether a config parameter is mutable.
+     */
+    private boolean isMutableConfig(String name) {
+        ConfigParam param = (ConfigParam)
+            EnvironmentParams.SUPPORTED_PARAMS.get(name);
+        assert param != null;
+        return param.isMutable();
+    }
+
+    /**
+     * Observes config changes and remembers how many times it was called.
+     */
+    private static class MyObserver implements EnvConfigObserver {
+
+        private int count = 0;
+
+        public void envConfigUpdate(DbConfigManager mgr,
+                EnvironmentMutableConfig ignore) {
+            count += 1;
+        }
+
+        int testAndReset() {
+            int result = count;
+            count = 0;
+            return result;
+        }
+    }
+
+    /**
+     * Make sure that config param loading follows the right precedence.
+     */
+    public void testParamLoading()
+	throws Throwable {
+
+        File testEnvHome = null;
+        try {
+
+            /*
+             * A je.properties file has been put into
+             * <testdestdir>/propTest/je.properties
+             */
+            StringBuffer testPropsEnv = new StringBuffer();
+            testPropsEnv.append(System.getProperty(TestUtils.DEST_DIR));
+            testPropsEnv.append(File.separatorChar);
+            testPropsEnv.append("propTest");
+            testEnvHome = new File(testPropsEnv.toString());
+            TestUtils.removeLogFiles("testParamLoading start",
+                                     testEnvHome, false);
+
+            /*
+             * Set some configuration params programmatically.  Do not use
+             * TestUtils.initEnvConfig since we're counting properties.
+             */
+            EnvironmentConfig appConfig = new EnvironmentConfig();
+            appConfig.setConfigParam("je.log.numBuffers", "88");
+            appConfig.setConfigParam
+            ("je.log.totalBufferBytes",
+                    EnvironmentParams.LOG_MEM_SIZE_MIN_STRING + 10);
+            appConfig.setAllowCreate(true);
+
+            Environment appEnv = new Environment(testEnvHome, appConfig);
+            EnvironmentConfig envConfig = appEnv.getConfig();
+
+            assertEquals(3, envConfig.getNumExplicitlySetParams());
+            assertEquals("false",
+                         envConfig.getConfigParam("je.env.recovery"));
+            assertEquals("7001",
+                         envConfig.getConfigParam("je.log.totalBufferBytes"));
+            assertEquals("200",
+                         envConfig.getConfigParam("je.log.numBuffers"));
+            appEnv.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+        finally {
+            TestUtils.removeLogFiles("testParamLoadingEnd",
+                                     testEnvHome, false);
+        }
+    }
+
+    public void testDbRename()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            String databaseName = "simpleDb";
+            String newDatabaseName = "newSimpleDb";
+
+            /* Try to rename a non-existent db. */
+            try {
+                env1.renameDatabase(null, databaseName, newDatabaseName);
+                fail("Rename on non-existent db should fail");
+            } catch (DatabaseException e) {
+                /* expect exception */
+            }
+
+            /* Now create a test db. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database exampleDb = env1.openDatabase(null, databaseName,
+                    dbConfig);
+
+            Transaction txn = env1.beginTransaction(null, null);
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+            txn.commit();
+            exampleDb.close();
+
+            dbConfig.setAllowCreate(false);
+            env1.renameDatabase(null, databaseName, newDatabaseName);
+            exampleDb = env1.openDatabase(null, newDatabaseName, dbConfig);
+            cursor = exampleDb.openCursor(null, null);
+            // XXX doSimpleVerification(cursor);
+            cursor.close();
+
+            /* Check debug name. */
+            DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(exampleDb);
+            assertEquals(newDatabaseName, dbImpl.getDebugName());
+            exampleDb.close();
+            try {
+                exampleDb = env1.openDatabase(null, databaseName, dbConfig);
+                fail("didn't get db not found exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDbRenameCommit()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            String databaseName = "simpleRenameCommitDb";
+            String newDatabaseName = "newSimpleRenameCommitDb";
+
+            Transaction txn = env1.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb = env1.openDatabase(txn, databaseName,
+                    dbConfig);
+
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+            exampleDb.close();
+
+            dbConfig.setAllowCreate(false);
+            env1.renameDatabase(txn, databaseName, newDatabaseName);
+            exampleDb = env1.openDatabase(txn, newDatabaseName, dbConfig);
+            cursor = exampleDb.openCursor(txn, null);
+            cursor.close();
+            exampleDb.close();
+            try {
+                exampleDb = env1.openDatabase(txn, databaseName, dbConfig);
+                fail("didn't get db not found exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            txn.commit();
+
+            try {
+                exampleDb = env1.openDatabase(null, databaseName, null);
+                fail("didn't catch DatabaseException opening old name");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            try {
+                exampleDb = env1.openDatabase(null, newDatabaseName, null);
+                exampleDb.close();
+            } catch (DatabaseException DBE) {
+                fail("caught unexpected exception");
+            }
+
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDbRenameAbort()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            /* Create a database. */
+            String databaseName = "simpleRenameAbortDb";
+            String newDatabaseName = "newSimpleRenameAbortDb";
+            Transaction txn = env1.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb =
+                env1.openDatabase(txn, databaseName, dbConfig);
+
+            /* Put some data in, close the database, commit. */
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+            exampleDb.close();
+            txn.commit();
+
+            /*
+             * Rename under another txn; we shouldn't be able to open under
+             * the old name.
+             */
+            txn = env1.beginTransaction(null, null);
+            env1.renameDatabase(txn, databaseName, newDatabaseName);
+            dbConfig.setAllowCreate(false);
+            exampleDb = env1.openDatabase(txn, newDatabaseName, dbConfig);
+            cursor = exampleDb.openCursor(txn, null);
+            // XXX doSimpleVerification(cursor);
+            cursor.close();
+            exampleDb.close();
+            try {
+                exampleDb = env1.openDatabase(txn, databaseName, dbConfig);
+                fail("didn't get db not found exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+
+            /*
+             * Abort the rename; we should be able to open under the old name
+             * with a null (default) config, i.e. without allowCreate set.
+             */
+            txn.abort();
+            exampleDb = new Database(env1);
+            try {
+                exampleDb = env1.openDatabase(null, databaseName, null);
+                exampleDb.close();
+            } catch (DatabaseException dbe) {
+                fail("caught DatabaseException opening old name:" +
+                     dbe.getMessage());
+            }
+
+            /* Shouldn't be able to open under the new name. */
+            try {
+                exampleDb = env1.openDatabase(null, newDatabaseName, null);
+                fail("didn't catch DatabaseException opening new name");
+            } catch (DatabaseNotFoundException expected) {
+            }
+
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDbRemove()
+        throws Throwable {
+
+        try {
+            /* Set up an environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            String databaseName = "simpleDb";
+
+            /* Try to remove a non-existent db */
+            try {
+                env1.removeDatabase(null, databaseName);
+                fail("Remove of non-existent db should fail");
+            } catch (DatabaseNotFoundException expected) {
+            }
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb =
+                env1.openDatabase(null, databaseName, dbConfig);
+
+            Transaction txn = env1.beginTransaction(null, null);
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+            txn.commit();
+
+            /* Remove should fail because database is open. */
+            try {
+                env1.removeDatabase(null, databaseName);
+                fail("didn't get db open exception");
+            } catch (DatabaseException DBE) {
+            }
+            exampleDb.close();
+
+            env1.removeDatabase(null, databaseName);
+
+            /* Remove should fail because database does not exist. */
+            try {
+                exampleDb = env1.openDatabase(null, databaseName, null);
+                fail("did not catch db does not exist exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDbRemoveCommit()
+        throws Throwable {
+
+        try {
+            /* Set up an environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            /* Make a database. */
+            String databaseName = "simpleDb";
+            Transaction txn = env1.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb =
+                env1.openDatabase(txn, databaseName, dbConfig);
+
+            /* Insert and delete data in it. */
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+
+            /*
+             * Try a remove without closing the open Database handle.  Should
+             * get an exception.
+             */
+            try {
+                env1.removeDatabase(txn, databaseName);
+                fail("didn't get db open exception");
+            } catch (DatabaseException DBE) {
+            }
+            exampleDb.close();
+
+            /* Do a remove, try to open again. */
+            env1.removeDatabase(txn, databaseName);
+            try {
+                dbConfig.setAllowCreate(false);
+                exampleDb = env1.openDatabase(txn, databaseName, dbConfig);
+                fail("did not catch db does not exist exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            txn.commit();
+
+            /* Try to open, the db should have been removed. */
+            try {
+                exampleDb = env1.openDatabase(null, databaseName, null);
+                fail("did not catch db does not exist exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDbRemoveAbort()
+        throws Throwable {
+
+        try {
+            /* Set up an environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+
+            /* Create a database, commit. */
+            String databaseName = "simpleDb";
+            Transaction txn = env1.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb =
+                env1.openDatabase(txn, databaseName, dbConfig);
+            txn.commit();
+
+            /* Start a new txn and put some data in the created db. */
+            txn = env1.beginTransaction(null, null);
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+
+            /*
+             * Try to remove, we should get an exception because the db is
+             * open.
+             */
+            try {
+                env1.removeDatabase(txn, databaseName);
+                fail("didn't get db open exception");
+            } catch (DatabaseException DBE) {
+            }
+            exampleDb.close();
+
+            /*
+             * txn can only be aborted at this point since the removeDatabase()
+             * timed out.
+             */
+            txn.abort();
+            txn = env1.beginTransaction(null, null);
+            env1.removeDatabase(txn, databaseName);
+
+            try {
+                dbConfig.setAllowCreate(false);
+                exampleDb = env1.openDatabase(txn, databaseName, dbConfig);
+                fail("did not catch db does not exist exception");
+            } catch (DatabaseNotFoundException expected) {
+            }
+
+            /* Abort, should rollback the db remove. */
+            txn.abort();
+
+            try {
+                DatabaseConfig dbConfig2 = new DatabaseConfig();
+                dbConfig2.setTransactional(true);
+                exampleDb = env1.openDatabase(null, databaseName, dbConfig2);
+            } catch (DatabaseException DBE) {
+                fail("db does not exist anymore after delete/abort");
+            }
+
+            exampleDb.close();
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Provides general testing of getDatabaseNames.  Additionally verifies a
+     * fix for a bug that occurred when the first DB (lowest valued name) was
+     * removed or renamed prior to calling getDatabaseNames.  An NPE occurred
+     * in this case if the compressor had not yet deleted the BIN entry for
+     * the removed/renamed name. [#13377]
+     */
+    public void testGetDatabaseNames()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        /* Start with no databases. */
+        Set<String> dbNames = new HashSet<String>();
+        env1 = new Environment(envHome, envConfig);
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB1. */
+        dbNames.add("DB1");
+        Database db = env1.openDatabase(null, "DB1", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB2. */
+        dbNames.add("DB2");
+        db = env1.openDatabase(null, "DB2", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Rename DB2 to DB3 (this caused NPE). */
+        dbNames.remove("DB2");
+        dbNames.add("DB3");
+        env1.renameDatabase(null, "DB2", "DB3");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Rename DB1 to DB4. */
+        dbNames.remove("DB1");
+        dbNames.add("DB4");
+        env1.renameDatabase(null, "DB1", "DB4");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB0. */
+        dbNames.add("DB0");
+        db = env1.openDatabase(null, "DB0", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Remove DB0 (this caused NPE). */
+        dbNames.remove("DB0");
+        env1.removeDatabase(null, "DB0");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        env1.close();
+        env1 = null;
+    }
+
+    /**
+     * Checks that the expected set of names equals the list of names returned
+     * from getDatabaseNames.  A list can't be directly compared to a set using
+     * equals().
+     */
+    private void checkDbNames(Set<String> expected, List<String> actual) {
+        assertEquals(expected.size(), actual.size());
+        assertEquals(expected, new HashSet<String>(actual));
+    }
+
+    /*
+     * This little test case can only invoke the compressor, since the evictor,
+     * cleaner and checkpointer are all governed by utilization metrics and are
+     * tested elsewhere.
+     */
+    public void testDaemonManualInvocation()
+        throws Throwable {
+
+        try {
+            /* Set up an environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            String testPropVal = "120000000";
+            envConfig.setConfigParam
+                (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL.getName(),
+                 testPropVal);
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_MEM_SIZE.getName(), "20000");
+            envConfig.setConfigParam
+                (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            env1 = new Environment(envHome, envConfig);
+
+            String databaseName = "simpleDb";
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database exampleDb =
+                env1.openDatabase(null, databaseName, dbConfig);
+
+            Transaction txn = env1.beginTransaction(null, null);
+            Cursor cursor = exampleDb.openCursor(txn, null);
+            doSimpleCursorPutAndDelete(cursor, false);
+            cursor.close();
+            txn.commit();
+            exampleDb.close();
+            EnvironmentStats envStats = env1.getStats(TestUtils.FAST_STATS);
+            env1.compress();
+
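+            /*
+             * Summing the compressor-related stats is a rough check that the
+             * manual compress() call actually did some work.
+             */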
+            envStats = env1.getStats(TestUtils.FAST_STATS);
+            long compressorTotal =
+                envStats.getSplitBins() +
+                envStats.getDbClosedBins() +
+                envStats.getCursorsBins() +
+                envStats.getNonEmptyBins() +
+                envStats.getProcessedBins() +
+                envStats.getInCompQueueSize();
+            assertTrue(compressorTotal > 0);
+
+            env1.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Tests that each daemon can be turned on and off dynamically.
+     */
+    public void testDaemonRunPause()
+        throws DatabaseException, InterruptedException {
+
+	final boolean isDalvik = EnvironmentImpl.IS_DALVIK;
+
+        final String[] runProps = {
+            EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+            EnvironmentParams.ENV_RUN_CLEANER.getName(),
+            EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(),
+            EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+        };
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        config.setAllowCreate(true);
+
+	if (!isDalvik) {
+            config.setConfigParam
+                (EnvironmentParams.MAX_MEMORY.getName(),
+                 MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING);
+	}
+        /* Don't track detail with a tiny cache size. */
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_BYTES_INTERVAL.getName(),
+             "100");
+        config.setConfigParam
+            (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName(),
+             "100");
+        config.setConfigParam
+            (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL.getName(),
+             "1000000");
+	if (!isDalvik) {
+            config.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+                                  EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+	}
+        config.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+        setBoolConfigParams(config, runProps,
+                            new boolean[] { false, false, false, false });
+
+        env1 = new Environment(envHome, config);
+        EnvironmentImpl envImpl = env1.getEnvironmentImpl();
+
+        final DaemonRunner[] daemons = {
+            envImpl.getEvictor(),
+            envImpl.getCleaner(),
+            envImpl.getCheckpointer(),
+            envImpl.getINCompressor(),
+        };
+
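+        /*
+         * Exercise each daemon in turn: all off, then evictor, cleaner,
+         * checkpointer and compressor individually, then all off again.
+         */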
+        doTestDaemonRunPause(env1, daemons, runProps,
+                             new boolean[] { false, false, false, false });
+        doTestDaemonRunPause(env1, daemons, runProps,
+                             new boolean[] { true,  false, false, false });
+        if (!envImpl.isNoLocking()) {
+            doTestDaemonRunPause(env1, daemons, runProps,
+                    new boolean[] { false, true,  false, false });
+        }
+        doTestDaemonRunPause(env1, daemons, runProps,
+                             new boolean[] { false, false, true,  false });
+        doTestDaemonRunPause(env1, daemons, runProps,
+                             new boolean[] { false, false, false, true  });
+        doTestDaemonRunPause(env1, daemons, runProps,
+                             new boolean[] { false, false, false, false });
+
+        env1.close();
+        env1 = null;
+    }
+
+    /**
+     * Tests a set of daemon on/off settings.
+     */
+    private void doTestDaemonRunPause(Environment env,
+				      DaemonRunner[] daemons,
+                                      String[] runProps,
+                                      boolean[] runValues)
+        throws DatabaseException, InterruptedException {
+
+        /* Set daemon run properties. */
+        EnvironmentMutableConfig config = env.getMutableConfig();
+        setBoolConfigParams(config, runProps, runValues);
+        env.setMutableConfig(config);
+
+        /* Allow previously running daemons to come to a stop. */
+        for (int i = 0; i < 10; i += 1) {
+            Thread.yield();
+            Thread.sleep(10);
+        }
+
+        /* Get current wakeup counts. */
+        int[] prevCounts = new int[daemons.length];
+        for (int i = 0; i < prevCounts.length; i += 1) {
+            prevCounts[i] = daemons[i].getNWakeupRequests();
+        }
+
+        /* Write some data to wake up the checkpointer, cleaner and evictor. */
+        String dbName = "testDaemonRunPause";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env1.openDatabase(null, dbName, dbConfig);
+        Cursor cursor = db.openCursor(null, null);
+        doSimpleCursorPutAndDelete(cursor, !EnvironmentImpl.IS_DALVIK);
+        cursor.close();
+        db.close();
+
+        /* Sleep for a while to give the compressor a chance to wake up. */
+        Thread.sleep(1000);
+
+        /* Check that the expected daemons were woken. */
+        for (int i = 0; i < prevCounts.length; i += 1) {
+            int currNWakeups = daemons[i].getNWakeupRequests();
+            boolean woken = prevCounts[i] < currNWakeups;
+            assertEquals(daemons[i].getClass().getName() +
+                         " prevNWakeups=" + prevCounts[i] +
+                         " currNWakeups=" + currNWakeups,
+                         runValues[i], woken);
+        }
+    }
+
+    private void setBoolConfigParams(EnvironmentMutableConfig config,
+                                     String[] names,
+                                     boolean[] values) {
+        for (int i = 0; i < names.length; i += 1) {
+            config.setConfigParam(names[i],
+                                  Boolean.valueOf(values[i]).toString());
+        }
+    }
+
+    public void testExceptions()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+            env1.close();
+
+            /* Test for exceptions on closed environments via public APIs */
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.openDatabase(null, null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.openSecondaryDatabase(null, null, null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.removeDatabase(null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.renameDatabase(null, "old", "new");
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.truncateDatabase(null, null, false);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getHome();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.beginTransaction(null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.checkpoint(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.sync();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.cleanLog();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.evictMemory();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.compress();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getConfig();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.setMutableConfig(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getMutableConfig();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getStats(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getLockStats(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getTransactionStats(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getDatabaseNames();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.scanLog(0, 0, null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.verify(null, null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.getThreadTransaction();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.setThreadTransaction(null);
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (IllegalStateException expected) {
+            }
+
+
+            try {
+                env1.checkHandleIsValid();
+                fail("Didn't catch DatabaseException for op on closed env");
+            } catch (DatabaseException expected) {
+            }
+
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testClose()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env1 = new Environment(envHome, envConfig);
+            env1.close();
+
+            envConfig.setAllowCreate(false);
+            env1 = new Environment(envHome, envConfig);
+
+            /* Create a transaction to prevent the close from succeeding */
+            env1.beginTransaction(null, null);
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException for open transactions");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException already closed env");
+            } catch (DatabaseException expected) {
+            }
+
+            env1 = new Environment(envHome, envConfig);
+
+            String databaseName = "simpleDb";
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            env1.openDatabase(null, databaseName, dbConfig);
+            env1.openDatabase(null, databaseName + "2", dbConfig);
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException for open dbs");
+            } catch (DatabaseException expected) {
+            }
+            try {
+                env1.close();
+                fail("Didn't catch DatabaseException already closed env");
+            } catch (DatabaseException expected) {
+            }
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    protected String[] simpleKeyStrings = {
+        "foo", "bar", "baz", "aaa", "fubar",
+        "foobar", "quux", "mumble", "froboy" };
+
+    protected String[] simpleDataStrings = {
+        "one", "two", "three", "four", "five",
+        "six", "seven", "eight", "nine" };
+
+    protected void doSimpleCursorPutAndDelete(Cursor cursor, boolean extras)
+	throws DatabaseException {
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        for (int i = 0; i < simpleKeyStrings.length; i++) {
+            foundKey.setString(simpleKeyStrings[i]);
+            foundData.setString(simpleDataStrings[i]);
+            if (cursor.putNoOverwrite(foundKey, foundData) !=
+                OperationStatus.SUCCESS) {
+                throw new DatabaseException("non-0 return");
+            }
+            /* Need to write some extra out to force eviction to run. */
+            if (extras) {
+                for (int j = 0; j < 500; j++) {
+                    foundData.setString(Integer.toString(j));
+                    OperationStatus status =
+                        cursor.put(foundKey, foundData);
+                    if (status != OperationStatus.SUCCESS) {
+                        throw new DatabaseException("non-0 return " + status);
+                    }
+                }
+            }
+        }
+
+        OperationStatus status =
+            cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+
+        while (status == OperationStatus.SUCCESS) {
+            cursor.delete();
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+        }
+    }
+
+    protected void doSimpleVerification(Cursor cursor)
+        throws DatabaseException {
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        int count = 0;
+        OperationStatus status = cursor.getFirst(foundKey, foundData,
+                                                 LockMode.DEFAULT);
+
+        while (status == OperationStatus.SUCCESS) {
+            count++;
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+        }
+        assertEquals(simpleKeyStrings.length, count);
+    }
+}
diff --git a/test/com/sleepycat/je/GetSearchBothRangeTest.java b/test/com/sleepycat/je/GetSearchBothRangeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..03f4bbd4948e8d04fb6dcb0bd200e0838f47b95e
--- /dev/null
+++ b/test/com/sleepycat/je/GetSearchBothRangeTest.java
@@ -0,0 +1,395 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: GetSearchBothRangeTest.java,v 1.14.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+import java.util.Comparator;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests getSearchBothRange when searching for a key that doesn't exist.
+ * [#11119]
+ */
+public class GetSearchBothRangeTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private boolean dups;
+
+    public GetSearchBothRangeTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+	throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("Ignored during close: " + e);
+            }
+        }
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        envHome = null;
+        env = null;
+        db = null;
+    }
+
+    /**
+     * Open environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+	openEnvWithComparator(null);
+    }
+
+    private void openEnvWithComparator(Class comparatorClass)
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        //*
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        //*/
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setAllowCreate(true);
+
+        dbConfig.setBtreeComparator(comparatorClass);
+
+        db = env.openDatabase(null, "GetSearchBothRangeTest", dbConfig);
+    }
+
+    /**
+     * Close environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+
+    public void testSearchKeyRangeWithDupTree()
+        throws Exception {
+
+        dups = true;
+        openEnv();
+
+        insert(1, 1);
+        insert(1, 2);
+        insert(3, 1);
+
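+        /*
+         * Key 2 does not exist, so getSearchKeyRange should land on the next
+         * key, returning its first duplicate {3, 1}.
+         */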
+        DatabaseEntry key = entry(2);
+        DatabaseEntry data = new DatabaseEntry();
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, val(key));
+        assertEquals(1, val(data));
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSearchBothWithNoDupTree()
+        throws Exception {
+
+        dups = true;
+        openEnv();
+
+        insert(1, 1);
+
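+        /*
+         * Only a single datum exists for key 1, so no duplicate subtree has
+         * been created yet; the searches below must still work correctly.
+         */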
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBoth(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        key = entry(1);
+        data = entry(1);
+
+        cursor = db.openCursor(null, null);
+        status = cursor.getSearchBoth(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(key));
+        assertEquals(1, val(data));
+        cursor.close();
+
+        key = entry(1);
+        data = entry(0);
+
+        cursor = db.openCursor(null, null);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(key));
+        assertEquals(1, val(data));
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSuccess()
+        throws DatabaseException {
+
+        openEnv();
+        insert(1, 1);
+        insert(3, 1);
+        if (dups) {
+            insert(1, 2);
+            insert(3, 2);
+        }
+
+        DatabaseEntry key = entry(3);
+        DatabaseEntry data = entry(0);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, val(key));
+        assertEquals(1, val(data));
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSuccessDup()
+        throws DatabaseException {
+
+        dups = true;
+        testSuccess();
+    }
+
+    public void testNotFound()
+        throws DatabaseException {
+
+        openEnv();
+        insert(1, 0);
+        if (dups) {
+            insert(1, 1);
+        }
+
+        DatabaseEntry key = entry(2);
+        DatabaseEntry data = entry(0);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testNotFoundDup()
+        throws DatabaseException {
+
+        dups = true;
+        testNotFound();
+    }
+
+    public void testSearchBefore()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+        insert(1, 0);
+
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSearchBeforeDups()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+        insert(1, 1);
+        insert(1, 2);
+
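+        /*
+         * Data 0 sorts before both existing duplicates, so the range search
+         * should return the first one, {1, 1}.
+         */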
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(0);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(key));
+        assertEquals(1, val(data));
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public static class NormalComparator implements Comparator {
+
+	public NormalComparator() {
+	}
+
+	public int compare(Object o1, Object o2) {
+
+            DatabaseEntry arg1 = new DatabaseEntry((byte[]) o1);
+            DatabaseEntry arg2 = new DatabaseEntry((byte[]) o2);
+            int val1 = IntegerBinding.entryToInt(arg1);
+            int val2 = IntegerBinding.entryToInt(arg2);
+
+            if (val1 < val2) {
+                return -1;
+            } else if (val1 > val2) {
+                return 1;
+            } else {
+                return 0;
+            }
+	}
+    }
+
+    public void testSearchAfterDups()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+        insert(1, 0);
+        insert(1, 1);
+        insert(2, 0);
+        insert(2, 1);
+
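+        /*
+         * No duplicate >= 2 exists for key 1, and the search must not move
+         * on to key 2, so NOTFOUND is expected.
+         */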
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSearchAfterDupsWithComparator()
+        throws DatabaseException {
+
+        dups = true;
+        openEnvWithComparator(NormalComparator.class);
+        insert(1, 0);
+        insert(1, 1);
+        insert(2, 0);
+        insert(2, 1);
+
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSearchAfterDeletedDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+        insert(1, 1);
+        insert(1, 2);
+        insert(1, 3);
+
+        /* Delete {1,3} leaving {1,1} in dup tree. */
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(3);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        cursor.delete();
+        cursor.close();
+        env.compress();
+
+        /* Search for {1,3} and expect NOTFOUND. */
+        cursor = db.openCursor(null, null);
+        key = entry(1);
+        data = entry(3);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    public void testSingleDatumBug()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+        insert(1, 1);
+        insert(2, 2);
+
+        /* Search for {1,2} and expect NOTFOUND. */
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        closeEnv();
+    }
+
+    private int val(DatabaseEntry entry) {
+        return IntegerBinding.entryToInt(entry);
+    }
+
+    private DatabaseEntry entry(int val) {
+        DatabaseEntry entry = new DatabaseEntry();
+        IntegerBinding.intToEntry(val, entry);
+        return entry;
+    }
+
+    private void insert(int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        IntegerBinding.intToEntry(dataVal, data);
+        OperationStatus status;
+        if (dups) {
+            status = db.putNoDupData(null, key, data);
+        } else {
+            status = db.putNoOverwrite(null, key, data);
+        }
+        assertEquals(OperationStatus.SUCCESS, status);
+    }
+}
diff --git a/test/com/sleepycat/je/InterruptTest.java b/test/com/sleepycat/je/InterruptTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..86348c36c2adddf953d3f646e77e89137779bc69
--- /dev/null
+++ b/test/com/sleepycat/je/InterruptTest.java
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: InterruptTest.java,v 1.14.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This test throws thread interrupts while JE is doing I/O-intensive
+ * work. When an interrupt is received during various NIO activities, NIO
+ * closes the underlying file descriptor. In this multi-threaded test,
+ * abruptly closing the file descriptor causes exceptions such as
+ * java.nio.channels.ClosedChannelException, because an uninterrupted thread
+ * may be in the middle of using that file.
+ *
+ * JE must convert all such exceptions to
+ * com.sleepycat.je.RunRecoveryException.
+ *
+ * @author Paul.Kendall@orionhealth.com
+ */
+public class InterruptTest extends TestCase {
+
+    private File envHome;
+    private int NUM_OPS = 1000;
+    private int NUM_ITERATIONS = 1;
+
+    public InterruptTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void testInterruptHandling()
+    	throws Exception {
+
+        for (int i = 0; i < NUM_ITERATIONS; i++) {
+            interruptThreads(i);
+        }
+    }
+
+    public void interruptThreads(int i)
+    	throws Exception {
+
+        // TestUtils.removeLogFiles("Loop", envHome, false);
+        Environment env = null;
+        Database db = null;
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+	    envConfig.setConfigParam
+		(EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false");
+            env = new Environment(envHome, envConfig);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            db = env.openDatabase(null, "testDB" + i, dbConfig);
+
+            ActionThread putter = new ActionThread(env, db, 1) {
+                    protected void doStuff(Database db,
+                                           Transaction txn,
+                                           DatabaseEntry key,
+                                           DatabaseEntry value)
+                        throws DatabaseException {
+                        db.put(txn, key, value);
+                    }
+                };
+
+            ActionThread deleter = new ActionThread(env, db, 1) {
+                    protected void doStuff(Database db,
+                                           Transaction txn,
+                                           DatabaseEntry key,
+                                           DatabaseEntry value)
+                        throws DatabaseException {
+                        db.delete(txn, key);
+                    }
+                };
+
+            putter.start();
+            Thread.sleep(1000);
+
+            deleter.start();
+            Thread.sleep(2000);
+
+            /*
+             * Interrupting these threads will catch them in the middle of an
+             * NIO operation; expect a RunRecoveryException.
+             */
+            putter.interrupt();
+            deleter.interrupt();
+
+            putter.join();
+            deleter.join();
+        } finally {
+            try {
+                if (db != null) {
+                    db.close();
+                }
+            } catch (RunRecoveryException ok) {
+
+                /*
+                 * Expect a RunRecoveryException. Since it will be detected
+                 * when we try to close the database, close the environment
+                 * now so we can restart in the same JVM.
+                 */
+            } catch (Throwable t) {
+                t.printStackTrace();
+                fail("Should not see any other kind of exception. Iteration=" +
+		     i);
+            } finally {
+                if (env != null) {
+                    try {
+                        env.close();
+                        env = null;
+                    } catch (RunRecoveryException ignore) {
+                        /* Sometimes the checkpointer can't close down. */
+                    }
+                }
+            }
+        }
+    }
+
+    abstract class ActionThread extends Thread {
+    	private Environment env;
+    	private Database db;
+        private int threadNumber;
+    	
+    	public ActionThread(Environment env, Database db, int threadNumber) {
+            this.env = env;
+            this.db = db;
+            this.threadNumber = threadNumber;
+    	}
+    	
+    	public void run() {
+            int i=0;
+            Transaction txn = null;
+            try {
+                for ( ; i < NUM_OPS ; i++) {
+                    txn = env.beginTransaction(null, null);
+                    DatabaseEntry key = new DatabaseEntry();
+                    key.setData(("" + threadNumber * 10000 + i).getBytes());
+                    DatabaseEntry value = new DatabaseEntry();
+                    value.setData(new byte[8192]);
+                    doStuff(db, txn, key, value);
+                    Thread.sleep(10);
+                    txn.commit();
+                    txn = null;
+                }
+            } catch (InterruptedException e) {
+                /* possible outcome. */
+            } catch (RunRecoveryException e) {
+                /* possible outcome. */
+            } catch (DatabaseException e) {
+                /* possible outcome. */
+                //System.out.println("Put to " + i);
+                //e.printStackTrace();
+            } finally {
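+                /* Abort any txn left open by an interrupt or exception. */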
+                try {
+                    if (txn != null) {
+                        txn.abort();
+                    }
+                } catch (DatabaseException ignored) {
+                }
+            }
+    	}
+
+        abstract protected void doStuff(Database db,
+                                        Transaction txn,
+                                        DatabaseEntry key,
+                                        DatabaseEntry value)
+            throws DatabaseException;
+    }
+}
diff --git a/test/com/sleepycat/je/ReadCommittedTest.java b/test/com/sleepycat/je/ReadCommittedTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6ad4235d09953835aa35d32f37b0798a0ca7bc1
--- /dev/null
+++ b/test/com/sleepycat/je/ReadCommittedTest.java
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReadCommittedTest.java,v 1.8.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests the read-committed (degree 2) isolation level.
+ */
+public class ReadCommittedTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public ReadCommittedTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+
+        env = null;
+        db = null;
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        /* Control over isolation level is required by this test. */
+        TestUtils.clearIsolationLevel(envConfig);
+
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
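+        /* Load keys 101-105 and 201-205, each with data value 0. */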
+        for (int i = 100; i <= 200; i += 100) {
+            for (int j = 1; j <= 5; j += 1) {
+                IntegerBinding.intToEntry(i + j, key);
+                IntegerBinding.intToEntry(0, data);
+                db.put(null, key, data);
+            }
+        }
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+
+    public void testIllegalConfig()
+        throws DatabaseException {
+
+        open();
+
+        CursorConfig cursConfig;
+        TransactionConfig txnConfig;
+
+        /* Disallow transaction ReadCommitted and Serializable. */
+        txnConfig = new TransactionConfig();
+        txnConfig.setReadCommitted(true);
+        txnConfig.setSerializableIsolation(true);
+        try {
+            env.beginTransaction(null, txnConfig);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        /* Disallow transaction ReadCommitted and ReadUncommitted. */
+        txnConfig = new TransactionConfig();
+        txnConfig.setReadCommitted(true);
+        txnConfig.setReadUncommitted(true);
+        try {
+            env.beginTransaction(null, txnConfig);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        /* Disallow cursor ReadCommitted and ReadUncommitted. */
+        cursConfig = new CursorConfig();
+        cursConfig.setReadCommitted(true);
+        cursConfig.setReadUncommitted(true);
+        Transaction txn = env.beginTransaction(null, null);
+        try {
+            db.openCursor(txn, cursConfig);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        txn.abort();
+
+        close();
+    }
+
+    public void testWithTransactionConfig()
+        throws DatabaseException {
+
+        open();
+
+        TransactionConfig config = new TransactionConfig();
+        config.setReadCommitted(true);
+        Transaction txn = env.beginTransaction(null, config);
+        Cursor cursor = db.openCursor(txn, null);
+
+        checkReadCommitted(cursor, 100, true);
+
+        cursor.close();
+        txn.commit();
+        close();
+    }
+
+    public void testWithCursorConfig()
+        throws DatabaseException {
+
+        open();
+
+        Transaction txn = env.beginTransaction(null, null);
+        CursorConfig config = new CursorConfig();
+        config.setReadCommitted(true);
+        Cursor cursor = db.openCursor(txn, config);
+        Cursor degree3Cursor = db.openCursor(txn, null);
+
+        checkReadCommitted(cursor, 100, true);
+        checkReadCommitted(degree3Cursor, 200, false);
+
+        degree3Cursor.close();
+        cursor.close();
+        txn.commit();
+        close();
+    }
+
+    public void testWithLockMode()
+        throws DatabaseException {
+
+        open();
+
+        Transaction txn = env.beginTransaction(null, null);
+
+        checkReadCommitted(txn, LockMode.READ_COMMITTED, 100, true);
+        checkReadCommitted(txn, null, 200, false);
+
+        txn.commit();
+        close();
+    }
+
+    /**
+     * Checks that the given cursor provides the given
+     * expectReadLocksAreReleased behavior.
+     */
+    private void checkReadCommitted(Cursor cursor,
+                                    int startKey,
+                                    boolean expectReadLocksAreReleased)
+        throws DatabaseException {
+
+        LockStats baseStats = env.getLockStats(null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        checkNReadLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            OperationStatus status = cursor.getSearchKey(key, data, null);
+            assertEquals(OperationStatus.SUCCESS, status);
+            if (expectReadLocksAreReleased) {
+                /* Read locks are released as the cursor moves. */
+                checkNReadLocks(baseStats, 1);
+            } else {
+                /* Read locks are not released. */
+                checkNReadLocks(baseStats, i);
+            }
+        }
+
+        checkNWriteLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            IntegerBinding.intToEntry(0, data);
+            cursor.put(key, data);
+            /* Write locks are not released. */
+            checkNWriteLocks(baseStats, i);
+        }
+
+        if (expectReadLocksAreReleased) {
+            /* The last read lock was released by the put() call above. */
+            checkNReadLocks(baseStats, 0);
+        }
+    }
+
+    /**
+     * Checks that the given lock mode provides the given
+     * expectReadLocksAreReleased behavior.
+     */
+    private void checkReadCommitted(Transaction txn,
+                                    LockMode lockMode,
+                                    int startKey,
+                                    boolean expectReadLocksAreReleased)
+        throws DatabaseException {
+
+        LockStats baseStats = env.getLockStats(null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        checkNReadLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            OperationStatus status = db.get(txn, key, data, lockMode);
+            assertEquals(OperationStatus.SUCCESS, status);
+            if (expectReadLocksAreReleased) {
+                /* Read locks are released when the cursor is closed. */
+                checkNReadLocks(baseStats, 0);
+            } else {
+                /* Read locks are not released. */
+                checkNReadLocks(baseStats, i);
+            }
+        }
+
+        checkNWriteLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            IntegerBinding.intToEntry(0, data);
+            db.put(txn, key, data);
+            /* Write locks are not released. */
+            checkNWriteLocks(baseStats, i);
+        }
+
+        if (expectReadLocksAreReleased) {
+            /* The last read lock was released by the put() call above. */
+            checkNReadLocks(baseStats, 0);
+        }
+    }
+
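+    /**
+     * Asserts that the number of read locks acquired since baseStats was
+     * captured equals nReadLocksExpected.
+     */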
+    private void checkNReadLocks(LockStats baseStats, int nReadLocksExpected)
+        throws DatabaseException {
+
+        LockStats stats = env.getLockStats(null);
+        assertEquals
+            ("Read locks -- ",
+             nReadLocksExpected,
+             stats.getNReadLocks() - baseStats.getNReadLocks());
+    }
+
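+    /**
+     * Asserts that the number of write locks acquired since baseStats was
+     * captured equals nWriteLocksExpected.
+     */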
+    private void checkNWriteLocks(LockStats baseStats, int nWriteLocksExpected)
+        throws DatabaseException {
+
+        LockStats stats = env.getLockStats(null);
+        assertEquals
+            ("Write locks -- ",
+             nWriteLocksExpected,
+             stats.getNWriteLocks() - baseStats.getNWriteLocks());
+    }
+}
diff --git a/test/com/sleepycat/je/RunRecoveryFailureTest.java b/test/com/sleepycat/je/RunRecoveryFailureTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..13a510b56cb55e1d2e7166fc31861adb0299604e
--- /dev/null
+++ b/test/com/sleepycat/je/RunRecoveryFailureTest.java
@@ -0,0 +1,152 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RunRecoveryFailureTest.java,v 1.38.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class RunRecoveryFailureTest extends TestCase {
+
+    private Environment env;
+    private File envHome;
+
+    public RunRecoveryFailureTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        openEnv();
+
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        /*
+         * Close the environment in case the unit test failed, so that the
+         * log files can be removed.
+         */
+        try {
+            if (env != null) {
+                env.close();
+                env = null;
+            }
+        } catch (RunRecoveryException e) {
+            /* OK, the test invalidated the environment. */
+            return;
+        } catch (DatabaseException e) {
+            /* OK, the test already closed it. */
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
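+    /**
+     * Opens a transactional environment with tiny log buffers and small log
+     * files, so that data reaches disk quickly and checksum errors can be
+     * provoked by corrupting the files.
+     */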
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+
+        /*
+         * Run with tiny log buffers, so we can go to disk more (and see the
+         * checksum errors).
+         */
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_MEM_SIZE.getName(),
+             EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(), "1024");
+        envConfig.setConfigParam
+            (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    /*
+     * Corrupt an environment while open, make sure we get a
+     * RunRecoveryException.
+     */
+    public void testInvalidateEnvMidStream()
+        throws Throwable {
+
+        try {
+
+            /* Make a new db in this env and flush the file. */
+            Transaction txn =
+		env.beginTransaction(null, TransactionConfig.DEFAULT);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database db = env.openDatabase(txn, "foo", dbConfig);
+            DatabaseEntry key = new DatabaseEntry(new byte[1000]);
+            DatabaseEntry data = new DatabaseEntry(new byte[1000]);
+            for (int i = 0; i < 100; i += 1) {
+                db.put(txn, key, data);
+            }
+
+            env.getEnvironmentImpl().getLogManager().flush();
+            env.getEnvironmentImpl().getFileManager().clear();
+
+            /*
+             * Corrupt the file, then abort the txn in order to force it to
+             * re-read. Should get a checksum error, which should invalidate
+             * the environment.
+             */
+            long currentFile = DbInternal.envGetEnvironmentImpl(env)
+                                         .getFileManager()
+                                         .getCurrentFileNum();
+            for (int fileNum = 0; fileNum <= currentFile; fileNum += 1) {
+                File file = new File
+                    (envHome, "0000000" + fileNum + FileManager.JE_SUFFIX);
+                RandomAccessFile starterFile =
+                    new RandomAccessFile(file, "rw");
+                FileChannel channel = starterFile.getChannel();
+                long fileSize = channel.size();
+                if (fileSize > FileManager.firstLogEntryOffset()) {
+                    ByteBuffer junkBuffer = ByteBuffer.allocate
+                        ((int) fileSize - FileManager.firstLogEntryOffset());
+                    int written = channel.write
+                        (junkBuffer, FileManager.firstLogEntryOffset());
+                    assertTrue(written > 0);
+                    starterFile.close();
+                }
+            }
+
+            try {
+                txn.abort();
+                fail("Should see a run recovery exception");
+            } catch (RunRecoveryException e) {
+            }
+
+            try {
+                env.getDatabaseNames();
+                fail("Should see a run recovery exception again");
+            } catch (RunRecoveryException e) {
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/ScanLogTest.java b/test/com/sleepycat/je/ScanLogTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d4aab844dddc33a61d8942edb530f9f9f66ef1e2
--- /dev/null
+++ b/test/com/sleepycat/je/ScanLogTest.java
@@ -0,0 +1,402 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ScanLogTest.java,v 1.6.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests Environment.scanLog with forward and backward scans, duplicates,
+ * deleted records, zero start LSNs, and illegal arguments.
+ */
+public class ScanLogTest extends TestCase {
+    private static final int NUM_RECS = 3;
+
+    private File envHome;
+    private Environment env;
+    private String testName;
+    private boolean forwards;
+    private boolean duplicates;
+    private boolean deleteRecs;
+    private boolean useZeroLsn;
+    private boolean doOtherTests;
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < 2; i++) {               // forward
+            for (int j = 0; j < 2; j++) {           // duplicates
+                for (int k = 0; k < 2; k++) {       // deleteRecs
+                    for (int l = 0; l < 2; l++) {   // useZeroLsn
+                        suite.addTest(new ScanLogTest
+                                      (i == 0, j == 0, k == 0, l == 0, false));
+                    }
+                }
+            }
+        }
+
+        suite.addTest(new ScanLogTest(true, false, false, false, true));
+        return suite;
+    }
+
+    public ScanLogTest(final boolean forwards,
+		       final boolean duplicates,
+		       final boolean deleteRecs,
+		       final boolean useZeroLsn,
+		       final boolean doOtherTests) {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+	this.forwards = forwards;
+	this.duplicates = duplicates;
+	this.deleteRecs = deleteRecs;
+	this.doOtherTests = doOtherTests;
+	this.useZeroLsn = useZeroLsn;
+
+	if (doOtherTests) {
+	    testName = "ScanLogTest-other";
+	} else {
+	    testName = "ScanLogTest-" +
+		(forwards ? "fwd" : "bwd") + "-" +
+		(duplicates ? "dups" : "noDups") + "-" +
+		(deleteRecs ? "del" : "nodel") + "-" +
+		(useZeroLsn ? "LSN0" : "noLSN0");
+	}
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            /* Close in case we hit an exception and didn't close */
+            if (env != null) {
+		env.close();
+            }
+        } catch (DatabaseException e) {
+            /* Ok if already closed */
+        }
+        env = null; // for JUnit, to reduce memory usage when run in a suite.
+        setName(testName);
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void runTest()
+        throws Throwable {
+
+	if (doOtherTests) {
+	    doTest(forwards, duplicates, deleteRecs, useZeroLsn,
+		   true  /* abortScan */);
+	    checkIllegalArgs();
+	} else {
+	    doTest(forwards, duplicates, deleteRecs, useZeroLsn,
+		   false /* abortScan */);
+	}
+    }
+
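+    /**
+     * LogScanner that checks each record returned by Environment.scanLog:
+     * key ordering, database name, deleted flag, and key/data consistency.
+     */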
+    private static class ScanLogTestScanner implements LogScanner {
+
+	private int nDeletedRecsSeen = 0;
+	private int nNonDeletedRecsSeen = 0;
+	private boolean forwards;
+	private boolean duplicates;
+	private boolean deleteRecs;
+	private byte prevKey;
+	private boolean abortScan;
+
+	private ScanLogTestScanner(final boolean forwards,
+				   final boolean duplicates,
+				   final boolean deleteRecs,
+				   final boolean abortScan) {
+
+	    this.forwards = forwards;
+	    this.duplicates = duplicates;
+	    this.deleteRecs = deleteRecs;
+	    this.abortScan = abortScan;
+
+	    if (forwards) {
+		prevKey = (byte) 0;
+	    } else {
+		prevKey = (byte) NUM_RECS;
+	    }
+	}
+
+	public boolean scanRecord(final DatabaseEntry key,
+				  final DatabaseEntry data,
+				  final boolean deleted,
+				  final String databaseName) {
+
+	    byte keyVal = key.getData()[3];
+
+	    assertFalse(DbTree.isReservedDbName(databaseName));
+	    assertTrue(databaseName.equals("testDB"));
+	    assertFalse(deleted && !deleteRecs);
+	    if (deleted) {
+		assertTrue(keyVal == (NUM_RECS - 1));
+		nDeletedRecsSeen++;
+	    } else {
+		if (duplicates) {
+		    /* For duplicates, data[2] will be set (so ignore it). */
+		    assertTrue(key.getData()[3] == data.getData()[3]);
+		} else {
+		    /* If !duplicates compare all of key with all of data. */
+		    assertTrue(Key.compareKeys(key.getData(),
+					       data.getData(),
+					       null) == 0);
+		}
+		nNonDeletedRecsSeen++;
+	    }
+
+	    if (forwards) {
+		assertTrue(prevKey <= keyVal);
+	    } else {
+		assertTrue(prevKey >= keyVal);
+	    }
+	    prevKey = keyVal;
+
+	    if (abortScan) {
+		return false;
+	    } else {
+		return true;
+	    }
+	}
+
+	private int getNDeletedRecsSeen() {
+	    return nDeletedRecsSeen;
+	}
+
+	private int getNonDeletedRecsSeen() {
+	    return nNonDeletedRecsSeen;
+	}
+    }
+
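+    /**
+     * Loads records (optionally with duplicates and one deletion), then
+     * scans the log between the recorded start and end LSNs and verifies
+     * the counts reported by the scanner.
+     */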
+    private void doTest(final boolean forwards,
+			final boolean duplicates,
+			final boolean deleteRecs,
+			final boolean useZeroLsn,
+			final boolean abortScan)
+	throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true);
+	    long startLSN = (useZeroLsn ? 0 : getCurrentLSN());
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+
+            /* Create some data. */
+	    for (int i = 0; i < NUM_RECS; i++) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+
+		if (duplicates) {
+		    for (int j = 1; j < NUM_RECS; j++) {
+			data.setData(TestUtils.getTestArray(i + (j << 8)));
+			assertEquals(OperationStatus.SUCCESS,
+				     myDb.put(txn, key, data));
+		    }
+		}
+
+		if (deleteRecs &&
+		    i == NUM_RECS - 1) {
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.delete(txn, key));
+		}
+            }
+
+	    txn.commit();
+	    long endLSN = getCurrentLSN();
+
+	    LogScanConfig lsConf = new LogScanConfig();
+	    lsConf.setForwards(forwards);
+
+	    ScanLogTestScanner scanner =
+		new ScanLogTestScanner(forwards, duplicates,
+				       deleteRecs, abortScan);
+
+	    if (lsConf.getForwards()) {
+		env.scanLog(startLSN, endLSN, lsConf, scanner);
+	    } else {
+		env.scanLog(endLSN, startLSN, lsConf, scanner);
+	    }
+
+	    if (duplicates) {
+		if (deleteRecs) {
+		    int expectedNDeletedRecs = NUM_RECS;
+
+		    /*
+		     * Don't subtract off deleted recs because all recs show up
+		     * regardless of whether they're deleted or not.
+		     */
+		    int expectedNRecs = (NUM_RECS * NUM_RECS);
+		    assertTrue(expectedNDeletedRecs ==
+			       scanner.getNDeletedRecsSeen());
+		    assertTrue(expectedNRecs ==
+			       scanner.getNonDeletedRecsSeen());
+		}
+	    } else {
+		assertTrue(scanner.getNDeletedRecsSeen() ==
+			   (deleteRecs ? 1 : 0));
+
+		if (abortScan) {
+		    assertTrue(scanner.getNonDeletedRecsSeen() == 1);
+		} else {
+		    assertTrue(scanner.getNonDeletedRecsSeen() == NUM_RECS);
+		}
+	    }
+
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
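+    /**
+     * Verifies that scanLog rejects reversed LSN ranges, negative LSNs, and
+     * out-of-range LSNs with IllegalArgumentException.
+     */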
+    private void checkIllegalArgs()
+	throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true);
+	    long startLSN = getCurrentLSN();
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+	    Transaction txn = env.beginTransaction(null, null);
+
+            /* Create some data. */
+	    for (int i = 0; i < NUM_RECS; i++) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+
+		if (duplicates) {
+		    for (int j = 1; j < NUM_RECS; j++) {
+			data.setData(TestUtils.getTestArray(i + (j << 8)));
+			assertEquals(OperationStatus.SUCCESS,
+				     myDb.put(txn, key, data));
+		    }
+		}
+
+		if (deleteRecs &&
+		    i == NUM_RECS - 1) {
+		    assertEquals(OperationStatus.SUCCESS,
+				 myDb.delete(txn, key));
+		}
+            }
+
+	    txn.commit();
+	    long endLSN = getCurrentLSN();
+
+	    ScanLogTestScanner scanner =
+		new ScanLogTestScanner(forwards, duplicates,
+				       deleteRecs, false);
+
+	    LogScanConfig lsConf = new LogScanConfig();
+	    lsConf.setForwards(true);
+	    /* Reverse start and end LSNs. */
+	    try {
+		env.scanLog(endLSN, startLSN, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+	    lsConf.setForwards(false);
+	    /* Reverse start and end LSNs. */
+	    try {
+		env.scanLog(startLSN, endLSN, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+	    /* Use negative startLSN. */
+	    try {
+		env.scanLog(-1, endLSN, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+	    lsConf.setForwards(true);
+	    /* Use negative startLSN. */
+	    try {
+		env.scanLog(startLSN, -1, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+	    lsConf.setForwards(true);
+	    try {
+		env.scanLog(100000, 1000000, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+	    lsConf.setForwards(false);
+	    try {
+		env.scanLog(1000000, 100000, lsConf, scanner);
+		fail("expected failure");
+	    } catch (IllegalArgumentException IAE) {
+		/* ignore */
+	    }
+
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
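+    /** Returns the current end-of-log LSN, taken from environment stats. */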
+    private long getCurrentLSN()
+	throws DatabaseException {
+
+	return env.getStats(null).getEndOfLog();
+    }
+
+    /**
+     * Set up the environment and db.
+     */
+    private Database initEnvAndDb(boolean allowDuplicates,
+				  boolean transactional)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(transactional);
+
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        dbConfig.setAllowCreate(true);
+	dbConfig.setTransactional(transactional);
+        Database myDb = env.openDatabase(null, "testDB", dbConfig);
+        return myDb;
+    }
+}
diff --git a/test/com/sleepycat/je/TruncateTest.java b/test/com/sleepycat/je/TruncateTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8b3550c13c26431e92ca80a1482b67c3777a028b
--- /dev/null
+++ b/test/com/sleepycat/je/TruncateTest.java
@@ -0,0 +1,480 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TruncateTest.java,v 1.22.2.3 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests Environment.truncateDatabase, including interaction with
+ * transactions, recovery, and the mapping and naming trees.
+ */
+public class TruncateTest extends TestCase {
+    private static final int NUM_RECS = 257;
+    private static final String DB_NAME = "testDb";
+
+    private File envHome;
+    private Environment env;
+
+    public TruncateTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try {
+                /* Close in case we hit an exception and didn't close. */
+                env.close();
+            } catch (DatabaseException e) {
+                /* Ok if already closed */
+            }
+        }
+        env = null; // for JUnit, to reduce memory usage when run in a suite.
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testEnvTruncateAbort()
+        throws Throwable {
+
+        doTruncateAndAdd(true,    // transactional
+                         256,     // step1 num records
+                         false,   // step2 autocommit
+                         150,     // step3 num records
+                         true,    // step4 abort
+                         0);      // step5 num records
+    }
+
+    public void testEnvTruncateCommit()
+        throws Throwable {
+
+        doTruncateAndAdd(true,    // transactional
+                         256,     // step1 num records
+                         false,   // step2 autocommit
+                         150,     // step3 num records
+                         false,   // step4 abort
+                         150);    // step5 num records
+    }
+
+    public void testEnvTruncateAutocommit()
+        throws Throwable {
+
+        doTruncateAndAdd(true,    // transactional
+                         256,     // step1 num records
+                         true,    // step2 autocommit
+                         150,     // step3 num records
+                         false,   // step4 abort
+                         150);    // step5 num records
+    }
+
+    public void testEnvTruncateNoFirstInsert()
+        throws Throwable {
+
+        doTruncateAndAdd(true,    // transactional
+                         0,       // step1 num records
+                         false,   // step2 autocommit
+                         150,     // step3 num records
+                         false,   // step4 abort
+                         150);    // step5 num records
+    }
+
+    public void testNoTxnEnvTruncateCommit()
+        throws Throwable {
+
+        doTruncateAndAdd(false,    // transactional
+                         256,      // step1 num records
+                         false,    // step2 autocommit
+                         150,      // step3 num records
+                         false,    // step4 abort
+                         150);     // step5 num records
+    }
+
+    public void testTruncateCommit()
+        throws Throwable {
+
+        doTruncate(false, false);
+    }
+
+    public void testTruncateCommitAutoTxn()
+        throws Throwable {
+
+        doTruncate(false, true);
+    }
+
+    public void testTruncateAbort()
+        throws Throwable {
+
+        doTruncate(true, false);
+    }
+
+    /*
+     * SR 10386, 11252. This used to deadlock, because the truncate did not
+     * use an AutoTxn on the new mapLN, and the put operations conflicted with
+     * the held write lock.
+     */
+    public void testWriteAfterTruncate()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true);
+
+            myDb.close();
+            Transaction txn = env.beginTransaction(null, null);
+            long truncateCount = env.truncateDatabase(txn, DB_NAME, true);
+            assertEquals(0, truncateCount);
+            txn.commit();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testTruncateEmptyDeferredWriteDatabase()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(false);
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false");
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(false);
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setAllowCreate(true);
+            dbConfig.setDeferredWrite(true);
+            Database myDb = env.openDatabase(null, DB_NAME, dbConfig);
+            myDb.close();
+            long truncateCount;
+            truncateCount = env.truncateDatabase(null, DB_NAME, true);
+            assertEquals(0, truncateCount);
+        } catch (Throwable T) {
+            T.printStackTrace();
+            throw T;
+        }
+    }
+
+    /**
+     * 1. Populate a database.
+     * 2. Truncate.
+     * 3. Commit or abort.
+     * 4. Check that database has the right amount of records.
+     */
+    private void doTruncate(boolean abort, boolean useAutoTxn)
+        throws Throwable {
+
+        try {
+            int numRecsAfterTruncate =
+                useAutoTxn ? 0 : ((abort) ? NUM_RECS : 0);
+            Database myDb = initEnvAndDb(true);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            /* Populate database. */
+            for (int i = NUM_RECS; i > 0; i--) {
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(null, key, data));
+            }
+
+            /* Truncate, check the count, commit. */
+            myDb.close();
+            long truncateCount = 0;
+            if (useAutoTxn) {
+                truncateCount = env.truncateDatabase(null, DB_NAME, true);
+            } else {
+                Transaction txn = env.beginTransaction(null, null);
+                truncateCount = env.truncateDatabase(txn, DB_NAME, true);
+
+                if (abort) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+                }
+            }
+
+            assertEquals(NUM_RECS, truncateCount);
+
+            /* Do a cursor read, make sure there's the right amount of data. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setSortedDuplicates(true);
+            myDb = env.openDatabase(null, DB_NAME, dbConfig);
+            int count = 0;
+            Cursor cursor = myDb.openCursor(null, null);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                count++;
+            }
+            assertEquals(numRecsAfterTruncate, count);
+	    cursor.close();
+
+            /* Recover the database. */
+            myDb.close();
+            env.close();
+            myDb = initEnvAndDb(true);
+
+            /* Check data after recovery. */
+            count = 0;
+            cursor = myDb.openCursor(null, null);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                count++;
+            }
+            assertEquals(numRecsAfterTruncate, count);
+	    cursor.close();
+            myDb.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * This method can be configured to execute a number of these steps:
+     * 1. Populate a database with 0 or N records.
+     * 2. Truncate.
+     * 3. Add more records.
+     * 4. Abort or commit.
+     * 5. Check that the database has the right number of records.
+     */
+    private void doTruncateAndAdd(boolean transactional,
+                                  int step1NumRecs,
+                                  boolean step2AutoCommit,
+                                  int step3NumRecs,
+                                  boolean step4Abort,
+                                  int step5NumRecs)
+        throws Throwable {
+
+        String databaseName = "testdb";
+        try {
+            /* Use enough records to force a split. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(transactional);
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "6");
+            env = new Environment(envHome, envConfig);
+
+            /* Make a db and open it. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(transactional);
+            dbConfig.setAllowCreate(true);
+            Database myDb = env.openDatabase(null, databaseName, dbConfig);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            /* Populate database with step1NumRecs. */
+            Transaction txn = null;
+            if (transactional) {
+                txn = env.beginTransaction(null, null);
+            }
+            for (int i = 0; i < step1NumRecs; i++) {
+                IntegerBinding.intToEntry(i, key);
+                IntegerBinding.intToEntry(i, data);
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+
+            myDb.close();
+
+            /* Truncate, possibly with autocommit. */
+            if (step2AutoCommit && transactional) {
+                txn.commit();
+                txn = null;
+            }
+
+            /*
+             * Before truncate, there should be two databases in the system:
+             * the testDb database, and the FileSummary database.
+             */
+            countLNs(2, 2);
+            long truncateCount = env.truncateDatabase(txn, databaseName, true);
+            assertEquals(step1NumRecs, truncateCount);
+
+            /*
+             * The naming tree should always have two entries now; the
+             * mapping tree might have 2 or 3, depending on whether the
+             * truncate was auto-committed or is still part of an open txn.
+             */
+            if (step2AutoCommit || !transactional) {
+                countLNs(2, 2);
+            } else {
+                countLNs(2, 3);
+            }
+
+            /* Add more records. */
+            myDb = env.openDatabase(txn, databaseName, dbConfig);
+            checkCount(myDb, txn, 0);
+            for (int i = 0; i < step3NumRecs; i++) {
+                IntegerBinding.intToEntry(i, key);
+                IntegerBinding.intToEntry(i, data);
+                assertEquals(OperationStatus.SUCCESS,
+			     myDb.put(txn, key, data));
+            }
+
+            checkCount(myDb, txn, step3NumRecs);
+            myDb.close();
+
+            if (txn != null) {
+                if (step4Abort) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+
+                }
+            }
+            /* Now the mapping tree should only have two entries. */
+            countLNs(2, 2);
+
+            /* Do a cursor read, make sure there's the right amount of data. */
+            myDb = env.openDatabase(null, databaseName, dbConfig);
+            checkCount(myDb, null, step5NumRecs);
+            myDb.close();
+            env.close();
+
+            /* Check data after recovery. */
+            env = new Environment(envHome, envConfig);
+            myDb = env.openDatabase(null, databaseName, dbConfig);
+            checkCount(myDb, null, step5NumRecs);
+            myDb.close();
+            env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test that truncateDatabase and removeDatabase can be called after
+     * replaying an LN in that database during recovery.  This is to test a fix
+     * to a bug where truncateDatabase caused a hang because DbTree.releaseDb
+     * was not called by RecoveryUtilizationTracker.  [#16329]
+     */
+    public void testTruncateAfterRecovery()
+        throws Throwable {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[10]);
+        DatabaseEntry data = new DatabaseEntry(new byte[10]);
+
+        Database db = initEnvAndDb(true);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /* Write a single record for recovery. */
+        OperationStatus status = db.put(null, key, data);
+        assertSame(OperationStatus.SUCCESS, status);
+
+        /* Close without a checkpoint and run recovery. */
+        db.close();
+        envImpl.abnormalClose();
+        envImpl = null;
+        env = null;
+        db = initEnvAndDb(true);
+
+        /* Ensure that truncateDatabase does not hang. */
+        db.close();
+        long truncateCount = env.truncateDatabase(null, DB_NAME, true);
+        assertEquals(1, truncateCount);
+
+        /* removeDatabase should also work. */
+        env.removeDatabase(null, DB_NAME);
+        assertTrue(!env.getDatabaseNames().contains(DB_NAME));
+
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Set up the environment and db.
+     */
+    private Database initEnvAndDb(boolean isTransactional)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(isTransactional);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setAllowCreate(true);
+        Database myDb = env.openDatabase(null, DB_NAME, dbConfig);
+        return myDb;
+    }
+
+    private void checkCount(Database db, Transaction txn, int expectedCount)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, null);
+        int count = 0;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            count++;
+        }
+        assertEquals(expectedCount, count);
+        cursor.close();
+    }
+
+    /**
+     * Use stats to count the number of LNs in the id and name mapping
+     * trees. It's not possible to use Cursor, and stats are easier to use
+     * than CursorImpl. This relies on the fact that the stats correctly
+     * account for deleted entries.
+     */
+    private void countLNs(int expectNameLNs,
+                          int expectMapLNs)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /* check number of LNs in the id mapping tree. */
+        DatabaseImpl mapDbImpl =
+            envImpl.getDbTree().getDb(DbTree.ID_DB_ID);
+        // mapDbImpl.getTree().dump();
+        BtreeStats mapStats =
+            (BtreeStats) mapDbImpl.stat(new StatsConfig());
+        assertEquals(expectMapLNs,
+                     (mapStats.getLeafNodeCount()));
+
+        /* check number of LNs in the naming tree. */
+        DatabaseImpl nameDbImpl =
+            envImpl.getDbTree().getDb(DbTree.NAME_DB_ID);
+        BtreeStats nameStats =
+            (BtreeStats) nameDbImpl.stat(new StatsConfig());
+        assertEquals(expectNameLNs,
+                     (nameStats.getLeafNodeCount()));
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/BackgroundIOTest.java b/test/com/sleepycat/je/cleaner/BackgroundIOTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..02cd496f5d0fe32cbb65924f9a6e035210c6364f
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/BackgroundIOTest.java
@@ -0,0 +1,251 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BackgroundIOTest.java,v 1.10.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+public class BackgroundIOTest extends TestCase {
+
+    final static int FILE_SIZE = 1000000;
+
+    private static CheckpointConfig forceConfig;
+    static {
+        forceConfig = new CheckpointConfig();
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private int readLimit;
+    private int writeLimit;
+    private int nSleeps;
+
+    public BackgroundIOTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+            env = null;
+        }
+
+        //*
+        TestUtils.removeLogFiles("TearDown", envHome, true);
+        TestUtils.removeFiles("TearDown", envHome, FileManager.DEL_SUFFIX);
+        //*/
+    }
+
+    public void testBackgroundIO1()
+	throws DatabaseException, InterruptedException {
+
+        openEnv(10, 10);
+        if (isCkptHighPriority()) {
+            doTest(93, 113);
+        } else {
+            doTest(186, 206);
+        }
+    }
+
+    public void testBackgroundIO2()
+	throws DatabaseException, InterruptedException {
+
+        openEnv(10, 5);
+        if (isCkptHighPriority()) {
+            doTest(93, 113);
+        } else {
+            doTest(310, 330);
+        }
+    }
+
+    public void testBackgroundIO3()
+	throws DatabaseException, InterruptedException {
+
+        openEnv(5, 10);
+        if (isCkptHighPriority()) {
+            doTest(167, 187);
+        } else {
+            doTest(259, 279);
+        }
+    }
+
+    public void testBackgroundIO4()
+	throws DatabaseException, InterruptedException {
+
+        openEnv(5, 5);
+        if (isCkptHighPriority()) {
+            doTest(167, 187);
+        } else {
+            doTest(383, 403);
+        }
+    }
+
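+    /** Returns true if the checkpointer is configured to run at high priority. */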
+    private boolean isCkptHighPriority()
+	throws DatabaseException {
+
+        return "true".equals(env.getConfig().getConfigParam
+            (EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY.getName()));
+    }
+
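+    /**
+     * Opens the environment with the daemons disabled and the given
+     * background read/write limits, so that background IO sleeps can be
+     * counted deterministically.
+     */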
+    private void openEnv(int readLimit, int writeLimit)
+	throws DatabaseException {
+
+        this.readLimit = readLimit;
+        this.writeLimit = writeLimit;
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_BUFFER_MAX_SIZE.getName(),
+             Integer.toString(1024));
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(),
+             Integer.toString(FILE_SIZE));
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "60");
+        //*
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_BACKGROUND_READ_LIMIT.getName(),
+             String.valueOf(readLimit));
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_BACKGROUND_WRITE_LIMIT.getName(),
+             String.valueOf(writeLimit));
+        //*/
+        env = new Environment(envHome, envConfig);
+    }
+
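+    /**
+     * Inserts, checkpoints, deletes and cleans enough data to trigger
+     * background IO, counting throttling sleeps via a test hook and
+     * asserting the count falls within [minSleeps, maxSleeps].
+     */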
+    private void doTest(int minSleeps, int maxSleeps)
+	throws DatabaseException, InterruptedException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        envImpl.setBackgroundSleepHook(new TestHook() {
+                public void doHook() {
+                    nSleeps += 1;
+                    assertEquals(0, LatchSupport.countLatchesHeld());
+                }
+                public Object getHookValue() {
+                    throw new UnsupportedOperationException();
+                }
+                public void doIOHook() throws IOException {
+                    throw new UnsupportedOperationException();
+                }
+                public void hookSetup() {
+                    throw new UnsupportedOperationException();
+                }
+            });
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(true);
+        Database db = env.openDatabase(null, "BackgroundIO", dbConfig);
+
+        final int nFiles = 3;
+        final int keySize = 20;
+        final int dataSize = 10;
+        final int recSize = keySize + dataSize + 35 /* LN overhead */;
+        final int nRecords = nFiles * (FILE_SIZE / recSize);
+
+        /*
+         * Insert records first so we will have a sizeable checkpoint.  Insert
+         * interleaved because sequential inserts flush the BINs, and we want
+         * to defer BIN flushing until the checkpoint.
+         */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+        for (int i = 0; i <= nRecords; i += 2) {
+            setKey(key, i, keySize);
+            db.put(null, key, data);
+        }
+        for (int i = 1; i <= nRecords; i += 2) {
+            setKey(key, i, keySize);
+            db.put(null, key, data);
+        }
+
+        /* Perform a checkpoint to perform background writes. */
+        env.checkpoint(forceConfig);
+
+        /* Delete records so we will have a sizable cleaning. */
+        for (int i = 0; i <= nRecords; i += 1) {
+            setKey(key, i, keySize);
+            db.delete(null, key);
+        }
+
+        /* Perform cleaning to perform background reading. */
+        env.checkpoint(forceConfig);
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+
+        db.close();
+        env.close();
+        env = null;
+
+        String msg;
+        msg = "readLimit=" + readLimit +
+              " writeLimit=" + writeLimit +
+              " minSleeps=" + minSleeps +
+              " maxSleeps=" + maxSleeps +
+              " actualSleeps=" + nSleeps;
+        //System.out.println(msg);
+
+        //*
+        assertTrue(msg, nSleeps >= minSleeps && nSleeps <= maxSleeps);
+        //*/
+    }
+
+    /**
+     * Outputs an integer followed by pad bytes.
+     */
+    private void setKey(DatabaseEntry entry, int val, int len) {
+        TupleOutput out = new TupleOutput();
+        out.writeInt(val);
+        for (int i = 0; i < len - 4; i += 1) {
+            out.writeByte(0);
+        }
+        TupleBase.outputToEntry(out, entry);
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/CleanerTest.java b/test/com/sleepycat/je/cleaner/CleanerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8e14514dce739bf4b0140dc8c576335084264859
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/CleanerTest.java
@@ -0,0 +1,1714 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CleanerTest.java,v 1.103.2.8 2010/01/30 01:10:55 mark Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.cleaner.Cleaner;
+import com.sleepycat.je.cleaner.FileSelector;
+import com.sleepycat.je.cleaner.FileSummary;
+import com.sleepycat.je.cleaner.TrackedFileSummary;
+import com.sleepycat.je.cleaner.UtilizationProfile;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.recovery.Checkpointer;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+public class CleanerTest extends TestCase {
+
+    private static final int N_KEYS = 300;
+    private static final int N_KEY_BYTES = 10;
+
+    /*
+     * Make the log file size small enough to allow cleaning, but large enough
+     * not to generate a lot of fsyncing at the log file boundaries.
+     */
+    private static final int FILE_SIZE = 10000;
+    protected File envHome = null;
+    protected Database db = null;
+    private Environment exampleEnv;
+    private Database exampleDb;
+    private CheckpointConfig forceConfig;
+    private JUnitThread junitThread;
+    private volatile int synchronizer;
+
+    public CleanerTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        forceConfig = new CheckpointConfig();
+        forceConfig.setForce(true);
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
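+    /**
+     * Opens an environment with small log files and the cleaner and
+     * checkpointer daemons disabled, and opens the "cleanerDb" database.
+     */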
+    private void initEnv(boolean createDb, boolean allowDups)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 Integer.toString(FILE_SIZE));
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.CLEANER_REMOVE.getName(),
+                                 "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(),
+                                 "75");
+
+        exampleEnv = new Environment(envHome, envConfig);
+
+        String databaseName = "cleanerDb";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(createDb);
+        dbConfig.setSortedDuplicates(allowDups);
+        exampleDb = exampleEnv.openDatabase(null, databaseName, dbConfig);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        if (junitThread != null) {
+            while (junitThread.isAlive()) {
+                junitThread.interrupt();
+                Thread.yield();
+            }
+            junitThread = null;
+        }
+
+        if (exampleEnv != null) {
+            try {
+                exampleEnv.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+        exampleDb = null;
+        exampleEnv = null;
+
+        //*
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, true);
+            TestUtils.removeFiles("TearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        //*/
+    }
+
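+    /** Closes the database and environment if they are open. */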
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (exampleDb != null) {
+            exampleDb.close();
+            exampleDb = null;
+        }
+
+        if (exampleEnv != null) {
+            exampleEnv.close();
+            exampleEnv = null;
+        }
+    }
+
+    public void testCleanerNoDupes()
+        throws Throwable {
+
+        initEnv(true, false);
+        try {
+            doCleanerTest(N_KEYS, 1);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testCleanerWithDupes()
+        throws Throwable {
+
+        initEnv(true, true);
+        try {
+            doCleanerTest(2, 500);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
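+    /**
+     * Writes and reads back a data set, force-cleans each log file, and
+     * verifies that INs were cleaned and the data survives re-opening the
+     * environment.
+     */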
+    private void doCleanerTest(int nKeys, int nDupsPerKey)
+        throws DatabaseException {
+
+        EnvironmentImpl environment =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        FileManager fileManager = environment.getFileManager();
+        Map<String, Set<String>> expectedMap =
+            new HashMap<String, Set<String>>();
+        doLargePut(expectedMap, nKeys, nDupsPerKey, true);
+        Long lastNum = fileManager.getLastFileNum();
+
+        /* Read the data back. */
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        Cursor cursor = exampleDb.openCursor(null, null);
+
+        while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+        }
+
+        exampleEnv.checkpoint(forceConfig);
+
+        for (int i = 0; i < (int) lastNum.longValue(); i++) {
+
+            /*
+             * Force clean one file.  Utilization-based cleaning won't
+             * work here, since utilization is over 90%.
+             */
+            DbInternal.envGetEnvironmentImpl(exampleEnv).
+                getCleaner().
+                doClean(false, // cleanMultipleFiles
+                        true); // forceCleaning
+        }
+
+        EnvironmentStats stats = exampleEnv.getStats(TestUtils.FAST_STATS);
+        assertTrue(stats.getNINsCleaned() > 0);
+
+        cursor.close();
+        closeEnv();
+
+        initEnv(false, (nDupsPerKey > 1));
+
+        checkData(expectedMap);
+        assertTrue(fileManager.getLastFileNum().longValue() >
+                   lastNum.longValue());
+
+        closeEnv();
+    }
+
+    /**
+     * Ensure that INs are cleaned.
+     */
+    public void testCleanInternalNodes()
+        throws DatabaseException {
+
+        initEnv(true, true);
+        int nKeys = 200;
+
+        EnvironmentImpl environment =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        FileManager fileManager = environment.getFileManager();
+        /* Insert a lot of keys. ExpectedMap holds the expected data */
+        Map<String, Set<String>> expectedMap =
+            new HashMap<String, Set<String>>();
+        doLargePut(expectedMap, nKeys, 1, true);
+
+        /* Modify every other piece of data. */
+        modifyData(expectedMap, 10, true);
+        checkData(expectedMap);
+
+        /* Checkpoint */
+        exampleEnv.checkpoint(forceConfig);
+        checkData(expectedMap);
+
+        /* Modify every other piece of data. */
+        modifyData(expectedMap, 10, true);
+        checkData(expectedMap);
+
+        /* Checkpoint -- this should obsolete INs. */
+        exampleEnv.checkpoint(forceConfig);
+        checkData(expectedMap);
+
+        /* Clean */
+        Long lastNum = fileManager.getLastFileNum();
+        exampleEnv.cleanLog();
+
+        /* Validate after cleaning. */
+        checkData(expectedMap);
+        EnvironmentStats stats = exampleEnv.getStats(TestUtils.FAST_STATS);
+
+        /* Make sure we really cleaned something.*/
+        assertTrue(stats.getNINsCleaned() > 0);
+        assertTrue(stats.getNLNsCleaned() > 0);
+
+        closeEnv();
+        initEnv(false, true);
+        checkData(expectedMap);
+        assertTrue(fileManager.getLastFileNum().longValue() >
+                   lastNum.longValue());
+
+        closeEnv();
+    }
+
+    /**
+     * See if we can clean in the middle of the file set.
+     */
+    public void testCleanFileHole()
+        throws Throwable {
+
+        initEnv(true, true);
+
+        int nKeys = 20; // test ends up inserting 2*nKeys
+        int nDupsPerKey = 30;
+
+        EnvironmentImpl environment =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        FileManager fileManager = environment.getFileManager();
+
+        /* Insert some non dup data, modify, insert dup data. */
+        Map<String, Set<String>> expectedMap =
+            new HashMap<String, Set<String>>();
+        doLargePut(expectedMap, nKeys, 1, true);
+        modifyData(expectedMap, 10, true);
+        doLargePut(expectedMap, nKeys, nDupsPerKey, true);
+        checkData(expectedMap);
+
+        /*
+         * Delete all the data, but abort. (Try to fill up the log
+         * with entries we don't need.)
+         */
+        deleteData(expectedMap, false, false);
+        checkData(expectedMap);
+
+        /* Do some more insertions, but abort them. */
+        doLargePut(expectedMap, nKeys, nDupsPerKey, false);
+        checkData(expectedMap);
+
+        /* Do some more insertions and commit them. */
+        doLargePut(expectedMap, nKeys, nDupsPerKey, true);
+        checkData(expectedMap);
+
+        /* Checkpoint */
+        exampleEnv.checkpoint(forceConfig);
+        checkData(expectedMap);
+
+        /* Clean */
+        Long lastNum = fileManager.getLastFileNum();
+        exampleEnv.cleanLog();
+
+        /* Validate after cleaning. */
+        checkData(expectedMap);
+        EnvironmentStats stats = exampleEnv.getStats(TestUtils.FAST_STATS);
+
+        /* Make sure we really cleaned something.*/
+        assertTrue(stats.getNINsCleaned() > 0);
+        assertTrue(stats.getNLNsCleaned() > 0);
+
+        closeEnv();
+        initEnv(false, true);
+        checkData(expectedMap);
+        assertTrue(fileManager.getLastFileNum().longValue() >
+                   lastNum.longValue());
+
+        closeEnv();
+    }
+
+    /**
+     * Test for SR13191.  This SR shows a problem where a MapLN is initialized
+     * with a DatabaseImpl that has a null EnvironmentImpl.  When the Database
+     * gets used, a NullPointerException occurs in the Cursor code which
+     * expects there to be an EnvironmentImpl present.  The MapLN gets init'd
+     * by the Cleaner reading through a log file and encountering a MapLN which
+     * is not presently in the DbTree.  As an efficiency, the Cleaner calls
+     * updateEntry on the BIN to try to insert the MapLN into the BIN so that
+     * it won't have to fetch it when it migrates the BIN.  But this is bad
+     * since the MapLN has not been init'd properly.  The fix was to ensure
+     * that the MapLN is init'd correctly by calling postFetchInit on it just
+     * prior to inserting it into the BIN.
+     *
+     * This test first creates an environment and two databases.  The first
+     * database it just adds to the tree with no data.  This will be the MapLN
+     * that eventually gets instantiated by the cleaner.  The second database
+     * is used just to create a bunch of data that will get deleted so as to
+     * create a low utilization for one of the log files.  Once the data for
+     * db2 is created, the log is flipped (so file 0 is the one with the MapLN
+     * for db1 in it), and the environment is closed and reopened.  We insert
+     * more data into db2 until we have enough .jdb files that file 0 is
+     * attractive to the cleaner.  We then call the cleaner to have it
+     * instantiate the MapLN, and finally use the MapLN in a Database.get()
+     * call.
+     */
+    public void testSR13191()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        Environment env = new Environment(envHome, envConfig);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        FileManager fileManager = envImpl.getFileManager();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db1 =
+            env.openDatabase(null, "db1", dbConfig);
+
+        Database db2 =
+            env.openDatabase(null, "db2", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(1, key);
+        data.setData(new byte[100000]);
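+
+        /*
+         * Overwrite a single record repeatedly so the first log files fill
+         * with obsolete LN versions, creating low utilization.
+         */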
+        for (int i = 0; i < 50; i++) {
+            assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data));
+        }
+        db1.close();
+        db2.close();
+        assertEquals("Should have 0 as current file", 0L,
+                     fileManager.getCurrentFileNum());
+        envImpl.forceLogFileFlip();
+        env.close();
+
+        env = new Environment(envHome, envConfig);
+        fileManager = DbInternal.envGetEnvironmentImpl(env).getFileManager();
+        assertEquals("Should have 1 as current file", 1L,
+                     fileManager.getCurrentFileNum());
+
+        db2 = env.openDatabase(null, "db2", dbConfig);
+
+        for (int i = 0; i < 250; i++) {
+            assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data));
+        }
+
+        db2.close();
+        env.cleanLog();
+        db1 = env.openDatabase(null, "db1", dbConfig);
+        db1.get(null, key, data, null);
+        db1.close();
+        env.close();
+    }
+
+    /**
+     * Tests that setting je.env.runCleaner=false stops the cleaner from
+     * processing more files even if the target minUtilization is not met
+     * [#15158].
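+     *
+     * For reference, a minimal sketch of the on/off pattern exercised here,
+     * using the same mutable-config calls as the test body below:
+     * <pre>
+     *    EnvironmentMutableConfig mc = env.getMutableConfig();
+     *    mc.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+     *                      "false");
+     *    env.setMutableConfig(mc);  // no further files are processed
+     * </pre>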
+     */
+    public void testCleanerStop()
+        throws Throwable {
+
+        final int fileSize = 1000000;
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(),
+             Integer.toString(fileSize));
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
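+
+        /*
+         * Each record below is as large as a log file, so every put fills
+         * roughly one file with waste and utilization stays far below the
+         * 80% target, leaving the cleaner with a permanent backlog.
+         */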
+        Environment env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "CleanerStop", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[fileSize]);
+        for (int i = 0; i <= 10; i += 1) {
+            db.put(null, key, data);
+        }
+        env.checkpoint(forceConfig);
+
+        EnvironmentStats stats = env.getStats(null);
+        assertEquals(0, stats.getNCleanerRuns());
+
+        envConfig = env.getConfig();
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true");
+        env.setMutableConfig(envConfig);
+
+        int iter = 0;
+        while (stats.getNCleanerRuns() == 0) {
+            iter += 1;
+            if (iter == 20) {
+
+                /*
+                 * At one time the DaemonThread did not wakeup immediately in
+                 * this test.  A workaround was to add an item to the job queue
+                 * in FileProcessor.wakeup.  Later the job queue was removed
+                 * and the DaemonThread.run() was fixed to wakeup immediately.
+                 * This test verifies that the cleanup of the run() method
+                 * works properly [#15267].
+                 */
+                fail("Cleaner did not run after " + iter + " tries");
+            }
+            Thread.yield();
+            Thread.sleep(1);
+            stats = env.getStats(null);
+        }
+
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        env.setMutableConfig(envConfig);
+
+        long prevNRuns = stats.getNCleanerRuns();
+        stats = env.getStats(null);
+        long currNRuns = stats.getNCleanerRuns();
+        if (currNRuns - prevNRuns > 5) {
+            fail("Expected no more than 5 cleaner runs after disabling," +
+                 " prevNRuns=" + prevNRuns +
+                 " currNRuns=" + currNRuns);
+        }
+
+        //System.out.println("Num runs: " + stats.getNCleanerRuns());
+
+        db.close();
+        env.close();
+    }
+
+    /**
+     * Tests that the FileSelector memory budget is subtracted when the
+     * environment is closed.  Before the fix in SR [#16368], it was not.
+     */
+    public void testFileSelectorMemBudget()
+        throws Throwable {
+
+        final int fileSize = 1000000;
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(),
+             Integer.toString(fileSize));
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
+        Environment env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[fileSize]);
+        for (int i = 0; i <= 10; i += 1) {
+            db.put(null, key, data);
+        }
+        env.checkpoint(forceConfig);
+
+        int nFiles = env.cleanLog();
+        assertTrue(nFiles > 0);
+
+        db.close();
+
+        /*
+         * To force the memory leak to be detected we have to close without a
+         * checkpoint.  The checkpoint will finish processing all cleaned files
+         * and subtract them from the budget.  But this should happen during
+         * close, even without a checkpoint.
+         */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        envImpl.close(false /*doCheckpoint*/);
+    }
+
+    /**
+     * Tests that cleanLog cannot be called in a read-only environment.
+     * [#16368]
+     */
+    public void testCleanLogReadOnly()
+        throws Throwable {
+
+        /* Open read-write. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        exampleEnv = new Environment(envHome, envConfig);
+        exampleEnv.close();
+        exampleEnv = null;
+
+        /* Open read-only. */
+        envConfig.setAllowCreate(false);
+        envConfig.setReadOnly(true);
+        exampleEnv = new Environment(envHome, envConfig);
+
+        /* Try cleanLog in a read-only env. */
+        try {
+            exampleEnv.cleanLog();
+            fail();
+        } catch (IllegalStateException e) {
+            assertEquals
+                ("Log cleaning not allowed in a read-only or memory-only " +
+                 "environment", e.getMessage());
+        }
+    }
+
+    /**
+     * Tests that when a file being cleaned is deleted, we ignore the error and
+     * don't repeatedly try to clean it.  This can happen when we mistakenly
+     * clean a file after it has been queued for deletion.  The workaround is
+     * to catch LogFileNotFoundException in the cleaner and ignore the error.
+     * We're testing the workaround here by forcing cleaning of deleted files.
+     * [#15528]
+     */
+    public void testUnexpectedFileDeletion()
+        throws DatabaseException, IOException {
+
+        initEnv(true, false);
+        EnvironmentMutableConfig config = exampleEnv.getMutableConfig();
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
+        exampleEnv.setMutableConfig(config);
+
+        final EnvironmentImpl envImpl =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        final Cleaner cleaner = envImpl.getCleaner();
+        final FileSelector fileSelector = cleaner.getFileSelector();
+
+        Map<String, Set<String>> expectedMap =
+            new HashMap<String, Set<String>>();
+        doLargePut(expectedMap, 1000, 1, true);
+        checkData(expectedMap);
+
+        final long file1 = 0;
+        final long file2 = 1;
+
+        for (int i = 0; i < 100; i += 1) {
+            modifyData(expectedMap, 1, true);
+            checkData(expectedMap);
+            fileSelector.injectFileForCleaning(Long.valueOf(file1));
+            fileSelector.injectFileForCleaning(Long.valueOf(file2));
+            assertTrue(fileSelector.getToBeCleanedFiles().contains(file1));
+            assertTrue(fileSelector.getToBeCleanedFiles().contains(file2));
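+
+            /*
+             * Clean until no more files are selected.  The injected files may
+             * already have been deleted; LogFileNotFoundException must be
+             * ignored rather than retried.
+             */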
+            while (exampleEnv.cleanLog() > 0) {}
+            assertTrue(!fileSelector.getToBeCleanedFiles().contains(file1));
+            assertTrue(!fileSelector.getToBeCleanedFiles().contains(file2));
+            exampleEnv.checkpoint(forceConfig);
+            Map<Long,FileSummary> allFiles = envImpl.getUtilizationProfile().
+                getFileSummaryMap(true /*includeTrackedFiles*/);
+            assertTrue(!allFiles.containsKey(file1));
+            assertTrue(!allFiles.containsKey(file2));
+        }
+        checkData(expectedMap);
+
+        closeEnv();
+    }
+
+    /**
+     * Helper routine.  Generates keys with random alphabetic values; the
+     * data values for each key are the integers 0 to nDupsPerKey-1.
+     */
+    private void doLargePut(Map<String, Set<String>> expectedMap,
+                            int nKeys,
+                            int nDupsPerKey,
+                            boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        for (int i = 0; i < nKeys; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            TestUtils.generateRandomAlphaBytes(key);
+            String keyString = new String(key);
+
+            /*
+             * The expected map is keyed by key value, and holds a set
+             * of all data values.
+             */
+            Set<String> dataVals = new HashSet<String>();
+            if (commit) {
+                expectedMap.put(keyString, dataVals);
+            }
+            for (int j = 0; j < nDupsPerKey; j++) {
+                String dataString = Integer.toString(j);
+                exampleDb.put(txn,
+                              new StringDbt(keyString),
+                              new StringDbt(dataString));
+                dataVals.add(dataString);
+            }
+        }
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+    }
+
+    /**
+     * Increments every other data value by deleting it and re-inserting
+     * the incremented value.
+     */
+    private void modifyData(Map<String, Set<String>> expectedMap,
+                            int increment,
+                            boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        Cursor cursor = exampleDb.openCursor(txn, null);
+        OperationStatus status = cursor.getFirst(foundKey, foundData,
+                                                 LockMode.DEFAULT);
+
+        boolean toggle = true;
+        while (status == OperationStatus.SUCCESS) {
+            if (toggle) {
+
+                String foundKeyString = foundKey.getString();
+                String foundDataString = foundData.getString();
+                int newValue = Integer.parseInt(foundDataString) + increment;
+                String newDataString = Integer.toString(newValue);
+
+                /* If committing, adjust the expected map. */
+                if (commit) {
+
+                    Set<String> dataVals = expectedMap.get(foundKeyString);
+                    if (dataVals == null) {
+                        fail("Couldn't find " +
+                             foundKeyString + "/" + foundDataString);
+                    } else if (dataVals.contains(foundDataString)) {
+                        dataVals.remove(foundDataString);
+                        dataVals.add(newDataString);
+                    } else {
+                        fail("Couldn't find " +
+                             foundKeyString + "/" + foundDataString);
+                    }
+                }
+
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.delete());
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.put(foundKey,
+                                        new StringDbt(newDataString)));
+                toggle = false;
+            } else {
+                toggle = true;
+            }
+
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+        }
+
+        cursor.close();
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+    }
+
+    /**
+     * Deletes all records, or every other record when everyOther is true.
+     */
+    private void deleteData(Map<String, Set<String>> expectedMap,
+                            boolean everyOther,
+                            boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        Cursor cursor = exampleDb.openCursor(txn, null);
+        OperationStatus status = cursor.getFirst(foundKey, foundData,
+                                                 LockMode.DEFAULT);
+
+        boolean toggle = true;
+        while (status == OperationStatus.SUCCESS) {
+            if (toggle) {
+
+                String foundKeyString = foundKey.getString();
+                String foundDataString = foundData.getString();
+
+                /* If committing, adjust the expected map */
+                if (commit) {
+
+                    Set<String> dataVals = expectedMap.get(foundKeyString);
+                    if (dataVals == null) {
+                        fail("Couldn't find " +
+                             foundKeyString + "/" + foundDataString);
+                    } else if (dataVals.contains(foundDataString)) {
+                        dataVals.remove(foundDataString);
+                        if (dataVals.size() == 0) {
+                            expectedMap.remove(foundKeyString);
+                        }
+                    } else {
+                        fail("Couldn't find " +
+                             foundKeyString + "/" + foundDataString);
+                    }
+                }
+
+                assertEquals(OperationStatus.SUCCESS, cursor.delete());
+            }
+
+            if (everyOther) {
+                toggle = !toggle;
+            }
+
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+        }
+
+        cursor.close();
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+    }
+
+    /**
+     * Check what's in the database against what's in the expected map.
+     */
+    private void checkData(Map<String, Set<String>> expectedMap)
+        throws DatabaseException {
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+        Cursor cursor = exampleDb.openCursor(null, null);
+        OperationStatus status = cursor.getFirst(foundKey, foundData,
+                                                 LockMode.DEFAULT);
+
+        /*
+         * Make a copy of expectedMap so that we're free to delete out
+         * of the set of expected results when we verify.
+         * Also make a map of counts for each key value, to test count.
+         */
+
+        Map<String, Set<String>> checkMap = new HashMap<String, Set<String>>();
+        Map<String, Integer> countMap = new HashMap<String, Integer>();
+        Iterator<Map.Entry<String, Set<String>>> iter =
+            expectedMap.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<String, Set<String>> entry = iter.next();
+            Set<String> copySet = new HashSet<String>();
+            copySet.addAll(entry.getValue());
+            checkMap.put(entry.getKey(), copySet);
+            countMap.put(entry.getKey(), Integer.valueOf(copySet.size()));
+        }
+
+        while (status == OperationStatus.SUCCESS) {
+            String foundKeyString = foundKey.getString();
+            String foundDataString = foundData.getString();
+
+            /* Check that the current value is in the check values map */
+            Set<String> dataVals = checkMap.get(foundKeyString);
+            if (dataVals == null) {
+                fail("Couldn't find " +
+                     foundKeyString + "/" + foundDataString);
+            } else if (dataVals.contains(foundDataString)) {
+                dataVals.remove(foundDataString);
+                if (dataVals.size() == 0) {
+                    checkMap.remove(foundKeyString);
+                }
+            } else {
+                fail("Couldn't find " +
+                     foundKeyString + "/" +
+                     foundDataString +
+                     " in data vals");
+            }
+
+            /* Check that the count is right. */
+            int count = cursor.count();
+            assertEquals(countMap.get(foundKeyString).intValue(),
+                         count);
+
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+        }
+
+        cursor.close();
+
+        if (checkMap.size() != 0) {
+            dumpExpected(checkMap);
+            fail("checkMapSize = " + checkMap.size());
+        }
+        assertEquals(0, checkMap.size());
+    }
+
+    private void dumpExpected(Map<String, Set<String>> expectedMap) {
+        for (Map.Entry<String, Set<String>> entry : expectedMap.entrySet()) {
+            String key = entry.getKey();
+            for (String data : entry.getValue()) {
+                System.out.println("key=" + key + " data=" + data);
+            }
+        }
+    }
+
+    /**
+     * Tests that cleaner mutable configuration parameters can be changed and
+     * that the changes actually take effect.
+     */
+    public void testMutableConfig()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        exampleEnv = new Environment(envHome, envConfig);
+        envConfig = exampleEnv.getConfig();
+        EnvironmentImpl envImpl =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        Cleaner cleaner = envImpl.getCleaner();
+        UtilizationProfile profile = envImpl.getUtilizationProfile();
+        MemoryBudget budget = envImpl.getMemoryBudget();
+        String name;
+        String val;
+
+        /* je.cleaner.minUtilization */
+        name = EnvironmentParams.CLEANER_MIN_UTILIZATION.getName();
+        setParam(name, "33");
+        assertEquals(33, profile.minUtilization);
+
+        /* je.cleaner.minFileUtilization */
+        name = EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName();
+        setParam(name, "7");
+        assertEquals(7, profile.minFileUtilization);
+
+        /* je.cleaner.bytesInterval */
+        name = EnvironmentParams.CLEANER_BYTES_INTERVAL.getName();
+        setParam(name, "1000");
+        assertEquals(1000, cleaner.cleanerBytesInterval);
+
+        /* je.cleaner.deadlockRetry */
+        name = EnvironmentParams.CLEANER_DEADLOCK_RETRY.getName();
+        setParam(name, "7");
+        assertEquals(7, cleaner.nDeadlockRetries);
+
+        /* je.cleaner.lockTimeout */
+        name = EnvironmentParams.CLEANER_LOCK_TIMEOUT.getName();
+        setParam(name, "7000");
+        assertEquals(7, cleaner.lockTimeout);
+
+        /* je.cleaner.expunge */
+        name = EnvironmentParams.CLEANER_REMOVE.getName();
+        val = "false".equals(envConfig.getConfigParam(name)) ?
+            "true" : "false";
+        setParam(name, val);
+        assertEquals(val.equals("true"), cleaner.expunge);
+
+        /* je.cleaner.minAge */
+        name = EnvironmentParams.CLEANER_MIN_AGE.getName();
+        setParam(name, "7");
+        assertEquals(7, profile.minAge);
+
+        /* je.cleaner.cluster */
+        name = EnvironmentParams.CLEANER_CLUSTER.getName();
+        val = "false".equals(envConfig.getConfigParam(name)) ?
+            "true" : "false";
+        setParam(name, val);
+        assertEquals(val.equals("true"), cleaner.clusterResident);
+        /* Cannot set both cluster and clusterAll to true. */
+        setParam(name, "false");
+
+        /* je.cleaner.clusterAll */
+        name = EnvironmentParams.CLEANER_CLUSTER_ALL.getName();
+        val = "false".equals(envConfig.getConfigParam(name)) ?
+            "true" : "false";
+        setParam(name, val);
+        assertEquals(val.equals("true"), cleaner.clusterAll);
+
+        /* je.cleaner.maxBatchFiles */
+        name = EnvironmentParams.CLEANER_MAX_BATCH_FILES.getName();
+        setParam(name, "7");
+        assertEquals(7, cleaner.maxBatchFiles);
+
+        /* je.cleaner.readSize */
+        name = EnvironmentParams.CLEANER_READ_SIZE.getName();
+        setParam(name, "7777");
+        assertEquals(7777, cleaner.readBufferSize);
+
+        /* je.cleaner.detailMaxMemoryPercentage */
+        name = EnvironmentParams.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE.
+            getName();
+        setParam(name, "7");
+        assertEquals((budget.getMaxMemory() * 7) / 100,
+                     budget.getTrackerBudget());
+
+        /* je.cleaner.threads */
+        name = EnvironmentParams.CLEANER_THREADS.getName();
+        setParam(name, "7");
+        assertEquals((envImpl.isNoLocking() ? 0 : 7),
+                     countCleanerThreads());
+
+        exampleEnv.close();
+        exampleEnv = null;
+    }
+
+    /**
+     * Sets a mutable config param, checking that the given value is not
+     * already set and that it actually changes.
+     */
+    private void setParam(String name, String val)
+        throws DatabaseException {
+
+        EnvironmentMutableConfig config = exampleEnv.getMutableConfig();
+        String myVal = config.getConfigParam(name);
+        assertTrue(!val.equals(myVal));
+
+        config.setConfigParam(name, val);
+        exampleEnv.setMutableConfig(config);
+
+        config = exampleEnv.getMutableConfig();
+        myVal = config.getConfigParam(name);
+        assertTrue(val.equals(myVal));
+    }
+
+    /**
+     * Counts the number of threads whose name starts with "Cleaner".
+     */
+    private int countCleanerThreads() {
+
+        Thread[] threads = new Thread[Thread.activeCount()];
+        Thread.enumerate(threads);
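+
+        /*
+         * activeCount and enumerate give only a best-effort snapshot, so
+         * some array slots may be null and are skipped below.
+         */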
+
+        int count = 0;
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] != null &&
+                threads[i].getName().startsWith("Cleaner")) {
+                count += 1;
+            }
+        }
+
+        return count;
+    }
+
+    /**
+     * Checks that the memory budget is updated properly by the
+     * UtilizationTracker.  Prior to a bug fix [#15505] amounts were added to
+     * the budget but not subtracted when two TrackedFileSummary objects were
+     * merged.  Merging occurs when a local tracker is added to the global
+     * tracker.  Local trackers are used during recovery, checkpoints, lazy
+     * compression, and reverse splits.
+     */
+    public void testTrackerMemoryBudget()
+        throws DatabaseException {
+
+        /* Open environment. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        exampleEnv = new Environment(envHome, envConfig);
+
+        /* Open database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        exampleDb = exampleEnv.openDatabase(null, "foo", dbConfig);
+
+        /* Insert data. */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 1; i <= 200; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            exampleDb.put(null, key, data);
+        }
+
+        /* Save the admin budget baseline. */
+        flushTrackedFiles();
+        long admin = exampleEnv.getStats(null).getAdminBytes();
+
+        /*
+         * Nothing becomes obsolete when inserting and no INs are logged, so
+         * the budget does not increase.
+         */
+        IntegerBinding.intToEntry(201, key);
+        exampleDb.put(null, key, data);
+        assertEquals(admin, exampleEnv.getStats(null).getAdminBytes());
+        flushTrackedFiles();
+        assertEquals(admin, exampleEnv.getStats(null).getAdminBytes());
+
+        /*
+         * Update a record and expect the budget to increase because the old
+         * LN becomes obsolete.
+         */
+        exampleDb.put(null, key, data);
+        assertTrue(admin < exampleEnv.getStats(null).getAdminBytes());
+        flushTrackedFiles();
+        assertEquals(admin, exampleEnv.getStats(null).getAdminBytes());
+
+        /*
+         * Delete all records and expect the budget to increase because LNs
+         * become obsolete.
+         */
+        for (int i = 1; i <= 201; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            exampleDb.delete(null, key);
+        }
+        assertTrue(admin < exampleEnv.getStats(null).getAdminBytes());
+        flushTrackedFiles();
+        assertEquals(admin, exampleEnv.getStats(null).getAdminBytes());
+
+        /*
+         * Compress and expect no change to the budget.  Prior to the fix for
+         * [#15505] the assertion below failed because the baseline admin
+         * budget was not restored.
+         */
+        exampleEnv.compress();
+        flushTrackedFiles();
+        assertEquals(admin, exampleEnv.getStats(null).getAdminBytes());
+
+        closeEnv();
+    }
+
+    /**
+     * Flushes all tracked files to subtract tracked info from the admin memory
+     * budget.
+     */
+    private void flushTrackedFiles()
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(exampleEnv);
+        UtilizationTracker tracker = envImpl.getUtilizationTracker();
+        UtilizationProfile profile = envImpl.getUtilizationProfile();
+
+        for (TrackedFileSummary summary : tracker.getTrackedFiles()) {
+            profile.flushFileSummary(summary);
+        }
+    }
+
+    /**
+     * Tests that memory is budgeted correctly for FileSummaryLNs that are
+     * inserted and deleted after calling setTrackedSummary.  The size of the
+     * FileSummaryLN changes during logging when setTrackedSummary is called,
+     * and this is accounted for specially in Tree.logLNAfterInsert. [#15831] 
+     */
+    public void testFileSummaryLNMemoryUsage()
+        throws DatabaseException {
+
+        /* Open environment, prevent concurrent access by daemons. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        exampleEnv = new Environment(envHome, envConfig);
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(exampleEnv);
+        UtilizationProfile up = envImpl.getUtilizationProfile();
+        DatabaseImpl fileSummaryDb = up.getFileSummaryDb();
+        MemoryBudget memBudget = envImpl.getMemoryBudget();
+
+        BasicLocker locker = null;
+        CursorImpl cursor = null;
+        try {
+            locker = BasicLocker.createBasicLocker(envImpl);
+            cursor = new CursorImpl(fileSummaryDb, locker);
+
+            /* Get parent BIN.  There should be only one BIN in the tree. */
+            IN root =
+                fileSummaryDb.getTree().getRootIN(CacheMode.DEFAULT);
+            root.releaseLatch();
+            assertEquals(1, root.getNEntries());
+            BIN parent = (BIN) root.getTarget(0);
+
+            /* Use an artificial FileSummaryLN with a tracked summary. */
+            FileSummaryLN ln = new FileSummaryLN(envImpl, new FileSummary());
+            TrackedFileSummary tfs = new TrackedFileSummary
+                (envImpl.getUtilizationTracker(), 0 /*fileNum*/,
+                 true /*trackDetail*/);
+            tfs.trackObsolete(0);
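+
+            /*
+             * Tracking one obsolete offset gives the LN extra memory for its
+             * obsolete offsets, checked via getObsoleteOffsets below.
+             */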
+            byte[] keyBytes =
+                FileSummaryLN.makeFullKey(0 /*fileNum*/, 123 /*sequence*/);
+            int keySize = MemoryBudget.byteArraySize(keyBytes.length);
+
+            /* Perform insert after calling setTrackedSummary. */
+            long oldSize = ln.getMemorySizeIncludedByParent();
+            long oldParentSize = getMemSize(parent, memBudget);
+            ln.setTrackedSummary(tfs);
+            OperationStatus status = cursor.putLN
+                (keyBytes, ln, false /*allowDuplicates*/,
+                 fileSummaryDb.getRepContext());
+            assertSame(status, OperationStatus.SUCCESS);
+
+            assertSame(parent, cursor.latchBIN());
+            ln.addExtraMarshaledMemorySize(parent);
+            cursor.releaseBIN();
+
+            long newSize = ln.getMemorySizeIncludedByParent();
+            long newParentSize = getMemSize(parent, memBudget);
+
+            /* The size of the LN increases during logging. */
+            assertEquals(newSize,
+                         oldSize +
+                         ln.getObsoleteOffsets().getExtraMemorySize());
+
+            /* The correct size is accounted for by the parent BIN. */
+            assertEquals(newSize + keySize, newParentSize - oldParentSize);
+
+            /* Correct size is subtracted during eviction. */
+            oldParentSize = newParentSize;
+            cursor.evict();
+            newParentSize = getMemSize(parent, memBudget);
+            assertEquals(oldParentSize - newSize, newParentSize);
+            long evictedParentSize = newParentSize;
+
+            /* Fetch a fresh FileSummaryLN before deleting it. */
+            oldParentSize = newParentSize;
+            ln = (FileSummaryLN) cursor.getCurrentLN(LockType.READ);
+            newSize = ln.getMemorySizeIncludedByParent();
+            newParentSize = getMemSize(parent, memBudget);
+            assertEquals(newSize, newParentSize - oldParentSize);
+
+            /* Perform delete after calling setTrackedSummary. */
+            oldSize = newSize;
+            oldParentSize = newParentSize;
+            ln.setTrackedSummary(tfs);
+            status = cursor.delete(fileSummaryDb.getRepContext());
+            assertSame(status, OperationStatus.SUCCESS);
+            newSize = ln.getMemorySizeIncludedByParent();
+            newParentSize = getMemSize(parent, memBudget);
+
+            /* Size changes during delete also. */
+            assertTrue(newSize < oldSize);
+            assertTrue(oldSize - newSize >
+                       ln.getObsoleteOffsets().getExtraMemorySize());
+            assertEquals(newSize - oldSize, newParentSize - oldParentSize);
+
+            /* Correct size is subtracted during eviction. */
+            oldParentSize = newParentSize;
+            cursor.evict();
+            newParentSize = getMemSize(parent, memBudget);
+            assertEquals(oldParentSize - newSize, newParentSize);
+            assertEquals(evictedParentSize, newParentSize);
+        } finally {
+            if (cursor != null) {
+                cursor.releaseBINs();
+                cursor.close();
+            }
+            if (locker != null) {
+                locker.operationEnd();
+            }
+        }
+
+        TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/);
+
+        /* Insert again, this time using the UtilizationProfile method. */
+        FileSummaryLN ln = new FileSummaryLN(envImpl, new FileSummary());
+        TrackedFileSummary tfs = new TrackedFileSummary
+            (envImpl.getUtilizationTracker(), 0 /*fileNum*/,
+             true /*trackDetail*/);
+        tfs.trackObsolete(0);
+        ln.setTrackedSummary(tfs);
+        assertTrue(up.insertFileSummary(ln, 0 /*fileNum*/, 123 /*sequence*/));
+        TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/);
+
+        closeEnv();
+    }
+
+    /**
+     * Checks that log utilization is updated incrementally during the
+     * checkpoint rather than only when the highest dirty level in the Btree is
+     * flushed.  This feature (incremental update) was added so that log
+     * cleaning is not delayed until the end of the checkpoint. [#16037]
+     */
+    public void testUtilizationDuringCheckpoint()
+        throws DatabaseException {
+
+        /*
+         * Use Database.sync of a deferred-write database to perform this test
+         * rather than a checkpoint, because the hook is called at a
+         * predictable place when only a single database is flushed.  The
+         * implementation of Checkpointer.flushDirtyNodes is shared for
+         * Database.sync and checkpoint, so this tests both cases.
+         */
+        final int FANOUT = 25;
+        final int N_KEYS = FANOUT * FANOUT * FANOUT;
+
+        /* Open environment. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        exampleEnv = new Environment(envHome, envConfig);
+
+        /* Open ordinary non-transactional database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setNodeMaxEntries(FANOUT);
+        exampleDb = exampleEnv.openDatabase(null, "foo", dbConfig);
+
+        /* Clear stats. */
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+        exampleEnv.getStats(statsConfig);
+
+        /* Write to database to create a 3 level Btree. */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry(new byte[0]);
+        for (int i = 0; i < N_KEYS; i += 1) {
+            LongBinding.longToEntry(i, keyEntry);
+            assertSame(OperationStatus.SUCCESS,
+                       exampleDb.put(null, keyEntry, dataEntry));
+            EnvironmentStats stats = exampleEnv.getStats(statsConfig);
+            if (stats.getNEvictPasses() > 0) {
+                break;
+            }
+        }
+
+        /*
+         * Sync and write an LN in each BIN to create a bunch of dirty INs
+         * that, when flushed again, will cause the prior versions to be
+         * obsolete.
+         */
+        exampleEnv.sync();
+        for (int i = 0; i < N_KEYS; i += FANOUT) {
+            LongBinding.longToEntry(i, keyEntry);
+            assertSame(OperationStatus.SUCCESS,
+                       exampleDb.put(null, keyEntry, dataEntry));
+        }
+
+        /*
+         * Close and re-open as a deferred-write DB so that we can call sync.
+         * The INs will remain dirty.
+         */
+        exampleDb.close();
+        dbConfig = new DatabaseConfig();
+        dbConfig.setDeferredWrite(true);
+        exampleDb = exampleEnv.openDatabase(null, "foo", dbConfig);
+
+        /*
+         * The test hook is called just before writing the highest dirty level
+         * in the Btree.  At that point, utilization should be reduced if the
+         * incremental utilization update feature is working properly.  Before
+         * adding this feature, utilization was not changed at this point.
+         */
+        final int oldUtilization = getUtilization();
+        final StringBuilder hookCalledFlag = new StringBuilder();
+
+        Checkpointer.setMaxFlushLevelHook(new TestHook() {
+            public void doHook() {
+                hookCalledFlag.append(1);
+                final int newUtilization;
+                try {
+                    newUtilization = getUtilization();
+                } catch (DatabaseException e) {
+                    throw new RuntimeException(e);
+                }
+                String msg = "oldUtilization=" + oldUtilization +
+                             " newUtilization=" + newUtilization;
+                assertTrue(msg, oldUtilization - newUtilization >= 10);
+                /* Don't call the test hook repeatedly. */
+                Checkpointer.setMaxFlushLevelHook(null);
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        });
+        exampleDb.sync();
+        assertTrue(hookCalledFlag.length() > 0);
+
+        closeEnv();
+    }
+
+    private int getUtilization()
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(exampleEnv);
+        Map<Long,FileSummary> map =
+            envImpl.getUtilizationProfile().getFileSummaryMap(true);
+        FileSummary totals = new FileSummary();
+        for (FileSummary summary : map.values()) {
+            totals.add(summary);
+        }
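+
+        /*
+         * utilization() returns the percentage of live data, roughly
+         * ((totalSize - obsoleteSize) * 100) / totalSize.
+         */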
+        return UtilizationProfile.utilization(totals.getObsoleteSize(),
+                                              totals.totalSize);
+    }
+
+    /**
+     * Returns the memory size taken by the given IN and the tree memory usage.
+     */
+    private long getMemSize(IN in, MemoryBudget memBudget) {
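+        /*
+         * Start from the budget's tree-wide total, then substitute this IN's
+         * current in-memory size for the (possibly stale) size it last
+         * registered with the budget.
+         */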
+        return memBudget.getTreeMemoryUsage() +
+               in.getInMemorySize() -
+               in.getBudgetedMemorySize();
+    }
+
+    /**
+     * Tests that dirtiness is logged upwards during a checkpoint, even if a
+     * node is evicted and refetched after being added to the checkpointer's
+     * dirty map, and before that entry in the dirty map is processed by the
+     * checkpointer.  [#16523]
+     *
+     *  Root INa
+     *      /  \
+     *     INb  ...
+     *    /
+     *   INc
+     *  /
+     * BINd
+     *
+     * The scenario that causes the bug is:
+     *
+     * 1) Prior to the final checkpoint, the cleaner processes a log file
+     * containing BINd.  The cleaner marks BINd dirty so that it will be
+     * flushed prior to the end of the next checkpoint, at which point the file
+     * containing BINd will be deleted.  The cleaner also calls
+     * setProhibitNextDelta on BINd to ensure that a full version will be
+     * logged.
+     *
+     * 2) At checkpoint start, BINd is added to the checkpointer's dirty map.
+     * It so happens that INa is also dirty, perhaps as the result of a split,
+     * and added to the dirty map.  The checkpointer's max flush level is 4.
+     *
+     * 3) The evictor flushes BINd and then its parent INc.  Both are logged
+     * provisionally, since their level is less than 4, the checkpointer's max
+     * flush level.  INb, the parent of INc, is dirty.
+     *
+     * 4) INc, along with BINd, is loaded back into the Btree as the result of
+     * reading an LN in BINd.  INc and BINd are both non-dirty.  INb, the
+     * parent of INc, is still dirty.
+     *
+     * 5) The checkpointer processes its reference to BINd in the dirty map.
+     * It finds that BINd is not dirty, so does not need to be logged.  It
+     * attempts to add the parent, INc, to the dirty map in order to propagate
+     * changes upward.  However, because INc is not dirty, it is not added to
+     * the dirty map -- this was the bug; it should be added even if not dirty.
+     * So as the result of this step, the checkpointer does no logging and does
+     * not add anything to the dirty map.
+     *
+     * 6) The checkpointer logs INa (it was dirty at the start of the
+     * checkpoint) and the checkpoint finishes.  It deletes the cleaned log
+     * file that contains the original version of BINd.
+     *
+     * The key thing is that INb is now dirty and was not logged.  It should
+     * have been logged as the result of being an ancestor of BINd, which was
+     * in the dirty map.  Its parent INa was logged, but does not refer to the
+     * latest version of INb/INc/BINd.
+     *
+     * 7) Now we recover.  INc and BINd, which were evicted during step (3),
+     * are not replayed because they are provisional -- they are lost.  When a
+     * search for an LN in BINd is performed, we traverse down to the old
+     * version of BINd, which causes LogFileNotFound.
+     *
+     * The fix is to add INc to the dirty map at step (5), even though it is
+     * not dirty.  When the reference to INc in the dirty map is processed we
+     * will not log INc, but we will add its parent INb to the dirty map.  Then
+     * when the reference to INb is processed, it will be logged because it is
+     * dirty.  Then INa is logged and refers to the latest version of
+     * INb/INc/BINd.
+     *
+     * This problem could only occur with a Btree of depth 4 or greater.
+     */
+    public void testEvictionDuringCheckpoint()
+        throws DatabaseException {
+
+        /* Use small fanout to create a deep tree. */
+        final int FANOUT = 6;
+        final int N_KEYS = FANOUT * FANOUT * FANOUT;
+
+        /* Open environment without interference of daemon threads. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        exampleEnv = new Environment(envHome, envConfig);
+        final EnvironmentImpl envImpl =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+
+        /* Open database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setNodeMaxEntries(FANOUT);
+        exampleDb = exampleEnv.openDatabase(null, "foo", dbConfig);
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(exampleDb);
+
+        /* Write to database to create a 4 level Btree. */
+        final DatabaseEntry keyEntry = new DatabaseEntry();
+        final DatabaseEntry dataEntry = new DatabaseEntry(new byte[0]);
+        int nRecords;
+        for (nRecords = 1;; nRecords += 1) {
+            LongBinding.longToEntry(nRecords, keyEntry);
+            assertSame(OperationStatus.SUCCESS,
+                       exampleDb.put(null, keyEntry, dataEntry));
+            if (nRecords % 10 == 0) {
+                int level = envImpl.getDbTree().getHighestLevel(dbImpl);
+                if ((level & IN.LEVEL_MASK) >= 4) {
+                    break;
+                }
+            }
+        }
+
+        /* Flush all dirty nodes. */
+        exampleEnv.sync();
+
+        /* Get BINd and its ancestors.  Mark BINd and INa dirty. */
+        final IN nodeINa = dbImpl.getTree().getRootIN(CacheMode.DEFAULT);
+        nodeINa.releaseLatch();
+        final IN nodeINb = (IN) nodeINa.getTarget(0);
+        final IN nodeINc = (IN) nodeINb.getTarget(0);
+        final BIN nodeBINd = (BIN) nodeINc.getTarget(0);
+        assertNotNull(nodeBINd);
+        nodeINa.setDirty(true);
+        nodeBINd.setDirty(true);
+
+        /*
+         * The test hook is called after creating the checkpoint dirty map and
+         * just before flushing dirty nodes.
+         */
+        final StringBuilder hookCalledFlag = new StringBuilder();
+
+        Checkpointer.setBeforeFlushHook(new TestHook() {
+            public void doHook() {
+                hookCalledFlag.append(1);
+                /* Don't call the test hook repeatedly. */
+                Checkpointer.setBeforeFlushHook(null);
+                try {
+                    /* Evict BINd and INc. */
+                    simulateEviction(exampleEnv, envImpl, nodeBINd, nodeINc);
+                    simulateEviction(exampleEnv, envImpl, nodeINc, nodeINb);
+
+                    /*
+                     * Force BINd and INc to be loaded into cache by fetching
+                     * the left-most record.
+                     *
+                     * Note that nodeINc and nodeBINd are different instances
+                     * and are no longer in the Btree but we don't change these
+                     * variables because they are final.  They should not be
+                     * used past this point.
+                     */
+                    LongBinding.longToEntry(1, keyEntry);
+                    assertSame(OperationStatus.SUCCESS,
+                               exampleDb.get(null, keyEntry, dataEntry, null));
+                } catch (DatabaseException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        });
+        exampleEnv.checkpoint(forceConfig);
+        assertTrue(hookCalledFlag.length() > 0);
+        assertTrue(!nodeINa.getDirty());
+        assertTrue(!nodeINb.getDirty()); /* This failed before the bug fix. */
+
+        closeEnv();
+    }
+
+    /**
+     * Simulate eviction by logging this node, updating the LSN in its
+     * parent slot, setting the Node to null in the parent slot, and
+     * removing the IN from the INList.  Logging is provisional.  The
+     * parent is dirtied.  May not be called unless this node is dirty and
+     * none of its children are dirty.  Children may be resident.
+     */
+    private void simulateEviction(Environment env,
+                                  EnvironmentImpl envImpl,
+                                  IN nodeToEvict,
+                                  IN parentNode)
+        throws DatabaseException {
+
+        assertTrue("not dirty " + nodeToEvict.getNodeId(),
+                   nodeToEvict.getDirty());
+        assertTrue(!hasDirtyChildren(nodeToEvict));
+        parentNode.latch();
+        long lsn = TestUtils.logIN
+            (env, nodeToEvict, true /*provisional*/, parentNode);
+        int index;
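+        /* Find the parent slot that references the node being evicted. */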
+        for (index = 0;; index += 1) {
+            if (index >= parentNode.getNEntries()) {
+                fail();
+            }
+            if (parentNode.getTarget(index) == nodeToEvict) {
+                break;
+            }
+        }
+        parentNode.updateNode(index, null /*node*/, lsn, null /*lnSlotKey*/);
+        parentNode.releaseLatch();
+        envImpl.getInMemoryINs().remove(nodeToEvict);
+    }
+
+    private boolean hasDirtyChildren(IN parent) {
+        for (int i = 0; i < parent.getNEntries(); i += 1) {
+            Node child = parent.getTarget(i);
+            if (child instanceof IN) {
+                IN in = (IN) child;
+                if (in.getDirty()) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    public void testMultiCleaningBug()
+        throws DatabaseException {
+
+        initEnv(true, false);
+
+        final EnvironmentImpl envImpl =
+            DbInternal.envGetEnvironmentImpl(exampleEnv);
+        final FileSelector fileSelector =
+            envImpl.getCleaner().getFileSelector();
+
+        Map<String, Set<String>> expectedMap =
+            new HashMap<String, Set<String>>();
+        doLargePut(expectedMap, 1000, 1, true);
+        modifyData(expectedMap, 1, true);
+        checkData(expectedMap);
+
+        final TestHook hook = new TestHook() {
+            public void doHook() {
+                /* Signal that hook was called. */
+                if (synchronizer != 99) {
+                    synchronizer = 1;
+                }
+                /* Wait for signal to proceed with cleaning. */
+                while (synchronizer != 2 &&
+                       synchronizer != 99 &&
+                       !Thread.interrupted()) {
+                    Thread.yield();
+                }
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        };
+
+        junitThread = new JUnitThread("TestMultiCleaningBug") {
+            public void testBody()
+                throws DatabaseException {
+
+                try {
+                    while (synchronizer != 99) {
+                        /* Wait for initial state. */
+                        while (synchronizer != 0 &&
+                               synchronizer != 99 &&
+                               !Thread.interrupted()) {
+                            Thread.yield();
+                        }
+                        /* Clean with hook set, hook is called next. */
+                        fileSelector.setFileChosenHook(hook);
+                        exampleEnv.cleanLog();
+                        /* Signal that cleaning is done. */
+                        if (synchronizer != 99) {
+                            synchronizer = 3;
+                        }
+                    }
+                } catch (Throwable e) {
+                    e.printStackTrace();
+                }
+            }
+        };
+
+        /* Kick off thread above. */
+        synchronizer = 0;
+        junitThread.start();
+
+        for (int i = 0; i < 100 && junitThread.isAlive(); i += 1) {
+            /* Wait for hook to be called when a file is chosen. */
+            while (synchronizer != 1 && junitThread.isAlive()) {
+                Thread.yield();
+            }
+            /* Allow the thread to clean the chosen file. */
+            synchronizer = 2;
+            /* But immediately clean here, which could select the same file. */
+            fileSelector.setFileChosenHook(null);
+            exampleEnv.cleanLog();
+            /* Wait for both cleaner runs to finish. */
+            while (synchronizer != 3 && junitThread.isAlive()) {
+                Thread.yield();
+            }
+            /* Make more waste to be cleaned. */
+            modifyData(expectedMap, 1, true);
+            synchronizer = 0;
+        }
+
+        synchronizer = 99;
+
+        try {
+            junitThread.finishTest();
+            junitThread = null;
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/CleanerTestUtils.java b/test/com/sleepycat/je/cleaner/CleanerTestUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..b6d89f0c60e2f2d5941cef8e5970c76d52abba8d
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/CleanerTestUtils.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CleanerTestUtils.java,v 1.14.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Package utilities.
+ */
+public class CleanerTestUtils {
+
+    /**
+     * Gets the file of the LSN at the cursor position, using internal methods.
+     */
+    static long getLogFile(TestCase test, Cursor cursor)
+        throws DatabaseException {
+
+        CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor);
+        int index;
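+
+        /*
+         * Use the duplicates BIN and index when the cursor is positioned
+         * within a set of duplicates; otherwise use the main BIN.
+         */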
+        BIN bin = impl.getDupBIN();
+        if (bin != null) {
+            index = impl.getDupIndex();
+        } else {
+            bin = impl.getBIN();
+            TestCase.assertNotNull(bin);
+            index = impl.getIndex();
+        }
+        TestCase.assertNotNull(bin.getTarget(index));
+        long lsn = bin.getLsn(index);
+        TestCase.assertTrue(lsn != DbLsn.NULL_LSN);
+        long file = DbLsn.getFileNumber(lsn);
+        return file;
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/FileSelectionTest.java b/test/com/sleepycat/je/cleaner/FileSelectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b085b7fdc8319e34172c7c9e49daaed3df2414e4
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/FileSelectionTest.java
@@ -0,0 +1,1328 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileSelectionTest.java,v 1.42.2.4 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.cleaner.DbFileSummaryMap;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+
+public class FileSelectionTest extends TestCase {
+
+    private static final String DBNAME = "cleanerFileSelection";
+    private static final int DATA_SIZE = 140;
+    private static final int FILE_SIZE = 4096 * 10;
+    private static final int INITIAL_FILES = 5;
+    private static final int INITIAL_FILES_TEMP = 1;
+    private static final int INITIAL_KEYS = 2000;
+    private static final int INITIAL_KEYS_DUPS = 5000;
+    private static final byte[] MAIN_KEY_FOR_DUPS = {0, 1, 2, 3, 4, 5};
+
+    private static final EnvironmentConfig envConfig = initConfig();
+    private static final EnvironmentConfig highUtilizationConfig =
+                                                                initConfig();
+    private static final EnvironmentConfig steadyStateAutoConfig =
+								initConfig();
+    static {
+        highUtilizationConfig.setConfigParam
+	    (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(),
+             String.valueOf(90));
+
+        steadyStateAutoConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true");
+    }
+
+    static EnvironmentConfig initConfig() {
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(config);
+        config.setTransactional(true);
+        config.setAllowCreate(true);
+        config.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(FILE_SIZE));
+        config.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                              "false");
+        config.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                              "false");
+        config.setConfigParam(EnvironmentParams.CLEANER_REMOVE.getName(),
+                              "false");
+        config.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+	    (EnvironmentParams.CLEANER_LOCK_TIMEOUT.getName(), "1");
+        config.setConfigParam
+	    (EnvironmentParams.CLEANER_MAX_BATCH_FILES.getName(), "1");
+        return config;
+    }
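+
+    /*
+     * Illustrative sketch (added; not used by the tests): a convenience
+     * overload of initConfig for an arbitrary minUtilization threshold,
+     * mirroring the highUtilizationConfig setup above.
+     */
+    static EnvironmentConfig initConfig(int minUtilization) {
+        EnvironmentConfig config = initConfig();
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(),
+             String.valueOf(minUtilization));
+        return config;
+    }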
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private JUnitThread junitThread;
+    private volatile int synchronizer;
+    private boolean dups;
+    private boolean deferredWrite;
+    private boolean temporary;
+
+    /* The index is the file number, the value is the first key in the file. */
+    private List firstKeysInFiles;
+
+    /* Set of keys that should exist. */
+    private Set existingKeys;
+
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        addTests(allTests, false, false); // transactional
+        addTests(allTests, true, false);  // deferredwrite
+        addTests(allTests, false, true);  // temporary
+        return allTests;
+    }
+
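+    /*
+     * Note (added comment): suite() and addTests() use the standard JUnit 3
+     * pattern for parameterizing tests -- TestSuite(Class) creates one
+     * TestCase instance per test method, and each instance is initialized
+     * with one mode combination before being added to the umbrella suite.
+     */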
+    private static void addTests(TestSuite allTests,
+                                 boolean deferredWrite,
+                                 boolean temporary) {
+        TestSuite suite = new TestSuite(FileSelectionTest.class);
+        Enumeration e = suite.tests();
+        while (e.hasMoreElements()) {
+            FileSelectionTest test = (FileSelectionTest) e.nextElement();
+            test.init(deferredWrite, temporary);
+            allTests.addTest(test);
+        }
+    }
+
+    public FileSelectionTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    private void init(boolean deferredWrite, boolean temporary) {
+        this.deferredWrite = deferredWrite;
+        this.temporary = temporary;
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(getName() +
+                (deferredWrite ? ":deferredWrite" :
+                 (temporary ? ":temporary" : ":txnl")));
+
+        if (junitThread != null) {
+            while (junitThread.isAlive()) {
+                junitThread.interrupt();
+                Thread.yield();
+            }
+            junitThread = null;
+        }
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        //*
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        //*/
+
+        db = null;
+        env = null;
+        envImpl = null;
+        envHome = null;
+        existingKeys = null;
+        firstKeysInFiles = null;
+    }
+
+    private void openEnv()
+        throws DatabaseException {
+
+        openEnv(envConfig);
+    }
+
+    private void openEnv(EnvironmentConfig config)
+        throws DatabaseException {
+
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(!isDeferredWriteMode());
+        dbConfig.setDeferredWrite(deferredWrite);
+        dbConfig.setTemporary(temporary);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, DBNAME, dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (temporary) {
+            existingKeys.clear();
+        }
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Tests that the test utilities work.
+     */
+    public void testBaseline()
+        throws DatabaseException {
+
+        int nCleaned;
+
+        openEnv();
+        writeData();
+        verifyData();
+        nCleaned = cleanRoutine();
+        /* One file may be cleaned after writing, if a checkpoint occurs. */
+        assertTrue(String.valueOf(nCleaned), nCleaned <= 1);
+        env.checkpoint(forceConfig);
+        nCleaned = cleanRoutine();
+        /* One file may be cleaned after cleaning and checkpointing. */
+        assertTrue(String.valueOf(nCleaned), nCleaned <= 1);
+        closeEnv();
+        openEnv();
+        verifyData();
+        nCleaned = cleanRoutine();
+        if (temporary) {
+            /* Temp DBs are automatically deleted and cleaned. */
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES_TEMP);
+        } else {
+            /* No files should be cleaned when no writing occurs. */
+            assertEquals(0, nCleaned);
+        }
+        closeEnv();
+    }
+
+    public void testBaselineDups()
+        throws DatabaseException {
+
+        dups = true;
+        testBaseline();
+    }
+
+    /**
+     * Tests that the expected files are selected for cleaning.
+     */
+    public void testBasic()
+        throws DatabaseException {
+
+        /* Test assumes that keys are written in order. */
+        if (isDeferredWriteMode()) {
+            return;
+        }
+
+        openEnv();
+        writeData();
+        verifyDeletedFiles(null);
+
+        /*
+         * The first file should be the first to be cleaned because it has
+         * relatively few LNs.
+         */
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {0});
+        verifyData();
+
+        /*
+         * Delete most of the LNs in two middle files.  They should be the next
+         * two files cleaned.
+         */
+        int fileNum = INITIAL_FILES / 2;
+        int firstKey = ((Integer) firstKeysInFiles.get(fileNum)).intValue();
+        int nextKey = ((Integer) firstKeysInFiles.get(fileNum + 1)).intValue();
+        int count = nextKey - firstKey - 4;
+        deleteData(firstKey, count);
+
+        fileNum += 1;
+        firstKey = ((Integer) firstKeysInFiles.get(fileNum)).intValue();
+        nextKey = ((Integer) firstKeysInFiles.get(fileNum + 1)).intValue();
+        count = nextKey - firstKey - 4;
+        deleteData(firstKey, count);
+
+        forceCleanOne();
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {0, fileNum - 1, fileNum});
+        verifyData();
+
+        closeEnv();
+    }
+
+    public void testBasicDups()
+        throws DatabaseException {
+
+        dups = true;
+        testBasic();
+    }
+
+    /*
+     * testCleaningMode, testTruncateDatabase, and testRemoveDatabase are
+     * not tested with dups=true because with duplicates the total utilization
+     * after calling writeData() is 47%, so cleaning will occur and the tests
+     * don't expect that.
+     */
+
+    /**
+     * Tests that routine cleaning does not clean when it should not.
+     */
+    public void testCleaningMode()
+        throws DatabaseException {
+
+        int nextFile = -1;
+        int nCleaned;
+
+        /*
+         * Nothing is cleaned with routine cleaning, even after reopening the
+         * environment.
+         */
+        openEnv();
+        writeData();
+
+        nCleaned = cleanRoutine();
+        assertEquals(0, nCleaned);
+        nextFile = getNextDeletedFile(nextFile);
+        assertTrue(nextFile == -1);
+
+        verifyData();
+        closeEnv();
+        openEnv();
+        verifyData();
+
+        nCleaned = cleanRoutine();
+        if (temporary) {
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES_TEMP);
+        } else {
+            assertEquals(0, nCleaned);
+            nextFile = getNextDeletedFile(nextFile);
+            assertTrue(nextFile == -1);
+        }
+
+        verifyData();
+
+        closeEnv();
+    }
+
+    /**
+     * Test retries after cleaning fails because an LN was write-locked.
+     */
+    public void testRetry()
+        throws DatabaseException {
+
+        /* Test assumes that keys are written in order. */
+        if (isDeferredWriteMode()) {
+            return;
+        }
+
+        openEnv(highUtilizationConfig);
+        writeData();
+        verifyData();
+
+        /*
+         * The first file is full of LNs.  Delete all but the last record to
+         * cause it to be selected next for cleaning.
+         */
+        int firstKey = ((Integer) firstKeysInFiles.get(1)).intValue();
+        int nextKey = ((Integer) firstKeysInFiles.get(2)).intValue();
+        int count = nextKey - firstKey - 1;
+        deleteData(firstKey, count);
+        verifyData();
+
+        /* Write-lock the last record to cause cleaning to fail. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+        if (dups) {
+            key.setData(MAIN_KEY_FOR_DUPS);
+            data.setData(TestUtils.getTestArray(nextKey - 1));
+            status = cursor.getSearchBoth(key, data, LockMode.RMW);
+        } else {
+            key.setData(TestUtils.getTestArray(nextKey - 1));
+            status = cursor.getSearchKey(key, data, LockMode.RMW);
+        }
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Cleaning should fail. */
+        forceCleanOne();
+        verifyDeletedFiles(null);
+        forceCleanOne();
+        verifyDeletedFiles(null);
+
+        /* Release the write-lock. */
+        cursor.close();
+        txn.abort();
+        verifyData();
+
+        /* Cleaning should succeed, with all files deleted. */
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {0, 1, 2});
+        verifyData();
+
+        closeEnv();
+    }
+
+    /**
+     * Tests that the je.cleaner.minFileUtilization property works as expected.
+     */
+    public void testMinFileUtilization()
+        throws DatabaseException {
+
+        /* Test assumes that keys are written in order. */
+        if (isDeferredWriteMode()) {
+            return;
+        }
+
+        /* Open with minUtilization=10 and minFileUtilization=0. */
+        EnvironmentConfig myConfig = initConfig();
+        myConfig.setConfigParam
+	    (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(),
+             String.valueOf(10));
+        myConfig.setConfigParam
+	    (EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(),
+             String.valueOf(0));
+        openEnv(myConfig);
+
+        /* Write data and delete two thirds of the LNs in the middle file. */
+        writeData();
+        verifyDeletedFiles(null);
+        int fileNum = INITIAL_FILES / 2;
+        int firstKey = ((Integer) firstKeysInFiles.get(fileNum)).intValue();
+        int nextKey = ((Integer) firstKeysInFiles.get(fileNum + 1)).intValue();
+        int count = ((nextKey - firstKey) * 2) / 3;
+        deleteData(firstKey, count);
+
+        /* The file should not be deleted. */
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+        verifyDeletedFiles(null);
+
+        /* Change minFileUtilization=50 */
+        myConfig.setConfigParam
+	    (EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(),
+             String.valueOf(50));
+        env.setMutableConfig(myConfig);
+
+        /* The file should now be deleted. */
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+        verifyDeletedFiles(new int[] {fileNum});
+        verifyData();
+
+        closeEnv();
+    }
+
+    private void printFiles(String msg) {
+        System.out.print(msg);
+        Long lastNum = envImpl.getFileManager().getLastFileNum();
+        for (int i = 0; i <= (int) lastNum.longValue(); i += 1) {
+            String name = envImpl.getFileManager().
+                                  getFullFileName(i, FileManager.JE_SUFFIX);
+            if (new File(name).exists()) {
+                System.out.print(" " + i);
+            }
+        }
+        System.out.println("");
+    }
+
+    public void testRetryDups()
+        throws DatabaseException {
+
+        dups = true;
+        testRetry();
+    }
+
+    /**
+     * Steady state should occur with normal (50% utilization) configuration
+     * and automatic checkpointing and cleaning.
+     */
+    public void testSteadyStateAutomatic()
+        throws DatabaseException {
+
+        doSteadyState(steadyStateAutoConfig, false, 13);
+    }
+
+    public void testSteadyStateAutomaticDups()
+        throws DatabaseException {
+
+        dups = true;
+        testSteadyStateAutomatic();
+    }
+
+    /**
+     * Steady state utilization with manual checkpointing and cleaning.
+     */
+    public void testSteadyStateManual()
+        throws DatabaseException {
+
+        doSteadyState(envConfig, true, 13);
+    }
+
+    public void testSteadyStateManualDups()
+        throws DatabaseException {
+
+        dups = true;
+        testSteadyStateManual();
+    }
+
+    /**
+     * Steady state should occur when utilization is at the maximum.
+     */
+    public void testSteadyStateHighUtilization()
+        throws DatabaseException {
+
+        /*
+         * I don't know why a larger log is maintained with deferredWrite and
+         * temporary databases.
+         */
+        doSteadyState(highUtilizationConfig, true,
+                      (deferredWrite || temporary) ? 12 : 9);
+    }
+
+    public void testSteadyStateHighUtilizationDups()
+        throws DatabaseException {
+
+        dups = true;
+        testSteadyStateHighUtilization();
+    }
+
+    /**
+     * Tests that we quickly reach a steady state of disk usage when updates
+     * are made but no net increase in data occurs.
+     *
+     * @param manualCleaning is whether to run cleaning manually every
+     * iteration, or to rely on the cleaner thread.
+     *
+     * @param maxFileCount the maximum number of files allowed for this test.
+     */
+    private void doSteadyState(EnvironmentConfig config,
+                               boolean manualCleaning,
+                               int maxFileCount)
+        throws DatabaseException {
+
+        openEnv(config);
+        writeData();
+        verifyData();
+
+        final int iterations = 100;
+
+        for (int i = 0; i < iterations; i += 1) {
+            updateData(100, 100);
+            int cleaned = -1;
+            if (manualCleaning) {
+                cleaned = cleanRoutine();
+            } else {
+	        /* Need to delay a bit for the cleaner to keep up. */
+                try {
+                    Thread.sleep(25);
+                } catch (InterruptedException e) {}
+            }
+
+	    /*
+             * Checkpoints need to occur often for the cleaner to keep up
+             * and to delete files that were cleaned.
+             */
+	    env.checkpoint(forceConfig);
+            verifyData();
+            int fileCount =
+                envImpl.getFileManager().getAllFileNumbers().length;
+            assertTrue("fileCount=" + fileCount +
+                       " maxFileCount=" + maxFileCount +
+                       " iteration=" + i,
+                       fileCount <= maxFileCount);
+            if (false) {
+                System.out.println("fileCount=" + fileCount +
+                                   " cleaned=" + cleaned);
+            }
+        }
+        closeEnv();
+    }
+
+    /**
+     * Tests that truncate causes cleaning.
+     */
+    public void testTruncateDatabase()
+        throws IOException, DatabaseException {
+
+        int nCleaned;
+
+        openEnv();
+        writeData();
+
+        nCleaned = cleanRoutine();
+        assertEquals(0, nCleaned);
+        db.close();
+        db = null;
+
+        /*
+         * Temporary databases are removed when the database is closed, so
+         * don't call truncate explicitly.
+         */
+        if (!temporary) {
+            env.truncateDatabase(null, DBNAME, false /* returnCount */);
+        }
+
+        nCleaned = cleanRoutine();
+        if (temporary) {
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES_TEMP - 1);
+        } else {
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES - 1);
+        }
+
+        closeEnv();
+    }
+
+    /**
+     * Tests that remove causes cleaning.
+     */
+    public void testRemoveDatabase()
+        throws DatabaseException {
+
+        int nCleaned;
+
+        openEnv();
+        writeData();
+
+        String dbName = db.getDatabaseName();
+        db.close();
+        db = null;
+
+        nCleaned = cleanRoutine();
+        if (temporary) {
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES_TEMP - 1);
+            assertTrue(!env.getDatabaseNames().contains(dbName));
+        } else {
+            assertEquals(0, nCleaned);
+
+            env.removeDatabase(null, dbName);
+            nCleaned = cleanRoutine();
+            assertTrue(String.valueOf(nCleaned),
+                       nCleaned >= INITIAL_FILES - 1);
+        }
+
+        closeEnv();
+    }
+
+    public void testForceCleanFiles()
+        throws DatabaseException {
+
+        /* When the temp DB is closed many files will be cleaned. */
+        if (temporary) {
+            return;
+        }
+
+        /* No files force cleaned. */
+        EnvironmentConfig myConfig = initConfig();
+        openEnv(myConfig);
+        writeData();
+        verifyData();
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+        verifyDeletedFiles(null);
+        closeEnv();
+
+        /* Force cleaning: 3 */
+        myConfig.setConfigParam
+            (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName(),
+             "3");
+        openEnv(myConfig);
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {3});
+        closeEnv();
+
+        /* Force cleaning: 0 - 1 */
+        myConfig.setConfigParam
+            (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName(),
+             "0-1");
+        openEnv(myConfig);
+        forceCleanOne();
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {0, 1, 3});
+        closeEnv();
+    }
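+
+    /*
+     * Note (added comment): as exercised above, the forceCleanFiles
+     * parameter accepts either a single file number ("3") or an inclusive
+     * range ("0-1").
+     */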
+
+    /**
+     * Checks that old version log files are upgraded when
+     * je.cleaner.upgradeToLogVersion is set.  The version 5 log files to be
+     * upgraded in this test were created with MakeMigrationLogFiles.
+     */
+    public void testLogVersionUpgrade()
+        throws DatabaseException, IOException {
+
+        if (temporary) {
+            /* This test is not applicable. */
+            return;
+        }
+
+        /* Copy pre-created files 0 and 1, which are log version 5. */
+        TestUtils.loadLog
+            (getClass(), "migrate_f0.jdb", envHome, "00000000.jdb");
+        TestUtils.loadLog
+            (getClass(), "migrate_f1.jdb", envHome, "00000001.jdb");
+
+        /*
+         * Write several more files which are log version 6 or greater.  To
+         * check whether these files are cleaned below we need to write more
+         * than 2 files (2 is the minimum age for cleaning).
+         */
+        env = MakeMigrationLogFiles.openEnv(envHome, false /*allowCreate*/);
+        MakeMigrationLogFiles.makeMigrationLogFiles(env);
+        env.checkpoint(forceConfig);
+        MakeMigrationLogFiles.makeMigrationLogFiles(env);
+        env.checkpoint(forceConfig);
+        closeEnv();
+
+        /* With upgradeToLogVersion=0 no files should be cleaned. */
+        openEnvWithUpgradeToLogVersion(0);
+        int nFiles = env.cleanLog();
+        assertEquals(0, nFiles);
+        closeEnv();
+
+        /* With upgradeToLogVersion=5 no files should be cleaned. */
+        openEnvWithUpgradeToLogVersion(5);
+        nFiles = env.cleanLog();
+        assertEquals(0, nFiles);
+        closeEnv();
+
+        /* Upgrade log files to the current version, which is 6 or greater. */
+        openEnvWithUpgradeToLogVersion(-1); // -1 means current version
+
+        /*
+         * Clean one log file at a time so we can check that the backlog is
+         * not impacted by log file migration.
+         */
+        for (int i = 0; i < 2; i += 1) {
+            nFiles = DbInternal.envGetEnvironmentImpl(env).getCleaner().doClean
+                (false /*cleanMultipleFiles*/, false /*forceCleaning*/);
+            assertEquals(1, nFiles);
+            EnvironmentStats stats = env.getStats(null);
+            assertEquals(0, stats.getCleanerBacklog());
+        }
+        env.checkpoint(forceConfig);
+        verifyDeletedFiles(new int[] {0, 1});
+
+        /* No more files should be cleaned. */
+        nFiles = env.cleanLog();
+        assertEquals(0, nFiles);
+        closeEnv();
+
+        /*
+         * Force clean file 2 to confirm that it was skipped above because of
+         * its log version and not for some other reason.
+         */
+        EnvironmentConfig myConfig = initConfig();
+        myConfig.setConfigParam
+            (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName(), "2");
+        openEnv(myConfig);
+        nFiles = env.cleanLog();
+        assertEquals(1, nFiles);
+        env.checkpoint(forceConfig);
+        verifyDeletedFiles(new int[] {0, 1, 2});
+
+        closeEnv();
+    }
+
+    private void openEnvWithUpgradeToLogVersion(int upgradeToLogVersion)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_UPGRADE_TO_LOG_VERSION.getName(),
+             String.valueOf(upgradeToLogVersion));
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+    }
+
+    /**
+     * Tests that when cleaned files are deleted during a compression, the
+     * flushing of the local tracker does not transfer tracker information
+     * for the deleted files. [#15528]
+     *
+     * This test also checks that tracker information is not transferred to
+     * the MapLN's per-DB utilization information in DbFileSummaryMap.  This
+     * was occurring in JE 3.3.74 and earlier, under the same circumstances as
+     * tested here (IN compression).  [#16610]
+     */
+    public void testCompressionBug()
+        throws DatabaseException {
+
+        /*
+         * We need to compress deleted keys and count their utilization under
+         * an explicit compress() call.  With deferred write, no utilization
+         * counting occurs until eviction/sync, and that would also do
+         * compression.
+         */
+        if (isDeferredWriteMode()) {
+            return;
+        }
+
+        EnvironmentConfig envConfig = initConfig();
+        /* Disable compressor so we can compress explicitly. */
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Ensure that we check for resurrected file leaks. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "true");
+        openEnv(envConfig);
+
+        /* Write and then delete all data. */
+        writeData();
+        for (Iterator i = existingKeys.iterator(); i.hasNext();) {
+            int nextKey = ((Integer) i.next()).intValue();
+            DatabaseEntry key =
+                new DatabaseEntry(TestUtils.getTestArray(nextKey));
+            OperationStatus status = db.delete(null, key);
+            assertSame(OperationStatus.SUCCESS, status);
+        }
+
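+        /*
+         * Added comment: synchronizer is a handshake between this thread and
+         * the compressor thread -- 0: compression has not yet reached the
+         * test hook; 1: the hook is running and waiting; 2: cleaning and
+         * checkpointing are done and the hook may return.
+         */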
+        synchronizer = 0;
+
+        /* Create thread that will do the compression. */
+        junitThread = new JUnitThread("TestCompress") {
+            public void testBody()
+                throws DatabaseException {
+                try {
+                    /* compress() will call the test hook below. */
+                    env.compress();
+                } catch (Throwable e) {
+                    e.printStackTrace();
+                }
+            }
+        };
+
+        /*
+         * Set a hook that is called by the INCompressor before it calls
+         * UtilizationProfile.flushLocalTracker.
+         */
+        envImpl.getINCompressor().setBeforeFlushTrackerHook(new TestHook() {
+            public void doHook() {
+                synchronizer = 1;
+                /* Wait for log cleaning to complete. */
+                while (synchronizer < 2 && !Thread.interrupted()) {
+                    Thread.yield();
+                }
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        });
+
+        /* Kick off test in thread above. */
+        junitThread.start();
+        /* Wait for hook to be called at the end of compression. */
+        while (synchronizer < 1) Thread.yield();
+        /* Clean and checkpoint to delete cleaned files. */
+        while (env.cleanLog() > 0) { }
+        env.checkpoint(forceConfig);
+        /* Allow test hook to return, so that flushLocalTracker is called. */
+        synchronizer = 2;
+
+        /*
+         * Before the fix [#15528], an assertion fired in
+         * BaseUtilizationTracker.getFileSummary when flushLocalTracker was
+         * called.  This assertion fires if the file being tracked does not
+         * exist.  The fix was to check for valid files in flushLocalTracker.
+         */
+        try {
+            junitThread.finishTest();
+            junitThread = null;
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+
+        closeEnv();
+    }
+
+    /**
+     * Checks that DB utilization is repaired when damaged by JE 3.3.74 or
+     * earlier.  This test goes to some trouble to create a DatabaseImpl with
+     * the repair-done flag not set and with a DB file summary for a deleted
+     * file.  [#16610]
+     */
+    public void testDbUtilizationRepair()
+        throws DatabaseException, IOException {
+
+        openEnv();
+        writeData();
+        forceCleanOne();
+        verifyDeletedFiles(new int[] {0});
+
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        DbFileSummaryMap summaries = dbImpl.getDbFileSummaries();
+
+        /* New version DB does not need repair. */
+        assertTrue(dbImpl.getUtilizationRepairDone());
+
+        /* Deleted file is absent from summary map. */
+        assertNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/,
+                                 true /*checkResurrected*/,
+                                 envImpl.getFileManager()));
+
+        /*
+         * Force addition of deleted file to summary map by creating a dummy
+         * file to prevent assertions from firing.
+         */
+        File dummyFile = new File(env.getHome(), "00000000.jdb");
+        assertTrue(dummyFile.createNewFile());
+        assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/,
+                                    false /*checkResurrected*/,
+                                    envImpl.getFileManager()));
+        assertTrue(dummyFile.delete());
+
+        /* Now the summary map contains an entry for a deleted file. */
+        assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/,
+                                    true /*checkResurrected*/,
+                                    envImpl.getFileManager()));
+
+        /* Force the MapLN with the bad entry to be flushed. */
+        dbImpl.setDirtyUtilization();
+        env.checkpoint(forceConfig);
+        closeEnv();
+
+        /* If the DB is temporary, we can't test it further. */
+        if (temporary) {
+            return;
+        }
+
+        /*
+         * When the DB is opened, the repair should not take place, because we
+         * did not clear the repair done flag above.
+         */
+        openEnv();
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        summaries = dbImpl.getDbFileSummaries();
+        assertTrue(dbImpl.getUtilizationRepairDone());
+        assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/,
+                                    true /*checkResurrected*/,
+                                    envImpl.getFileManager()));
+
+        /* Clear the repair done flag and force the MapLN to be flushed. */
+        dbImpl.clearUtilizationRepairDone();
+        dbImpl.setDirtyUtilization();
+        env.checkpoint(forceConfig);
+        closeEnv();
+
+        /*
+         * Since the repair done flag was cleared above, when the DB is opened,
+         * the repair should take place.
+         */
+        openEnv();
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        summaries = dbImpl.getDbFileSummaries();
+        assertTrue(dbImpl.getUtilizationRepairDone());
+        assertNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/,
+                                 true /*checkResurrected*/,
+                                 envImpl.getFileManager()));
+        closeEnv();
+    }
+
+    /**
+     * Force cleaning of one file.
+     */
+    private void forceCleanOne()
+        throws DatabaseException {
+
+        envImpl.getCleaner().doClean(false, // cleanMultipleFiles
+                                     true); // forceCleaning
+        /* To force file deletion a checkpoint is necessary. */
+        env.checkpoint(forceConfig);
+    }
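+
+    /*
+     * Illustrative sketch (added; not called by any test above): the
+     * clean-until-done pattern, pairing repeated cleanLog calls with a
+     * forced checkpoint so that cleaned files become deletable, as
+     * testCompressionBug does inline.
+     */
+    private void forceCleanAll()
+        throws DatabaseException {
+
+        while (env.cleanLog() > 0) {
+        }
+        env.checkpoint(forceConfig);
+    }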
+
+    /**
+     * Do routine cleaning just as normally done via the cleaner daemon, and
+     * return the number of files cleaned.
+     */
+    private int cleanRoutine()
+        throws DatabaseException {
+
+        return env.cleanLog();
+    }
+
+    /**
+     * Use transactions when not testing deferred write or temporary DBs.
+     */
+    private boolean isDeferredWriteMode() {
+        return deferredWrite || temporary;
+    }
+
+    /**
+     * Forces eviction when a temporary database is used, since otherwise data
+     * will not be flushed.
+     */
+    private void forceEvictionIfTemporary()
+        throws DatabaseException {
+
+        if (temporary) {
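+            /*
+             * Added comment: shrink the cache to near its minimum, evict,
+             * and restore the original size; evicting the temporary DB's
+             * dirty nodes forces them to be logged.
+             */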
+            EnvironmentMutableConfig config = env.getMutableConfig();
+            long saveSize = config.getCacheSize();
+            config.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE * 2);
+            env.setMutableConfig(config);
+            env.evictMemory();
+            config.setCacheSize(saveSize);
+            env.setMutableConfig(config);
+        }
+    }
+
+    /**
+     * Writes data to create INITIAL_FILES number of files, storing the first
+     * key for each file in the firstKeysInFiles list.  One extra file is
+     * actually created, to ensure that the firstActiveLSN is not in any of
+     * the first INITIAL_FILES files.
+     */
+    private void writeData()
+        throws DatabaseException {
+
+        int firstFile =
+            (int) envImpl.getFileManager().getLastFileNum().longValue();
+        assertEquals(0, firstFile);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]);
+        existingKeys = new HashSet();
+
+        if (isDeferredWriteMode()) {
+            firstKeysInFiles = null;
+
+            Cursor cursor = db.openCursor(null, null);
+
+            int maxKey = (dups ? INITIAL_KEYS_DUPS : INITIAL_KEYS);
+            for (int nextKey = 0; nextKey < maxKey; nextKey += 1) {
+
+                OperationStatus status;
+                if (dups) {
+                    key.setData(MAIN_KEY_FOR_DUPS);
+                    data.setData(TestUtils.getTestArray(nextKey));
+                    status = cursor.putNoDupData(key, data);
+                } else {
+                    key.setData(TestUtils.getTestArray(nextKey));
+                    data.setData(new byte[DATA_SIZE]);
+                    status = cursor.putNoOverwrite(key, data);
+                }
+
+                assertEquals(OperationStatus.SUCCESS, status);
+                existingKeys.add(new Integer(nextKey));
+            }
+
+            cursor.close();
+        } else {
+            firstKeysInFiles = new ArrayList();
+
+            Transaction txn = env.beginTransaction(null, null);
+            Cursor cursor = db.openCursor(txn, null);
+            int fileNum = -1;
+
+            for (int nextKey = 0; fileNum < INITIAL_FILES; nextKey += 1) {
+
+                OperationStatus status;
+                if (dups) {
+                    key.setData(MAIN_KEY_FOR_DUPS);
+                    data.setData(TestUtils.getTestArray(nextKey));
+                    status = cursor.putNoDupData(key, data);
+                } else {
+                    key.setData(TestUtils.getTestArray(nextKey));
+                    data.setData(new byte[DATA_SIZE]);
+                    status = cursor.putNoOverwrite(key, data);
+                }
+
+                assertEquals(OperationStatus.SUCCESS, status);
+                existingKeys.add(new Integer(nextKey));
+
+                long lsn = getLsn(cursor);
+                if (DbLsn.getFileNumber(lsn) != fileNum) {
+                    assertTrue(fileNum < DbLsn.getFileNumber(lsn));
+                    fileNum = (int) DbLsn.getFileNumber(lsn);
+                    assertEquals(fileNum, firstKeysInFiles.size());
+                    firstKeysInFiles.add(new Integer(nextKey));
+                }
+            }
+            //System.out.println("Num keys: " + existingKeys.size());
+
+            cursor.close();
+            txn.commit();
+        }
+
+        forceEvictionIfTemporary();
+        env.checkpoint(forceConfig);
+
+        int lastFile =
+            (int) envImpl.getFileManager().getLastFileNum().longValue();
+        if (temporary) {
+            assertTrue(String.valueOf(lastFile),
+                       lastFile >= INITIAL_FILES_TEMP);
+        } else {
+            assertTrue(String.valueOf(lastFile),
+                       lastFile >= INITIAL_FILES);
+        }
+        //System.out.println("last file " + lastFile);
+    }
+
+    /**
+     * Deletes the specified keys.
+     */
+    private void deleteData(int firstKey, int keyCount)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        Transaction txn = !isDeferredWriteMode() ?
+            env.beginTransaction(null, null) : null;
+        Cursor cursor = db.openCursor(txn, null);
+
+        for (int i = 0; i < keyCount; i += 1) {
+            int nextKey = firstKey + i;
+            OperationStatus status;
+            if (dups) {
+                key.setData(MAIN_KEY_FOR_DUPS);
+                data.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchBoth(key, data, null);
+            } else {
+                key.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchKey(key, data, null);
+            }
+            assertEquals(OperationStatus.SUCCESS, status);
+            status = cursor.delete();
+            assertEquals(OperationStatus.SUCCESS, status);
+            existingKeys.remove(new Integer(nextKey));
+        }
+
+        cursor.close();
+        if (txn != null) {
+            txn.commit();
+        }
+        forceEvictionIfTemporary();
+    }
+
+    /**
+     * Updates the specified keys.
+     */
+    private void updateData(int firstKey, int keyCount)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        Transaction txn = !isDeferredWriteMode() ?
+            env.beginTransaction(null, null) : null;
+        Cursor cursor = db.openCursor(txn, null);
+
+        for (int i = 0; i < keyCount; i += 1) {
+            int nextKey = firstKey + i;
+            OperationStatus status;
+            if (dups) {
+                key.setData(MAIN_KEY_FOR_DUPS);
+                data.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchBoth(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(MAIN_KEY_FOR_DUPS.length, key.getSize());
+                assertEquals(nextKey, TestUtils.getTestVal(data.getData()));
+            } else {
+                key.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchKey(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(nextKey, TestUtils.getTestVal(key.getData()));
+                assertEquals(DATA_SIZE, data.getSize());
+            }
+            status = cursor.putCurrent(data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        cursor.close();
+        if (txn != null) {
+            txn.commit();
+        }
+        forceEvictionIfTemporary();
+    }
+
+    /**
+     * Verifies that the data written by writeData can be read.
+     */
+    private void verifyData()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        Transaction txn = !isDeferredWriteMode() ?
+            env.beginTransaction(null, null) : null;
+        Cursor cursor = db.openCursor(txn, null);
+
+        for (Iterator i = existingKeys.iterator(); i.hasNext();) {
+            int nextKey = ((Integer) i.next()).intValue();
+            OperationStatus status;
+            if (dups) {
+                key.setData(MAIN_KEY_FOR_DUPS);
+                data.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchBoth(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(MAIN_KEY_FOR_DUPS.length, key.getSize());
+                assertEquals(nextKey, TestUtils.getTestVal(data.getData()));
+            } else {
+                key.setData(TestUtils.getTestArray(nextKey));
+                status = cursor.getSearchKey(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(nextKey, TestUtils.getTestVal(key.getData()));
+                assertEquals(DATA_SIZE, data.getSize());
+            }
+        }
+
+        cursor.close();
+        if (txn != null) {
+            txn.commit();
+        }
+    }
+
+    /**
+     * Checks that all log files exist except those specified.
+     */
+    private void verifyDeletedFiles(int[] shouldNotExist) {
+        Long lastNum = envImpl.getFileManager().getLastFileNum();
+        for (int i = 0; i <= (int) lastNum.longValue(); i += 1) {
+            boolean shouldExist = true;
+            if (shouldNotExist != null) {
+                for (int j = 0; j < shouldNotExist.length; j += 1) {
+                    if (i == shouldNotExist[j]) {
+                        shouldExist = false;
+                        break;
+                    }
+                }
+            }
+            String name = envImpl.getFileManager().
+                getFullFileName(i, FileManager.JE_SUFFIX);
+            assertEquals(name, shouldExist, new File(name).exists());
+        }
+    }
+
+    /**
+     * Returns the first deleted file number or -1 if none.
+     */
+    private int getNextDeletedFile(int afterFile) {
+        Long lastNum = envImpl.getFileManager().getLastFileNum();
+        for (int i = afterFile + 1; i <= (int) lastNum.longValue(); i += 1) {
+            String name = envImpl.getFileManager().
+                                  getFullFileName(i, FileManager.JE_SUFFIX);
+            if (!(new File(name).exists())) {
+                return i;
+            }
+        }
+        return -1;
+    }
+
+    /**
+     * Gets the LSN at the cursor position, using internal methods.
+     */
+    private long getLsn(Cursor cursor)
+        throws DatabaseException {
+
+        CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor);
+        BIN bin;
+        int index;
+        if (dups) {
+            bin = impl.getDupBIN();
+            index = impl.getDupIndex();
+            if (bin == null) {
+                bin = impl.getBIN();
+                index = impl.getIndex();
+                assertNotNull(bin);
+            }
+        } else {
+            assertNull(impl.getDupBIN());
+            bin = impl.getBIN();
+            index = impl.getIndex();
+            assertNotNull(bin);
+        }
+        assertTrue(index >= 0);
+        long lsn = bin.getLsn(index);
+        assertTrue(lsn != DbLsn.NULL_LSN);
+        return lsn;
+    }
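+
+    /*
+     * Illustrative sketch (added; not called by the tests): deriving the log
+     * file number from the LSN at the cursor position, as writeData does
+     * when it records firstKeysInFiles.
+     */
+    private long getFileOfCursor(Cursor cursor)
+        throws DatabaseException {
+
+        return DbLsn.getFileNumber(getLsn(cursor));
+    }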
+}
diff --git a/test/com/sleepycat/je/cleaner/INUtilizationTest.java b/test/com/sleepycat/je/cleaner/INUtilizationTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..944459cf3d7ad34adb3ac254aafb1aae6d6ca126
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/INUtilizationTest.java
@@ -0,0 +1,970 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INUtilizationTest.java,v 1.27.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.SearchFileReader;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Test utilization counting of INs.
+ */
+public class INUtilizationTest extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private DatabaseImpl dbImpl;
+    private Transaction txn;
+    private Cursor cursor;
+    private boolean dups = false;
+    private DatabaseEntry keyEntry = new DatabaseEntry();
+    private DatabaseEntry dataEntry = new DatabaseEntry();
+    private boolean truncateOrRemoveDone;
+
+    public INUtilizationTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        envImpl = null;
+        db = null;
+        dbImpl = null;
+        txn = null;
+        cursor = null;
+        keyEntry = null;
+        dataEntry = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(config);
+        config.setTransactional(true);
+        config.setTxnNoSync(true);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Use a tiny log file size to write one node per file. */
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(64));
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /* Speed up test that uses lots of very small files. */
+        envImpl.getFileManager().setSyncAtFileEnd(false);
+
+        openDb();
+    }
+
+    /**
+     * Opens the database.
+     */
+    private void openDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+    }
+
+    private void closeEnv(boolean doCheckpoint)
+        throws DatabaseException {
+
+        closeEnv(doCheckpoint,
+                 true,  // expectAccurateObsoleteLNCount
+                 true); // expectAccurateObsoleteLNSize
+    }
+
+    private void closeEnv(boolean doCheckpoint,
+                          boolean expectAccurateObsoleteLNCount)
+        throws DatabaseException {
+
+        closeEnv(doCheckpoint,
+                 expectAccurateObsoleteLNCount,
+                 expectAccurateObsoleteLNCount);
+    }
+
+    /**
+     * Closes the environment and database.
+     *
+     * @param expectAccurateObsoleteLNCount should be false when a deleted LN
+     * is not counted properly by recovery because its parent INs were flushed
+     * and the obsolete LN was not found in the tree.
+     *
+     * @param expectAccurateObsoleteLNSize should be false when a tree walk is
+     * performed for truncate/remove or an abortLsn is counted by recovery.
+     */
+    private void closeEnv(boolean doCheckpoint,
+                          boolean expectAccurateObsoleteLNCount,
+                          boolean expectAccurateObsoleteLNSize)
+        throws DatabaseException {
+
+        /*
+         * We pass expectAccurateDbUtilization as false when
+         * truncateOrRemoveDone, because the database utilization info for that
+         * database is now gone.
+         */
+        VerifyUtils.verifyUtilization
+            (envImpl, expectAccurateObsoleteLNCount,
+             expectAccurateObsoleteLNSize,
+             !truncateOrRemoveDone); // expectAccurateDbUtilization
+
+        if (db != null) {
+            db.close();
+            db = null;
+            dbImpl = null;
+        }
+        if (envImpl != null) {
+            envImpl.close(doCheckpoint);
+            envImpl = null;
+            env = null;
+        }
+    }
+
+    /**
+     * Initial setup for all tests -- open env, put one record (or two for
+     * dups) and sync.
+     */
+    private void openAndWriteDatabase()
+        throws DatabaseException {
+
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Put one record. */
+        IntegerBinding.intToEntry(0, keyEntry);
+        IntegerBinding.intToEntry(0, dataEntry);
+        cursor.put(keyEntry, dataEntry);
+
+        /* Add a duplicate. */
+        if (dups) {
+            IntegerBinding.intToEntry(1, dataEntry);
+            cursor.put(keyEntry, dataEntry);
+        }
+
+        /* Commit the txn to avoid crossing the checkpoint boundary. */
+        cursor.close();
+        txn.commit();
+
+        /* Checkpoint to the root so nothing is dirty. */
+        env.sync();
+
+        /* Open a txn and cursor for use by the test case. */
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* If we added a duplicate, move cursor back to the first record. */
+        cursor.getFirst(keyEntry, dataEntry, null);
+
+        /* Expect that BIN and parent IN files are not obsolete. */
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+    }
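+
+    /*
+     * Note (added comment): getBINFile and getINFile, used throughout this
+     * test, are assumed to return the log file number of the LSN of the
+     * cursor's BIN and of its parent IN, respectively, so expectObsolete
+     * checks utilization on a per-file basis.
+     */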
+
+    /**
+     * Tests that BIN and IN utilization counting works.
+     */
+    public void testBasic()
+        throws DatabaseException {
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Update to make BIN dirty. */
+        cursor.put(keyEntry, dataEntry);
+
+        /* Checkpoint */
+        env.checkpoint(forceConfig);
+
+        /* After checkpoint, expect BIN file is obsolete but not IN. */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, false);
+        assertTrue(binFile != getBINFile(cursor));
+        assertEquals(inFile, getINFile(cursor));
+
+        /* After second checkpoint, IN file becomes obsolete also. */
+        env.checkpoint(forceConfig);
+
+        /* Both BIN and IN are obsolete. */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+        assertTrue(binFile != getBINFile(cursor));
+        assertTrue(inFile != getINFile(cursor));
+
+        /* Expect that new files are not obsolete. */
+        long binFile2 = getBINFile(cursor);
+        long inFile2 = getINFile(cursor);
+        expectObsolete(binFile2, false);
+        expectObsolete(inFile2, false);
+
+        cursor.close();
+        txn.commit();
+        closeEnv(true);
+    }
+
+    /**
+     * Performs testBasic with duplicates.
+     */
+    public void testBasicDup()
+        throws DatabaseException {
+
+        dups = true;
+        testBasic();
+    }
+
+    /**
+     * Similar to testBasic, but logs INs explicitly and performs recovery to
+     * ensure utilization recovery works.
+     */
+    public void testRecovery()
+        throws DatabaseException {
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Close normally and reopen. */
+        cursor.close();
+        txn.commit();
+        closeEnv(true);
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect BIN and IN files have not changed. */
+        assertEquals(binFile, getBINFile(cursor));
+        assertEquals(inFile, getINFile(cursor));
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+
+        /*
+         * Log explicitly since we have no way to do a partial checkpoint.
+         * The BIN is logged provisionally and the IN non-provisionally.
+         */
+        TestUtils.logBINAndIN(env, cursor);
+
+        /* Expect to obsolete the BIN and IN. */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+        assertTrue(binFile != getBINFile(cursor));
+        assertTrue(inFile != getINFile(cursor));
+
+        /* Save current BIN and IN files. */
+        long binFile2 = getBINFile(cursor);
+        long inFile2 = getINFile(cursor);
+        expectObsolete(binFile2, false);
+        expectObsolete(inFile2, false);
+
+        /* Shutdown without a checkpoint and reopen. */
+        cursor.close();
+        txn.commit();
+        closeEnv(false);
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Sync to make all INs non-dirty. */
+        env.sync();
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect that recovery counts BIN and IN as obsolete. */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+        assertTrue(binFile != getBINFile(cursor));
+        assertTrue(inFile != getINFile(cursor));
+
+        /*
+         * Even though it is provisional, expect that current BIN is not
+         * obsolete because it is not part of partial checkpoint.  This is
+         * similar to what happens with a split.  The current IN is not
+         * obsolete either (nor is it provisional).
+         */
+        assertTrue(binFile2 == getBINFile(cursor));
+        assertTrue(inFile2 == getINFile(cursor));
+        expectObsolete(binFile2, false);
+        expectObsolete(inFile2, false);
+
+        /* Update to make BIN dirty. */
+        cursor.put(keyEntry, dataEntry);
+
+        /* Check current BIN and IN files. */
+        assertTrue(binFile2 == getBINFile(cursor));
+        assertTrue(inFile2 == getINFile(cursor));
+        expectObsolete(binFile2, false);
+        expectObsolete(inFile2, false);
+
+        /* Close normally and reopen to cause checkpoint of dirty BIN/IN. */
+        cursor.close();
+        txn.commit();
+        closeEnv(true);
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect BIN was checkpointed during close but not IN. */
+        assertTrue(binFile2 != getBINFile(cursor));
+        assertEquals(inFile2, getINFile(cursor));
+        expectObsolete(binFile2, true);
+        expectObsolete(inFile2, false);
+
+        /* After second checkpoint, IN file becomes obsolete also. */
+        env.checkpoint(forceConfig);
+
+        /* Both BIN and IN are obsolete. */
+        assertTrue(binFile2 != getBINFile(cursor));
+        assertTrue(inFile2 != getINFile(cursor));
+        expectObsolete(binFile2, true);
+        expectObsolete(inFile2, true);
+
+        cursor.close();
+        txn.commit();
+        closeEnv(true);
+    }
+
+    /**
+     * Performs testRecovery with duplicates.
+     */
+    public void testRecoveryDup()
+        throws DatabaseException {
+
+        dups = true;
+        testRecovery();
+    }
+
+    /**
+     * Tests that in a partial checkpoint (CkptStart with no CkptEnd) all
+     * provisional INs are counted as obsolete.
+     */
+    public void testPartialCheckpoint()
+        throws DatabaseException, IOException {
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Close with partial checkpoint and reopen. */
+        cursor.close();
+        txn.commit();
+        performPartialCheckpoint(true); // truncateUtilizationInfo
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect BIN and IN files have not changed. */
+        assertEquals(binFile, getBINFile(cursor));
+        assertEquals(inFile, getINFile(cursor));
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+
+        /* Update to make BIN dirty. */
+        cursor.put(keyEntry, dataEntry);
+
+        /* Force IN dirty so that BIN is logged provisionally. */
+        TestUtils.getIN(TestUtils.getBIN(cursor)).setDirty(true);
+
+        /* Check current BIN and IN files. */
+        assertTrue(binFile == getBINFile(cursor));
+        assertTrue(inFile == getINFile(cursor));
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+
+        /* Close with partial checkpoint and reopen. */
+        cursor.close();
+        txn.commit();
+        performPartialCheckpoint(true);  // truncateUtilizationInfo
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect BIN and IN files are obsolete. */
+        assertTrue(binFile != getBINFile(cursor));
+        assertTrue(inFile != getINFile(cursor));
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+
+        /*
+         * Expect that the current BIN is obsolete because it was provisional,
+         * and provisional nodes following CkptStart are counted obsolete
+         * even if that is sometimes incorrect.  The parent IN file is not
+         * obsolete because it is not provisional.
+         */
+        long binFile2 = getBINFile(cursor);
+        long inFile2 = getINFile(cursor);
+        expectObsolete(binFile2, true);
+        expectObsolete(inFile2, false);
+
+        /*
+         * Now repeat the test above but do not truncate the FileSummaryLNs.
+         * The counting will be accurate because the FileSummaryLNs override
+         * what is counted manually during recovery.
+         */
+
+        /* Update to make BIN dirty. */
+        cursor.put(keyEntry, dataEntry);
+
+        /* Close with partial checkpoint and reopen. */
+        cursor.close();
+        txn.commit();
+        performPartialCheckpoint(false,  // truncateUtilizationInfo
+                                 true,   // expectAccurateObsoleteLNCount
+                                 false); // expectAccurateObsoleteLNSize
+
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* The prior BIN file is now double-counted as obsolete. */
+        assertTrue(binFile2 != getBINFile(cursor));
+        assertTrue(inFile2 != getINFile(cursor));
+        expectObsolete(binFile2, 2);
+        expectObsolete(inFile2, 1);
+
+        /* Expect current BIN and IN files are not obsolete. */
+        binFile2 = getBINFile(cursor);
+        inFile2 = getINFile(cursor);
+        expectObsolete(binFile2, false);
+        expectObsolete(inFile2, false);
+
+        cursor.close();
+        txn.commit();
+        closeEnv(true,   // doCheckpoint
+                 true,   // expectAccurateObsoleteLNCount
+                 false); // expectAccurateObsoleteLNSize
+    }
+
+    /**
+     * Performs testPartialCheckpoint with duplicates.
+     */
+    public void testPartialCheckpointDup()
+        throws DatabaseException, IOException {
+
+        dups = true;
+        testPartialCheckpoint();
+    }
+
+    /**
+     * Tests that deleting a subtree (by deleting the last LN in a BIN) is
+     * counted correctly.
+     */
+    public void testDelete()
+        throws DatabaseException, IOException {
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Close normally and reopen. */
+        cursor.close();
+        txn.commit();
+        closeEnv(true);
+        openEnv();
+        txn = env.beginTransaction(null, null);
+        cursor = db.openCursor(txn, null);
+
+        /* Position cursor to load BIN and IN. */
+        cursor.getSearchKey(keyEntry, dataEntry, null);
+
+        /* Expect BIN and IN are still not obsolete. */
+        assertEquals(binFile, getBINFile(cursor));
+        assertEquals(inFile, getINFile(cursor));
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+
+        if (dups) {
+            /* Delete both records. */
+            OperationStatus status;
+            status = cursor.delete();
+            assertSame(OperationStatus.SUCCESS, status);
+            cursor.getNext(keyEntry, dataEntry, null);
+            status = cursor.delete();
+            assertSame(OperationStatus.SUCCESS, status);
+        } else {
+
+            /*
+             * Add records until we move to the next BIN, so that the
+             * compressor would not need to delete the root in order to delete
+             * the BIN (deleting the root is not configured by default).
+             */
+            int keyVal = 0;
+            while (binFile == getBINFile(cursor)) {
+                keyVal += 1;
+                IntegerBinding.intToEntry(keyVal, keyEntry);
+                cursor.put(keyEntry, dataEntry);
+            }
+            binFile = getBINFile(cursor);
+            inFile = getINFile(cursor);
+
+            /* Delete all records in the last BIN. */
+            while (binFile == getBINFile(cursor)) {
+                cursor.delete();
+                cursor.getLast(keyEntry, dataEntry, null);
+            }
+        }
+
+        /* Compressor daemon is not running -- BIN and IN are not obsolete yet. */
+        expectObsolete(binFile, false);
+        expectObsolete(inFile, false);
+
+        /* Close cursor and compress. */
+        cursor.close();
+        txn.commit();
+        env.compress();
+
+        /*
+         * Now expect BIN and IN to be obsolete.
+         */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+
+        /* Close with partial checkpoint and reopen. */
+        performPartialCheckpoint(true); // truncateUtilizationInfo
+        openEnv();
+
+        /*
+         * Expect both files to be obsolete after recovery, because the
+         * FileSummaryLN and MapLN were written prior to the checkpoint during
+         * compression.
+         */
+        expectObsolete(binFile, true);
+        expectObsolete(inFile, true);
+
+        /*
+         * expectAccurateObsoleteLNCount is false because the deleted LN is not
+         * counted obsolete correctly as described in RecoveryManager
+         * redoUtilizationInfo.
+         */
+        closeEnv(true,   // doCheckpoint
+                 false); // expectAccurateObsoleteLNCount
+    }
+
+    /**
+     * Performs testDelete with duplicates.
+     */
+    public void testDeleteDup()
+        throws DatabaseException, IOException {
+
+        dups = true;
+        testDelete();
+    }
+
+    /**
+     * Tests that truncating a database is counted correctly.
+     * Tests recovery also.
+     */
+    public void testTruncate()
+        throws DatabaseException, IOException {
+
+        /* Expect inaccurate LN sizes only if we force a tree walk. */
+        final boolean expectAccurateObsoleteLNSize =
+            !DatabaseImpl.forceTreeWalkForTruncateAndRemove;
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Close normally and reopen. */
+        cursor.close();
+        txn.commit();
+        closeEnv(true,   // doCheckpoint
+                 true,   // expectAccurateObsoleteLNCount
+                 expectAccurateObsoleteLNSize);
+        openEnv();
+        db.close();
+        db = null;
+
+        /* Truncate. */
+        txn = env.beginTransaction(null, null);
+        env.truncateDatabase(txn, DB_NAME, false /* returnCount */);
+        truncateOrRemoveDone = true;
+        txn.commit();
+
+        /*
+         * Expect BIN and IN are obsolete.  Do not check DbFileSummary when we
+         * truncate/remove, since the old DatabaseImpl is gone.
+         */
+        expectObsolete(binFile, true, false /*checkDbFileSummary*/);
+        expectObsolete(inFile, true, false /*checkDbFileSummary*/);
+
+        /* Close with partial checkpoint and reopen. */
+        performPartialCheckpoint(true,   // truncateUtilizationInfo
+                                 true,   // expectAccurateObsoleteLNCount
+                                 expectAccurateObsoleteLNSize);
+        openEnv();
+
+        /* Expect BIN and IN are counted obsolete during recovery. */
+        expectObsolete(binFile, true, false /*checkDbFileSummary*/);
+        expectObsolete(inFile, true, false /*checkDbFileSummary*/);
+
+        /*
+         * expectAccurateObsoleteLNSize is false because the size of the
+         * deleted NameLN is not counted during recovery, as with other
+         * abortLsns as described in RecoveryManager redoUtilizationInfo.
+         */
+        closeEnv(true,   // doCheckpoint
+                 true,   // expectAccurateObsoleteLNCount
+                 false); // expectAccurateObsoleteLNSize
+    }
+
+    /**
+     * Tests that removing a database is counted correctly.
+     * Tests recovery also.
+     */
+    public void testRemove()
+        throws DatabaseException, IOException {
+
+        /* Expect inaccurate LN sizes only if we force a tree walk. */
+        final boolean expectAccurateObsoleteLNSize =
+            !DatabaseImpl.forceTreeWalkForTruncateAndRemove;
+
+        openAndWriteDatabase();
+        long binFile = getBINFile(cursor);
+        long inFile = getINFile(cursor);
+
+        /* Close normally and reopen. */
+        cursor.close();
+        txn.commit();
+        closeEnv(true,   // doCheckpoint
+                 true,   // expectAccurateObsoleteLNCount
+                 expectAccurateObsoleteLNSize);
+        openEnv();
+
+        /* Remove. */
+        db.close();
+        db = null;
+        txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME);
+        truncateOrRemoveDone = true;
+        txn.commit();
+
+        /*
+         * Expect BIN and IN are obsolete.  Do not check DbFileSummary when we
+         * truncate/remove, since the old DatabaseImpl is gone.
+         */
+        expectObsolete(binFile, true, false /*checkDbFileSummary*/);
+        expectObsolete(inFile, true, false /*checkDbFileSummary*/);
+
+        /* Close with partial checkpoint and reopen. */
+        performPartialCheckpoint(true,   // truncateUtilizationInfo
+                                 true,   // expectAccurateObsoleteLNCount
+                                 expectAccurateObsoleteLNSize);
+        openEnv();
+
+        /* Expect BIN and IN are counted obsolete during recovery. */
+        expectObsolete(binFile, true, false /*checkDbFileSummary*/);
+        expectObsolete(inFile, true, false /*checkDbFileSummary*/);
+
+        /*
+         * expectAccurateObsoleteLNCount is false because the deleted NameLN is
+         * not counted obsolete correctly as described in RecoveryManager
+         * redoUtilizationInfo.
+         */
+        closeEnv(true,   // doCheckpoint
+                 false); // expectAccurateObsoleteLNCount
+    }
+
+    /*
+     * The xxxForceTreeWalk tests set the DatabaseImpl
+     * forceTreeWalkForTruncateAndRemove field to true, which will force a walk
+     * of the tree to count utilization during truncate/remove, rather than
+     * using the per-database info.  This is used to test the "old technique"
+     * for counting utilization, which is now used only if the database was
+     * created prior to log version 6.
+     */
+
+    public void testTruncateForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncate();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemove();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    private void expectObsolete(long file, boolean obsolete)
+        throws DatabaseException {
+
+        expectObsolete(file, obsolete, true /*checkDbFileSummary*/);
+    }
+
+    private void expectObsolete(long file,
+                                boolean obsolete,
+                                boolean checkDbFileSummary)
+        throws DatabaseException {
+
+        FileSummary fileSummary = getFileSummary(file);
+        assertEquals("totalINCount",
+                     1, fileSummary.totalINCount);
+        assertEquals("obsoleteINCount",
+                     obsolete ? 1 : 0, fileSummary.obsoleteINCount);
+
+        if (checkDbFileSummary) {
+            DbFileSummary dbFileSummary = getDbFileSummary(file);
+            assertEquals("db totalINCount",
+                         1, dbFileSummary.totalINCount);
+            assertEquals("db obsoleteINCount",
+                         obsolete ? 1 : 0, dbFileSummary.obsoleteINCount);
+        }
+    }
+
+    private void expectObsolete(long file, int obsoleteCount)
+        throws DatabaseException {
+
+        FileSummary fileSummary = getFileSummary(file);
+        assertEquals("totalINCount",
+                     1, fileSummary.totalINCount);
+        assertEquals("obsoleteINCount",
+                     obsoleteCount, fileSummary.obsoleteINCount);
+
+        DbFileSummary dbFileSummary = getDbFileSummary(file);
+        assertEquals("db totalINCount",
+                     1, dbFileSummary.totalINCount);
+        assertEquals("db obsoleteINCount",
+                     obsoleteCount, dbFileSummary.obsoleteINCount);
+    }
+
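+    /**
+     * Returns the file number of the last full version of the IN (the BIN's
+     * parent) at the cursor position.
+     */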
+    private long getINFile(Cursor cursor)
+        throws DatabaseException {
+
+        IN in = TestUtils.getIN(TestUtils.getBIN(cursor));
+        long lsn = in.getLastFullVersion();
+        assertTrue(lsn != DbLsn.NULL_LSN);
+        return DbLsn.getFileNumber(lsn);
+    }
+
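+    /**
+     * Returns the file number of the last full version of the BIN at the
+     * cursor position.
+     */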
+    private long getBINFile(Cursor cursor)
+        throws DatabaseException {
+
+        long lsn = TestUtils.getBIN(cursor).getLastFullVersion();
+        assertTrue(lsn != DbLsn.NULL_LSN);
+        return DbLsn.getFileNumber(lsn);
+    }
+
+    /**
+     * Returns the utilization summary for a given log file.
+     */
+    private FileSummary getFileSummary(long file)
+        throws DatabaseException {
+
+	return (FileSummary) envImpl.getUtilizationProfile()
+                                    .getFileSummaryMap(true)
+                                    .get(new Long(file));
+    }
+
+    /**
+     * Returns the per-database utilization summary for a given log file.
+     */
+    private DbFileSummary getDbFileSummary(long file) {
+        return dbImpl.getDbFileSummary
+            (new Long(file), false /*willModify*/);
+    }
+
+    private void performPartialCheckpoint(boolean truncateUtilizationInfo)
+        throws DatabaseException, IOException {
+
+        performPartialCheckpoint(truncateUtilizationInfo,
+                                 true,  // expectAccurateObsoleteLNCount
+                                 true); // expectAccurateObsoleteLNSize
+    }
+
+    private void performPartialCheckpoint(boolean truncateUtilizationInfo,
+                                          boolean
+                                          expectAccurateObsoleteLNCount)
+        throws DatabaseException, IOException {
+
+        performPartialCheckpoint(truncateUtilizationInfo,
+                                 expectAccurateObsoleteLNCount,
+                                 expectAccurateObsoleteLNCount);
+    }
+
+    /**
+     * Performs a checkpoint and truncates the log before the last CkptEnd.  If
+     * truncateUtilizationInfo is true, truncates before the FileSummaryLNs
+     * that appear at the end of the checkpoint.  The environment should be
+     * open when this method is called, and it will be closed when it returns.
+     */
+    private void performPartialCheckpoint
+                    (boolean truncateUtilizationInfo,
+                     boolean expectAccurateObsoleteLNCount,
+                     boolean expectAccurateObsoleteLNSize)
+        throws DatabaseException, IOException {
+
+        /* Do a normal checkpoint. */
+        env.checkpoint(forceConfig);
+        long eofLsn = envImpl.getFileManager().getNextLsn();
+        long lastLsn = envImpl.getFileManager().getLastUsedLsn();
+        long truncateLsn;
+
+        /* Searching backward from end, find last CkptEnd. */
+        SearchFileReader searcher =
+            new SearchFileReader(envImpl, 1000, false, lastLsn, eofLsn,
+                                 LogEntryType.LOG_CKPT_END);
+        assertTrue(searcher.readNextEntry());
+        long ckptEnd = searcher.getLastLsn();
+
+        if (truncateUtilizationInfo) {
+
+            /* Searching backward from CkptEnd, find last CkptStart. */
+            searcher =
+                new SearchFileReader(envImpl, 1000, false, ckptEnd, eofLsn,
+                                     LogEntryType.LOG_CKPT_START);
+            assertTrue(searcher.readNextEntry());
+            long ckptStart = searcher.getLastLsn();
+
+            /*
+             * Searching forward from CkptStart, find first MapLN.  MapLNs are
+             * written after writing root INs and before all FileSummaryLNs.
+             * This will find the position at which to truncate all MapLNs and
+             * FileSummaryLNs, but not INs below the mapping tree.
+             */
+            searcher =
+                new SearchFileReader(envImpl, 1000, true, ckptStart, eofLsn,
+                                     LogEntryType.LOG_MAPLN);
+            assertTrue(searcher.readNextEntry());
+            truncateLsn = searcher.getLastLsn();
+        } else {
+            truncateLsn = ckptEnd;
+        }
+
+        /*
+         * Close without another checkpoint, although it doesn't matter since
+         * we would truncate before it.
+         */
+        closeEnv(false, // doCheckpoint
+                 expectAccurateObsoleteLNCount,
+                 expectAccurateObsoleteLNSize);
+
+        /* Truncate the log. */
+        EnvironmentImpl cmdEnv =
+	    CmdUtil.makeUtilityEnvironment(envHome, false);
+        cmdEnv.getFileManager().truncateLog(DbLsn.getFileNumber(truncateLsn),
+                                            DbLsn.getFileOffset(truncateLsn));
+        cmdEnv.close(false);
+
+        /* Delete files following the truncated file. */
+        String[] fileNames = envHome.list();
+        for (int i = 0; i < fileNames.length; i += 1) {
+            String name = fileNames[i];
+            if (name.endsWith(".jdb")) {
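+                /* JE log file names are the file number in hex plus ".jdb". */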
+                String numStr = name.substring(0, name.length() - 4);
+                long fileNum = Long.parseLong(numStr, 16);
+                if (fileNum > DbLsn.getFileNumber(truncateLsn)) {
+                    assertTrue(new File(envHome, name).delete());
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java b/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java
new file mode 100644
index 0000000000000000000000000000000000000000..1f3d69b8397a883cd46f39f29b58c395f23248a2
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MakeMigrationLogFiles.java,v 1.2.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+
+/**
+ * Creates two small log files with close to 100% utilization for use by
+ * FileSelectionTest.testLogVersionMigration.  This main program is run with
+ * the arguments: -h HOME_DIRECTORY
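+ *
+ * For example (assuming je.jar and the test classes are on the classpath;
+ * the home directory shown is only a placeholder):
+ *
+ *     java com.sleepycat.je.cleaner.MakeMigrationLogFiles -h /tmp/migrate-env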
+ *
+ * This program was used to create two log files (stored in CVS as
+ * migrate_f0.jdb and migrate_f1.jdb) running against JE 3.2.68, which writes
+ * log version 5.  Testing with these files in testLogVersionUpgrade checks
+ * that these files are migrated when je.cleaner.migrateToLogVersion is set.
+ */
+public class MakeMigrationLogFiles {
+
+    private static final int FILE_SIZE = 1000000;
+
+    public static void main(String[] args)
+        throws DatabaseException {
+
+        String homeDir = null;
+        for (int i = 0; i < args.length; i += 1) {
+            if (args[i].equals("-h")) {
+                i += 1;
+                homeDir = args[i];
+            } else {
+                throw new IllegalArgumentException("Unknown arg: " + args[i]);
+            }
+        }
+        if (homeDir == null) {
+            throw new IllegalArgumentException("Missing -h arg");
+        }
+        Environment env = openEnv(new File(homeDir), true /*allowCreate*/);
+        makeMigrationLogFiles(env);
+        env.close();
+    }
+
+    /**
+     * Opens an Environment with a small log file size.
+     */
+    static Environment openEnv(File homeDir, boolean allowCreate)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(allowCreate);
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(),
+             String.valueOf(FILE_SIZE));
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        return new Environment(homeDir, envConfig);
+    }
+
+    /**
+     * Creates two log files.
+     */
+    static void makeMigrationLogFiles(Environment env)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        int nextKey = 0;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Cursor c = db.openCursor(null, null);
+        OperationStatus status = c.getLast(key, data, null);
+        if (status == OperationStatus.SUCCESS) {
+            nextKey = IntegerBinding.entryToInt(key);
+        }
+        c.close();
+
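+        /*
+         * Write ~1000-byte records.  OVERHEAD pads the data size by an
+         * assumed ~100 bytes per record for the key and log entry header,
+         * so the loop below fills roughly two FILE_SIZE log files.
+         */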
+        byte[] dataBytes = new byte[1000];
+        final int OVERHEAD = dataBytes.length + 100;
+        data.setData(dataBytes);
+
+        for (int size = 0; size < FILE_SIZE * 2; size += OVERHEAD) {
+            nextKey += 1;
+            IntegerBinding.intToEntry(nextKey, key);
+            status = db.putNoOverwrite(null, key, data);
+            assert status == OperationStatus.SUCCESS;
+        }
+
+        db.close();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/OffsetTest.java b/test/com/sleepycat/je/cleaner/OffsetTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e0d1be425171c9a05502f54b6caf544f8acf759b
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/OffsetTest.java
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: OffsetTest.java,v 1.7.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.cleaner.OffsetList;
+import com.sleepycat.je.cleaner.PackedOffsets;
+
+/**
+ * Tests the OffsetList and PackedOffsets classes.
+ */
+public class OffsetTest extends TestCase {
+
+    public void testOffsets() {
+
+        doAllTest(new long[] {
+            1,
+            2,
+            0xfffe,
+            0xffff,
+            0xfffff,
+            Integer.MAX_VALUE - 1,
+            Integer.MAX_VALUE,
+
+            /*
+             * The following values don't work, which is probably a bug, but
+             * LSN offsets are not normally this large so the bug currently has
+             * little impact.
+             */
+            //Integer.MAX_VALUE + 1L,
+            //Long.MAX_VALUE - 100L,
+            //Long.MAX_VALUE,
+        });
+    }
+
+    private void doAllTest(long[] offsets) {
+
+        ArrayList list = list(offsets);
+
+        doOneTest(offsets);
+
+        Collections.reverse(list);
+        doOneTest(array(list));
+
+        Collections.shuffle(list);
+        doOneTest(array(list));
+    }
+
+    private void doOneTest(long[] offsets) {
+
+        OffsetList list = new OffsetList();
+        for (int i = 0; i < offsets.length; i += 1) {
+            list.add(offsets[i], true);
+        }
+        long[] array = list.toArray();
+        assertTrue("array=\n" + dump(array) + " offsets=\n" + dump(offsets),
+                   Arrays.equals(offsets, array));
+
+        long[] sorted = new long[array.length];
+        System.arraycopy(array, 0, sorted, 0, array.length);
+        Arrays.sort(sorted);
+
+        PackedOffsets packed = new PackedOffsets();
+        packed.pack(array);
+        assertTrue(Arrays.equals(sorted, packed.toArray()));
+    }
+
+    private ArrayList list(long[] array) {
+
+        ArrayList list = new ArrayList(array.length);
+        for (int i = 0; i < array.length; i += 1) {
+            list.add(new Long(array[i]));
+        }
+        return list;
+    }
+
+    private long[] array(ArrayList list) {
+
+        long[] array = new long[list.size()];
+        for (int i = 0; i < array.length; i += 1) {
+            array[i] = ((Long) list.get(i)).longValue();
+        }
+        return array;
+    }
+
+    private String dump(long[] array) {
+
+        StringBuffer buf = new StringBuffer(array.length * 10);
+        for (int i = 0; i < array.length; i += 1) {
+            buf.append(Long.toString(array[i]));
+            buf.append(' ');
+        }
+        return buf.toString();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/RMWLockingTest.java b/test/com/sleepycat/je/cleaner/RMWLockingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..3292658c732600d313992357e30cd22e8303450a
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/RMWLockingTest.java
@@ -0,0 +1,181 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RMWLockingTest.java,v 1.9.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Use LockMode.RMW and verify that the FileSummaryLNs accurately reflect only
+ * those LNs that have been made obsolete.
+ */
+public class RMWLockingTest extends TestCase {
+
+    private static final int NUM_RECS = 5;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private DatabaseEntry key;
+    private DatabaseEntry data;
+
+    public RMWLockingTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (db != null) {
+                db.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envHome = null;
+    }
+
+    public void testBasic()
+        throws DatabaseException {
+
+        init();
+        insertRecords();
+        rmwModify();
+
+        UtilizationProfile up =
+            DbInternal.envGetEnvironmentImpl(env).getUtilizationProfile();
+
+        /*
+         * Checkpoint the environment to flush all utilization tracking
+         * information before verifying.
+         */
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
+        env.checkpoint(ckptConfig);
+
+        assertTrue(up.verifyFileSummaryDatabase());
+    }
+
+    /**
+     * Tests that we can load a log file containing offsets that correspond to
+     * non-obsolete LNs.  The bad log file was created using testBasic run
+     * against JE 2.0.54.  It contains version 1 FSLNs, one of which has an
+     * offset which is not obsolete.
+     */
+    public void testBadLog()
+        throws DatabaseException, IOException {
+
+        /* Copy a log file with bad offsets to log file zero. */
+        String resName = "rmw_bad_offsets.jdb";
+        TestUtils.loadLog(getClass(), resName, envHome);
+
+        /* Open the log we just copied. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(false);
+        envConfig.setReadOnly(true);
+        env = new Environment(envHome, envConfig);
+
+        /*
+         * Verify the UP of the bad log.  Prior to adding the code in
+         * FileSummaryLN.postFetchInit that discards version 1 offsets, this
+         * assertion failed.
+         */
+        UtilizationProfile up =
+            DbInternal.envGetEnvironmentImpl(env).getUtilizationProfile();
+        assertTrue(up.verifyFileSummaryDatabase());
+
+        env.close();
+        env = null;
+    }
+
+    private void init()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    /* Insert records. */
+    private void insertRecords()
+        throws DatabaseException {
+
+        key = new DatabaseEntry();
+        data = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(100, data);
+
+        for (int i = 0; i < NUM_RECS; i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+    }
+
+    /* Lock two records with RMW, but modify only one. */
+    private void rmwModify()
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        IntegerBinding.intToEntry(0, key);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.get(txn, key, data, LockMode.RMW));
+        IntegerBinding.intToEntry(1, key);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.get(txn, key, data, LockMode.RMW));
+
+        IntegerBinding.intToEntry(200, data);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.put(txn, key, data));
+        txn.commit();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java b/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8cc61eb9dac776db1afa72eb5035c569507fcc8f
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java
@@ -0,0 +1,297 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReadOnlyLockingTest.java,v 1.12.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Verifies that opening an environment read-only will prevent cleaned files
+ * from being deleted in a read-write environment.  Uses the ReadOnlyProcess
+ * class to open the environment read-only in a separate process.
+ */
+public class ReadOnlyLockingTest extends TestCase {
+
+    private static final int FILE_SIZE = 4096;
+    private static final int READER_STARTUP_SECS = 30;
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private Process readerProcess;
+
+    private static File getProcessFile() {
+        return new File(System.getProperty(TestUtils.DEST_DIR),
+                        "ReadOnlyProcessFile");
+    }
+
+    private static void deleteProcessFile() {
+        File file = getProcessFile();
+        file.delete();
+        TestCase.assertTrue(!file.exists());
+    }
+
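+    /**
+     * Called by ReadOnlyProcess to signal that it has opened the environment
+     * read-only.
+     */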
+    static void createProcessFile()
+        throws IOException {
+
+        File file = getProcessFile();
+        TestCase.assertTrue(file.createNewFile());
+        TestCase.assertTrue(file.exists());
+    }
+
+    public ReadOnlyLockingTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        deleteProcessFile();
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        deleteProcessFile();
+
+        try {
+            stopReaderProcess();
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envImpl = null;
+        envHome = null;
+        readerProcess = null;
+    }
+
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(),
+             Integer.toString(FILE_SIZE));
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "ReadOnlyLockingTest", dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Tests that cleaned files are deleted when there is no reader process.
+     */
+    public void testBaseline()
+        throws DatabaseException {
+
+        openEnv();
+        writeAndDeleteData();
+        env.checkpoint(forceConfig);
+
+        int nFilesCleaned = env.cleanLog();
+        assertTrue(nFilesCleaned > 0);
+        assertTrue(!areAnyFilesDeleted());
+
+        /* Files are deleted during the checkpoint. */
+        env.checkpoint(forceConfig);
+        assertTrue(areAnyFilesDeleted());
+
+        closeEnv();
+    }
+
+    /**
+     * Tests that cleaned files are not deleted when there is a reader process.
+     */
+    public void testReadOnlyLocking()
+        throws Exception {
+
+        openEnv();
+        writeAndDeleteData();
+        env.checkpoint(forceConfig);
+        int nFilesCleaned = env.cleanLog();
+        assertTrue(nFilesCleaned > 0);
+        assertTrue(!areAnyFilesDeleted());
+
+        /*
+         * No files are deleted after cleaning when the reader process is
+         * running.
+         */
+        startReaderProcess();
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+        assertTrue(!areAnyFilesDeleted());
+
+        /*
+         * Files are deleted when a checkpoint occurs after the reader
+         * process stops.
+         */
+        stopReaderProcess();
+        env.cleanLog();
+        env.checkpoint(forceConfig);
+        assertTrue(areAnyFilesDeleted());
+
+        closeEnv();
+    }
+
+    private void writeAndDeleteData()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[FILE_SIZE]);
+        for (int i = 0; i < 5; i += 1) {
+            db.put(null, key, data);
+        }
+    }
+
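+    /**
+     * Returns true if any log file up to the last known file number is
+     * missing from the environment directory.
+     */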
+    private boolean areAnyFilesDeleted() {
+        long lastNum = envImpl.getFileManager().getLastFileNum().longValue();
+        for (long i = 0; i <= lastNum; i += 1) {
+            String name = envImpl.getFileManager().getFullFileName
+                (i, FileManager.JE_SUFFIX);
+            if (!(new File(name).exists())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private void startReaderProcess()
+        throws Exception {
+
+        String[] cmd = {
+            "java",
+            "-cp",
+            System.getProperty("java.class.path"),
+            "-D" + TestUtils.DEST_DIR + '=' +
+                System.getProperty(TestUtils.DEST_DIR),
+            ReadOnlyProcess.class.getName(),
+        };
+
+        /* Start it and wait for it to open the environment. */
+        readerProcess = Runtime.getRuntime().exec(cmd);
+        long startTime = System.currentTimeMillis();
+        boolean running = false;
+        while (!running &&
+               ((System.currentTimeMillis() - startTime) <
+                (READER_STARTUP_SECS * 1000))) {
+            if (getProcessFile().exists()) {
+                running = true;
+            } else {
+                Thread.sleep(10);
+            }
+        }
+        //printReaderStatus();
+        assertTrue("ReadOnlyProcess did not start after " +
+                   READER_STARTUP_SECS + " secs",
+                   running);
+    }
+
+    private void stopReaderProcess()
+        throws Exception {
+
+        if (readerProcess != null) {
+            printReaderErrors();
+            readerProcess.destroy();
+            Thread.sleep(2000);
+            readerProcess = null;
+        }
+    }
+
+    private void printReaderStatus()
+        throws IOException {
+
+        try {
+            int status = readerProcess.exitValue();
+            System.out.println("Process status=" + status);
+        } catch (IllegalThreadStateException e) {
+            System.out.println("Process is still running");
+        }
+    }
+
+    private void printReaderErrors()
+        throws IOException {
+
+        InputStream err = readerProcess.getErrorStream();
+        int len = err.available();
+        if (len > 0) {
+            byte[] data = new byte[len];
+            err.read(data);
+            System.out.println("[ReadOnlyProcess] " + new String(data));
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java b/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java
new file mode 100644
index 0000000000000000000000000000000000000000..e0a60a5d97c756eeec116663736471f3d7226d16
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java
@@ -0,0 +1,50 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReadOnlyProcess.java,v 1.9.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * @see ReadOnlyLockingTest
+ */
+public class ReadOnlyProcess {
+
+    public static void main(String[] args) {
+
+        /*
+         * Don't write to System.out in this process because the parent
+         * process only reads System.err.
+         */
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setReadOnly(true);
+
+            File envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+            Environment env = new Environment(envHome, envConfig);
+
+            //System.err.println("Opened read-only: " + envHome);
+            //System.err.println(System.getProperty("java.class.path"));
+
+            /* Notify the test that this process has opened the environment. */
+            ReadOnlyLockingTest.createProcessFile();
+
+            /* Sleep until the parent process kills me. */
+            Thread.sleep(Long.MAX_VALUE);
+        } catch (Exception e) {
+
+            e.printStackTrace(System.err);
+            System.exit(1);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR10553Test.java b/test/com/sleepycat/je/cleaner/SR10553Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..53cf10d084a98a7e04863cdafb2c8045b80c5549
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR10553Test.java
@@ -0,0 +1,191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR10553Test.java,v 1.13.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class SR10553Test extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SR10553Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(config);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Use a small log file size to make cleaning more frequent. */
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(1024));
+        /* Use a small memory size to force eviction. */
+        config.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(),
+                              Integer.toString(1024 * 96));
+        /* Don't track detail with a tiny cache size. */
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+        config.setConfigParam(EnvironmentParams.NUM_LOG_BUFFERS.getName(),
+                              Integer.toString(2));
+        /* Set log buffers large enough for trace messages. */
+        config.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+                              Integer.toString(7000));
+
+        env = new Environment(envHome, config);
+
+        openDb();
+    }
+
+    /**
+     * Opens the database.
+     */
+    private void openDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Reproduces SR 10553: scanning over deleted records after their log
+     * file has been cleaned should not cause a LogFileNotFoundException.
+     * See the comment at the end of this method.
+     */
+    public void testSR10553()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Put some duplicates, enough to fill a log file. */
+        final int COUNT = 10;
+        DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(0));
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < COUNT; i += 1) {
+            data.setData(TestUtils.getTestArray(i));
+            db.put(null, key, data);
+        }
+        Cursor cursor = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(key, data, null));
+        assertEquals(COUNT, cursor.count());
+        cursor.close();
+
+        /* Delete everything.  Do not compress. */
+        db.delete(null, key);
+
+        /* Checkpoint and clean. */
+        env.checkpoint(forceConfig);
+        int cleaned = env.cleanLog();
+        assertTrue("cleaned=" + cleaned, cleaned > 0);
+
+        /* Force eviction. */
+        env.evictMemory();
+
+        /* Scan all values. */
+        cursor = db.openCursor(null, null);
+        for (OperationStatus status = cursor.getFirst(key, data, null);
+                             status == OperationStatus.SUCCESS;
+                             status = cursor.getNext(key, data, null)) {
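+            /* Empty body: the reads themselves fault in each record. */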
+        }
+        cursor.close();
+
+        /*
+         * Before the fix to 10553, while scanning over deleted records, a
+         * LogFileNotFoundException would occur when faulting in a deleted
+         * record, if the log file had been cleaned.  This was because the
+         * cleaner was not setting knownDeleted for deleted records.
+         */
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR10597Test.java b/test/com/sleepycat/je/cleaner/SR10597Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..908f0418908cc3b71d3f63f7c382f436a3c3e2bb
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR10597Test.java
@@ -0,0 +1,175 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR10597Test.java,v 1.10.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class SR10597Test extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SR10597Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(config);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Use a small log file size to make cleaning more frequent. */
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(1024));
+        env = new Environment(envHome, config);
+
+        openDb();
+    }
+
+    /**
+     * Opens that database.
+     */
+    private void openDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Reproduces SR 10597: after a duplicate subtree is deleted and
+     * compressed, cleaning the log should not cause a ClassCastException in
+     * Tree.getParentBINForChildLN.  See the comment at the end of this
+     * method.
+     */
+    public void testSR10597()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Put some duplicates, enough to fill a log file. */
+        final int COUNT = 10;
+        DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(0));
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < COUNT; i += 1) {
+            data.setData(TestUtils.getTestArray(i));
+            db.put(null, key, data);
+        }
+        Cursor cursor = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(key, data, null));
+        assertEquals(COUNT, cursor.count());
+        cursor.close();
+
+        /* Delete everything, then compress to delete the DIN. */
+        db.delete(null, key);
+        env.compress();
+        data.setData(TestUtils.getTestArray(0));
+
+        /* Add a single record, which will not create a DIN. */
+        db.put(null, key, data);
+
+        /* Checkpoint and clean. */
+        env.checkpoint(forceConfig);
+        int cleaned = env.cleanLog();
+        assertTrue("cleaned=" + cleaned, cleaned > 0);
+
+        /*
+         * Before the fix to 10597, when cleaning the log we would be looking
+         * for an LN with containsDuplicates=true.  We assumed that when we
+         * found the BIN entry, it must point to a DIN.  But because we
+         * deleted and compressed above, the entry is actually an LN.  This
+         * caused a ClassCastException at the bottom of
+         * Tree.getParentBINForChildLN.
+         */
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR12885Test.java b/test/com/sleepycat/je/cleaner/SR12885Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..aa81bcfcf475e7c08591636de55ac3d15bb47d66
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR12885Test.java
@@ -0,0 +1,274 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR12885Test.java,v 1.8.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Reproduces a problem found in SR12885 where we failed to migrate a pending
+ * LN if the slot was reused by an active transaction and that transaction was
+ * later aborted.
+ *
+ * This bug can manifest as a LogFileNotFoundException.  However, there was
+ * another bug that sometimes caused it to manifest as a NOTFOUND return value
+ * instead.  This secondary problem -- more sloppiness than a real bug -- was
+ * that the PendingDeleted flag was not cleared during an abort.  If the
+ * PendingDeleted flag is set, the low level fetch method returns null rather
+ * than throwing a LogFileNotFoundException, which caused the NOTFOUND result
+ * in some cases.
+ *
+ * The sequence that causes the bug is:
+ *
+ * 1) The cleaner processes a file containing LN-A (node A) for key X.  LN-A
+ * is a non-deleted LN.
+ *
+ * 2) The cleaner sets the migrate flag on the BIN entry for LN-A.
+ *
+ * 3) In transaction T-1, LN-A is deleted and replaced by LN-B with key X,
+ * reusing the same slot but assigning a new node ID.  At this point both node
+ * IDs (LN-A and LN-B) are locked.
+ *
+ * 4) The cleaner (via a checkpoint or eviction that logs the BIN) tries to
+ * migrate LN-B, the current LN in the BIN, but finds it locked.  It adds LN-B
+ * to the pending LN list.
+ *
+ * 5) T-1 aborts, putting the LSN of LN-A back into the BIN slot.
+ *
+ * 6) In transaction T-2, LN-A is deleted and replaced by LN-C with key X,
+ * reusing the same slot but assigning a new node ID.  At this point both node
+ * IDs (LN-A and LN-C) are locked.
+ *
+ * 7) The cleaner (via a checkpoint or wakeup) processes the pending LN-B.  It
+ * first gets a lock on node B, then does the tree lookup.  It finds LN-C in
+ * the tree, but it doesn't notice that it has a different node ID than the
+ * node it locked.
+ *
+ * 8) The cleaner sees that LN-C is deleted, and therefore no migration is
+ * necessary -- this is incorrect.  It removes LN-B from the pending list,
+ * allowing the cleaned file to be deleted.
+ *
+ * 9) T-2 aborts, putting the LSN of LN-A back into the BIN slot.
+ *
+ * 10) A fetch of key X will fail, since the file containing the LSN for LN-A
+ * has been deleted.  If we didn't clear the PendingDeleted flag, this will
+ * cause a NOTFOUND error instead of a LogFileNotFoundException.
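+ *
+ * As described in the comments in testSR12885 below, the root cause was that
+ * the cleaner did not lock the node ID of the current LN found in the tree,
+ * so it never noticed the node ID mismatch in step 7.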
+ */
+public class SR12885Test extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SR12885Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(config);
+        config.setTransactional(true);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Use a small log file size to make cleaning more frequent. */
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(1024));
+        env = new Environment(envHome, config);
+
+        openDb();
+    }
+
+    /**
+     * Opens the database.
+     */
+    private void openDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    public void testSR12885()
+        throws DatabaseException {
+
+        openEnv();
+
+        final int COUNT = 10;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(TestUtils.getTestArray(0));
+        OperationStatus status;
+
+        /* Add some records, enough to fill a log file. */
+        for (int i = 0; i < COUNT; i += 1) {
+            key.setData(TestUtils.getTestArray(i));
+            status = db.putNoOverwrite(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /*
+         * Delete all but key 0, so the first file can be cleaned but key 0
+         * will need to be migrated.
+         */
+        for (int i = 1; i < COUNT; i += 1) {
+            key.setData(TestUtils.getTestArray(i));
+            status = db.delete(null, key);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /*
+         * Checkpoint and clean to set the migrate flag for key 0.  This must
+         * be done when key 0 is not locked, so that it will not be put onto
+         * the pending list yet.  Below we cause it to be put onto the pending
+         * list with a different node ID.
+         */
+        env.checkpoint(forceConfig);
+        int cleaned = env.cleanLog();
+        assertTrue("cleaned=" + cleaned, cleaned > 0);
+
+        /*
+         * Using a transaction, delete then insert key 0, reusing the slot.
+         * The insertion assigns a new node ID.  Don't abort the transaction
+         * until after the cleaner migration is finished.
+         */
+        Transaction txn = env.beginTransaction(null, null);
+        key.setData(TestUtils.getTestArray(0));
+        status = db.delete(txn, key);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = db.putNoOverwrite(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /*
+         * Checkpoint again to perform LN migration.  LN migration will not
+         * migrate key 0 because it is locked -- it will be put onto the
+         * pending list.  But the LN put on the pending list will be the newly
+         * inserted node, which has a different node ID than the LN that needs
+         * to be migrated -- this is the first condition for the bug.
+         */
+        env.checkpoint(forceConfig);
+
+        /*
+         * Abort the transaction to revert to the original node ID for key 0.
+         * Then perform a delete with a new transaction, so that the current
+         * LN for key 0 is a deleted LN.
+         */
+        txn.abort();
+        txn = env.beginTransaction(null, null);
+        key.setData(TestUtils.getTestArray(0));
+        status = db.delete(txn, key);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /*
+         * The current state of key 0 is that the BIN contains a deleted LN,
+         * and that LN has a node ID that is different than the one in the
+         * pending LN list.  This node is the one that needs to be migrated.
+         *
+         * Perform a checkpoint to cause pending LNs to be processed and then
+         * delete the cleaned file.  When we process the pending LN, we'll lock
+         * the pending LN's node ID (the one we inserted and aborted), which is
+         * the wrong node ID.  We'll then examine the current LN, find it
+         * deleted, and neglect to migrate the LN that needs to be migrated.
+         * The error is that we don't lock the node ID of the current LN.
+         *
+         * Then abort the delete transaction.  That will revert the BIN entry
+         * to the node we failed to migrate.  If we then try to fetch key 0,
+         * we'll get LogFileNotFoundException.
+         */
+        env.checkpoint(forceConfig);
+        txn.abort();
+        status = db.get(null, key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* If we get this far without LogFileNotFoundException, it's fixed. */
+
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR12978Test.java b/test/com/sleepycat/je/cleaner/SR12978Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..732207c185528b1151a9d9de74c64a67966f7a1e
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR12978Test.java
@@ -0,0 +1,211 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR12978Test.java,v 1.10.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests a fix to 12978, which was a ClassCastException in the following
+ * sequence.
+ *
+ * 1) A LN's BIN entry has the MIGRATE flag set.
+ *
+ * 2) Another LN with the same key is inserted (duplicates are allowed) and the
+ * first LN is moved to a dup tree.
+ *
+ * 3) The MIGRATE flag on the BIN entry is not cleared, and this entry now
+ * contains a DIN.
+ *
+ * 4) A split of the BIN occurs, logging the BIN with the DIN entry.  During a
+ * split we can't do migration, so we attempt to put the DIN onto the cleaner's
+ * pending list.  We cast from DIN to LN, causing the exception.
+ *
+ * The fix was to clear the MIGRATE flag on the BIN entry at the time we update
+ * it to contain the DIN.
+ *
+ * This bug also left latches unreleased if a runtime exception occurred during
+ * a split, and that problem was fixed also.
+ */
+public class SR12978Test extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SR12978Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(config);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Configure to make cleaning more frequent. */
+        config.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(), "10240");
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "90");
+
+        env = new Environment(envHome, config);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void close()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Reproduces the ClassCastException during a BIN split that is described
+     * in the class comment.
+     */
+    public void testSR12978()
+        throws DatabaseException {
+
+        open();
+
+        final int COUNT = 800;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /*
+         * Insert enough non-dup records to write a few log files.  Delete
+         * every other record so that cleaning will occur.  Leave key space so
+         * we can insert below to cause splits.
+         */
+        IntegerBinding.intToEntry(0, data);
+        for (int i = 0; i < COUNT; i += 4) {
+
+            IntegerBinding.intToEntry(i + 0, key);
+            status = db.putNoOverwrite(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            IntegerBinding.intToEntry(i + 1, key);
+            status = db.putNoOverwrite(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = db.delete(null, key);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /* Clean to set the MIGRATE flag on some LN entries. */
+        env.checkpoint(forceConfig);
+        int nCleaned = env.cleanLog();
+        assertTrue(nCleaned > 0);
+
+        /* Add dups to cause the LNs to be moved to a dup tree. */
+        IntegerBinding.intToEntry(1, data);
+        for (int i = 0; i < COUNT; i += 4) {
+
+            IntegerBinding.intToEntry(i + 0, key);
+            status = db.putNoDupData(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /*
+         * Insert more unique keys to cause BIN splits.  Before the fix to
+         * 12978, a ClassCastException would occur during a split.
+         */
+        IntegerBinding.intToEntry(0, data);
+        for (int i = 0; i < COUNT; i += 4) {
+
+            IntegerBinding.intToEntry(i + 2, key);
+            status = db.putNoOverwrite(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            IntegerBinding.intToEntry(i + 3, key);
+            status = db.putNoOverwrite(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        close();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR13061Test.java b/test/com/sleepycat/je/cleaner/SR13061Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..e6d2d9d04d1ecbe4e8e62589eff1f207588eaa7d
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR13061Test.java
@@ -0,0 +1,135 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SR13061Test.java,v 1.8 2008/01/07 14:29:05 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests that a FileSummaryLN with an old style string key can be read.  When
+ * we relied solely on log entry version to determine whether an LN had a
+ * string key, we could fail when an old style LN was migrated by the cleaner.
+ * In that case the key was still a string key but the log entry version was
+ * updated to something greater than zero.  See FileSummaryLN.hasStringKey for
+ * details of how we now guard against this.
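+ *
+ * For example (see the stringKey and intKey helpers below): the old style key
+ * for file 12345678 is the 8-byte UTF-8 string "12345678", while a new style
+ * key is also 8 bytes -- an unsigned int file number followed by an unsigned
+ * int sequence number -- so key length alone cannot distinguish the two forms.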
+ */
+public class SR13061Test extends TestCase {
+
+    private File envHome;
+    private Environment env;
+
+    public SR13061Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        env = null;
+    }
+
+    public void testSR13061()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        FileSummaryLN ln =
+            new FileSummaryLN(DbInternal.envGetEnvironmentImpl(env),
+                              new FileSummary());
+
+        /*
+         * All of these cases failed before hasStringKey required the byte
+         * array to be eight bytes long for integer keys.
+         */
+        assertTrue(ln.hasStringKey(stringKey(0)));
+        assertTrue(ln.hasStringKey(stringKey(1)));
+        assertTrue(ln.hasStringKey(stringKey(12)));
+        assertTrue(ln.hasStringKey(stringKey(123)));
+        assertTrue(ln.hasStringKey(stringKey(1234)));
+        assertTrue(ln.hasStringKey(stringKey(12345)));
+        assertTrue(ln.hasStringKey(stringKey(123456)));
+        assertTrue(ln.hasStringKey(stringKey(1234567)));
+        assertTrue(ln.hasStringKey(stringKey(123456789)));
+        assertTrue(ln.hasStringKey(stringKey(1234567890)));
+
+        /*
+         * These cases failed before hasStringKey required that the first byte
+         * of the sequence number (in an eight byte key) not be '0' to '9' for
+         * integer keys.
+         */
+        assertTrue(ln.hasStringKey(stringKey(12345678)));
+        assertTrue(ln.hasStringKey(stringKey(12340000)));
+
+        /* These tests are just for good measure. */
+        assertTrue(!ln.hasStringKey(intKey(0, 1)));
+        assertTrue(!ln.hasStringKey(intKey(1, 1)));
+        assertTrue(!ln.hasStringKey(intKey(12, 1)));
+        assertTrue(!ln.hasStringKey(intKey(123, 1)));
+        assertTrue(!ln.hasStringKey(intKey(1234, 1)));
+        assertTrue(!ln.hasStringKey(intKey(12345, 1)));
+        assertTrue(!ln.hasStringKey(intKey(123456, 1)));
+        assertTrue(!ln.hasStringKey(intKey(1234567, 1)));
+        assertTrue(!ln.hasStringKey(intKey(12345678, 1)));
+        assertTrue(!ln.hasStringKey(intKey(123456789, 1)));
+        assertTrue(!ln.hasStringKey(intKey(1234567890, 1)));
+    }
+
+    private byte[] stringKey(long fileNum) {
+
+        try {
+            return String.valueOf(fileNum).getBytes("UTF-8");
+        } catch (Exception e) {
+            fail(e.toString());
+            return null;
+        }
+    }
+
+    private byte[] intKey(long fileNum, long seqNum) {
+
+        TupleOutput out = new TupleOutput();
+        out.writeUnsignedInt(fileNum);
+        out.writeUnsignedInt(seqNum);
+        return out.toByteArray();
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/SR18227Test.java b/test/com/sleepycat/je/cleaner/SR18227Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..be80b29c0246e715a79820b2b2af4446760d0a3d
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/SR18227Test.java
@@ -0,0 +1,397 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR18227Test.java,v 1.1.2.2 2010/01/30 01:10:55 mark Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+
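+/**
+ * Tests a fix for [#18227], where a BIN could be evicted without being
+ * flushed even though it contained an LN that had been dirtied by log
+ * cleaning.  See testSR18227 for the detailed sequence of events.
+ */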
+public class SR18227Test extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private JUnitThread junitThread;
+    private boolean deferredWrite;
+
+    public SR18227Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    @Override
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    @Override
+    public void tearDown() {
+        if (junitThread != null) {
+            while (junitThread.isAlive()) {
+                junitThread.interrupt();
+                Thread.yield();
+            }
+            junitThread = null;
+        }
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        //*
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        //*/
+
+        db = null;
+        env = null;
+        envImpl = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        config.setAllowCreate(true);
+
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        /* Use a small cache size to increase eviction. */
+        config.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(),
+                              Integer.toString(1024 * 96));
+
+        /*
+         * Disable critical eviction, we want to test under controlled
+         * circumstances.
+         */
+        config.setConfigParam
+            (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE.getName(), "1000");
+
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        if (deferredWrite) {
+            dbConfig.setDeferredWrite(true);
+        } else {
+            dbConfig.setTemporary(true);
+        }
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Tests additionally with deferred-write instead of a temporary database
+     * as a double-check that the test is correct and that the problem is
+     * limited to temporary DBs.
+     */
+    public void testDeferredWrite()
+        throws DatabaseException {
+
+        deferredWrite = true;
+        testSR18227();
+    }
+
+    /**
+     * Tests a fix for a bug where a BIN was evicted, without flushing it, when
+     * it contained an LN that had been dirtied by log cleaning.
+     */
+    public void testSR18227()
+        throws DatabaseException {
+
+        openEnv();
+
+        /*
+         * Insert many records to cause eviction of BINs.  Critical eviction is
+         * disabled, so no eviction occurs until evictMemory is invoked.
+         */
+        final int RECORD_COUNT = 100000;
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry(new byte[100]);
+        for (int i = 0; i < RECORD_COUNT; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            db.put(null, key, data);
+        }
+        /* Evict to flush data to disk, then load again. */
+        env.evictMemory();
+        for (int i = 0; i < RECORD_COUNT; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            db.get(null, key, data, null);
+        }
+
+        final AtomicReference<BIN> foundBin = new AtomicReference<BIN>(null);
+        final AtomicLong foundLsn = new AtomicLong(DbLsn.NULL_LSN);
+        final AtomicInteger foundLn = new AtomicInteger(-1);
+
+        /* Simulate processing of an LN in the log cleaner. */
+        junitThread = new JUnitThread("testSR18227") {
+            public void testBody() {
+                final BIN bin = foundBin.get();
+                assertNotNull(bin);
+                final int index = foundLn.get();
+                assertTrue(index >= 0);
+
+                final FileProcessor processor = new FileProcessor
+                    ("testSR18227", envImpl, envImpl.getCleaner(),
+                     envImpl.getUtilizationProfile(),
+                     envImpl.getCleaner().getFileSelector());
+
+                final Map<DatabaseId, DatabaseImpl> dbCache =
+                    new HashMap<DatabaseId, DatabaseImpl>();
+                try {
+                    processor.testProcessLN
+                        ((LN) bin.getTarget(index), bin.getLsn(index),
+                         bin.getKey(index), null /*dupKey*/,
+                         bin.getDatabase().getId(), dbCache);
+                } catch (DatabaseException e) {
+                    throw new RuntimeException(e);
+                } finally {
+                    envImpl.getDbTree().releaseDbs(dbCache);
+                }
+            }
+        };
+
+        /*
+         * When an IN is about to be evicted, get control while it is latched
+         * but before the evictor re-searches for the parent IN.
+         */
+        final TestHook preEvictINHook = new TestHook() {
+            public void doHook() {
+                try {
+                    if (foundLn.get() >= 0) {
+                        return;
+                    }
+                    assertEquals(1, LatchSupport.countLatchesHeld());
+                    final BIN bin = findNonDirtyLatchedBIN();
+                    if (bin != null) {
+                        foundBin.set(bin);
+                        foundLsn.set(bin.getLastFullVersion());
+                        final int index = findDurableLN(bin);
+                        if (index >= 0) {
+                            foundLn.set(index);
+                            final LN ln = (LN) bin.fetchTarget(index);
+                            assertNotNull(ln);
+                            final IN parent = findBINParent(bin);
+                            if (parent.latchNoWait()) {
+                                parent.releaseLatch();
+                            } else {
+                                fail("Parent should not currently be latched.");
+                            }
+                            junitThread.start();
+
+                            /*
+                             * Loop until BIN parent is latched by cleaner in
+                             * separate thread.  When this occurs, the cleaner
+                             * will then try to latch the BIN itself.
+                             */
+                            while (junitThread.isAlive()) {
+                                if (parent.latchNoWait()) {
+                                    parent.releaseLatch();
+                                    Thread.yield();
+                                } else {
+                                    break;
+                                }
+                            }
+
+                            /*
+                             * Perform one final yield to ensure that the
+                             * cleaner has time to request the latch on the
+                             * BIN.
+                             */
+                            Thread.yield();
+                            assertEquals(1, LatchSupport.countLatchesHeld());
+                        }
+                    }
+                } catch (DatabaseException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        };
+
+        /*
+         * Set the pre-eviction hook and start eviction in this thread.  When
+         * evictMemory is called, that sets off the following sequence of
+         * events using the thread and hook defined further above.
+         *
+         * 1. The evictor (in this thread) will select a BIN for eviction.
+         * 2. The hook (above) will choose a BIN that is selected by the
+         *    evictor (it determines this by finding the BIN that is latched).
+         *    It is looking for a non-dirty BIN in the temp DB.
+         * 3. The hook starts the separate thread to simulate processing of the
+         *    LN by the log cleaner.
+         * 4. When the log cleaner (separate thread) has latched the BIN's
+         *    parent and is attempting to latch the BIN, the hook returns to
+         *    allow the evictor to continue.
+         * 5. The evictor then releases the latch on the BIN, in order to
+         *    re-search for its parent.  By releasing the BIN latch, the
+         *    separate thread is then activated, since it was waiting on a
+         *    latch request for that BIN.
+         * 6. The separate thread then marks the LN in the BIN dirty.  The bug
+         *    is that it neglected to mark the BIN dirty.  This thread then
+         *    finishes.
+         * 7. The evictor now continues because it can get the latch on the
+         *    BIN.  When the bug was present, it would NOT flush the BIN,
+         *    because it was not dirty.  With the bug fix, the BIN is now
+         *    dirtied by the cleaner, and the evictor will flush it.
+         */
+        envImpl.getEvictor().setPreEvictINHook(preEvictINHook);
+        env.evictMemory();
+
+        /* Ensure separate thread is finished and report any exceptions. */
+        try {
+            junitThread.finishTest();
+            junitThread = null;
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+
+        /*
+         * After that entire process is complete, we can check that it did what
+         * we expected, and the BIN was flushed by the evictor.
+         */
+        final BIN bin = foundBin.get();
+        assertNotNull(bin);
+        final int index = foundLn.get();
+        assertTrue(index >= 0);
+        /* Ensure the BIN was evicted. */
+        assertFalse(envImpl.getInMemoryINs().contains(bin));
+        /* Ensure the BIN was flushed: this failed before the bug fix. */
+        assertTrue(bin.getLastFullVersion() != foundLsn.get());
+        /* Ensure the dirty LN was written. */
+        final LN ln = (LN) bin.getTarget(index);
+        assertNotNull(ln);
+        assertFalse(ln.isDirty());
+        assertTrue(DbLsn.NULL_LSN != bin.getLsn(index));
+
+        closeEnv();
+    }
+
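+    /**
+     * Returns the currently write-latched IN if it is a non-dirty BIN in the
+     * test database, returns null if the latched IN does not qualify, or
+     * fails if no IN is latched at all.
+     */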
+    private BIN findNonDirtyLatchedBIN() {
+        for (IN in : envImpl.getInMemoryINs()) {
+            if (in.isLatchOwnerForWrite()) {
+                if (in.getDatabase() != DbInternal.dbGetDatabaseImpl(db)) {
+                    return null;
+                }
+                if (!(in instanceof BIN)) {
+                    return null;
+                }
+                BIN bin = (BIN) in;
+                if (bin.getDirty()) {
+                    return null;
+                }
+                return bin;
+            }
+        }
+        fail("No IN latched");
+        return null; // for compiler
+    }
+
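+    /**
+     * Returns the level-2 IN (the BIN's parent) in the INList that has an
+     * entry referencing the given BIN, or fails if none is found.
+     */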
+    private IN findBINParent(BIN bin) {
+        for (IN in : envImpl.getInMemoryINs()) {
+            if (in.getLevel() != IN.BIN_LEVEL + 1) {
+                continue;
+            }
+            for (int i = 0; i < in.getNEntries(); i += 1) {
+                if (in.getTarget(i) == bin) {
+                    return in;
+                }
+            }
+        }
+        fail("No BIN parent");
+        return null; // for compiler
+    }
+
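+    /**
+     * Returns the index of the first slot in the BIN with a non-null LSN,
+     * i.e., an LN that has been written to the log, or -1 if there is none.
+     */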
+    private int findDurableLN(BIN bin) {
+        for (int i = 0; i < bin.getNEntries(); i += 1) {
+            if (bin.getLsn(i) != DbLsn.NULL_LSN) {
+                return i;
+            }
+        }
+        return -1;
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java b/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1b3312f7c7ffad07cc150e23b36f350148efa5b3
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java
@@ -0,0 +1,1234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TruncateAndRemoveTest.java,v 1.29.2.3 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.nio.ByteBuffer;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.DumpFileReader;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+
+/**
+ * Test cleaning and utilization counting for database truncate and remove.
+ */
+public class TruncateAndRemoveTest extends TestCase {
+
+    private static final String DB_NAME1 = "foo";
+    private static final String DB_NAME2 = "bar";
+    private static final long RECORD_COUNT = 100;
+
+    private static final CheckpointConfig FORCE_CHECKPOINT =
+        new CheckpointConfig();
+    static {
+        FORCE_CHECKPOINT.setForce(true);
+    }
+
+    private static final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private DatabaseImpl dbImpl;
+    private JUnitThread junitThread;
+    private boolean fetchObsoleteSize;
+    private boolean truncateOrRemoveDone;
+    private boolean dbEviction;
+
+    public TruncateAndRemoveTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        if (junitThread != null) {
+            while (junitThread.isAlive()) {
+                junitThread.interrupt();
+                Thread.yield();
+            }
+            junitThread = null;
+        }
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        dbImpl = null;
+        env = null;
+        envImpl = null;
+        envHome = null;
+    }
+
+    /**
+     * Opens the environment.
+     */
+    private void openEnv(boolean transactional)
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        config.setTransactional(transactional);
+        config.setAllowCreate(true);
+        /* Do not run the daemons since they interfere with LN counting. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        /* Use small nodes to test the post-txn scanning. */
+        config.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "10");
+        config.setConfigParam
+            (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "10");
+
+        /* Use small files to ensure that there is cleaning. */
+        config.setConfigParam("je.cleaner.minUtilization", "90");
+        DbInternal.disableParameterValidation(config);
+        config.setConfigParam("je.log.fileMax", "4000");
+
+        /* Obsolete LN size counting is optional per test. */
+        if (fetchObsoleteSize) {
+            config.setConfigParam
+                (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(),
+                 "true");
+        }
+
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        config = env.getConfig();
+        dbEviction = config.getConfigParam
+            (EnvironmentParams.ENV_DB_EVICTION.getName()).equals("true");
+    }
+
+    /**
+     * Opens the database.
+     */
+    private void openDb(Transaction useTxn, String dbName)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        EnvironmentConfig envConfig = env.getConfig();
+        dbConfig.setTransactional(envConfig.getTransactional());
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(useTxn, dbName, dbConfig);
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+    }
+
+    /**
+     * Closes the database.
+     */
+    private void closeDb()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+            dbImpl = null;
+        }
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        closeDb();
+
+        if (env != null) {
+            env.close();
+            env = null;
+            envImpl = null;
+        }
+    }
+
+    /**
+     * Test that truncate generates the right number of obsolete LNs.
+     */
+    public void testTruncate()
+        throws Exception {
+
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseImpl saveDb = dbImpl;
+        DatabaseId saveId = dbImpl.getId();
+        closeDb();
+
+        Transaction txn = env.beginTransaction(null, null);
+        truncate(txn, true);
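+        /*
+         * Capture obsolete counts before the commit for the utilization check
+         * below.
+         */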
+        ObsoleteCounts beforeCommit = getObsoleteCounts();
+        txn.commit();
+        truncateOrRemoveDone = true;
+
+        /* Make sure use count is decremented when we commit. */
+        assertDbInUse(saveDb, false);
+        openDb(null, DB_NAME1);
+        saveDb = dbImpl;
+        closeDb();
+        assertDbInUse(saveDb, false);
+
+        verifyUtilization(beforeCommit,
+                          RECORD_COUNT + // LNs
+                          3,   // prev MapLN + deleted MapLN + prev NameLN
+                          15); // 1 root, 2 INs, 12 BINs
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * Test that aborting truncate generates the right number of obsolete LNs.
+     */
+    public void testTruncateAbort()
+        throws Exception {
+
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseImpl saveDb = dbImpl;
+        closeDb();
+
+        Transaction txn = env.beginTransaction(null, null);
+        truncate(txn, true);
+        ObsoleteCounts beforeAbort = getObsoleteCounts();
+        txn.abort();
+
+        /* Make sure use count is decremented when we abort. */
+        assertDbInUse(saveDb, false);
+        openDb(null, DB_NAME1);
+        saveDb = dbImpl;
+        closeDb();
+        assertDbInUse(saveDb, false);
+
+        /*
+         * The obsolete count should include the new NameLN and the two MapLN
+         * copies written for the new (truncated) database.
+         */
+        verifyUtilization(beforeAbort,
+                          /* 1 new nameLN, 2 copies of MapLN for new db */
+                           3,
+                           0);
+
+        /* Reopen, db should be populated. */
+        openDb(null, DB_NAME1);
+        assertEquals(RECORD_COUNT, countRecords(null));
+        closeEnv();
+    }
+
+    /**
+     * Test that aborting truncate generates the right number of obsolete LNs.
+     */
+    public void testTruncateRepopulateAbort()
+        throws Exception {
+
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        closeDb();
+
+        Transaction txn = env.beginTransaction(null, null);
+        truncate(txn, true);
+
+        /* populate the database with some more records. */
+        openDb(txn, DB_NAME1);
+        writeAndCountRecords(txn, RECORD_COUNT/4);
+        DatabaseImpl saveDb = dbImpl;
+        DatabaseId saveId = dbImpl.getId();
+        closeDb();
+        ObsoleteCounts beforeAbort = getObsoleteCounts();
+        txn.abort();
+
+        /*
+         * We set truncateOrRemoveDone to true (meaning that per-DB utilization
+         * will not be verified) even though the txn was aborted, because the
+         * discarded new DatabaseImpl will not be counted, yet it includes INs
+         * and LNs from the operations above.
+         */
+        truncateOrRemoveDone = true;
+
+        /* Make sure use count is decremented when we abort. */
+        assertDbInUse(saveDb, false);
+        openDb(null, DB_NAME1);
+        saveDb = dbImpl;
+        closeDb();
+        assertDbInUse(saveDb, false);
+
+        /*
+         * The obsolete count should include the records inserted after
+         * the truncate.
+         */
+        verifyUtilization(beforeAbort,
+                          /* newly inserted LNs, 1 new nameLN,
+                           * 2 copies of MapLN for new db */
+                          (RECORD_COUNT/4) + 3,
+                          5);
+
+        /* Reopen, db should be populated. */
+        openDb(null, DB_NAME1);
+        assertEquals(RECORD_COUNT, countRecords(null));
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * Test that remove generates the right number of obsolete LNs.
+     */
+    public void testRemove()
+        throws Exception {
+
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseImpl saveDb = dbImpl;
+        DatabaseId saveId = dbImpl.getId();
+        closeDb();
+
+        Transaction txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME1);
+        ObsoleteCounts beforeCommit = getObsoleteCounts();
+        txn.commit();
+        truncateOrRemoveDone = true;
+
+        /* Make sure use count is decremented when we commit. */
+        assertDbInUse(saveDb, false);
+
+        verifyUtilization(beforeCommit,
+                          /* LNs + old NameLN, old MapLN, delete MapLN */
+                          RECORD_COUNT + 3,
+                          15);
+
+        openDb(null, DB_NAME1);
+        assertEquals(0, countRecords(null));
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * Test that remove generates the right number of obsolete LNs.
+     */
+    public void testNonTxnalRemove()
+        throws Exception {
+
+        openEnv(false);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseImpl saveDb = dbImpl;
+        DatabaseId saveId = dbImpl.getId();
+        closeDb();
+        ObsoleteCounts beforeOperation = getObsoleteCounts();
+        env.removeDatabase(null, DB_NAME1);
+        truncateOrRemoveDone = true;
+
+        /* Make sure use count is decremented. */
+        assertDbInUse(saveDb, false);
+
+        verifyUtilization(beforeOperation,
+                          /* LNs + new NameLN, old NameLN, old MapLN, delete
+                             MapLN */
+                          RECORD_COUNT + 4,
+                          15);
+
+        openDb(null, DB_NAME1);
+        assertEquals(0, countRecords(null));
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * Test that aborting remove generates the right number of obsolete LNs.
+     */
+    public void testRemoveAbort()
+        throws Exception {
+
+        /* Create database, populate, remove, abort the remove. */
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseImpl saveDb = dbImpl;
+        closeDb();
+        Transaction txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME1);
+        ObsoleteCounts beforeAbort = getObsoleteCounts();
+        txn.abort();
+
+        /* Make sure use count is decremented when we abort. */
+        assertDbInUse(saveDb, false);
+
+        verifyUtilization(beforeAbort, 0, 0);
+
+        /* All records should be there. */
+        openDb(null, DB_NAME1);
+        assertEquals(RECORD_COUNT, countRecords(null));
+
+        closeEnv();
+
+        /*
+         * Batch clean and then check the record count again, just to make sure
+         * we don't lose any valid data.
+         */
+        openEnv(true);
+        while (env.cleanLog() > 0) {
+        }
+        CheckpointConfig force = new CheckpointConfig();
+        force.setForce(true);
+        env.checkpoint(force);
+        closeEnv();
+
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        assertEquals(RECORD_COUNT, countRecords(null));
+        closeEnv();
+    }
+
+    /**
+     * The same as testRemoveNotResident but forces fetching of obsolete LNs
+     * in order to count their sizes accurately.
+     */
+    public void testRemoveNotResidentFetchObsoleteSize()
+        throws Exception {
+
+        fetchObsoleteSize = true;
+        testRemoveNotResident();
+    }
+
+    /**
+     * Test that we can properly account for a non-resident database.
+     */
+    public void testRemoveNotResident()
+        throws Exception {
+
+        /* Create a database, populate. */
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseId saveId = DbInternal.dbGetDatabaseImpl(db).getId();
+        closeEnv();
+
+        /*
+         * Open the environment and remove the database. The
+         * database is not resident at all.
+         */
+        openEnv(true);
+        Transaction txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME1);
+        ObsoleteCounts beforeCommit = getObsoleteCounts();
+        txn.commit();
+        truncateOrRemoveDone = true;
+
+        verifyUtilization(beforeCommit,
+                          /* LNs + old NameLN, old MapLN, delete MapLN */
+                          RECORD_COUNT + 3,
+                          /* 15 INs for data tree */
+                          15,
+                          true);
+
+        /* check record count. */
+        openDb(null, DB_NAME1);
+        assertEquals(0, countRecords(null));
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * The same as testRemovePartialResident but forces fetching of obsolete
+     * LNs in order to count their sizes accurately.
+     */
+    public void testRemovePartialResidentFetchObsoleteSize()
+        throws Exception {
+
+        fetchObsoleteSize = true;
+        testRemovePartialResident();
+    }
+
+    /**
+     * Test that we can properly account for partially resident tree.
+     */
+    public void testRemovePartialResident()
+        throws Exception {
+
+        /* Create a database, populate. */
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, RECORD_COUNT);
+        DatabaseId saveId = DbInternal.dbGetDatabaseImpl(db).getId();
+        closeEnv();
+
+        /*
+         * Open the environment and remove the database. Pull 1 BIN in.
+         */
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        Cursor c = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     c.getFirst(new DatabaseEntry(), new DatabaseEntry(),
+                                LockMode.DEFAULT));
+        c.close();
+        DatabaseImpl saveDb = dbImpl;
+        closeDb();
+
+        Transaction txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME1);
+        ObsoleteCounts beforeCommit = getObsoleteCounts();
+        txn.commit();
+        truncateOrRemoveDone = true;
+
+        /* Make sure use count is decremented when we commit. */
+        assertDbInUse(saveDb, false);
+
+        verifyUtilization(beforeCommit,
+                          /* LNs + old NameLN, old MapLN, delete MapLN */
+                          RECORD_COUNT + 3,
+                          /* 15 INs for data tree */
+                          15,
+                          true);
+
+        /* check record count. */
+        openDb(null, DB_NAME1);
+        assertEquals(0, countRecords(null));
+
+        closeEnv();
+        batchCleanAndVerify(saveId);
+    }
+
+    /**
+     * Tests that a log file is not deleted by the cleaner when it contains
+     * entries in a database that is pending deletion.
+     */
+    public void testDBPendingDeletion()
+        throws DatabaseException, InterruptedException {
+
+        doDBPendingTest(RECORD_COUNT + 30, false /*deleteAll*/, 5);
+    }
+
+    /**
+     * Like testDBPendingDeletion but creates a scenario where only a single
+     * log file is cleaned, and that log file contains only known obsolete
+     * log entries.  This reproduced a bug where we neglected to add pending
+     * deleted DBs to the cleaner's pending DB set if all entries in the log
+     * file were already known-obsolete. [#13333]
+     */
+    public void testObsoleteLogFile()
+        throws DatabaseException, InterruptedException {
+
+        doDBPendingTest(70, true /*deleteAll*/, 1);
+    }
+
+    private void doDBPendingTest(long recordCount,
+                                 boolean deleteAll,
+                                 int expectFilesCleaned)
+        throws DatabaseException, InterruptedException {
+
+        /* Create a database, populate, close. */
+        Set logFiles = new HashSet();
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndMakeWaste(recordCount, logFiles, deleteAll);
+        long remainingRecordCount = deleteAll ? 0 : recordCount;
+        env.checkpoint(FORCE_CHECKPOINT);
+        ObsoleteCounts obsoleteCounts = getObsoleteCounts();
+        DatabaseImpl saveDb = dbImpl;
+        closeDb();
+        assertTrue(!saveDb.isDeleteFinished());
+        assertTrue(!saveDb.isDeleted());
+        assertDbInUse(saveDb, false);
+
+        /* Make sure that we wrote a full file's worth of LNs. */
+        assertTrue(logFiles.size() >= 2);
+        assertTrue(logFilesExist(logFiles));
+
+        /* Remove the database but do not commit yet. */
+        final Transaction txn = env.beginTransaction(null, null);
+        env.removeDatabase(txn, DB_NAME1);
+
+        /* The obsolete LN count should increase by 1 (for the NameLN). */
+        obsoleteCounts = verifyUtilization(obsoleteCounts, 1, 0);
+        truncateOrRemoveDone = true;
+
+        junitThread = new JUnitThread("Committer") {
+            public void testBody()
+                throws DatabaseException {
+                try {
+                    txn.commit();
+                } catch (Throwable e) {
+                    e.printStackTrace();
+                }
+            }
+        };
+
+        /*
+         * Set a hook to cause the commit to block.  The commit is done in a
+         * separate thread.  The commit will set the DB state to pendingDeleted
+         * and will then wait for the hook to return.
+         */
+        final Object lock = new Object();
+
+        saveDb.setPendingDeletedHook(new TestHook() {
+            public void doHook() {
+                synchronized (lock) {
+                    try {
+                        lock.notify();
+                        lock.wait();
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                        throw new RuntimeException(e.toString());
+                    }
+                }
+            }
+            public Object getHookValue() {
+                throw new UnsupportedOperationException();
+            }
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+        });
+
+        /* Start the committer thread; expect the pending deleted state. */
+        synchronized (lock) {
+            junitThread.start();
+            lock.wait();
+        }
+        assertTrue(!saveDb.isDeleteFinished());
+        assertTrue(saveDb.isDeleted());
+        assertDbInUse(saveDb, true);
+
+        /* Expect obsolete LNs: NameLN */
+        obsoleteCounts = verifyUtilization(obsoleteCounts, 1, 0);
+
+        /* The DB deletion is pending; the log file should still exist. */
+        int filesCleaned = env.cleanLog();
+        assertEquals(expectFilesCleaned, filesCleaned);
+        assertTrue(filesCleaned > 0);
+        env.checkpoint(FORCE_CHECKPOINT);
+        env.checkpoint(FORCE_CHECKPOINT);
+        assertTrue(logFilesExist(logFiles));
+
+        /*
+         * When the committer thread finishes, the DB deletion will be
+         * complete and the DB state will change to deleted.
+         */
+        synchronized (lock) {
+            lock.notify();
+        }
+        try {
+            junitThread.finishTest();
+            junitThread = null;
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+        assertTrue(saveDb.isDeleteFinished());
+        assertTrue(saveDb.isDeleted());
+        assertDbInUse(saveDb, false);
+
+        /* Expect obsolete LNs: recordCount + MapLN + FSLNs (approx.). */
+        verifyUtilization(obsoleteCounts, remainingRecordCount + 6, 0);
+
+        /* The DB deletion is complete; the log file should be deleted. */
+        env.checkpoint(FORCE_CHECKPOINT);
+        env.checkpoint(FORCE_CHECKPOINT);
+        assertTrue(!logFilesExist(logFiles));
+    }
+
+    /*
+     * The xxxForceTreeWalk tests set the DatabaseImpl
+     * forceTreeWalkForTruncateAndRemove field to true, which will force a walk
+     * of the tree to count utilization during truncate/remove, rather than
+     * using the per-database info.  This is used to test the "old technique"
+     * for counting utilization, which is now used only if the database was
+     * created prior to log version 6.
+     */
+
+    public void testTruncateForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncate();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testTruncateAbortForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncateAbort();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testTruncateRepopulateAbortForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncateRepopulateAbort();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemove();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testNonTxnalRemoveForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testNonTxnalRemove();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveAbortForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemoveAbort();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveNotResidentForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemoveNotResident();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemovePartialResidentForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemovePartialResident();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testDBPendingDeletionForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testDBPendingDeletion();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testObsoleteLogFileForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testObsoleteLogFile();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    /**
+     * Tickles a bug that caused an NPE during recovery after the sequence:
+     * delete record, truncate DB, crash (close without checkpoint), and
+     * recover. [#16515]
+     */
+    public void testDeleteTruncateRecover()
+        throws DatabaseException {
+
+        /* Delete a record. */
+        openEnv(true);
+        openDb(null, DB_NAME1);
+        writeAndCountRecords(null, 1);
+        closeDb();
+
+        /* Truncate DB. */
+        Transaction txn = env.beginTransaction(null, null);
+        truncate(txn, false);
+        txn.commit();
+
+        /* Close without checkpoint. */
+        envImpl.close(false /*doCheckpoint*/);
+        envImpl = null;
+        env = null;
+
+        /* Recover -- the bug caused an NPE here. */
+        openEnv(true);
+        closeEnv();
+    }
+
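+    /**
+     * Writes count records with sequential keys and checks the resulting
+     * record count.  Also does an extra insert/delete, and (if the
+     * environment is transactional) an insert/abort, to create obsolete LNs.
+     */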
+    private void writeAndCountRecords(Transaction txn, long count)
+        throws DatabaseException {
+
+        for (int i = 1; i <= count; i += 1) {
+            DatabaseEntry entry = new DatabaseEntry(TestUtils.getTestArray(i));
+
+            db.put(txn, entry, entry);
+        }
+
+        /* Insert and delete some records, insert and abort some records. */
+        DatabaseEntry entry =
+            new DatabaseEntry(TestUtils.getTestArray((int)count+1));
+        db.put(txn, entry, entry);
+        db.delete(txn, entry);
+
+        EnvironmentConfig envConfig = env.getConfig();
+        if (envConfig.getTransactional()) {
+            entry = new DatabaseEntry(TestUtils.getTestArray(0));
+            Transaction txn2 = env.beginTransaction(null, null);
+            db.put(txn2, entry, entry);
+            txn2.abort();
+            txn2 = null;
+        }
+
+        assertEquals(count, countRecords(txn));
+    }
+
+    /**
+     * Writes the specified number of records to db and checks the record
+     * count.  Adds the numbers of the log files written to the given set.
+     *
+     * Makes waste (obsolete records): if doDelete=true, deletes all records
+     * after inserting them; otherwise does extra updates to produce obsolete
+     * records interleaved with non-obsolete records.
+     */
+    private void writeAndMakeWaste(long count,
+                                   Set logFilesWritten,
+                                   boolean doDelete)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        for (int i = 0; i < count; i += 1) {
+            DatabaseEntry entry = new DatabaseEntry(TestUtils.getTestArray(i));
+            cursor.put(entry, entry);
+            /* Add log file written. */
+            long file = CleanerTestUtils.getLogFile(this, cursor);
+            logFilesWritten.add(Long.valueOf(file));
+            /* Make waste. */
+            if (!doDelete) {
+                cursor.put(entry, entry);
+                cursor.put(entry, entry);
+            }
+        }
+        if (doDelete) {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status;
+            for (status = cursor.getFirst(key, data, null);
+                 status == OperationStatus.SUCCESS;
+                 status = cursor.getNext(key, data, null)) {
+                /* Make waste. */
+                cursor.delete();
+                /* Add log file written. */
+                long file = CleanerTestUtils.getLogFile(this, cursor);
+                logFilesWritten.add(Long.valueOf(file));
+            }
+        }
+        cursor.close();
+        txn.commit();
+        assertEquals(doDelete ? 0 : count, countRecords(null));
+    }
+
+    /* Truncate database and check the count. */
+    private void truncate(Transaction useTxn, boolean getCount)
+        throws DatabaseException {
+
+        long nTruncated = env.truncateDatabase(useTxn, DB_NAME1, getCount);
+
+        if (getCount) {
+            assertEquals(RECORD_COUNT, nTruncated);
+        }
+
+        assertEquals(0, countRecords(useTxn));
+    }
+
+    /**
+     * Returns how many records are in the database.
+     */
+    private int countRecords(Transaction useTxn)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        boolean opened = false;
+        if (db == null) {
+            openDb(useTxn, DB_NAME1);
+            opened = true;
+        }
+        Cursor cursor = db.openCursor(useTxn, null);
+        int count = 0;
+        try {
+            OperationStatus status = cursor.getFirst(key, data, null);
+            while (status == OperationStatus.SUCCESS) {
+                count += 1;
+                status = cursor.getNext(key, data, null);
+            }
+        } finally {
+            cursor.close();
+        }
+        if (opened) {
+            closeDb();
+        }
+        return count;
+    }
+
+    /**
+     * Returns the total obsolete node counts and sizes according to the
+     * UtilizationProfile and UtilizationTracker.
+     */
+    private ObsoleteCounts getObsoleteCounts()
+        throws DatabaseException {
+
+        FileSummary[] files = (FileSummary[])
+            envImpl.getUtilizationProfile()
+                   .getFileSummaryMap(true)
+                   .values().toArray(new FileSummary[0]);
+        int lnCount = 0;
+        int inCount = 0;
+        int lnSize = 0;
+        int lnSizeCounted = 0;
+        for (int i = 0; i < files.length; i += 1) {
+            lnCount += files[i].obsoleteLNCount;
+            inCount += files[i].obsoleteINCount;
+            lnSize += files[i].obsoleteLNSize;
+            lnSizeCounted += files[i].obsoleteLNSizeCounted;
+        }
+
+        return new ObsoleteCounts(lnCount, inCount, lnSize, lnSizeCounted);
+    }
+
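+    /** Holds obsolete LN/IN counts and obsolete LN sizes for all files. */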
+    private class ObsoleteCounts {
+        int obsoleteLNs;
+        int obsoleteINs;
+        int obsoleteLNSize;
+        int obsoleteLNSizeCounted;
+
+        ObsoleteCounts(int obsoleteLNs,
+                       int obsoleteINs,
+                       int obsoleteLNSize,
+                       int obsoleteLNSizeCounted) {
+            this.obsoleteLNs = obsoleteLNs;
+            this.obsoleteINs = obsoleteINs;
+            this.obsoleteLNSize = obsoleteLNSize;
+            this.obsoleteLNSizeCounted = obsoleteLNSizeCounted;
+        }
+
+        public String toString() {
+            return "lns=" + obsoleteLNs + " ins=" + obsoleteINs +
+                   " lnSize=" + obsoleteLNSize +
+                   " lnSizeCounted=" + obsoleteLNSizeCounted;
+        }
+    }
+
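+    /** Verifies utilization without expecting non-resident nodes. */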
+    private ObsoleteCounts verifyUtilization(ObsoleteCounts prev,
+                                             long expectedLNs,
+                                             int expectedINs)
+        throws DatabaseException {
+
+        return verifyUtilization(prev, expectedLNs, expectedINs, false);
+    }
+
+    /*
+     * Check obsolete counts. If the expected IN count is zero, don't
+     * check the obsolete IN count.  Always check the obsolete LN count.
+     */
+    private ObsoleteCounts verifyUtilization(ObsoleteCounts prev,
+                                             long expectedLNs,
+                                             int expectedINs,
+                                             boolean expectNonResident)
+        throws DatabaseException {
+
+        /*
+         * If we are not forcing a tree walk OR all nodes are resident OR we
+         * have explicitly configured fetchObsoleteSize, then the size of every
+         * LN should have been counted.
+         */
+        boolean expectAccurateObsoleteLNSize =
+            !DatabaseImpl.forceTreeWalkForTruncateAndRemove ||
+            !expectNonResident ||
+            fetchObsoleteSize;
+
+        ObsoleteCounts now = getObsoleteCounts();
+        String beforeAndAfter = "before: " + prev + " now: " + now;
+        if (DEBUG) {
+            System.out.println(beforeAndAfter);
+        }
+
+        assertEquals(beforeAndAfter, expectedLNs,
+                     now.obsoleteLNs - prev.obsoleteLNs);
+        if (expectedLNs > 0) {
+            int size = now.obsoleteLNSize - prev.obsoleteLNSize;
+            int counted = now.obsoleteLNSizeCounted -
+                          prev.obsoleteLNSizeCounted;
+            assertTrue(String.valueOf(size), size > 0);
+
+            if (expectAccurateObsoleteLNSize) {
+                assertEquals(beforeAndAfter, counted,
+                             now.obsoleteLNs - prev.obsoleteLNs);
+            }
+        }
+        if (expectedINs > 0) {
+            assertEquals(beforeAndAfter, expectedINs,
+                         now.obsoleteINs - prev.obsoleteINs);
+        }
+
+        /*
+         * We pass expectAccurateDbUtilization as false when
+         * truncateOrRemoveDone, because the database utilization info for that
+         * database is now gone.
+         */
+        VerifyUtils.verifyUtilization
+            (envImpl,
+             true,                   // expectAccurateObsoleteLNCount
+             expectAccurateObsoleteLNSize,
+             !truncateOrRemoveDone); // expectAccurateDbUtilization
+
+        return now;
+    }
+
+    /**
+     * Checks whether a given DB has a non-zero use count.  Does nothing if
+     * je.dbEviction is not enabled, since use counts are only maintained
+     * when that config parameter is set.
+     */
+    private void assertDbInUse(DatabaseImpl db, boolean inUse) {
+        if (dbEviction) {
+            assertEquals(inUse, db.isInUse());
+        }
+    }
+
+    /**
+     * Returns true if all files exist, or false if any file is deleted.
+     */
+    private boolean logFilesExist(Set fileNumbers) {
+
+        Iterator iter = fileNumbers.iterator();
+        while (iter.hasNext()) {
+            long fileNum = ((Long) iter.next()).longValue();
+            File file = new File
+                (envHome,
+                 FileManager.getFileName(fileNum, FileManager.JE_SUFFIX));
+            if (!file.exists()) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /*
+     * Runs batch cleaning and verifies that no log files contain entries
+     * for the given database.
+     */
+    private void batchCleanAndVerify(DatabaseId dbId)
+        throws Exception {
+
+        /*
+         * Open the environment, flip to a new log file to reduce mixing of
+         * new and old records, and add more records to drive down the
+         * utilization of the files containing the removed records.
+         */
+        openEnv(true);
+        openDb(null, DB_NAME2);
+        envImpl.forceLogFileFlip();
+        CheckpointConfig force = new CheckpointConfig();
+        force.setForce(true);
+        env.checkpoint(force);
+
+        writeAndCountRecords(null, RECORD_COUNT * 3);
+        env.checkpoint(force);
+
+        closeDb();
+
+        /* Check log files; there should be entries for this database. */
+        CheckReader checker = new CheckReader(envImpl, dbId, true);
+        while (checker.readNextEntry()) {
+        }
+
+        if (DEBUG) {
+            System.out.println("entries for this db = " + checker.getCount());
+        }
+
+        assertTrue(checker.getCount() > 0);
+
+        /* Batch clean. */
+        boolean anyCleaned = false;
+        while (env.cleanLog() > 0) {
+            anyCleaned = true;
+        }
+
+        assertTrue(anyCleaned);
+
+        if (anyCleaned) {
+            env.checkpoint(force);
+        }
+
+        /* Check log files; there should be no entries for this database. */
+        checker = new CheckReader(envImpl, dbId, false);
+        while (checker.readNextEntry()) {
+        }
+
+        closeEnv();
+    }
+
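+    /**
+     * Log reader that counts entries for a given database id, or fails if
+     * such entries are found when none are expected.
+     */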
+    class CheckReader extends DumpFileReader {
+
+        private DatabaseId dbId;
+        private boolean expectEntries;
+        private int count;
+
+        /**
+         * @param dbId we're looking for log entries for this database.
+         * @param expectEntries if false, there should be no log entries
+         * with this database id; if true, the log should have entries
+         * with this database id.
+         */
+        CheckReader(EnvironmentImpl envImpl,
+                    DatabaseId dbId,
+                    boolean expectEntries)
+            throws DatabaseException, IOException {
+
+            super(envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN,
+                  null, null, false);
+            this.dbId = dbId;
+            this.expectEntries = expectEntries;
+        }
+
+        protected boolean processEntry(ByteBuffer entryBuffer)
+            throws DatabaseException {
+
+            /* Figure out what kind of log entry this is. */
+            byte type = currentEntryHeader.getType();
+            LogEntryType lastEntryType = LogEntryType.findType(type);
+            boolean isNode = LogEntryType.isNodeType(type);
+
+            /* Read the entry. */
+            LogEntry entry = lastEntryType.getSharedLogEntry();
+            entry.readEntry(currentEntryHeader,
+                            entryBuffer,
+                            true); // readFullItem
+
+            long lsn = getLastLsn();
+            if (isNode) {
+                boolean found = false;
+                if (entry instanceof INLogEntry) {
+                    INLogEntry inEntry = (INLogEntry) entry;
+                    found = dbId.equals(inEntry.getDbId());
+                } else {
+                    LNLogEntry lnEntry = (LNLogEntry) entry;
+                    found = dbId.equals(lnEntry.getDbId());
+                }
+                if (found) {
+                    if (expectEntries) {
+                        count++;
+                    } else {
+                        StringBuffer sb = new StringBuffer();
+                        entry.dumpEntry(sb, false);
+                        fail("lsn=" + DbLsn.getNoFormatString(lsn) +
+                             " dbId = " + dbId +
+                             " entry= " + sb.toString());
+                    }
+                }
+            }
+
+            return true;
+        }
+
+        /* Number of entries with this database id seen by the reader. */
+        int getCount() {
+            return count;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/UtilizationTest.java b/test/com/sleepycat/je/cleaner/UtilizationTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0e58fa4496568f169e5605c61ccad229dd1e65ea
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/UtilizationTest.java
@@ -0,0 +1,1455 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: UtilizationTest.java,v 1.28.2.2 2010/01/04 15:30:42 cwl Exp $
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.LogSource;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Test utilization counting of LNs.
+ */
+public class UtilizationTest extends TestCase {
+
+    private static final String DB_NAME = "foo";
+
+    private static final String OP_NONE = "op-none";
+    private static final String OP_CHECKPOINT = "op-checkpoint";
+    private static final String OP_RECOVER = "op-recover";
+    //private static final String[] OPERATIONS = { OP_NONE, };
+    //*
+    private static final String[] OPERATIONS = { OP_NONE,
+                                                 OP_CHECKPOINT,
+                                                 OP_RECOVER,
+                                                 OP_RECOVER };
+    //*/
+
+    /*
+     * Set fetchObsoleteSize=true only for the second OP_RECOVER test.
+     * We check that OP_RECOVER works with and without fetching; with
+     * fetching we also check that all LN sizes are counted.
+     */
+    private static final boolean[] FETCH_OBSOLETE_SIZE = { false,
+                                                           false,
+                                                           false,
+                                                           true };
+
+    private static final CheckpointConfig forceConfig = new CheckpointConfig();
+    static {
+        forceConfig.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private DatabaseImpl dbImpl;
+    private boolean dups = false;
+    private DatabaseEntry keyEntry = new DatabaseEntry();
+    private DatabaseEntry dataEntry = new DatabaseEntry();
+    private String operation;
+    private long lastFileSeen;
+    private boolean fetchObsoleteSize;
+    private boolean truncateOrRemoveDone;
+
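+    /**
+     * Builds a suite that runs every test once per OPERATIONS entry,
+     * pairing each run with the corresponding FETCH_OBSOLETE_SIZE flag.
+     */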
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        for (int i = 0; i < OPERATIONS.length; i += 1) {
+            TestSuite suite = new TestSuite(UtilizationTest.class);
+            Enumeration e = suite.tests();
+            while (e.hasMoreElements()) {
+                UtilizationTest test = (UtilizationTest) e.nextElement();
+                test.init(OPERATIONS[i], FETCH_OBSOLETE_SIZE[i]);
+                allTests.addTest(test);
+            }
+        }
+        return allTests;
+    }
+
+    public UtilizationTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    private void init(String operation, boolean fetchObsoleteSize) {
+        this.operation = operation;
+        this.fetchObsoleteSize = fetchObsoleteSize;
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(operation +
+                (fetchObsoleteSize ? "-fetch" : "") +
+                ':' + getName());
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
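+        /* Null references so the test harness does not retain test state. */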
+        db = null;
+        dbImpl = null;
+        env = null;
+        envImpl = null;
+        envHome = null;
+        keyEntry = null;
+        dataEntry = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(config);
+        config.setTransactional(true);
+        config.setTxnNoSync(true);
+        config.setAllowCreate(true);
+        /* Do not run the daemons. */
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        /* Use a tiny log file size to write one LN per file. */
+        config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                              Integer.toString(64));
+        /* Don't use NIO direct buffers or we run out of memory. */
+        config.setConfigParam
+            (EnvironmentParams.LOG_DIRECT_NIO.getName(), "false");
+
+        /* Obsolete LN size counting is optional per test. */
+        if (fetchObsoleteSize) {
+            config.setConfigParam
+                (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(),
+                 "true");
+        }
+
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /* Speed up this test, which uses lots of very small files. */
+        envImpl.getFileManager().setSyncAtFileEnd(false);
+
+        openDb();
+    }
+
+    /**
+     * Opens the database.
+     */
+    private void openDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv(boolean doCheckpoint)
+        throws DatabaseException {
+
+        /*
+         * We pass expectAccurateDbUtilization as false when
+         * truncateOrRemoveDone, because the database utilization info for that
+         * database is now gone.
+         */
+        VerifyUtils.verifyUtilization
+            (envImpl,
+             true, // expectAccurateObsoleteLNCount
+             expectAccurateObsoleteLNSize(),
+             !truncateOrRemoveDone); // expectAccurateDbUtilization
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            envImpl.close(doCheckpoint);
+            env = null;
+        }
+    }
+
+    public void testReuseSlotAfterDelete()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert and delete without compression to create a knownDeleted slot. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doDelete(0, txn);
+        txn.commit();
+
+        /* Insert key 0 to reuse the knownDeleted slot. */
+        txn = env.beginTransaction(null, null);
+        long file2 = doPut(0, txn);
+        /* Delete and insert to reuse deleted slot in same txn. */
+        long file3 = doDelete(0, txn);
+        long file4 = doPut(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+        expectObsolete(file2, true);
+        expectObsolete(file3, true);
+        expectObsolete(file4, false);
+
+        closeEnv(true);
+    }
+
+    public void testReuseKnownDeletedSlot()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and abort to create a knownDeleted slot.  */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        txn.abort();
+
+        /* Insert key 0 to reuse the knownDeleted slot. */
+        txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        /* Verify that file0 is still obsolete. */
+        expectObsolete(file0, true);
+        expectObsolete(file1, false);
+
+        closeEnv(true);
+    }
+
+    public void testReuseKnownDeletedSlotAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and abort to create a knownDeleted slot.  */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        txn.abort();
+
+        /* Insert key 0 to reuse the knownDeleted slot, and abort. */
+        txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        /* Verify that file0 is still obsolete. */
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testReuseKnownDeletedSlotDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert {0, 2} and abort to create a knownDeleted slot. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn); // 3rd LN
+        long file4 = file3 + 1;        // DupCountLN
+        txn.abort();
+
+        /* Insert {0, 2} to reuse the knownDeleted slot. */
+        txn = env.beginTransaction(null, null);
+        long file5 = doPut(0, 2, txn); // 4th LN
+        long file6 = file5 + 1;        // DupCountLN
+        txn.commit();
+        performRecoveryOperation();
+
+        /* Verify that file3 is still obsolete. */
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, false);
+        expectObsolete(file6, false);
+
+        closeEnv(true);
+    }
+
+    public void testReuseKnownDeletedSlotDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert {0, 2} and abort to create a knownDeleted slot. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn); // 3rd LN
+        long file4 = file3 + 1;        // DupCountLN
+        txn.abort();
+
+        /* Insert {0, 2} to reuse the knownDeleted slot, then abort. */
+        txn = env.beginTransaction(null, null);
+        long file5 = doPut(0, 2, txn); // 4th LN
+        long file6 = file5 + 1;        // DupCountLN
+        txn.abort();
+        performRecoveryOperation();
+
+        /* Verify that file3 is still obsolete. */
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, true);
+        expectObsolete(file6, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsert()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0. */
+        long file0 = doPut(0, true);
+        performRecoveryOperation();
+
+        /* Expect that LN is not obsolete. */
+        FileSummary fileSummary = getFileSummary(file0);
+        assertEquals(1, fileSummary.totalLNCount);
+        assertEquals(0, fileSummary.obsoleteLNCount);
+        DbFileSummary dbFileSummary = getDbFileSummary(file0);
+        assertEquals(1, dbFileSummary.totalLNCount);
+        assertEquals(0, dbFileSummary.obsoleteLNCount);
+
+        closeEnv(true);
+    }
+
+    public void testInsertAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0. */
+        long file0 = doPut(0, false);
+        performRecoveryOperation();
+
+        /* Expect that LN is obsolete. */
+        FileSummary fileSummary = getFileSummary(file0);
+        assertEquals(1, fileSummary.totalLNCount);
+        assertEquals(1, fileSummary.obsoleteLNCount);
+        DbFileSummary dbFileSummary = getDbFileSummary(file0);
+        assertEquals(1, dbFileSummary.totalLNCount);
+        assertEquals(1, dbFileSummary.obsoleteLNCount);
+
+        closeEnv(true);
+    }
+
+    public void testInsertDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert key 0 and a dup. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn);
+        long file3 = doPut(0, 1, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        /*
+         * The dup tree is created on 2nd insert.  In between the two
+         * DupCountLNs are two INs.
+         */
+        long file1 = file0 + 1; // DupCountLN (provisional)
+        long file2 = file1 + 3; // DupCountLN (non-provisional)
+        assertEquals(file3, file2 + 1); // new LN
+
+        expectObsolete(file0, false); // 1st LN
+        expectObsolete(file1, true);  // 1st DupCountLN
+        expectObsolete(file2, false); // 2nd DupCountLN
+        expectObsolete(file3, false); // 2nd LN
+
+        closeEnv(true);
+    }
+
+    public void testInsertDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert key 0 and a dup. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn);
+        long file3 = doPut(0, 1, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        /*
+         * The dup tree is created on 2nd insert.  In between the two
+         * DupCountLNs are two INs.
+         */
+        long file1 = file0 + 1; // DupCountLN (provisional)
+        long file2 = file1 + 3; // DupCountLN (non-provisional)
+        assertEquals(file3, file2 + 1); // new LN
+
+        expectObsolete(file0, true);  // 1st LN
+        expectObsolete(file1, false); // 1st DupCountLN
+        expectObsolete(file2, true);  // 2nd DupCountLN
+        expectObsolete(file3, true);  // 2nd LN
+
+        closeEnv(true);
+    }
+
+    public void testUpdate()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update key 0. */
+        long file1 = doPut(0, true);
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, false);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update key 0 and abort. */
+        long file1 = doPut(0, false);
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update {0, 0}. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 0, txn); // 3rd LN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, false);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update {0, 0} and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 0, txn); // 3rd LN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+
+        closeEnv(true);
+    }
+
+    public void testDelete()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Delete key 0. */
+        long file1 = doDelete(0, true);
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testDeleteAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Delete key 0 and abort. */
+        long file1 = doDelete(0, false);
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testDeleteDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Delete {0, 0}. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doDelete(0, 0, txn); // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, false);
+
+        closeEnv(true);
+    }
+
+    public void testDeleteDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Delete {0, 0} and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doDelete(0, 0, txn); // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsertUpdate()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert and update key 0. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doPut(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, false);
+
+        closeEnv(true);
+    }
+
+    public void testInsertUpdateAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert and update key 0 and abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doPut(0, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsertUpdateDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert and update {0, 2}. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn);    // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        long file5 = doUpdate(0, 2, txn); // 4th LN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, false);
+        expectObsolete(file5, false);
+
+        closeEnv(true);
+    }
+
+    public void testInsertUpdateDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert and update {0, 2} and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn);    // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        long file5 = doUpdate(0, 2, txn); // 4th LN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsertDelete()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert and delete key 0. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doDelete(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsertDeleteAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert and delete key 0 and abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doDelete(0, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+
+        closeEnv(true);
+    }
+
+    public void testInsertDeleteDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert and delete {0, 2}. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn);    // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        long file5 = doDelete(0, 2, txn); // 4th LN
+        long file6 = file5 + 1;           // DupCountLN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, true);
+        expectObsolete(file6, false);
+
+        closeEnv(true);
+    }
+
+    public void testInsertDeleteDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Insert and delete {0, 2} and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doPut(0, 2, txn);    // 3rd LN
+        long file4 = file3 + 1;           // DupCountLN
+        long file5 = doDelete(0, 2, txn); // 4th LN
+        long file6 = file5 + 1;           // DupCountLN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, true);
+        expectObsolete(file6, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateUpdate()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update key 0 twice. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        long file2 = doPut(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+        expectObsolete(file2, false);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateUpdateAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update key 0 twice and abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        long file2 = doPut(0, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateUpdateDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update {0, 1} twice. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 1, txn); // 3rd LN
+        long file4 = doUpdate(0, 1, txn); // 4th LN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, true);
+        expectObsolete(file3, true);
+        expectObsolete(file4, false);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateUpdateDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update {0, 1} twice and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 1, txn); // 3rd LN
+        long file4 = doUpdate(0, 1, txn); // 4th LN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDelete()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update and delete key 0. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        long file2 = doDelete(0, txn);
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, true);
+        expectObsolete(file1, true);
+        expectObsolete(file2, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDeleteAbort()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert key 0 and checkpoint. */
+        long file0 = doPut(0, true);
+        env.checkpoint(forceConfig);
+
+        /* Update and delete key 0 and abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file1 = doPut(0, txn);
+        long file2 = doDelete(0, txn);
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, true);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDeleteDup()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update and delete {0, 1}. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 1, txn); // 3rd LN
+        long file4 = doDelete(0, 1, txn); // 4th LN
+        long file5 = file4 + 1;           // DupCountLN
+        txn.commit();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, true);
+        expectObsolete(file2, true);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, false);
+
+        closeEnv(true);
+    }
+
+    public void testUpdateDeleteDupAbort()
+        throws DatabaseException {
+
+        dups = true;
+        openEnv();
+
+        /* Insert two key 0 dups and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, 0, txn); // 1st LN
+        long file2 = doPut(0, 1, txn); // 2nd LN
+        long file1 = file2 - 1;        // DupCountLN
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Update and delete {0, 1} and abort. */
+        txn = env.beginTransaction(null, null);
+        long file3 = doUpdate(0, 1, txn); // 3rd LN
+        long file4 = doDelete(0, 1, txn); // 4th LN
+        long file5 = file4 + 1;           // DupCountLN
+        txn.abort();
+        performRecoveryOperation();
+
+        expectObsolete(file0, false);
+        expectObsolete(file1, false);
+        expectObsolete(file2, false);
+        expectObsolete(file3, true);
+        expectObsolete(file4, true);
+        expectObsolete(file5, true);
+
+        closeEnv(true);
+    }
+
+    public void testTruncate()
+        throws DatabaseException {
+
+        truncateOrRemove(true, true);
+    }
+
+    public void testTruncateAbort()
+        throws DatabaseException {
+
+        truncateOrRemove(true, false);
+    }
+
+    public void testRemove()
+        throws DatabaseException {
+
+        truncateOrRemove(false, true);
+    }
+
+    public void testRemoveAbort()
+        throws DatabaseException {
+
+        truncateOrRemove(false, false);
+    }
+
+    /**
+     * Inserts three records, truncates or removes the database, commits or
+     * aborts, and expects the three LNs to be obsolete iff we committed.
+     */
+    private void truncateOrRemove(boolean truncate, boolean commit)
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Insert 3 keys and checkpoint. */
+        Transaction txn = env.beginTransaction(null, null);
+        long file0 = doPut(0, txn);
+        long file1 = doPut(1, txn);
+        long file2 = doPut(2, txn);
+        txn.commit();
+        env.checkpoint(forceConfig);
+
+        /* Truncate. */
+        txn = env.beginTransaction(null, null);
+        if (truncate) {
+            db.close();
+            db = null;
+            long count = env.truncateDatabase(txn, DB_NAME,
+                                              true /* returnCount */);
+            assertEquals(3, count);
+        } else {
+            db.close();
+            db = null;
+            env.removeDatabase(txn, DB_NAME);
+        }
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+        truncateOrRemoveDone = true;
+        performRecoveryOperation();
+
+        /*
+         * Do not check DbFileSummary when we truncate/remove, since the old
+         * DatabaseImpl is gone.
+         */
+        expectObsolete(file0, commit, !commit /*checkDbFileSummary*/);
+        expectObsolete(file1, commit, !commit /*checkDbFileSummary*/);
+        expectObsolete(file2, commit, !commit /*checkDbFileSummary*/);
+
+        closeEnv(true);
+    }
+
+    /*
+     * The xxxForceTreeWalk tests set the DatabaseImpl
+     * forceTreeWalkForTruncateAndRemove field to true, which will force a walk
+     * of the tree to count utilization during truncate/remove, rather than
+     * using the per-database info.  This is used to test the "old technique"
+     * for counting utilization, which is now used only if the database was
+     * created prior to log version 6.
+     */
+
+    public void testTruncateForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncate();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testTruncateAbortForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testTruncateAbort();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemove();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    public void testRemoveAbortForceTreeWalk()
+        throws Exception {
+
+        DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+        try {
+            testRemoveAbort();
+        } finally {
+            DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+        }
+    }
+
+    private void expectObsolete(long file, boolean obsolete)
+        throws DatabaseException {
+
+        expectObsolete(file, obsolete, true /*checkDbFileSummary*/);
+    }
+
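+    /**
+     * Asserts that the single LN written to the given file is (or is not)
+     * counted obsolete in the global FileSummary and, when
+     * checkDbFileSummary is true, in the per-database DbFileSummary; when
+     * the obsolete size is expected to be counted, it is also checked
+     * against the actual LN size.
+     */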
+    private void expectObsolete(long file,
+                                boolean obsolete,
+                                boolean checkDbFileSummary)
+        throws DatabaseException {
+
+        FileSummary fileSummary = getFileSummary(file);
+        assertEquals("totalLNCount",
+                     1, fileSummary.totalLNCount);
+        assertEquals("obsoleteLNCount",
+                     obsolete ? 1 : 0, fileSummary.obsoleteLNCount);
+
+        DbFileSummary dbFileSummary = getDbFileSummary(file);
+        if (checkDbFileSummary) {
+            assertEquals("db totalLNCount",
+                         1, dbFileSummary.totalLNCount);
+            assertEquals("db obsoleteLNCount",
+                         obsolete ? 1 : 0, dbFileSummary.obsoleteLNCount);
+        }
+
+        if (obsolete) {
+            if (expectAccurateObsoleteLNSize()) {
+                assertTrue(fileSummary.obsoleteLNSize > 0);
+                assertEquals(1, fileSummary.obsoleteLNSizeCounted);
+                if (checkDbFileSummary) {
+                    assertTrue(dbFileSummary.obsoleteLNSize > 0);
+                    assertEquals(1, dbFileSummary.obsoleteLNSizeCounted);
+                }
+            }
+            /* If we counted the size, make sure it is the actual LN size. */
+            if (fileSummary.obsoleteLNSize > 0) {
+                assertEquals(getLNSize(file), fileSummary.obsoleteLNSize);
+            }
+            if (checkDbFileSummary) {
+                if (dbFileSummary.obsoleteLNSize > 0) {
+                    assertEquals(getLNSize(file), dbFileSummary.obsoleteLNSize);
+                }
+                assertEquals(fileSummary.obsoleteLNSize > 0,
+                             dbFileSummary.obsoleteLNSize > 0);
+            }
+        } else {
+            assertEquals(0, fileSummary.obsoleteLNSize);
+            assertEquals(0, fileSummary.obsoleteLNSizeCounted);
+            if (checkDbFileSummary) {
+                assertEquals(0, dbFileSummary.obsoleteLNSize);
+                assertEquals(0, dbFileSummary.obsoleteLNSizeCounted);
+            }
+        }
+    }
+
+    /**
+     * If an LN is obsolete, expect the size to be counted unless we ran
+     * recovery and we did NOT configure fetchObsoleteSize=true.  In that
+     * case, the size may or may not be counted depending on how the redo
+     * or undo was processed during recovery.
+     */
+    private boolean expectAccurateObsoleteLNSize() {
+        return fetchObsoleteSize || !OP_RECOVER.equals(operation);
+    }
+
+    private long doPut(int key, boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        long file = doPut(key, txn);
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+        return file;
+    }
+
+    private long doPut(int key, Transaction txn)
+        throws DatabaseException {
+
+        return doPut(key, key, txn);
+    }
+
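+    /**
+     * Writes the given key/data pair with a cursor and returns the log file
+     * of the LSN at the cursor position (the newly written LN).
+     */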
+    private long doPut(int key, int data, Transaction txn)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, null);
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+        cursor.put(keyEntry, dataEntry);
+        long file = getFile(cursor);
+        cursor.close();
+        return file;
+    }
+
+    private long doUpdate(int key, int data, Transaction txn)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, null);
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchBoth(keyEntry, dataEntry, null));
+        cursor.putCurrent(dataEntry);
+        long file = getFile(cursor);
+        cursor.close();
+        return file;
+    }
+
+    private long doDelete(int key, boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        long file = doDelete(key, txn);
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+        return file;
+    }
+
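+    /**
+     * Deletes the record for the given key with a cursor and returns the
+     * log file of the LSN at the cursor position afterwards (the newly
+     * logged deletion).
+     */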
+    private long doDelete(int key, Transaction txn)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, null);
+        IntegerBinding.intToEntry(key, keyEntry);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(keyEntry, dataEntry, null));
+        cursor.delete();
+        long file = getFile(cursor);
+        cursor.close();
+        return file;
+    }
+
+    private long doDelete(int key, int data, Transaction txn)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(txn, null);
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchBoth(keyEntry, dataEntry, null));
+        cursor.delete();
+        long file = getFile(cursor);
+        cursor.close();
+        return file;
+    }
+
+    /**
+     * Checkpoint, recover, or do neither, depending on the configured
+     * operation for this test.  Always compress to count deleted LNs.
+     */
+    private void performRecoveryOperation()
+        throws DatabaseException {
+
+        if (OP_NONE.equals(operation)) {
+            /* Compress to count deleted LNs. */
+            env.compress();
+        } else if (OP_CHECKPOINT.equals(operation)) {
+            /* Compress before checkpointing to count deleted LNs. */
+            env.compress();
+            env.checkpoint(forceConfig);
+        } else if (OP_RECOVER.equals(operation)) {
+            closeEnv(false);
+            openEnv();
+            /* Compress after recovery to count deleted LNs. */
+            env.compress();
+        } else {
+            assert false : operation;
+        }
+    }
+
+    /**
+     * Gets the file of the LSN at the cursor position, using internal methods.
+     * Also checks that the file number is greater than the last file returned,
+     * to ensure that we're filling a file every time we write.
+     */
+    private long getFile(Cursor cursor)
+        throws DatabaseException {
+
+        long file = CleanerTestUtils.getLogFile(this, cursor);
+        assert file > lastFileSeen;
+        lastFileSeen = file;
+        return file;
+    }
+
+    /**
+     * Returns the utilization summary for a given log file.
+     */
+    private FileSummary getFileSummary(long file)
+        throws DatabaseException {
+
+        return (FileSummary)
+            envImpl.getUtilizationProfile()
+                   .getFileSummaryMap(true)
+                   .get(new Long(file));
+    }
+
+    /**
+     * Returns the per-database utilization summary for a given log file.
+     */
+    private DbFileSummary getDbFileSummary(long file) {
+        return dbImpl.getDbFileSummary
+            (new Long(file), false /*willModify*/);
+    }
+
+    /**
+     * Peek into the file to get the total size of the first entry past the
+     * file header, which is known to be the LN log entry.
+     */
+    private int getLNSize(long file)
+        throws DatabaseException {
+
+        try {
+            long offset = FileManager.firstLogEntryOffset();
+            long lsn = DbLsn.makeLsn(file, offset);
+            LogManager lm = envImpl.getLogManager();
+            LogSource src = lm.getLogSource(lsn);
+            ByteBuffer buf = src.getBytes(offset);
+            LogEntryHeader header =
+                new LogEntryHeader(null,   // envImpl, only needed for
+                                   buf,    //      error reporting
+                                   false); // anticipateChecksumError
+            int size = header.getItemSize();
+            src.release();
+            return size + header.getSize();
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/cleaner/migrate_f0.jdb b/test/com/sleepycat/je/cleaner/migrate_f0.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..17f43dfaae6d98bcd8c833588096c45ff48f9d4e
Binary files /dev/null and b/test/com/sleepycat/je/cleaner/migrate_f0.jdb differ
diff --git a/test/com/sleepycat/je/cleaner/migrate_f1.jdb b/test/com/sleepycat/je/cleaner/migrate_f1.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..6b16ee1c6e8c870a134ce2db71646e1c46f685f6
Binary files /dev/null and b/test/com/sleepycat/je/cleaner/migrate_f1.jdb differ
diff --git a/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb b/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..285e240e4f6b8af89d4f2a4e8e3299e992d17644
Binary files /dev/null and b/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb differ
diff --git a/test/com/sleepycat/je/config/EnvironmentParamsTest.java b/test/com/sleepycat/je/config/EnvironmentParamsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..271687ee8f4a71803f42109001b68d60d2801a58
--- /dev/null
+++ b/test/com/sleepycat/je/config/EnvironmentParamsTest.java
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvironmentParamsTest.java,v 1.15.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.config;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.EnvironmentConfig;
+
+public class EnvironmentParamsTest extends TestCase {
+
+    private IntConfigParam intParam =
+        new IntConfigParam("param.int",
+			   new Integer(2),
+			   new Integer(10),
+			   new Integer(5),
+                           false, // mutable
+			   false);// for replication
+
+    private LongConfigParam longParam =
+        new LongConfigParam("param.long",
+			    new Long(2),
+			    new Long(10),
+			    new Long(5),
+                            false, // mutable
+			    false);// for replication
+
+    private ConfigParam mvParam =
+	new ConfigParam("some.mv.param.#", null, true /* mutable */,
+			false /* for replication */);
+
+    /**
+     * Test param validation.
+     */
+    public void testValidation() {
+	assertTrue(mvParam.isMultiValueParam());
+
+        try {
+		new ConfigParam(null, "foo", false /* mutable */,
+				false /* for replication */);
+            fail("should disallow null name");
+        } catch (IllegalArgumentException e) {
+            // expected.
+        }
+
+        /* Test bounds: the valid range is [2,10], so these are all invalid and should fail. */
+        checkValidateParam(intParam, "1");
+        checkValidateParam(intParam, "11");
+        checkValidateParam(longParam, "1");
+        checkValidateParam(longParam, "11");
+    }
+
+    /**
+     * Check that an invalid parameter isn't mistaken for a multivalue
+     * param.
+     */
+    public void testInvalidVsMultiValue() {
+	try {
+	    EnvironmentConfig envConfig = new EnvironmentConfig();
+	    envConfig.setConfigParam("je.maxMemory.stuff", "true");
+            fail("Should throw exception");
+	} catch (IllegalArgumentException IAE) {
+	    // expected
+	}
+    }
+
+    /* Helper to catch expected exceptions */
+    private void checkValidateParam(ConfigParam param, String value) {
+        try {
+            param.validateValue(value);
+            fail("Should throw exception");
+        } catch (IllegalArgumentException e) {
+            // expect this exception
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/APILockoutTest.java b/test/com/sleepycat/je/dbi/APILockoutTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..816695c83165d8020bb9211269728acd9f58610b
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/APILockoutTest.java
@@ -0,0 +1,383 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: APILockoutTest.java,v 1.10.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.APILockedException;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.util.TestUtils;
+
+public class APILockoutTest extends TestCase {
+    private File envHome;
+
+    private JUnitThread tester1 = null;
+    private JUnitThread tester2 = null;
+
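+    /*
+     * Simple handshake between the main thread and the tester threads: one
+     * side bumps 'flag' to signal a stage and the other spins on
+     * Thread.yield() until it sees the new value.
+     */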
+    private volatile int flag = 0;
+
+    public APILockoutTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp() throws IOException, DatabaseException {
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown() throws IOException, DatabaseException {
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testBasic()
+	throws Throwable {
+
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	envConfig.setTransactional(true);
+	envConfig.setAllowCreate(true);
+	envConfig.setConfigParam
+	    (EnvironmentParams.ENV_LOCKOUT_TIMEOUT.getName(), "1000");
+	Environment env = new Environment(envHome, envConfig);
+	final EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+	envImpl.setupAPILock();
+	envImpl.acquireAPIWriteLock(1, TimeUnit.SECONDS);
+
+	tester1 =
+	    new JUnitThread("testWait-Thread1") {
+		public void testBody()
+		    throws DatabaseException {
+
+		    BasicLocker bl = BasicLocker.
+			createBasicLocker(envImpl, false /*noWait*/,
+					  true /*noAPIReadLock*/);
+		    try {
+			envImpl.acquireAPIReadLock(bl);
+			fail("expected timeout");
+		    } catch (Exception E) {
+			assertTrue(E instanceof APILockedException);
+		    }
+		    bl.operationEnd(false);
+		    try {
+			bl = BasicLocker.
+			    createBasicLocker(envImpl,
+					      false /*noWait*/,
+					      false /*noAPIReadLock*/);
+			fail("expected timeout");
+		    } catch (Exception E) {
+			// expected
+		    }
+		    flag = 1;
+		}
+	    };
+
+	tester2 =
+	    new JUnitThread("testWait-Thread2") {
+		public void testBody()
+		    throws DatabaseException {
+
+		    while (flag < 2) {
+			Thread.yield();
+		    }
+		    BasicLocker bl =
+			BasicLocker.createBasicLocker(envImpl,
+						      false /*noWait*/,
+						      true /*noAPIReadLock*/);
+		    try {
+			envImpl.acquireAPIReadLock(bl);
+		    } catch (Exception E) {
+			E.printStackTrace();
+			fail("expected success");
+		    }
+
+		    envImpl.releaseAPIReadLock(bl);
+
+		    /* A second release should also succeed -- over-release is not checked. */
+		    try {
+			envImpl.releaseAPIReadLock(bl);
+		    } catch (IllegalMonitorStateException IMSE) {
+			fail("expected success");
+		    }
+		    bl.operationEnd(true);
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	/* Wait for acquireAPIReadLock to complete. */
+	while (flag < 1) {
+	    Thread.yield();
+	}
+
+	/*
+	 * Make sure that write locking thread (main) can't read lock, too.
+	 */
+	try {
+	    BasicLocker bl =
+		BasicLocker.createBasicLocker(envImpl, false /*noWait*/,
+					      true /*noAPIReadLock*/);
+	    envImpl.acquireAPIReadLock(bl);
+	    fail("expected exception");
+	} catch (DatabaseException DE) {
+	    /* ignore */
+	}
+
+	envImpl.releaseAPIWriteLock();
+	flag = 2;
+	tester1.finishTest();
+	tester2.finishTest();
+	try {
+
+	    /*
+	     * Expect an IllegalMonitorStateException saying that environment
+	     * is not currently locked.
+	     */
+	    envImpl.releaseAPIWriteLock();
+	    fail("expected exception");
+	} catch (IllegalMonitorStateException IMSE) {
+	    /* Ignore */
+	}
+	env.close();
+    }
+
+    enum BlockingOperation {
+	TRANSACTION, CURSOR_OPEN, TRANSACTION_WITH_CURSOR,
+	NON_TRANSACTIONAL_DB_PUT, TRANSACTIONAL_DB_PUT, CURSOR_PUT
+    };
+
+    public void testTransactionBlocking()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.TRANSACTION);
+    }
+
+    public void testCursorWithNullTransactionBlocking()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.CURSOR_OPEN);
+    }
+
+    public void testCursorWithTransactionBlocking()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.TRANSACTION_WITH_CURSOR);
+    }
+
+    public void testDbPutWithNullTransactionBlocking()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.NON_TRANSACTIONAL_DB_PUT);
+    }
+
+    public void testDbPutWithTransactionBlocking()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.TRANSACTIONAL_DB_PUT);
+    }
+
+    public void testCursorPut()
+	throws Throwable {
+
+	doBlockingTest(BlockingOperation.CURSOR_PUT);
+    }
+
+    private void doBlockingTest(final BlockingOperation operation)
+	throws Throwable {
+
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	envConfig.setTransactional(true);
+	envConfig.setAllowCreate(true);
+	envConfig.setConfigParam
+	    (EnvironmentParams.ENV_LOCKOUT_TIMEOUT.getName(), "1000");
+	final Environment env = new Environment(envHome, envConfig);
+	final EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+	DatabaseConfig dbConf = new DatabaseConfig();
+	dbConf.setTransactional(true);
+	dbConf.setAllowCreate(true);
+
+	envImpl.setupAPILock();
+
+	final Database db = env.openDatabase(null, "foo", dbConf);
+
+	envImpl.acquireAPIWriteLock(1, TimeUnit.SECONDS);
+
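+	/*
+	 * Stages signalled via 'flag': 1 = tester1 done with the locked-API
+	 * attempt; 2 = main released the write lock; 3 = tester1 completed
+	 * its operation; 4 = main done probing the write lock (transactional
+	 * cases only); 5 = tester1 cleaned up.
+	 */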
+	tester1 =
+	    new JUnitThread("testWait-Thread1") {
+		public void testBody()
+		    throws DatabaseException {
+
+		    Transaction txn = null;
+		    Cursor cursor = null;
+		    DatabaseEntry key = new DatabaseEntry();
+		    DatabaseEntry data = new DatabaseEntry();
+		    key.setData(new byte[] { 0, 1 });
+		    data.setData(new byte[] { 0, 1 });
+		    /* Try to do the operation while the API is locked.  Should fail. */
+		    try {
+			switch (operation) {
+			case CURSOR_OPEN:
+			    cursor = db.openCursor(null, null);
+			    break;
+
+			case TRANSACTION:
+			case TRANSACTION_WITH_CURSOR:
+			    txn = env.beginTransaction(null, null);
+			    break;
+
+			case NON_TRANSACTIONAL_DB_PUT:
+			    db.put(null, key, data);
+			    break;
+
+			case TRANSACTIONAL_DB_PUT:
+			case CURSOR_PUT:
+			    throw new DatabaseException("fake DE");
+			}
+			fail("expected timeout");
+		    } catch (DatabaseException DE) {
+			/* Ignore. */
+		    }
+
+		    flag = 1;
+
+		    /* Wait for main to unlock the API, then do operation. */
+		    while (flag < 2) {
+			Thread.yield();
+		    }
+		    try {
+			switch (operation) {
+			case CURSOR_OPEN:
+			    cursor = db.openCursor(null, null);
+			    break;
+
+			case TRANSACTION:
+			case TRANSACTION_WITH_CURSOR:
+			case CURSOR_PUT:
+			    txn = env.beginTransaction(null, null);
+			    if (operation ==
+				BlockingOperation.TRANSACTION_WITH_CURSOR ||
+				operation ==
+				BlockingOperation.CURSOR_PUT) {
+				cursor = db.openCursor(txn, null);
+				if (operation ==
+				    BlockingOperation.CURSOR_PUT) {
+				    cursor.put(key, data);
+				}
+			    }
+			    break;
+
+			case NON_TRANSACTIONAL_DB_PUT:
+			    db.put(null, key, data);
+			    break;
+
+			case TRANSACTIONAL_DB_PUT:
+			    txn = env.beginTransaction(null, null);
+			    db.put(txn, key, data);
+			}
+		    } catch (Exception E) {
+			fail("expected success");
+		    }
+
+		    /* Return control to main. */
+		    flag = 3;
+
+		    /* Wait for main to attempt lock on the API (and fail). */
+		    while (flag < 4) {
+			Thread.yield();
+		    }
+		    try {
+			switch (operation) {
+			case CURSOR_OPEN:
+			    cursor.close();
+			    break;
+
+			case TRANSACTION:
+			case TRANSACTION_WITH_CURSOR:
+			case TRANSACTIONAL_DB_PUT:
+			case CURSOR_PUT:
+			    if (operation ==
+				BlockingOperation.TRANSACTION_WITH_CURSOR ||
+				operation ==
+				BlockingOperation.CURSOR_PUT) {
+				cursor.close();
+			    }
+			    if (txn == null) {
+				fail("txn is null");
+			    }
+			    txn.abort();
+			    break;
+
+			case NON_TRANSACTIONAL_DB_PUT:
+			    /* Do nothing. */
+			    break;
+			}
+		    } catch (Exception E) {
+			fail("expected success");
+		    }
+
+		    flag = 5;
+		}
+	    };
+
+	tester1.start();
+	/* Wait for acquireAPIReadLock to complete. */
+	while (flag < 1) {
+	    Thread.yield();
+	}
+	envImpl.releaseAPIWriteLock();
+	flag = 2;
+
+	/* Wait for tester1 to begin a txn. */
+	while (flag < 3) {
+	    Thread.yield();
+	}
+
+	if (operation == BlockingOperation.TRANSACTION ||
+	    operation == BlockingOperation.TRANSACTION_WITH_CURSOR) {
+	    /* Attempt lock.  Should timeout. */
+	    try {
+		envImpl.acquireAPIWriteLock(1, TimeUnit.SECONDS);
+		fail("expected timeout.");
+	    } catch (DatabaseException DE) {
+		/* Ignore.  Expect timeout. */
+	    }
+	}
+
+	/* Back to tester1 to end the txn. */
+	flag = 4;
+	while (flag < 5) {
+	    Thread.yield();
+	}
+
+	/* Attempt lock.  Should complete. */
+	try {
+	    envImpl.acquireAPIWriteLock(1, TimeUnit.SECONDS);
+	} catch (DatabaseException DE) {
+	    fail("expected success.");
+	}
+
+	tester1.finishTest();
+	envImpl.releaseAPIWriteLock();
+	db.close();
+	env.close();
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/CodeCoverageTest.java b/test/com/sleepycat/je/dbi/CodeCoverageTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..2dd85871fec9695a4df83cf5491658ae5aaed9a8
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/CodeCoverageTest.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CodeCoverageTest.java,v 1.9.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Various unit tests for CursorImpl to enhance code coverage.
+ */
+public class CodeCoverageTest extends DbCursorTestBase {
+
+    public CodeCoverageTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    /**
+     * Test the internal CursorImpl.delete() deleted LN code.
+     */
+    public void testDeleteDeleted()
+        throws Throwable {
+
+        try {
+	    initEnv(false);
+            doSimpleCursorPuts();
+
+	    StringDbt foundKey = new StringDbt();
+	    StringDbt foundData = new StringDbt();
+
+	    OperationStatus status = cursor.getFirst(foundKey, foundData,
+						     LockMode.DEFAULT);
+	    assertEquals(OperationStatus.SUCCESS, status);
+
+	    cursor.delete();
+	    cursor.delete();
+
+	    /*
+	     * While we've got a cursor in hand, call CursorImpl.dumpToString()
+	     */
+	    DbInternal.getCursorImpl(cursor).dumpToString(true);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbConfigManagerTest.java b/test/com/sleepycat/je/dbi/DbConfigManagerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..847358a08e99c2300b91a364609c2d1761c88364
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbConfigManagerTest.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbConfigManagerTest.java,v 1.29.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class DbConfigManagerTest extends TestCase {
+
+    /**
+     * Test that parameter defaults work, that we can add and get
+     * parameters
+     */
+    public void testBasicParams()
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setCacheSize(2000);
+        DbConfigManager configManager = new DbConfigManager(envConfig);
+
+        /*
+         * Longs: The config manager should return the value for an
+         * explicitly set param and the default for one not set.
+         */
+        assertEquals(2000,
+                     configManager.getLong(EnvironmentParams.MAX_MEMORY));
+        assertEquals(EnvironmentParams.ENV_RECOVERY.getDefault(),
+                     configManager.get(EnvironmentParams.ENV_RECOVERY));
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java b/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..06749526f30c60a851557f8754a2cadba4f85587
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java
@@ -0,0 +1,426 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorDeleteTest.java,v 1.39.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.IOException;
+import java.util.Hashtable;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Various unit tests for CursorImpl.delete().
+ */
+public class DbCursorDeleteTest extends DbCursorTestBase {
+
+    public DbCursorDeleteTest()
+        throws DatabaseException {
+        super();
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order,
+     * delete all the ones beginning with 'f', and then make sure they were
+     * really deleted.
+     */
+    public void testSimpleDelete()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+
+	int deletedEntries = 0;
+	DataWalker dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    try {
+			if (foundKey.charAt(0) == 'f') {
+			    cursor.delete();
+			    deletedEntries++;
+			}
+		    } catch (DatabaseException DBE) {
+			System.out.println("DBE " + DBE);
+		    }
+		}
+	    };
+	dw.walkData();
+	deletedEntries = dw.deletedEntries;
+	dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertTrue(foundKey.compareTo(prevKey) >= 0);
+		    assertTrue(foundKey.charAt(0) != 'f');
+		    prevKey = foundKey;
+		}
+	    };
+	dw.walkData();
+	assertTrue(dw.nEntries == simpleKeyStrings.length - deletedEntries);
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order.
+     * For each one: delete, getCurrent (make sure failure), reinsert
+     * (success), delete (success).  Once iterated through all of them,
+     * reinsert and make sure successful.
+     */
+    public void testSimpleDeleteInsert()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+	DataWalker dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    try {
+			cursor.delete();
+			deletedEntries++;
+			assertEquals(OperationStatus.KEYEMPTY,
+				     cursor.getCurrent
+				     (new StringDbt(), new StringDbt(),
+				      LockMode.DEFAULT));
+			StringDbt newKey = new StringDbt(foundKey);
+			StringDbt newData = new StringDbt(foundData);
+			assertEquals(OperationStatus.SUCCESS,
+                                     cursor2.putNoOverwrite(newKey, newData));
+			assertEquals(OperationStatus.SUCCESS,
+                                     cursor2.delete());
+		    } catch (DatabaseException DBE) {
+			System.out.println("DBE " + DBE);
+		    }
+		}
+	    };
+
+	dw.walkData();
+	doSimpleCursorPuts();
+
+	dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertEquals(foundData,
+				 (String) simpleDataMap.get(foundKey));
+		    simpleDataMap.remove(foundKey);
+		}
+	    };
+	dw.walkData();
+	assertTrue(simpleDataMap.size() == 0);
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order.
+     * For each one: delete, getCurrent (make sure failure), putCurrent
+     * (make sure failure).  Once iterated through all of them, reinsert and
+     * make sure successful.
+     */
+    public void testSimpleDeletePutCurrent()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+	DataWalker dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    try {
+			cursor.delete();
+			deletedEntries++;
+			assertEquals(OperationStatus.KEYEMPTY,
+				     cursor.getCurrent
+				     (new StringDbt(), new StringDbt(),
+				      LockMode.DEFAULT));
+			StringDbt newData = new StringDbt(foundData);
+			assertEquals(OperationStatus.NOTFOUND,
+                                     cursor.putCurrent(newData));
+		    } catch (DatabaseException DBE) {
+			System.out.println("DBE " + DBE);
+		    }
+		}
+	    };
+
+	dw.walkData();
+	doSimpleCursorPuts();
+
+	dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertEquals(foundData,
+				 (String) simpleDataMap.get(foundKey));
+		    simpleDataMap.remove(foundKey);
+		}
+	    };
+	dw.walkData();
+	assertTrue(simpleDataMap.size() == 0);
+    }
+
+    /**
+     * Similar to above test, but there was some question about whether this
+     * tests new functionality or not.  Insert k1/d1 and d1/k1.  Iterate
+     * through the data and delete k1/d1.  Reinsert k1/d1 and make sure it
+     * inserts ok.
+     */
+    public void testSimpleInsertDeleteInsert()
+	throws DatabaseException {
+
+        initEnv(true);
+	StringDbt key = new StringDbt("k1");
+	StringDbt data1 = new StringDbt("d1");
+
+	assertEquals(OperationStatus.SUCCESS,
+                     putAndVerifyCursor(cursor, key, data1, true));
+	assertEquals(OperationStatus.SUCCESS,
+                     putAndVerifyCursor(cursor, data1, key, true));
+
+	DataWalker dw = new DataWalker(null) {
+		void perData(String foundKey, String foundData)
+		    throws DatabaseException {
+
+		    if (foundKey.equals("k1")) {
+			if (cursor.delete() == OperationStatus.SUCCESS) {
+			    deletedEntries++;
+			}
+		    }
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+
+	assertEquals(OperationStatus.SUCCESS,
+                     putAndVerifyCursor(cursor, key, data1, true));
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order,
+     * delete all of them and then make sure they were really deleted.
+     */
+    public void testSimpleDeleteAll()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+
+	int deletedEntries = 0;
+	DataWalker dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    try {
+			cursor.delete();
+			deletedEntries++;
+			assertEquals(OperationStatus.KEYEMPTY,
+				     cursor.getCurrent
+				     (new StringDbt(), new StringDbt(),
+				      LockMode.DEFAULT));
+		    } catch (DatabaseException DBE) {
+			System.out.println("DBE " + DBE);
+		    }
+		}
+	    };
+	dw.walkData();
+	deletedEntries = dw.deletedEntries;
+	dw = new DataWalker(simpleDataMap) {
+		void perData(String foundKey, String foundData) {
+		    fail("didn't delete everything");
+		}
+	    };
+	dw.walkData();
+	assertTrue(dw.nEntries == 0);
+	assertTrue(simpleKeyStrings.length == deletedEntries);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order deleting anything that has 'F' as the second character.
+     * Iterate through the tree again and make sure they are all correctly
+     * deleted.
+     */
+    public void testLargeDelete()
+        throws IOException, DatabaseException {
+
+        tearDown();
+	for (int i = 0; i < N_ITERS; i++) {
+	    setUp();
+            initEnv(false);
+	    doLargeDelete();
+            tearDown();
+	}
+    }
+
+    /**
+     * Helper routine for testLargeDeleteAll (below).
+     */
+    private void doLargeDeleteAll()
+	throws DatabaseException {
+
+	Hashtable dataMap = new Hashtable();
+	int n_keys = 2000;
+	doLargePut(dataMap, /* N_KEYS */ n_keys);
+
+	int deletedEntries = 0;
+	DataWalker dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    cursor.delete();
+		    deletedEntries++;
+		    assertEquals(OperationStatus.KEYEMPTY,
+				 cursor.getCurrent
+				 (new StringDbt(), new StringDbt(),
+				  LockMode.DEFAULT));
+		}
+	    };
+	dw.walkData();
+	deletedEntries = dw.deletedEntries;
+	dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData) {
+		    fail("didn't delete everything");
+		}
+	    };
+	dw.walkData();
+	assertTrue(dw.nEntries == 0);
+	assertTrue(/* N_KEYS */ n_keys == deletedEntries);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order deleting all entries.  Iterate through the tree again
+     * and make sure they are all correctly deleted.
+     */
+    public void testLargeDeleteAll()
+        throws IOException, DatabaseException {
+
+        tearDown();
+	for (int i = 0; i < N_ITERS; i++) {
+	    setUp();
+            initEnv(false);
+	    doLargeDeleteAll();
+	    tearDown();
+	}
+    }
+
+    /**
+     * Helper routine for testLargeDelete (above).
+     */
+    private void doLargeDelete()
+	throws DatabaseException {
+
+	Hashtable dataMap = new Hashtable();
+	doLargePut(dataMap, N_KEYS);
+
+	int deletedEntries = 0;
+	DataWalker dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    if (foundKey.charAt(1) == 'F') {
+			cursor.delete();
+			deletedEntries++;
+		    }
+		}
+	    };
+	dw.walkData();
+	deletedEntries = dw.deletedEntries;
+	dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertTrue(foundKey.compareTo(prevKey) >= 0);
+		    assertTrue(foundKey.charAt(1) != 'F');
+		    prevKey = foundKey;
+		}
+	    };
+	dw.walkData();
+	assertTrue(dw.nEntries == N_KEYS - deletedEntries);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order deleting the first entry.  Iterate through the tree
+     * again and make sure only the first entry is deleted.
+     */
+    public void testLargeDeleteFirst()
+        throws IOException, DatabaseException {
+
+        tearDown();
+	for (int i = 0; i < N_ITERS; i++) {
+	    setUp();
+            initEnv(false);
+	    doLargeDeleteFirst();
+	    tearDown();
+	}
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeDeleteFirst()
+	throws DatabaseException {
+
+	Hashtable dataMap = new Hashtable();
+	doLargePut(dataMap, N_KEYS);
+
+	DataWalker dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    if (deletedEntry == null) {
+			deletedEntry = foundKey;
+			cursor.delete();
+		    }
+		}
+	    };
+	dw.walkData();
+
+	String deletedEntry = dw.deletedEntry;
+
+	dw = new DataWalker(dataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertFalse(deletedEntry.equals(foundKey));
+		}
+	    };
+	dw.deletedEntry = deletedEntry;
+	dw.walkData();
+	assertTrue(dw.nEntries == N_KEYS - 1);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order deleting the last entry.  Iterate through the tree again
+     * and make sure only the last entry is deleted.
+     */
+    public void testLargeDeleteLast()
+        throws IOException, DatabaseException {
+
+        tearDown();
+	for (int i = 0; i < N_ITERS; i++) {
+	    setUp();
+            initEnv(false);
+	    doLargeDeleteLast();
+	    tearDown();
+	}
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeDeleteLast()
+	throws DatabaseException {
+
+	Hashtable dataMap = new Hashtable();
+	doLargePut(dataMap, N_KEYS);
+
+	DataWalker dw = new BackwardsDataWalker(dataMap) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    if (deletedEntry == null) {
+			deletedEntry = foundKey;
+			cursor.delete();
+		    }
+		}
+	    };
+	dw.walkData();
+
+	String deletedEntry = dw.deletedEntry;
+
+	dw = new BackwardsDataWalker(dataMap) {
+		void perData(String foundKey, String foundData) {
+		    assertFalse(deletedEntry.equals(foundKey));
+		}
+	    };
+	dw.deletedEntry = deletedEntry;
+	dw.walkData();
+	assertTrue(dw.nEntries == N_KEYS - 1);
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorDupTest.java b/test/com/sleepycat/je/dbi/DbCursorDupTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8ec8547f84bda182f2f64332cc8de4ae033b289
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorDupTest.java
@@ -0,0 +1,194 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorDupTest.java,v 1.33.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.IOException;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Various unit tests for CursorImpl.dup().
+ */
+public class DbCursorDupTest extends DbCursorTestBase {
+
+    public DbCursorDupTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    public void testCursorDupAndCloseDb()
+	throws DatabaseException {
+
+        initEnv(false);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig);
+
+	myDb.put(null, new StringDbt("blah"), new StringDbt("blort"));
+	Cursor cursor = myDb.openCursor(null, null);
+	OperationStatus status = cursor.getNext(new DatabaseEntry(),
+                                                new DatabaseEntry(),
+                                                LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+	Cursor cursorDup = cursor.dup(true);
+	cursor.close();
+	cursorDup.close();
+	myDb.close();
+    }
+
+    public void testDupInitialized()
+        throws DatabaseException {
+
+        /* Open db. */
+        initEnv(false);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig);
+
+        /* Open uninitialized cursor. */
+        Cursor c1 = myDb.openCursor(null, null);
+        try {
+            c1.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        /* Dup uninitialized cursor with samePosition=false. */
+        Cursor c2 = c1.dup(false);
+        try {
+            c2.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        /* Dup uninitialized cursor with samePosition=true. */
+        Cursor c3 = c1.dup(true);
+        try {
+            c3.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        /* Ensure dup'ed cursors are usable. */
+        assertEquals(OperationStatus.SUCCESS,
+                     c1.put(new DatabaseEntry(new byte[0]),
+                            new DatabaseEntry(new byte[0])));
+        assertEquals(OperationStatus.SUCCESS,
+                     c2.getFirst(new DatabaseEntry(), new DatabaseEntry(),
+                                 null));
+        assertEquals(OperationStatus.NOTFOUND,
+                     c2.getNext(new DatabaseEntry(), new DatabaseEntry(),
+                                 null));
+        assertEquals(OperationStatus.SUCCESS,
+                     c3.getFirst(new DatabaseEntry(), new DatabaseEntry(),
+                                 null));
+        assertEquals(OperationStatus.NOTFOUND,
+                     c3.getNext(new DatabaseEntry(), new DatabaseEntry(),
+                                 null));
+
+        /* Close db. */
+        c3.close();
+        c2.close();
+        c1.close();
+	myDb.close();
+    }
+
+    /**
+     * Create some duplicate data.
+     *
+     * Pass 1, walk over the data and with each iteration, dup() the
+     * cursor at the same position.  Ensure that the dup points to the
+     * same key/data pair.  Advance the dup'd cursor and ensure that
+     * the data is different (key may be the same since it's a
+     * duplicate set).  Then dup() the cursor without maintaining
+     * position.  Ensure that getCurrent() throws a Cursor Not Init'd
+     * exception.
+     *
+     * Pass 2, iterate through the data, and dup the cursor in the
+     * same position.  Advance the original cursor and ensure that the
+     * dup()'d points to the original data and the original cursor
+     * points at new data.
+     */
+    public void testCursorDupSamePosition()
+        throws IOException, DatabaseException {
+
+        initEnv(true);
+	createRandomDuplicateData(null, false);
+
+	DataWalker dw = new DataWalker(null) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    DatabaseEntry keyDbt = new DatabaseEntry();
+		    DatabaseEntry dataDbt = new DatabaseEntry();
+		    Cursor cursor2 = cursor.dup(true);
+		    cursor2.getCurrent(keyDbt, dataDbt, LockMode.DEFAULT);
+		    String c2Key = new String(keyDbt.getData());
+		    String c2Data = new String(dataDbt.getData());
+		    assertTrue(c2Key.equals(foundKey));
+		    assertTrue(c2Data.equals(foundData));
+		    if (cursor2.getNext(keyDbt,
+					dataDbt,
+					LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS) {
+			/* Keys can be the same because we have duplicates. */
+			/*
+			  assertFalse(new String(keyDbt.getData()).
+			  equals(foundKey));
+			*/
+			assertFalse(new String(dataDbt.getData()).
+				    equals(foundData));
+		    }
+		    cursor2.close();
+		    try {
+			cursor2 = cursor.dup(false);
+			cursor2.getCurrent(keyDbt, dataDbt, LockMode.DEFAULT);
+			fail("didn't catch Cursor not initialized exception");
+		    } catch (DatabaseException DBE) {
+		    }
+		    cursor2.close();
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+
+	dw = new DataWalker(null) {
+		void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+		    DatabaseEntry keyDbt = new DatabaseEntry();
+		    DatabaseEntry dataDbt = new DatabaseEntry();
+		    DatabaseEntry key2Dbt = new DatabaseEntry();
+		    DatabaseEntry data2Dbt = new DatabaseEntry();
+		    Cursor cursor2 = cursor.dup(true);
+
+		    OperationStatus status =
+			cursor.getNext(keyDbt, dataDbt, LockMode.DEFAULT);
+
+		    cursor2.getCurrent(key2Dbt, data2Dbt, LockMode.DEFAULT);
+		    String c2Key = new String(key2Dbt.getData());
+		    String c2Data = new String(data2Dbt.getData());
+		    assertTrue(c2Key.equals(foundKey));
+		    assertTrue(c2Data.equals(foundData));
+		    if (status == OperationStatus.SUCCESS) {
+			assertFalse(new String(dataDbt.getData()).
+				    equals(foundData));
+			assertFalse(new String(dataDbt.getData()).
+				    equals(c2Data));
+		    }
+		    cursor2.close();
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..aca811f5ec68376be341db85a9cdb5caa20d64ed
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java
@@ -0,0 +1,1109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorDuplicateDeleteTest.java,v 1.61.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.Hashtable;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Various unit tests for CursorImpl using duplicates.
+ */
+public class DbCursorDuplicateDeleteTest extends DbCursorTestBase {
+
+    private volatile int sequence;
+
+    public DbCursorDuplicateDeleteTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    /**
+     * Create some simple duplicate data.  Delete it all.  Try to create
+     * it again.
+     */
+    public void testSimpleDeleteInsert()
+	throws Exception {
+
+        try {
+            initEnv(true);
+            doSimpleDuplicatePuts();
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+			if (prevKey.equals("")) {
+			    prevKey = foundKey;
+			}
+			if (!prevKey.equals(foundKey)) {
+			    deletedEntries = 0;
+			}
+			prevKey = foundKey;
+                        if (cursor.delete() == OperationStatus.SUCCESS) {
+			    deletedEntries++;
+			}
+			assertEquals(simpleKeyStrings.length - deletedEntries,
+				     cursor.count());
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            doSimpleDuplicatePuts();
+
+            dw = new DataWalker(null);
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(simpleKeyStrings.length * simpleKeyStrings.length,
+                         dw.nEntries);
+            closeEnv();
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    public void testCountAfterDelete()
+	throws Throwable {
+        initEnv(true);
+        DatabaseEntry key =
+            new DatabaseEntry(new byte[] {(byte) 'n',
+                                          (byte) 'o', (byte) 0 });
+        DatabaseEntry val1 =
+            new DatabaseEntry(new byte[] {(byte) 'k',
+                                          (byte) '1', (byte) 0 });
+        DatabaseEntry val2 =
+            new DatabaseEntry(new byte[] {(byte) 'k',
+                                          (byte) '2', (byte) 0 });
+        OperationStatus status =
+            exampleDb.putNoDupData(null, key, val1);
+        if (status != OperationStatus.SUCCESS)
+            throw new Exception("status on put 1=" + status);
+        status = exampleDb.putNoDupData(null, key, val2);
+        if (status != OperationStatus.SUCCESS)
+            throw new Exception("status on put 2=" + status);
+
+        Cursor c = exampleDb.openCursor(null, null);
+        try {
+            status = c.getSearchKey(key, new DatabaseEntry(),
+                                    LockMode.DEFAULT);
+            if (status != OperationStatus.SUCCESS)
+                throw new Exception("status on search=" + status);
+	    assertEquals(2, c.count());
+            status = c.delete();
+            if (status != OperationStatus.SUCCESS)
+                throw new Exception("err on del 1=" + status);
+            status = c.getNext(key, new DatabaseEntry(), LockMode.DEFAULT);
+            if (status != OperationStatus.SUCCESS)
+                throw new Exception("err on next=" + status);
+            status = c.delete();
+            if (status != OperationStatus.SUCCESS)
+                throw new Exception("err on del 2=" + status);
+	    assertEquals(0, c.count());
+        } finally {
+            c.close();
+        }
+
+        status = exampleDb.putNoDupData(null, key, val1);
+        if (status != OperationStatus.SUCCESS)
+            throw new Exception("err on put 3=" + status);
+
+        c = exampleDb.openCursor(null, null);
+        try {
+            status =
+		c.getSearchKey(key, new DatabaseEntry(), LockMode.DEFAULT);
+            if (status != OperationStatus.SUCCESS)
+		throw new Exception("err on search=" + status);
+	    assertEquals(1, c.count());
+        } finally {
+            c.close();
+        }
+    }
+
+    public void testDuplicateDeletionAll()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+            createRandomDuplicateData(10, 1000, dataMap, false, false);
+
+            DataWalker dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        if (ht.get(foundData) != null) {
+                            ht.remove(foundData);
+                            if (ht.size() == 0) {
+                                dataMap.remove(foundKey);
+                            }
+                        } else {
+                            fail("didn't find " + foundKey + "/" + foundData);
+                        }
+
+                        /* Make sure keys are ascending/descending. */
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+                        /*
+			 * Make sure duplicate items within key are asc/desc.
+			 */
+                        if (prevKey.equals(foundKey)) {
+                            if (duplicateComparisonFunction == null) {
+                                assertTrue(foundData.compareTo(prevData) >= 0);
+                            } else {
+                                assertTrue
+                                    (duplicateComparisonFunction.compare
+                                     (foundData.getBytes(),
+                                      prevData.getBytes()) >= 0);
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+                        }
+
+                        prevKey = foundKey;
+                        assertTrue(cursor.delete() == OperationStatus.SUCCESS);
+			assertEquals(ht.size(), cursor.count());
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dataMap.size() == 0);
+
+            dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        fail("data found after deletion: " +
+			     foundKey + "/" + foundData);
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicateDeletionAssorted()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+            Hashtable deletedDataMap = new Hashtable();
+            createRandomDuplicateData(10, 1000, dataMap, false, false);
+
+            /* Use the DataWalker.addedData field for a deleted Data Map. */
+            DataWalker dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        /* Make sure keys are ascending/descending. */
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+                        /*
+			 * Make sure duplicate items within key are asc/desc.
+			 */
+                        if (prevKey.equals(foundKey)) {
+                            if (duplicateComparisonFunction == null) {
+                                assertTrue(foundData.compareTo(prevData) >= 0);
+                            } else {
+                                assertTrue
+                                    (duplicateComparisonFunction.compare
+                                     (foundData.getBytes(),
+                                      prevData.getBytes()) >= 0);
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+                        }
+
+                        prevKey = foundKey;
+                        if (rnd.nextInt(10) < 8) {
+                            Hashtable delht =
+                                (Hashtable) addedDataMap.get(foundKey);
+                            if (delht == null) {
+                                delht = new Hashtable();
+                                addedDataMap.put(foundKey, delht);
+                            }
+                            delht.put(foundData, foundData);
+                            assertTrue(cursor.delete() ==
+				       OperationStatus.SUCCESS);
+
+                            if (ht.get(foundData) == null) {
+                                fail("didn't find " +
+				     foundKey + "/" + foundData);
+                            }
+                            ht.remove(foundData);
+			    assertEquals(ht.size(), cursor.count());
+                            if (ht.size() == 0) {
+                                dataMap.remove(foundKey);
+                            }
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+
+            dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable delht =
+			    (Hashtable) addedDataMap.get(foundKey);
+                        if (delht != null &&
+                            delht.get(foundData) != null) {
+                            fail("found deleted entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("couldn't find hashtable for " + foundKey);
+                        }
+                        if (ht.get(foundData) == null) {
+                            fail("couldn't find entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+                        ht.remove(foundData);
+                        if (ht.size() == 0) {
+                            dataMap.remove(foundKey);
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicateDeletionAssortedSR15375()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+            Hashtable deletedDataMap = new Hashtable();
+            createRandomDuplicateData(10, 1000, dataMap, false, false);
+
+            /* Use the DataWalker.addedDataMap field as a deleted-data map. */
+            DataWalker dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        /* Make sure keys are ascending/descending. */
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+                        /*
+			 * Make sure duplicate items within key are asc/desc.
+			 */
+                        if (prevKey.equals(foundKey)) {
+                            if (duplicateComparisonFunction == null) {
+                                assertTrue(foundData.compareTo(prevData) >= 0);
+                            } else {
+                                assertTrue
+                                    (duplicateComparisonFunction.compare
+                                     (foundData.getBytes(),
+                                      prevData.getBytes()) >= 0);
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+                        }
+
+                        prevKey = foundKey;
+                        if (rnd.nextInt(10) < 8) {
+                            Hashtable delht =
+                                (Hashtable) addedDataMap.get(foundKey);
+                            if (delht == null) {
+                                delht = new Hashtable();
+                                addedDataMap.put(foundKey, delht);
+                            }
+                            delht.put(foundData, foundData);
+                            assertTrue(cursor.delete() ==
+				       OperationStatus.SUCCESS);
+
+                            if (ht.get(foundData) == null) {
+                                fail("didn't find " +
+				     foundKey + "/" + foundData);
+                            }
+                            ht.remove(foundData);
+			    assertEquals(ht.size(), cursor.count());
+                            if (ht.size() == 0) {
+                                dataMap.remove(foundKey);
+                            }
+
+			    /*
+			     * Add back in a duplicate for each one deleted.
+			     */
+			    String newDupData = foundData + "x";
+			    StringDbt newDupDBT =
+				new StringDbt(newDupData);
+			    assertTrue
+				(putAndVerifyCursor
+				 (cursor,
+				  new StringDbt(foundKey),
+				  newDupDBT, true) ==
+				 OperationStatus.SUCCESS);
+			    ht.put(newDupData, newDupData);
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+
+            dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable delht =
+			    (Hashtable) addedDataMap.get(foundKey);
+                        if (delht != null &&
+                            delht.get(foundData) != null) {
+                            fail("found deleted entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("couldn't find hashtable for " + foundKey);
+                        }
+                        if (ht.get(foundData) == null) {
+                            fail("couldn't find entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+                        ht.remove(foundData);
+                        if (ht.size() == 0) {
+                            dataMap.remove(foundKey);
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicateDeleteFirst()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+            Hashtable deletedDataMap = new Hashtable();
+            createRandomDuplicateData(-10, 10, dataMap, false, false);
+
+            /* Use the DataWalker.addedDataMap field as a deleted-data map. */
+            DataWalker dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        /* Make sure keys are ascending/descending. */
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+                        /*
+			 * Make sure duplicate items within key are asc/desc.
+			 */
+                        if (prevKey.equals(foundKey)) {
+                            if (duplicateComparisonFunction == null) {
+                                assertTrue(foundData.compareTo(prevData) >= 0);
+                            } else {
+                                assertTrue
+                                    (duplicateComparisonFunction.compare
+                                     (foundData.getBytes(),
+                                      prevData.getBytes()) >= 0);
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+			    if (cursor.count() > 1) {
+				Hashtable delht =
+				    (Hashtable) addedDataMap.get(foundKey);
+				if (delht == null) {
+				    delht = new Hashtable();
+				    addedDataMap.put(foundKey, delht);
+				}
+				delht.put(foundData, foundData);
+				assertTrue(cursor.delete() ==
+					   OperationStatus.SUCCESS);
+
+				if (ht.get(foundData) == null) {
+				    fail("didn't find " +
+					 foundKey + "/" + foundData);
+				}
+				ht.remove(foundData);
+				assertEquals(ht.size(), cursor.count());
+				if (ht.size() == 0) {
+				    dataMap.remove(foundKey);
+				}
+			    }
+                        }
+
+                        prevKey = foundKey;
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+
+            dw = new DataWalker(dataMap, deletedDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        Hashtable delht =
+			    (Hashtable) addedDataMap.get(foundKey);
+                        if (delht != null &&
+                            delht.get(foundData) != null) {
+                            fail("found deleted entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("couldn't find hashtable for " + foundKey);
+                        }
+                        if (ht.get(foundData) == null) {
+                            fail("couldn't find entry for " +
+                                 foundKey + "/" + foundData);
+                        }
+                        ht.remove(foundData);
+                        if (ht.size() == 0) {
+                            dataMap.remove(foundKey);
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Similar to the above tests, but there was some question about whether
+     * this exercises new functionality or not.  Insert k1/d1 and k1/d2.
+     * Iterate through the data, delete k1/d1, and verify that exactly one
+     * entry remains.
+     */
+    public void testSimpleSingleElementDupTree()
+	throws DatabaseException {
+
+        initEnv(true);
+	StringDbt key = new StringDbt("k1");
+	StringDbt data1 = new StringDbt("d1");
+	StringDbt data2 = new StringDbt("d2");
+
+	assertEquals(OperationStatus.SUCCESS,
+		     putAndVerifyCursor(cursor, key, data1, true));
+	assertEquals(OperationStatus.SUCCESS,
+		     putAndVerifyCursor(cursor, key, data2, true));
+
+	DataWalker dw = new DataWalker(null) {
+		void perData(String foundKey, String foundData)
+		    throws DatabaseException {
+
+		    if (foundKey.equals("k1") && deletedEntries == 0) {
+			if (cursor.delete() == OperationStatus.SUCCESS) {
+			    deletedEntries++;
+			}
+		    }
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+
+	dw = new DataWalker(null) {
+		void perData(String foundKey, String foundData)
+		    throws DatabaseException {
+
+		    deletedEntries++;
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+
+	assertEquals(1, dw.deletedEntries);
+    }
+
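+    /**
+     * Write a put/delete pattern (writeEmptyNodeData) that leaves empty
+     * nodes behind, manually compress the first BIN of the first duplicate
+     * tree, and verify that a cursor can still find the first record.  The
+     * test synchronizes on the INCompressor to keep the daemon from
+     * compressing concurrently.
+     */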
+    public void testEmptyNodes()
+	throws Throwable {
+
+        initEnv(true);
+	synchronized (DbInternal.envGetEnvironmentImpl(exampleEnv).
+		      getINCompressor()) {
+	    writeEmptyNodeData();
+
+	    BIN bin = null;
+	    try {
+		bin = (BIN) DbInternal.dbGetDatabaseImpl(exampleDb).
+		    getTree().
+		    getFirstNode(CacheMode.DEFAULT);
+		DIN dupRoot = (DIN) bin.fetchTarget(0);
+		bin.releaseLatch();
+		bin = null;
+		dupRoot.latch();
+		bin = (BIN) DbInternal.dbGetDatabaseImpl(exampleDb).
+		    getTree().
+		    getFirstNode(dupRoot, CacheMode.DEFAULT);
+		bin.compress(null, true, null);
+		bin.releaseLatch();
+		bin = null;
+
+		Cursor cursor = exampleDb.openCursor(null, null);
+		DatabaseEntry foundKey = new DatabaseEntry();
+		DatabaseEntry foundData = new DatabaseEntry();
+		OperationStatus status = cursor.getFirst(foundKey, foundData,
+							 LockMode.DEFAULT);
+		cursor.close();
+		assertEquals(OperationStatus.SUCCESS, status);
+	    } finally {
+		if (bin != null) {
+		    bin.releaseLatch();
+		}
+	    }
+	}
+    }
+
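+    /**
+     * SR 8984: within a single txn, insert a record, delete it, then insert
+     * two duplicates for the same key and abort.  After the abort the
+     * database must be empty.
+     */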
+    public void testDeletedReplaySR8984()
+	throws DatabaseException {
+
+	initEnvTransactional(true);
+	Transaction txn = exampleEnv.beginTransaction(null, null);
+	Cursor c = exampleDb.openCursor(txn, null);
+	c.put(simpleKeys[0], simpleData[0]);
+	c.delete();
+	for (int i = 1; i < 3; i++) {
+	    c.put(simpleKeys[0], simpleData[i]);
+	}
+	c.close();
+	txn.abort();
+	txn = exampleEnv.beginTransaction(null, null);
+	c = exampleDb.openCursor(txn, null);
+	assertEquals(OperationStatus.NOTFOUND,
+		     c.getFirst(new DatabaseEntry(),
+				new DatabaseEntry(),
+				LockMode.DEFAULT));
+	c.close();
+	txn.commit();
+    }
+
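+    /**
+     * SR 9885: two threads position cursors on opposite ends of the same
+     * duplicate set and both delete.  One thread may deadlock and abort;
+     * afterwards the database must still pass verification.
+     */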
+    public void testDuplicateDeadlockSR9885()
+	throws DatabaseException {
+
+	initEnvTransactional(true);
+	Transaction txn = exampleEnv.beginTransaction(null, null);
+	Cursor c = exampleDb.openCursor(txn, null);
+	for (int i = 0; i < simpleKeyStrings.length; i++) {
+	    c.put(simpleKeys[0], simpleData[i]);
+	}
+	c.close();
+	txn.commit();
+	sequence = 0;
+
+	JUnitThread tester1 =
+	    new JUnitThread("testDuplicateDeadlock1") {
+		public void testBody()
+		    throws DatabaseException {
+
+		    DatabaseEntry key = new DatabaseEntry();
+		    DatabaseEntry data = new DatabaseEntry();
+		    Transaction txn1 = exampleEnv.beginTransaction(null, null);
+		    Cursor cursor1 = exampleDb.openCursor(txn1, null);
+		    try {
+			cursor1.getFirst(key, data, LockMode.DEFAULT);
+			sequence++;
+			while (sequence < 2) {
+			    Thread.yield();
+			}
+			cursor1.delete();
+			sequence++;
+			while (sequence < 4) {
+			    Thread.yield();
+			}
+
+		    } catch (DeadlockException DBE) {
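+			/* A deadlock abort is an expected outcome. */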
+		    } finally {
+			cursor1.close();
+			txn1.abort();
+			sequence = 4;
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testDuplicateDeadlock2") {
+		public void testBody()
+		    throws DatabaseException {
+
+		    DatabaseEntry key = new DatabaseEntry();
+		    DatabaseEntry data = new DatabaseEntry();
+		    Transaction txn2 = exampleEnv.beginTransaction(null, null);
+		    Cursor cursor2 = exampleDb.openCursor(txn2, null);
+		    try {
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			cursor2.getLast(key, data, LockMode.DEFAULT);
+			sequence++;
+			cursor2.delete();
+			sequence++;
+			while (sequence < 4) {
+			    Thread.yield();
+			}
+
+		    } catch (DeadlockException DBE) {
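+			/* A deadlock abort is an expected outcome. */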
+		    } finally {
+			cursor2.close();
+			txn2.abort();
+			sequence = 4;
+		    }
+		}
+	    };
+
+	try {
+	    tester1.start();
+	    tester2.start();
+	    tester1.finishTest();
+	    tester2.finishTest();
+	    DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(exampleDb);
+	    assertTrue
+		(dbImpl.verify(new VerifyConfig(), dbImpl.getEmptyStats()));
+	} catch (Throwable T) {
+	    fail("testDuplicateDeadlock caught: " + T);
+	}
+    }
+
+    public void testSR9992()
+	throws DatabaseException {
+
+	initEnvTransactional(true);
+	Transaction txn = exampleEnv.beginTransaction(null, null);
+	Cursor c = exampleDb.openCursor(txn, null);
+	for (int i = 1; i < simpleKeys.length; i++) {
+	    c.put(simpleKeys[0], simpleData[i]);
+	}
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	c.getCurrent(key, data, LockMode.DEFAULT);
+	c.delete();
+	/* The current record was deleted, so putCurrent returns NOTFOUND. */
+	assertEquals(OperationStatus.NOTFOUND,
+		     c.putCurrent(new DatabaseEntry("aaaa".getBytes())));
+	c.close();
+	txn.commit();
+    }
+
+    public void testSR9900()
+	throws DatabaseException {
+
+	initEnvTransactional(true);
+	Transaction txn = exampleEnv.beginTransaction(null, null);
+	Cursor c = exampleDb.openCursor(txn, null);
+	c.put(simpleKeys[0], simpleData[0]);
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	c.getCurrent(key, data, LockMode.DEFAULT);
+	c.delete();
+	/* The current record was deleted, so putCurrent returns NOTFOUND. */
+	assertEquals(OperationStatus.NOTFOUND,
+		     c.putCurrent(new DatabaseEntry("aaaa".getBytes())));
+	c.close();
+	txn.commit();
+    }
+
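+    /*
+     * Insert a single-byte key/data pair.  Note the argument order: data
+     * comes first, key second.  A data value of -1 writes a zero-length
+     * data item.
+     */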
+    private void put(int data, int key)
+	throws DatabaseException {
+
+	byte[] keyBytes = new byte[1];
+	keyBytes[0] = (byte) (key & 0xff);
+	DatabaseEntry keyDbt = new DatabaseEntry(keyBytes);
+
+	byte[] dataBytes = new byte[1];
+	if (data == -1) {
+	    dataBytes = new byte[0];
+	} else {
+	    dataBytes[0] = (byte) (data & 0xff);
+	}
+	DatabaseEntry dataDbt = new DatabaseEntry(dataBytes);
+
+	OperationStatus status = exampleDb.put(null, keyDbt, dataDbt);
+	if (status != OperationStatus.SUCCESS) {
+	    System.out.println("db.put returned " + status +
+			       " for key " + key + "/" + data);
+	}
+    }
+
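+    /*
+     * Delete the given single-byte key (and any duplicates) via
+     * Database.delete.
+     */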
+    private void del(int key)
+	throws DatabaseException {
+
+	byte[] keyBytes = new byte[1];
+	keyBytes[0] = (byte) (key & 0xff);
+	DatabaseEntry keyDbt = new DatabaseEntry(keyBytes);
+
+	OperationStatus status = exampleDb.delete(null, keyDbt);
+	if (status != OperationStatus.SUCCESS) {
+	    System.out.println("db.del returned " + status +
+			       " for key " + key);
+	}
+    }
+
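+    /*
+     * Delete a specific key/data pair by positioning a cursor with
+     * getSearchBoth and calling Cursor.delete.
+     */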
+    private void delBoth(int key, int data)
+	throws DatabaseException {
+
+	byte[] keyBytes = new byte[1];
+	keyBytes[0] = (byte) (key & 0xff);
+	DatabaseEntry keyDbt = new DatabaseEntry(keyBytes);
+
+	byte[] dataBytes = new byte[1];
+	dataBytes[0] = (byte) (data & 0xff);
+	DatabaseEntry dataDbt = new DatabaseEntry(dataBytes);
+
+	Cursor cursor = exampleDb.openCursor(null, null);
+	OperationStatus status =
+	    cursor.getSearchBoth(keyDbt, dataDbt, LockMode.DEFAULT);
+	if (status != OperationStatus.SUCCESS) {
+	    System.out.println("getSearchBoth returned " + status +
+			       " for key " + key + "/" + data);
+	}
+
+	status = cursor.delete();
+	if (status != OperationStatus.SUCCESS) {
+	    System.out.println("Dbc.delete returned " + status +
+			       " for key " + key + "/" + data);
+	}
+	cursor.close();
+    }
+
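+    /*
+     * Replay a long put/delete sequence that repeatedly empties and refills
+     * BINs, then creates and deletes duplicate sets via delBoth, leaving
+     * empty nodes for testEmptyNodes to exercise.
+     */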
+    private void writeEmptyNodeData()
+	throws DatabaseException {
+
+	put(101, 1);
+	put(102, 2);
+	put(103, 3);
+	put(104, 4);
+	put(105, 5);
+	put(106, 6);
+	del(1);
+	del(3);
+	del(5);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(3);
+	del(5);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(3);
+	del(5);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(-1, 2);
+	put(-1, 4);
+	put(-1, 6);
+	put(-1, 1);
+	put(-1, 3);
+	put(-1, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	del(1);
+	del(2);
+	del(3);
+	del(4);
+	del(5);
+	del(6);
+	put(102, 2);
+	put(104, 4);
+	put(106, 6);
+	put(101, 1);
+	put(103, 3);
+	put(105, 5);
+	put(102, 1);
+	put(103, 1);
+	put(104, 1);
+	put(105, 1);
+	delBoth(1, 101);
+	delBoth(1, 102);
+	delBoth(1, 103);
+	delBoth(1, 104);
+	delBoth(1, 105);
+	put(101, 1);
+	put(102, 1);
+	put(103, 1);
+	put(104, 1);
+	put(105, 1);
+	delBoth(1, 101);
+	delBoth(1, 102);
+	delBoth(1, 103);
+	delBoth(1, 104);
+	delBoth(1, 105);
+	put(101, 1);
+	put(102, 1);
+	put(103, 1);
+	put(104, 1);
+	put(105, 1);
+	delBoth(1, 101);
+	delBoth(1, 102);
+	delBoth(1, 103);
+	delBoth(1, 104);
+	delBoth(1, 105);
+	put(101, 1);
+	put(102, 1);
+	put(103, 1);
+	put(104, 1);
+	put(105, 1);
+	delBoth(1, 102);
+	delBoth(1, 103);
+	delBoth(1, 104);
+	delBoth(1, 105);
+	put(103, 2);
+	put(104, 2);
+	put(105, 2);
+	put(106, 2);
+	delBoth(2, 102);
+	delBoth(2, 103);
+	delBoth(2, 104);
+	delBoth(2, 105);
+	delBoth(2, 106);
+	put(102, 2);
+	put(103, 2);
+	put(104, 2);
+	put(105, 2);
+	put(106, 2);
+	delBoth(2, 102);
+	delBoth(2, 103);
+	delBoth(2, 104);
+	delBoth(2, 105);
+	delBoth(2, 106);
+	put(102, 2);
+	put(103, 2);
+	put(104, 2);
+	put(105, 2);
+	put(106, 2);
+	delBoth(2, 102);
+	delBoth(2, 103);
+	delBoth(2, 104);
+	delBoth(2, 105);
+	delBoth(2, 106);
+	put(102, 2);
+	put(103, 2);
+	put(104, 2);
+	put(105, 2);
+	put(106, 2);
+	delBoth(2, 102);
+	delBoth(2, 103);
+	delBoth(2, 104);
+	delBoth(2, 105);
+	delBoth(2, 106);
+	put(107, 6);
+	put(108, 6);
+	put(109, 6);
+	put(110, 6);
+	delBoth(6, 106);
+	delBoth(6, 107);
+	delBoth(6, 108);
+	delBoth(6, 109);
+	delBoth(6, 110);
+	put(106, 6);
+	put(107, 6);
+	put(108, 6);
+	put(109, 6);
+	put(110, 6);
+	delBoth(6, 106);
+	delBoth(6, 107);
+	delBoth(6, 108);
+	delBoth(6, 109);
+	delBoth(6, 110);
+	put(106, 6);
+	put(107, 6);
+	put(108, 6);
+	put(109, 6);
+	put(110, 6);
+	delBoth(6, 106);
+	delBoth(6, 107);
+	delBoth(6, 108);
+	delBoth(6, 109);
+	delBoth(6, 110);
+	put(106, 6);
+	put(107, 6);
+	put(108, 6);
+	put(109, 6);
+	put(110, 6);
+	delBoth(6, 107);
+	delBoth(6, 108);
+	delBoth(6, 109);
+	delBoth(6, 110);
+	put(106, 5);
+	put(107, 5);
+	put(108, 5);
+	put(109, 5);
+	delBoth(5, 105);
+	delBoth(5, 106);
+	delBoth(5, 107);
+	delBoth(5, 108);
+	delBoth(5, 109);
+	put(105, 5);
+	put(106, 5);
+	put(107, 5);
+	put(108, 5);
+	put(109, 5);
+	delBoth(5, 105);
+	delBoth(5, 106);
+	delBoth(5, 107);
+	delBoth(5, 108);
+	delBoth(5, 109);
+	put(105, 5);
+	put(106, 5);
+	put(107, 5);
+	put(108, 5);
+	put(109, 5);
+	delBoth(5, 105);
+	delBoth(5, 106);
+	delBoth(5, 107);
+	delBoth(5, 108);
+	delBoth(5, 109);
+	put(105, 5);
+	put(106, 5);
+	put(107, 5);
+	put(108, 5);
+	put(109, 5);
+	delBoth(5, 106);
+	delBoth(5, 107);
+	delBoth(5, 108);
+	delBoth(5, 109);
+	delBoth(1, 101);
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e092b492098c240b88cd5167d5938ecab746e5ab
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java
@@ -0,0 +1,999 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorDuplicateTest.java,v 1.61.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.Hashtable;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.tree.DuplicateEntryException;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Various unit tests for CursorImpl using duplicates.
+ */
+public class DbCursorDuplicateTest extends DbCursorTestBase {
+
+    public DbCursorDuplicateTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    /**
+     * Rudimentary insert/retrieve test.  Walk over the results forwards.
+     */
+    public void testDuplicateCreationForward()
+	throws Throwable {
+
+        initEnv(true);
+        try {
+            doDuplicateTest(true, false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Same as testDuplicateCreationForward, except that it inserts with
+     * key-last.
+     */
+    public void testDuplicateCreationForwardKeyLast()
+	throws Throwable {
+
+        initEnv(true);
+        try {
+            doDuplicateTest(true, true);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Rudimentary insert/retrieve test.  Walk over the results backwards.
+     */
+    public void testDuplicateCreationBackwards()
+	throws Throwable {
+
+        initEnv(true);
+        try {
+            doDuplicateTest(false, false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Set a duplicate comparison
+     * function.  Iterate through the tree in ascending order.  Ensure that
+     * the elements are returned in ascending order.
+     */
+    public void testLargeGetForwardTraverseWithNormalComparisonFunction()
+        throws Throwable {
+
+        try {
+            tearDown();
+            duplicateComparisonFunction = duplicateComparator;
+            setUp();
+            initEnv(true);
+            doDuplicateTest(true, false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Set a reverse-order duplicate
+     * comparison function.  Iterate through the tree backwards.  Ensure that
+     * the elements are returned in the comparator's (reverse) order.
+     */
+    public void testLargeGetForwardTraverseWithReverseComparisonFunction()
+        throws Throwable {
+
+        try {
+            tearDown();
+            duplicateComparisonFunction = reverseDuplicateComparator;
+            setUp();
+            initEnv(true);
+            doDuplicateTest(false, false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Put a bunch of data items into the database in a specific order and
+     * ensure that, when they are read back, putNoDupData returns KEYEXIST
+     * for every existing key/data pair.
+     */
+    public void testPutNoDupData()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            createRandomDuplicateData(null, false);
+
+            DataWalker dw = new DataWalker(simpleDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        assertEquals
+                            (OperationStatus.KEYEXIST,
+                             cursor.putNoDupData(new StringDbt(foundKey),
+                                                 new StringDbt(foundData)));
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
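+    /**
+     * Verify that putNoDupData returns SUCCESS for each distinct data item
+     * inserted under a single key.
+     */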
+    public void testPutNoDupData2()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+	    for (int i = 0; i < simpleKeyStrings.length; i++) {
+		OperationStatus status =
+		    cursor.putNoDupData(new StringDbt("oneKey"),
+					new StringDbt(simpleDataStrings[i]));
+		assertEquals(OperationStatus.SUCCESS, status);
+	    }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
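+    /**
+     * Commit one record, then create a duplicate tree for the same key in a
+     * second txn and abort it.  Verify that only the committed record
+     * remains.
+     */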
+    public void testAbortDuplicateTreeCreation()
+	throws Throwable {
+
+        try {
+            initEnvTransactional(true);
+	    Transaction txn = exampleEnv.beginTransaction(null, null);
+	    Cursor c = exampleDb.openCursor(txn, null);
+	    OperationStatus status =
+		c.put(new StringDbt("oneKey"),
+		      new StringDbt("firstData"));
+	    assertEquals(OperationStatus.SUCCESS, status);
+	    c.close();
+	    txn.commit();
+	    txn = exampleEnv.beginTransaction(null, null);
+	    c = exampleDb.openCursor(txn, null);
+	    status =
+		c.put(new StringDbt("oneKey"),
+		      new StringDbt("secondData"));
+	    assertEquals(OperationStatus.SUCCESS, status);
+	    c.close();
+	    txn.abort();
+	    txn = exampleEnv.beginTransaction(null, null);
+	    c = exampleDb.openCursor(txn, null);
+	    DatabaseEntry keyRet = new DatabaseEntry();
+	    DatabaseEntry dataRet = new DatabaseEntry();
+	    assertEquals(OperationStatus.SUCCESS,
+			 c.getFirst(keyRet, dataRet, LockMode.DEFAULT));
+	    assertEquals(1, c.count());
+	    assertEquals(OperationStatus.NOTFOUND,
+			 c.getNext(keyRet, dataRet, LockMode.DEFAULT));
+	    c.close();
+	    txn.commit();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Create the usual random duplicate data.  Iterate back over it calling
+     * count at each element.  Make sure the number of duplicates returned
+     * for a particular key is N_COUNT_DUPLICATES_PER_KEY.  Note that this
+     * is somewhat inefficient, but cautious, in that it calls count for
+     * every duplicate returned, rather than just once for each unique key.
+     */
+    public void testDuplicateCount()
+        throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            createRandomDuplicateData(N_COUNT_TOP_KEYS,
+                                      N_COUNT_DUPLICATES_PER_KEY,
+                                      dataMap, false, true);
+
+            DataWalker dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        assertEquals(N_COUNT_DUPLICATES_PER_KEY,
+				     cursor.count());
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(N_COUNT_DUPLICATES_PER_KEY, dw.nEntries);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
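+    /**
+     * Verify that inserting an exact duplicate fails with putNoDupData but
+     * succeeds with put, and that neither creates a second copy: only two
+     * distinct records remain at the end.
+     */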
+    public void testDuplicateDuplicates()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.put(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            dataString = "d2d2";
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.put(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData) {
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dw.nEntries == 2);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
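+    /**
+     * Count comparator invocations while putting the same key/data pair
+     * twice: the btree comparator should be invoked once and the duplicate
+     * comparator twice.
+     */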
+    public void testDuplicateDuplicatesWithComparators()
+	throws Throwable {
+
+        try {
+            tearDown();
+            duplicateComparisonFunction = invocationCountingComparator;
+	    btreeComparisonFunction = invocationCountingComparator;
+	    invocationCountingComparator.setInvocationCount(0);
+            setUp();
+            initEnv(true);
+
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.put(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.put(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+
+	    InvocationCountingBtreeComparator bTreeICC =
+		(InvocationCountingBtreeComparator)
+		(exampleDb.getConfig().getBtreeComparator());
+
+	    InvocationCountingBtreeComparator dupICC =
+		(InvocationCountingBtreeComparator)
+		(exampleDb.getConfig().getDuplicateComparator());
+
+            assertEquals(1, bTreeICC.getInvocationCount());
+            assertEquals(2, dupICC.getInvocationCount());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
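+    /**
+     * Verify that putCurrent succeeds for a duplicate when the replacement
+     * data is equal to the existing data.
+     */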
+    public void testDuplicateReplacement()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+		       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+		       OperationStatus.SUCCESS);
+            dataString = "d2d2";
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+		       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+		       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        StringDbt dataDbt = new StringDbt();
+                        dataDbt.setString(foundData);
+                        assertEquals(OperationStatus.SUCCESS,
+				     cursor.putCurrent(dataDbt));
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dw.nEntries == 2);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
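+    /**
+     * Verify that putCurrent throws DatabaseException when attempting to
+     * replace a duplicate with unequal data.
+     */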
+    public void testDuplicateReplacementFailure()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            dataString = "d2d2";
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        StringDbt dataDbt = new StringDbt();
+                        dataDbt.setString("blort");
+                        try {
+                            cursor.putCurrent(dataDbt);
+                            fail("didn't catch DatabaseException");
+                        } catch (DatabaseException DBE) {
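+                            /* Expected: cannot replace with unequal data. */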
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dw.nEntries == 2);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicateReplacementFailure1Dup()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        StringDbt dataDbt = new StringDbt();
+                        dataDbt.setString("blort");
+                        try {
+                            cursor.putCurrent(dataDbt);
+                            fail("didn't catch DatabaseException");
+                        } catch (DatabaseException DBE) {
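+                            /* Expected: cannot replace with unequal data. */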
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dw.nEntries == 1);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * When using a duplicate comparator that does not compare all bytes,
+     * attempting to change the data for a duplicate data item should work when
+     * a byte not compared is changed. [#15527] [#15704]
+     */
+    public void testDuplicateReplacementFailureWithComparisonFunction1()
+	throws Throwable {
+
+        try {
+            tearDown();
+            duplicateComparisonFunction = truncatedComparator;
+            setUp();
+            initEnv(true);
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            dataString = "d2d2";
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        StringDbt dataDbt = new StringDbt();
+                        StringBuffer sb = new StringBuffer(foundData);
+                        sb.replace(3, 4, "3");
+                        dataDbt.setString(sb.toString());
+                        try {
+                            cursor.putCurrent(dataDbt);
+                        } catch (DatabaseException e) {
+                            fail(e.toString());
+                        }
+                        StringDbt keyDbt = new StringDbt();
+                        assertSame(OperationStatus.SUCCESS,
+                                   cursor.getCurrent(keyDbt, dataDbt, null));
+                        assertEquals(foundKey, keyDbt.getString());
+                        assertEquals(sb.toString(), dataDbt.getString());
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * When using a duplicate comparator that does not compare all bytes,
+     * attempting to change a byte that is compared should cause an error.
+     * [#15527]
+     */
+    public void testDuplicateReplacementFailureWithComparisonFunction2()
+	throws Throwable {
+
+        try {
+            tearDown();
+            duplicateComparisonFunction = truncatedComparator;
+            setUp();
+            initEnv(true);
+
+            String keyString = "aaaa";
+            String dataString = "d1d1";
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            keyDbt.setData(keyString.getBytes());
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            dataString = "d2d2";
+            dataDbt.setData(dataString.getBytes());
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) ==
+                       OperationStatus.SUCCESS);
+            assertTrue(cursor.putNoDupData(keyDbt, dataDbt) !=
+                       OperationStatus.SUCCESS);
+            DataWalker dw = new DataWalker(null) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        StringDbt dataDbt = new StringDbt();
+                        StringBuffer sb = new StringBuffer(foundData);
+                        sb.replace(2, 2, "3");
+                        sb.setLength(4);
+                        dataDbt.setString(sb.toString());
+                        try {
+                            cursor.putCurrent(dataDbt);
+                            fail("didn't catch DatabaseException");
+                        } catch (DatabaseException DBE) {
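+                            /* Expected: a compared byte was changed. */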
+                        }
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertTrue(dw.nEntries == 2);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    private void doDuplicateTest(boolean forward, boolean useKeyLast)
+	throws Throwable {
+
+	Hashtable dataMap = new Hashtable();
+	createRandomDuplicateData(dataMap, useKeyLast);
+
+	DataWalker dw;
+	if (forward) {
+	    dw = new DataWalker(dataMap) {
+		    void perData(String foundKey, String foundData) {
+			Hashtable ht = (Hashtable) dataMap.get(foundKey);
+			if (ht == null) {
+			    fail("didn't find ht " + foundKey + "/" +
+				 foundData);
+			}
+
+			if (ht.get(foundData) != null) {
+			    ht.remove(foundData);
+			    if (ht.size() == 0) {
+				dataMap.remove(foundKey);
+			    }
+			} else {
+			    fail("didn't find " + foundKey + "/" + foundData);
+			}
+
+			assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+			if (prevKey.equals(foundKey)) {
+			    if (duplicateComparisonFunction == null) {
+				assertTrue(foundData.compareTo(prevData) >= 0);
+			    } else {
+				assertTrue
+				    (duplicateComparisonFunction.compare
+				     (foundData.getBytes(),
+				      prevData.getBytes()) >= 0);
+			    }
+			    prevData = foundData;
+			} else {
+			    prevData = "";
+			}
+
+			prevKey = foundKey;
+		    }
+		};
+	} else {
+	    dw = new BackwardsDataWalker(dataMap) {
+		    void perData(String foundKey, String foundData) {
+			Hashtable ht = (Hashtable) dataMap.get(foundKey);
+			if (ht == null) {
+			    fail("didn't find ht " + foundKey + "/" +
+				 foundData);
+			}
+
+			if (ht.get(foundData) != null) {
+			    ht.remove(foundData);
+			    if (ht.size() == 0) {
+				dataMap.remove(foundKey);
+			    }
+			} else {
+			    fail("didn't find " + foundKey + "/" + foundData);
+			}
+
+			if (!prevKey.equals("")) {
+			    assertTrue(foundKey.compareTo(prevKey) <= 0);
+			}
+
+			if (prevKey.equals(foundKey)) {
+			    if (!prevData.equals("")) {
+				if (duplicateComparisonFunction == null) {
+				    assertTrue
+					(foundData.compareTo(prevData) <= 0);
+				} else {
+				    assertTrue
+					(duplicateComparisonFunction.compare
+					 (foundData.getBytes(),
+					  prevData.getBytes()) <= 0);
+				}
+			    }
+			    prevData = foundData;
+			} else {
+			    prevData = "";
+			}
+
+			prevKey = foundKey;
+		    }
+		};
+	}
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+	assertTrue(dataMap.size() == 0);
+    }
+
+    /**
+     * Create a bunch of random duplicate data.  Iterate over it using
+     * getNextDup until the end of the dup set.  At end of set, handleEndOfSet
+     * is called to do a getNext onto the next dup set.  Verify that ascending
+     * order is maintained and that we reach end of set the proper number of
+     * times.
+     */
+    public void testGetNextDup()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            createRandomDuplicateData(dataMap, false);
+
+            DataWalker dw = new DupDataWalker(dataMap) {
+                    void perData(String foundKey, String foundData) {
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        if (ht.get(foundData) != null) {
+                            ht.remove(foundData);
+                            if (ht.size() == 0) {
+                                dataMap.remove(foundKey);
+                            }
+                        } else {
+                            fail("didn't find " + foundKey + "/" + foundData);
+                        }
+
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+
+                        if (prevKey.equals(foundKey)) {
+                            if (duplicateComparisonFunction == null) {
+                                assertTrue(foundData.compareTo(prevData) >= 0);
+                            } else {
+                                assertTrue
+                                    (duplicateComparisonFunction.compare
+                                     (foundData.getBytes(),
+                                      prevData.getBytes()) >= 0);
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+                        }
+
+                        prevKey = foundKey;
+                    }
+
+                    OperationStatus handleEndOfSet(OperationStatus status)
+                        throws DatabaseException {
+
+                        String foundKeyString = foundKey.getString();
+                        Hashtable ht = (Hashtable) dataMap.get(foundKeyString);
+                        assertNull(ht);
+                        return cursor.getNext(foundKey, foundData,
+                                              LockMode.DEFAULT);
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(N_TOP_LEVEL_KEYS, dw.nHandleEndOfSet);
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Create a bunch of random duplicate data.  Iterate over it using
+     * getPrevDup until the end of the dup set.  At end of set,
+     * handleEndOfSet is called to do a getPrev onto the previous dup set.
+     * Verify that descending order is maintained and that we reach end of
+     * set the proper number of times.
+     */
+    public void testGetPrevDup()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            createRandomDuplicateData(dataMap, false);
+
+            DataWalker dw = new BackwardsDupDataWalker(dataMap) {
+                    void perData(String foundKey, String foundData) {
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        if (ht.get(foundData) != null) {
+                            ht.remove(foundData);
+                            if (ht.size() == 0) {
+                                dataMap.remove(foundKey);
+                            }
+                        } else {
+                            fail("didn't find " + foundKey + "/" + foundData);
+                        }
+
+                        if (!prevKey.equals("")) {
+                            assertTrue(foundKey.compareTo(prevKey) <= 0);
+                        }
+
+                        if (prevKey.equals(foundKey)) {
+                            if (!prevData.equals("")) {
+                                if (duplicateComparisonFunction == null) {
+                                    assertTrue(foundData.compareTo
+					       (prevData) <= 0);
+                                } else {
+                                    assertTrue
+                                        (duplicateComparisonFunction.compare
+                                         (foundData.getBytes(),
+                                          prevData.getBytes()) <= 0);
+                                }
+                            }
+                            prevData = foundData;
+                        } else {
+                            prevData = "";
+                        }
+
+                        prevKey = foundKey;
+                    }
+
+                    OperationStatus handleEndOfSet(OperationStatus status)
+                        throws DatabaseException {
+
+                        String foundKeyString = foundKey.getString();
+                        Hashtable ht = (Hashtable) dataMap.get(foundKeyString);
+                        assertNull(ht);
+                        return cursor.getPrev(foundKey, foundData,
+                                              LockMode.DEFAULT);
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(N_TOP_LEVEL_KEYS, dw.nHandleEndOfSet);
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Create a bunch of random duplicate data.  Iterate over it using
+     * getNextNoDup until the end of the top level set.  Verify that
+     * ascending order is maintained and that we see the proper
+     * number of top-level keys.
+     */
+    public void testGetNextNoDup()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            createRandomDuplicateData(dataMap, false);
+
+            DataWalker dw = new NoDupDataWalker(dataMap) {
+                    void perData(String foundKey, String foundData) {
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        if (ht.get(foundData) != null) {
+                            dataMap.remove(foundKey);
+                        } else {
+                            fail("saw " +
+				 foundKey + "/" + foundData + " twice.");
+                        }
+
+                        assertTrue(foundKey.compareTo(prevKey) > 0);
+                        prevKey = foundKey;
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(N_TOP_LEVEL_KEYS, dw.nEntries);
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Create a bunch of random duplicate data.  Iterate over it using
+     * getPrevNoDup until the end of the top level set.  Verify that descending
+     * order is maintained and that we see the proper number of top-level
+     * keys.
+     */
+    public void testGetPrevNoDup()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            Hashtable dataMap = new Hashtable();
+
+            createRandomDuplicateData(dataMap, false);
+
+            DataWalker dw = new NoDupBackwardsDataWalker(dataMap) {
+                    void perData(String foundKey, String foundData) {
+                        Hashtable ht = (Hashtable) dataMap.get(foundKey);
+                        if (ht == null) {
+                            fail("didn't find ht " +
+				 foundKey + "/" + foundData);
+                        }
+
+                        if (ht.get(foundData) != null) {
+                            dataMap.remove(foundKey);
+                        } else {
+                            fail("saw " +
+				 foundKey + "/" + foundData + " twice.");
+                        }
+
+                        if (!prevKey.equals("")) {
+                            assertTrue(foundKey.compareTo(prevKey) < 0);
+                        }
+                        prevKey = foundKey;
+                    }
+                };
+            dw.setIgnoreDataMap(true);
+            dw.walkData();
+            assertEquals(N_TOP_LEVEL_KEYS, dw.nEntries);
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testIllegalDuplicateCreation()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            Hashtable dataMap = new Hashtable();
+
+            try {
+                createRandomDuplicateData(dataMap, false);
+                fail("didn't throw DuplicateEntryException");
+            } catch (DuplicateEntryException DEE) {
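+                /* Expected: this database does not allow duplicates. */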
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Just use the BtreeComparator that's already available.
+     */
+    private static Comparator duplicateComparator =
+	new DuplicateAscendingComparator();
+
+    private static Comparator reverseDuplicateComparator =
+	new DuplicateReverseComparator();
+
+    private static InvocationCountingBtreeComparator
+	invocationCountingComparator =
+	new InvocationCountingBtreeComparator();
+
+    @SuppressWarnings("serial")
+    public static class DuplicateAscendingComparator
+        extends BtreeComparator {
+
+	public DuplicateAscendingComparator() {
+	    super();
+	}
+    }
+
+    @SuppressWarnings("serial")
+    public static class DuplicateReverseComparator
+        extends ReverseBtreeComparator {
+
+	public DuplicateReverseComparator() {
+	    super();
+	}
+    }
+
+    @SuppressWarnings("serial")
+    public static class InvocationCountingBtreeComparator
+	extends BtreeComparator {
+
+	private int invocationCount = 0;
+
+	public int compare(Object o1, Object o2) {
+	    invocationCount++;
+	    return super.compare(o1, o2);
+	}
+
+	public int getInvocationCount() {
+	    return invocationCount;
+	}
+
+	public void setInvocationCount(int invocationCount) {
+	    this.invocationCount = invocationCount;
+	}
+    }
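+
+    /*
+     * A minimal usage sketch for the counting comparator (hypothetical; the
+     * tests above do not exercise it this way).  It assumes a database whose
+     * btree comparator is this class:
+     *
+     *     invocationCountingComparator.setInvocationCount(0);
+     *     // ... perform cursor puts/gets on the database ...
+     *     assertTrue(invocationCountingComparator.getInvocationCount() > 0);
+     */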
+
+    /*
+     * A special comparator that only looks at the first length-1 bytes of data
+     * so that the last byte can be changed without affecting "equality".  Use
+     * this for putCurrent tests of duplicates.
+     */
+    private static Comparator truncatedComparator = new TruncatedComparator();
+
+    @SuppressWarnings("serial")
+    private static class TruncatedComparator implements Comparator, 
+                                                        Serializable {
+	protected TruncatedComparator() {
+	}
+
+	public int compare(Object o1, Object o2) {
+	    byte[] arg1;
+	    byte[] arg2;
+	    arg1 = (byte[]) o1;
+	    arg2 = (byte[]) o2;
+	    int a1Len = arg1.length - 1;
+	    int a2Len = arg2.length - 1;
+
+	    int limit = Math.min(a1Len, a2Len);
+
+	    for (int i = 0; i < limit; i++) {
+		byte b1 = arg1[i];
+		byte b2 = arg2[i];
+		if (b1 == b2) {
+		    continue;
+		} else {
+		    /*
+		     * Remember, bytes are signed, so convert to
+		     * shorts so that we effectively do an unsigned
+		     * byte comparison.
+		     */
+		    short s1 = (short) (b1 & 0x7F);
+		    short s2 = (short) (b2 & 0x7F);
+		    if (b1 < 0) {
+			s1 |= 0x80;
+		    }
+		    if (b2 < 0) {
+			s2 |= 0x80;
+		    }
+		    return (s1 - s2);
+		}
+	    }
+
+	    return (a1Len - a2Len);
+	}
+    }
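+
+    /*
+     * For reference, the sign-fixup loop in TruncatedComparator.compare() is
+     * equivalent to widening each byte with a zero-extending mask.  A minimal
+     * sketch, not used by the tests above:
+     */
+    private static int unsignedByteCompare(byte b1, byte b2) {
+        /* b & 0xFF yields the unsigned value of the byte, 0..255. */
+        return (b1 & 0xFF) - (b2 & 0xFF);
+    }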
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..014fe0cb480efc791358abb11d1a1048fdbd2a06
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorDuplicateValidationTest.java,v 1.24.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.Enumeration;
+import java.util.Hashtable;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DBIN;
+
+public class DbCursorDuplicateValidationTest extends DbCursorTestBase {
+
+    public DbCursorDuplicateValidationTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    public void testValidateCursors()
+	throws Throwable {
+
+        initEnv(true);
+	Hashtable dataMap = new Hashtable();
+	createRandomDuplicateData(10, 1000, dataMap, false, false);
+
+	Hashtable bins = new Hashtable();
+
+	DataWalker dw = new DataWalker(bins) {
+		void perData(String foundKey, String foundData)
+		    throws DatabaseException {
+                    CursorImpl cursorImpl = DbTestProxy.dbcGetCursorImpl(cursor);
+		    BIN lastBin = cursorImpl.getBIN();
+		    DBIN lastDupBin = cursorImpl.getDupBIN();
+		    if (rnd.nextInt(10) < 8) {
+			cursor.delete();
+		    }
+                    dataMap.put(lastBin, lastBin);
+                    dataMap.put(lastDupBin, lastDupBin);
+		}
+	    };
+	dw.setIgnoreDataMap(true);
+	dw.walkData();
+	dw.close();
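+	/*
+	 * After the walk, no BIN touched by the (now closed) cursor should
+	 * still have a cursor registered on it.
+	 */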
+	Enumeration e = bins.keys();
+	while (e.hasMoreElements()) {
+	    BIN b = (BIN) e.nextElement();
+	    assertFalse(b.getCursorSet().size() > 0);
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorSearchTest.java b/test/com/sleepycat/je/dbi/DbCursorSearchTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..aa83fa3aa8dcd6751e02bef8aca618e5468cde7a
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorSearchTest.java
@@ -0,0 +1,298 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorSearchTest.java,v 1.37.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.Enumeration;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.StringDbt;
+
+/**
+ * Test cursor getSearch*
+ */
+public class DbCursorSearchTest extends DbCursorTestBase {
+
+    public DbCursorSearchTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    /**
+     * Put a small number of data items into the database
+     * then make sure we can retrieve them with getSearchKey.
+     */
+    public void testSimpleSearchKey()
+	throws DatabaseException {
+        initEnv(false);
+	doSimpleCursorPuts();
+        verify(simpleDataMap, false);
+    }
+
+    /**
+     * Put a small number of data items into the database
+     * then make sure we can retrieve them with getSearchKey.
+     * Delete them, and make sure we can't search for them anymore.
+     */
+    public void testSimpleDeleteAndSearchKey()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+        verify(simpleDataMap, true);
+    }
+
+    /**
+     * Put a large number of data items into the database,
+     * then make sure we can retrieve them with getSearchKey.
+     */
+    public void testLargeSearchKey()
+	throws DatabaseException {
+
+        initEnv(false);
+        Hashtable expectedData = new Hashtable();
+	doLargePut(expectedData, N_KEYS);
+        verify(expectedData, false);
+    }
+
+    /**
+     * Put a large number of data items into the database,
+     * then make sure we can retrieve them with getSearchKey.
+     */
+    public void testLargeDeleteAndSearchKey()
+	throws DatabaseException {
+
+        initEnv(false);
+        Hashtable expectedData = new Hashtable();
+	doLargePut(expectedData, N_KEYS);
+        verify(expectedData, true);
+    }
+
+    public void testLargeSearchKeyDuplicates()
+	throws DatabaseException {
+
+        initEnv(true);
+        Hashtable expectedData = new Hashtable();
+	createRandomDuplicateData(expectedData, false);
+
+        verifyDuplicates(expectedData);
+    }
+
+    /**
+     * Put a small number of data items into the database
+     * then make sure we can retrieve them with getSearchKey.
+     * See [#9337].
+     */
+    public void testSimpleSearchBothWithPartialDbt()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
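+	/*
+	 * Build a data entry whose backing buffer (100 bytes) is larger than
+	 * its logical size: setSize(3) means only the first three bytes,
+	 * "two", take part in the SET_BOTH comparison.
+	 */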
+	DatabaseEntry key = new DatabaseEntry("bar".getBytes());
+	DatabaseEntry data = new DatabaseEntry(new byte[100]);
+	data.setSize(3);
+	System.arraycopy("two".getBytes(), 0, data.getData(), 0, 3);
+	OperationStatus status =
+	    cursor2.getSearchBoth(key, data, LockMode.DEFAULT);
+	assertEquals(OperationStatus.SUCCESS, status);
+    }
+
+    public void testGetSearchBothNoDuplicatesAllowedSR9522()
+	throws DatabaseException {
+
+        initEnv(false);
+	doSimpleCursorPuts();
+	DatabaseEntry key = new DatabaseEntry("bar".getBytes());
+	DatabaseEntry data = new DatabaseEntry("two".getBytes());
+	OperationStatus status =
+	    cursor2.getSearchBoth(key, data, LockMode.DEFAULT);
+	assertEquals(OperationStatus.SUCCESS, status);
+    }
+
+    /**
+     * Make sure the database contains the set of data we put in.
+     */
+    private void verify(Hashtable expectedData, boolean doDelete)
+	throws DatabaseException {
+
+        Iterator iter = expectedData.entrySet().iterator();
+        StringDbt testKey = new StringDbt();
+        StringDbt testData = new StringDbt();
+
+        // Iterate over the expected values.
+        while (iter.hasNext()) {
+            Map.Entry entry = (Map.Entry) iter.next();
+            testKey.setString((String) entry.getKey());
+
+            // search for the expected values using SET.
+            OperationStatus status = cursor2.getSearchKey(testKey, testData,
+							  LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals((String) entry.getValue(), testData.getString());
+            assertEquals((String) entry.getKey(), testKey.getString());
+
+            // check that getCurrent returns the same thing.
+            status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals((String) entry.getValue(), testData.getString());
+            assertEquals((String) entry.getKey(), testKey.getString());
+
+	    if (doDelete) {
+		// Delete the entry and make sure that getSearchKey doesn't
+		// return it again.
+		status = cursor2.delete();
+		assertEquals(OperationStatus.SUCCESS, status);
+
+		// search for the expected values using SET.
+		status =
+		    cursor2.getSearchKey(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.NOTFOUND, status);
+
+		// search for the expected values using SET_BOTH.
+		status =
+		    cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.NOTFOUND, status);
+
+		// search for the expected values using SET_RANGE - should
+		// give SUCCESS except if this is the last key in the tree, in
+		// which case NOTFOUND.  It should never be KEYEMPTY.
+		// It would be nice to be definite about the expected
+		// status, but to do that we have to know whether this is the
+		// highest key in the set, which we don't currently track.
+		status = cursor2.getSearchKeyRange
+		    (testKey, testData, LockMode.DEFAULT);
+		assertTrue(status == OperationStatus.SUCCESS ||
+			   status == OperationStatus.NOTFOUND);
+	    } else {
+		// search for the expected values using SET_BOTH.
+		status =
+		    cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals((String) entry.getValue(), testData.getString());
+		assertEquals((String) entry.getKey(), testKey.getString());
+
+		// check that getCurrent returns the same thing.
+		status =
+		    cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals((String) entry.getValue(), testData.getString());
+		assertEquals((String) entry.getKey(), testKey.getString());
+
+		// check that SET_RANGE works as expected for exact keys
+		status = cursor2.getSearchKeyRange
+		    (testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals((String) entry.getValue(), testData.getString());
+		assertEquals((String) entry.getKey(), testKey.getString());
+
+		// search for the expected values using SET_RANGE.
+		byte[] keyBytes = testKey.getData();
+		keyBytes[keyBytes.length - 1]--;
+		status = cursor2.getSearchKeyRange
+		    (testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals((String) entry.getValue(), testData.getString());
+		assertEquals((String) entry.getKey(), testKey.getString());
+
+		// check that getCurrent returns the same thing.
+		status =
+		    cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals((String) entry.getValue(), testData.getString());
+		assertEquals((String) entry.getKey(), testKey.getString());
+	    }
+        }
+    }
+
+    private void verifyDuplicates(Hashtable expectedData)
+	throws DatabaseException {
+
+        Enumeration iter = expectedData.keys();
+        StringDbt testKey = new StringDbt();
+        StringDbt testData = new StringDbt();
+
+        // Iterate over the expected values.
+        while (iter.hasMoreElements()) {
+	    String key = (String) iter.nextElement();
+            testKey.setString(key);
+
+            // search for the expected values using SET.
+            OperationStatus status = cursor2.getSearchKey(testKey, testData,
+							  LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(key, testKey.getString());
+	    String dataString = testData.getString();
+
+            // check that getCurrent returns the same thing.
+            status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(dataString, testData.getString());
+            assertEquals(key, testKey.getString());
+
+            // search for the expected values using SET_RANGE.
+	    byte[] keyBytes = testKey.getData();
+	    keyBytes[keyBytes.length - 1]--;
+            status =
+		cursor2.getSearchKeyRange(testKey, testData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(dataString, testData.getString());
+            assertEquals(key, testKey.getString());
+
+            // check that getCurrent returns the same thing.
+            status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(dataString, testData.getString());
+            assertEquals(key, testKey.getString());
+
+	    Hashtable ht = (Hashtable) expectedData.get(key);
+
+	    Enumeration iter2 = ht.keys();
+	    while (iter2.hasMoreElements()) {
+		String expectedDataString = (String) iter2.nextElement();
+		testData.setString(expectedDataString);
+
+		// search for the expected values using SET_BOTH.
+		status =
+		    cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals(expectedDataString, testData.getString());
+		assertEquals(key, testKey.getString());
+
+		// check that getCurrent returns the same thing.
+		status =
+		    cursor2.getCurrent(testKey, testData, LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals(expectedDataString, testData.getString());
+		assertEquals(key, testKey.getString());
+
+		// search for the expected values using SET_RANGE_BOTH.
+		byte[] dataBytes = testData.getData();
+		dataBytes[dataBytes.length - 1]--;
+		status = cursor2.getSearchBothRange(testKey, testData,
+						    LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals(key, testKey.getString());
+		assertEquals(expectedDataString, testData.getString());
+
+		// check that getCurrent returns the same thing.
+		status = cursor2.getCurrent(testKey, testData,
+					    LockMode.DEFAULT);
+		assertEquals(OperationStatus.SUCCESS, status);
+		assertEquals(expectedDataString, testData.getString());
+		assertEquals(key, testKey.getString());
+	    }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorTest.java b/test/com/sleepycat/je/dbi/DbCursorTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..687066e04e3da837ff53bf19fee36951b4b191ff
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorTest.java
@@ -0,0 +1,1471 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorTest.java,v 1.85.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.Hashtable;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Various unit tests for CursorImpl.
+ */
+public class DbCursorTest extends DbCursorTestBase {
+
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        addTests(allTests, false/*keyPrefixing*/);
+        addTests(allTests, true/*keyPrefixing*/);
+        return allTests;
+    }
+
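+    /*
+     * TestSuite(Class) instantiates the test class once per test method, so
+     * setting keyPrefixing on each enumerated instance runs the entire suite
+     * in both prefixing modes.
+     */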
+    private static void addTests(TestSuite allTests,
+                                 boolean keyPrefixing) {
+
+        TestSuite suite = new TestSuite(DbCursorTest.class);
+        Enumeration e = suite.tests();
+        while (e.hasMoreElements()) {
+            DbCursorTest test = (DbCursorTest) e.nextElement();
+            test.keyPrefixing = keyPrefixing;
+            allTests.addTest(test);
+        }
+    }
+
+    public DbCursorTest()
+        throws DatabaseException {
+
+        super();
+    }
+
+    private boolean alreadyTornDown = false;
+    public void tearDown()
+        throws DatabaseException, IOException {
+
+        /*
+         * Don't keep appending ":keyPrefixing" to name for the tests which
+         * invoke setup() and tearDown() on their own.
+         * e.g. testSimpleGetPutNextKeyForwardTraverse().
+         */
+        if (!alreadyTornDown) {
+
+            /*
+             * Set test name for reporting; cannot be done in the ctor or
+             * setUp.
+             */
+            setName(getName() +
+                    (keyPrefixing ? ":keyPrefixing" : ":noKeyPrefixing"));
+            alreadyTornDown = true;
+        }
+        super.tearDown();
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order
+     * and ensure that they read back in ascending order.
+     */
+    public void testSimpleGetPut()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+
+            DataWalker dw = new DataWalker(simpleDataMap) {
+                    void perData(String foundKey, String foundData) {
+                        assertTrue(foundKey.compareTo(prevKey) >= 0);
+                        prevKey = foundKey;
+                    }
+                };
+            dw.walkData();
+            assertTrue(dw.nEntries == simpleKeyStrings.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test the internal Cursor.advanceCursor() entrypoint.
+     */
+    public void testCursorAdvance()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+
+	    StringDbt foundKey = new StringDbt();
+	    StringDbt foundData = new StringDbt();
+	    String prevKey = "";
+
+	    OperationStatus status = cursor.getFirst(foundKey, foundData,
+						     LockMode.DEFAULT);
+
+	    /*
+	     * Advance forward and then back to the first.  Rest of scan
+	     * should be as normal.
+	     */
+	    DbInternal.advanceCursor(cursor, foundKey, foundData);
+	    DbInternal.retrieveNext
+		(cursor, foundKey, foundData, LockMode.DEFAULT, GetMode.PREV);
+	    int nEntries = 0;
+	    while (status == OperationStatus.SUCCESS) {
+                String foundKeyString = foundKey.getString();
+                String foundDataString = foundData.getString();
+
+		assertTrue(foundKeyString.compareTo(prevKey) >= 0);
+		prevKey = foundKeyString;
+                nEntries++;
+
+		status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+	    }
+
+            assertTrue(nEntries == simpleKeyStrings.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order
+     * and ensure that they read back in descending order.
+     */
+    public void testSimpleGetPutBackwards()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+
+            DataWalker dw = new BackwardsDataWalker(simpleDataMap) {
+                    void perData(String foundKey, String foundData) {
+                        if (!prevKey.equals("")) {
+                            assertTrue(foundKey.compareTo(prevKey) <= 0);
+                        }
+                        prevKey = foundKey;
+                    }
+
+                    OperationStatus getFirst(StringDbt foundKey,
+					     StringDbt foundData)
+                        throws DatabaseException {
+
+                        return cursor.getLast(foundKey, foundData,
+                                              LockMode.DEFAULT);
+                    }
+
+                    OperationStatus getData(StringDbt foundKey,
+					    StringDbt foundData)
+                        throws DatabaseException {
+
+                        return cursor.getPrev(foundKey, foundData,
+                                              LockMode.DEFAULT);
+                    }
+                };
+            dw.walkData();
+            assertTrue(dw.nEntries == simpleKeyStrings.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Put a small number of data items into the database in a specific order
+     * and ensure that they read back in descending order.  When "quux" is
+     * found, insert "fub" into the database and make sure that it is also read
+     * back in the cursor.
+     */
+    public void testSimpleGetPut2()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleGetPut2("quux", "fub");
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void doSimpleGetPut2(String whenFoundDoInsert,
+                                String newKey)
+        throws DatabaseException {
+
+        doSimpleCursorPuts();
+
+        DataWalker dw =
+            new BackwardsDataWalker(whenFoundDoInsert, newKey, simpleDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    if (foundKey.equals(whenFoundDoInsert)) {
+                        putAndVerifyCursor(cursor2, new StringDbt(newKey),
+                                           new StringDbt("ten"), true);
+                        simpleDataMap.put(newKey, "ten");
+                    }
+                }
+
+                OperationStatus getFirst(StringDbt foundKey,
+					 StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getLast(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+
+                OperationStatus getData(StringDbt foundKey,
+					StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getPrev(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+            };
+        dw.walkData();
+        assertTrue(dw.nEntries == simpleKeyStrings.length + 1);
+    }
+
+    /**
+     * Iterate through each of the keys in the list of "simple keys".  For each
+     * one, create the database afresh, iterate through the entries in
+     * ascending order, and when the key being tested is found, insert the next
+     * highest key into the database.  Make sure that the key just inserted is
+     * retrieved during the cursor walk.  Lather, rinse, repeat.
+     */
+    public void testSimpleGetPutNextKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            tearDown();
+            for (int i = 0; i < simpleKeyStrings.length; i++) {
+                setUp();
+                initEnv(false);
+                doSimpleGetPut(true,
+                               simpleKeyStrings[i],
+                               nextKey(simpleKeyStrings[i]),
+                               1);
+                tearDown();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Iterate through each of the keys in the list of "simple keys".  For each
+     * one, create the database afresh, iterate through the entries in
+     * ascending order, and when the key being tested is found, insert the next
+     * lowest key into the database.  Make sure that the key just inserted is
+     * not retrieved during the cursor walk.  Lather, rinse, repeat.
+     */
+    public void testSimpleGetPutPrevKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            tearDown();
+            for (int i = 0; i < simpleKeyStrings.length; i++) {
+                setUp();
+                initEnv(false);
+                doSimpleGetPut(true, simpleKeyStrings[i],
+                               prevKey(simpleKeyStrings[i]), 0);
+                tearDown();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Iterate through each of the keys in the list of "simple keys".  For each
+     * one, create the database afresh, iterate through the entries in
+     * descending order, and when the key being tested is found, insert the
+     * next lowest key into the database.  Make sure that the key just inserted
+     * is retrieved during the cursor walk.  Lather, rinse, repeat.
+     */
+    public void testSimpleGetPutPrevKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            tearDown();
+            for (int i = 0; i < simpleKeyStrings.length; i++) {
+                setUp();
+                initEnv(false);
+                doSimpleGetPut(false, simpleKeyStrings[i],
+                               prevKey(simpleKeyStrings[i]), 1);
+                tearDown();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Iterate through each of the keys in the list of "simple keys".  For each
+     * one, create the database afresh, iterate through the entries in
+     * descending order, and when the key being tested is found, insert the
+     * next highest key into the database.  Make sure that the key just
+     * inserted is not retrieved during the cursor walk.  Lather, rinse,
+     * repeat.
+     */
+    public void testSimpleGetPutNextKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            tearDown();
+            for (int i = 0; i < simpleKeyStrings.length; i++) {
+                setUp();
+                initEnv(false);
+                doSimpleGetPut(false, simpleKeyStrings[i],
+                               nextKey(simpleKeyStrings[i]), 0);
+                tearDown();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for the above four tests.
+     */
+    private void doSimpleGetPut(boolean forward,
+                                String whenFoundDoInsert,
+                                String newKey,
+                                int additionalEntries)
+        throws DatabaseException {
+
+        doSimpleCursorPuts();
+
+        DataWalker dw;
+        if (forward) {
+            dw = new DataWalker(whenFoundDoInsert, newKey, simpleDataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        if (foundKey.equals(whenFoundDoInsert)) {
+                            putAndVerifyCursor(cursor2, new StringDbt(newKey),
+                                               new StringDbt("ten"), true);
+                            simpleDataMap.put(newKey, "ten");
+                        }
+                    }
+                };
+        } else {
+            dw = new BackwardsDataWalker(whenFoundDoInsert,
+					 newKey,
+                                         simpleDataMap) {
+		    void perData(String foundKey, String foundData)
+			throws DatabaseException {
+
+			if (foundKey.equals(whenFoundDoInsert)) {
+			    putAndVerifyCursor(cursor2, new StringDbt(newKey),
+					       new StringDbt("ten"), true);
+			    simpleDataMap.put(newKey, "ten");
+			}
+		    }
+
+		    OperationStatus getFirst(StringDbt foundKey,
+					     StringDbt foundData)
+			throws DatabaseException {
+
+			return cursor.getLast(foundKey, foundData,
+					      LockMode.DEFAULT);
+		    }
+
+		    OperationStatus getData(StringDbt foundKey,
+					    StringDbt foundData)
+			throws DatabaseException {
+
+			return cursor.getPrev(foundKey, foundData,
+					      LockMode.DEFAULT);
+		    }
+                };
+        }
+        dw.walkData();
+        assertEquals(simpleKeyStrings.length + additionalEntries, dw.nEntries);
+    }
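+
+    /*
+     * The walkers follow a template-method pattern: the base DataWalker
+     * (defined in DbCursorTestBase) presumably scans forward, while the
+     * BackwardsDataWalker overrides of getFirst() and getData() above swap
+     * in cursor.getLast()/cursor.getPrev() so the same perData() callbacks
+     * work in either direction.
+     */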
+
+    /**
+     * Put a small number of data items into the database in a specific order
+     * and ensure that they read back in descending order.  Replace the data
+     * portion for each one, then read back again and make sure it was replaced
+     * properly.
+     */
+    public void testSimpleReplace()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleReplace();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void doSimpleReplace()
+        throws DatabaseException {
+
+        doSimpleCursorPuts();
+
+        DataWalker dw =
+            new DataWalker(simpleDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    String newData = foundData + "x";
+                    cursor.putCurrent(new StringDbt(newData));
+                    simpleDataMap.put(foundKey, newData);
+                }
+            };
+        dw.walkData();
+        dw = new DataWalker(simpleDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundData.equals(simpleDataMap.get(foundKey)));
+                }
+            };
+        dw.walkData();
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order.  After each element is retrieved, insert the next
+     * lowest key into the tree.  Ensure that the element just inserted is not
+     * returned by the cursor.  Ensure that the elements are returned in
+     * ascending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutPrevKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutPrevKeyForwardTraverse();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutPrevKeyForwardTraverse()
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        doLargePut(dataMap, N_KEYS);
+
+        DataWalker dw = new DataWalker(dataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    putAndVerifyCursor(cursor2,
+                                       new StringDbt(prevKey(foundKey)),
+                                       new StringDbt
+                                       (Integer.toString(dataMap.size() +
+                                                         nEntries)),
+                                       true);
+                    prevKey = foundKey;
+                    assertTrue(dataMap.get(foundKey) != null);
+                    dataMap.remove(foundKey);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        assertTrue(dw.nEntries == N_KEYS);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree
+     * in ascending order.  Ensure that count() always returns 1 for each
+     * data item returned.
+     */
+    public void testLargeCount()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeCount();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeCount()
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        doLargePut(dataMap, N_KEYS);
+
+        DataWalker dw = new DataWalker(dataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(cursor.count() == 1);
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    prevKey = foundKey;
+                    assertTrue(dataMap.get(foundKey) != null);
+                    dataMap.remove(foundKey);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        assertTrue(dw.nEntries == N_KEYS);
+    }
+
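+    /*
+     * Disabled: the "xx" prefix keeps JUnit 3 from discovering this as a
+     * test.  Rename it to testGetPerf to run a simple sequential-read pass
+     * over 50000 entries.
+     */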
+    public void xxtestGetPerf()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            final int N = 50000;
+            int count = 0;
+            doLargePutPerf(N);
+
+            StringDbt foundKey = new StringDbt();
+            StringDbt foundData = new StringDbt();
+            OperationStatus status;
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+
+            while (status == OperationStatus.SUCCESS) {
+                status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+                count++;
+            }
+
+            assertTrue(count == N);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert a bunch of key/data pairs.  Read them back and replace each of
+     * the data.  Read the pairs back again and make sure the replace did the
+     * right thing.
+     */
+    public void testLargeReplace()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            Hashtable dataMap = new Hashtable();
+            doLargePut(dataMap, N_KEYS);
+
+            DataWalker dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        String newData = foundData + "x";
+                        cursor.putCurrent(new StringDbt(newData));
+                        dataMap.put(foundKey, newData);
+                    }
+                };
+            dw.walkData();
+            dw = new DataWalker(dataMap) {
+                    void perData(String foundKey, String foundData)
+                        throws DatabaseException {
+
+                        assertTrue(foundData.equals(dataMap.get(foundKey)));
+                        dataMap.remove(foundKey);
+                    }
+                };
+            dw.walkData();
+            assertTrue(dw.nEntries == N_KEYS);
+            assertTrue(dataMap.size() == 0);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * descending order.  After each element is retrieved, insert the next
+     * highest key into the tree.  Ensure that the element just inserted is not
+     * returned by the cursor.  Ensure that the elements are returned in
+     * descending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutNextKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            tearDown();
+            for (int i = 0; i < N_ITERS; i++) {
+                setUp();
+                initEnv(false);
+                doLargeGetPutNextKeyBackwardsTraverse();
+                tearDown();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutNextKeyBackwardsTraverse()
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        doLargePut(dataMap, N_KEYS);
+
+        DataWalker dw = new BackwardsDataWalker(dataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    if (!prevKey.equals("")) {
+                        assertTrue(foundKey.compareTo(prevKey) <= 0);
+                    }
+                    putAndVerifyCursor(cursor2,
+                                       new StringDbt(nextKey(foundKey)),
+                                       new StringDbt
+                                       (Integer.toString(dataMap.size() +
+                                                         nEntries)),
+                                       true);
+                    prevKey = foundKey;
+                    assertTrue(dataMap.get(foundKey) != null);
+                    dataMap.remove(foundKey);
+                }
+
+                OperationStatus getFirst(StringDbt foundKey,
+					 StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getLast(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+
+                OperationStatus getData(StringDbt foundKey,
+					StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getPrev(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        assertTrue(dw.nEntries == N_KEYS);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order.  After each element is retrieved, insert the next
+     * highest key into the tree.  Ensure that the element just inserted is
+     * returned by the cursor.  Ensure that the elements are returned in
+     * ascending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutNextKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutNextKeyForwardTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutNextKeyForwardTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new DataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    if (addedDataMap.get(foundKey) == null) {
+                        String newKey = nextKey(foundKey);
+                        String newData =
+                            Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        addedDataMap.put(newKey, newData);
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        addedDataMap.remove(foundKey);
+                    }
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys * 2);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * descending order.  After each element is retrieved, insert the next
+     * lowest key into the tree.  Ensure that the element just inserted is
+     * returned by the cursor.  Ensure that the elements are returned in
+     * descending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutPrevKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutPrevKeyBackwardsTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutPrevKeyBackwardsTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+		    if (!prevKey.equals("")) {
+                        assertTrue(foundKey.compareTo(prevKey) <= 0);
+                    }
+                    if (addedDataMap.get(foundKey) == null) {
+                        String newKey = prevKey(foundKey);
+                        String newData =
+                            Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        addedDataMap.put(newKey, newData);
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        addedDataMap.remove(foundKey);
+                    }
+                }
+
+                OperationStatus getFirst(StringDbt foundKey,
+					 StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getLast(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+
+                OperationStatus getData(StringDbt foundKey,
+					StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getPrev(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys * 2);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order.  After each element is retrieved, insert the next
+     * highest and next lowest keys into the tree.  Ensure that the next
+     * highest element just inserted is returned by the cursor.  Ensure that
+     * the elements are returned in ascending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutBothKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutBothKeyForwardTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutBothKeyForwardTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new DataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    if (addedDataMap.get(foundKey) == null) {
+                        String newKey = nextKey(foundKey);
+                        String newData =
+                            Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        addedDataMap.put(newKey, newData);
+                        newKey = prevKey(foundKey);
+                        newData = Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        addedDataMap.remove(foundKey);
+                    }
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys * 2);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * descending order.  After each element is retrieved, insert the next
+     * highest and next lowest keys into the tree.  Ensure that the next lowest
+     * element just inserted is returned by the cursor.  Ensure that the
+     * elements are returned in descending order.  Lather, rinse, repeat.
+     */
+    public void testLargeGetPutBothKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutBothKeyBackwardsTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutBothKeyBackwardsTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    if (!prevKey.equals("")) {
+                        assertTrue(foundKey.compareTo(prevKey) <= 0);
+                    }
+                    if (addedDataMap.get(foundKey) == null) {
+                        String newKey = nextKey(foundKey);
+                        String newData =
+                            Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        newKey = prevKey(foundKey);
+                        newData = Integer.toString(dataMap.size() + nEntries);
+                        putAndVerifyCursor(cursor2,
+                                           new StringDbt(newKey),
+                                           new StringDbt(newData),
+                                           true);
+                        addedDataMap.put(newKey, newData);
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        addedDataMap.remove(foundKey);
+                    }
+                }
+
+                OperationStatus getFirst(StringDbt foundKey,
+					 StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getLast(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+
+                OperationStatus getData(StringDbt foundKey,
+					StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getPrev(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys * 2);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * ascending order.  After each element is retrieved, insert a new random
+     * key/data pair into the tree.  Ensure that the element just inserted is
+     * returned by the cursor if it is greater than the current element.
+     * Ensure that the elements are returned in ascending order.  Lather,
+     * rinse, repeat.
+     */
+    public void testLargeGetPutRandomKeyForwardTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutRandomKeyForwardTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutRandomKeyForwardTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new DataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    byte[] key = new byte[N_KEY_BYTES];
+                    TestUtils.generateRandomAlphaBytes(key);
+                    String newKey = new String(key);
+                    String newData =
+                        Integer.toString(dataMap.size() + nEntries);
+                    putAndVerifyCursor(cursor2,
+                                       new StringDbt(newKey),
+                                       new StringDbt(newData),
+                                       true);
+                    if (newKey.compareTo(foundKey) > 0) {
+                        addedDataMap.put(newKey, newData);
+                        extraVisibleEntries++;
+                    }
+                    if (addedDataMap.get(foundKey) == null) {
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        if (addedDataMap.remove(foundKey) == null) {
+                            fail(foundKey + " not found in either datamap");
+                        }
+                    }
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys + dw.extraVisibleEntries);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Iterate through the tree in
+     * descending order.  After each element is retrieved, insert a new random
+     * key/data pair into the tree.  Ensure that the element just inserted is
+     * returned by the cursor if it is less than the current element.  Ensure
+     * that the elements are returned in descending order.  Lather, rinse,
+     * repeat.
+     */
+    public void testLargeGetPutRandomKeyBackwardsTraverse()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doLargeGetPutRandomKeyBackwardsTraverse(N_KEYS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetPutRandomKeyBackwardsTraverse(int nKeys)
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        Hashtable addedDataMap = new Hashtable();
+        doLargePut(dataMap, nKeys);
+
+        DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    if (!prevKey.equals("")) {
+                        assertTrue(foundKey.compareTo(prevKey) <= 0);
+                    }
+                    byte[] key = new byte[N_KEY_BYTES];
+                    TestUtils.generateRandomAlphaBytes(key);
+                    String newKey = new String(key);
+                    String newData =
+                        Integer.toString(dataMap.size() + nEntries);
+                    putAndVerifyCursor(cursor2,
+                                       new StringDbt(newKey),
+                                       new StringDbt(newData),
+                                       true);
+                    if (newKey.compareTo(foundKey) < 0) {
+                        addedDataMap.put(newKey, newData);
+                        extraVisibleEntries++;
+                    }
+                    if (addedDataMap.get(foundKey) == null) {
+                        prevKey = foundKey;
+                        assertTrue(dataMap.get(foundKey) != null);
+                        dataMap.remove(foundKey);
+                    } else {
+                        if (addedDataMap.remove(foundKey) == null) {
+                            fail(foundKey + " not found in either datamap");
+                        }
+                    }
+                }
+
+                OperationStatus getFirst(StringDbt foundKey,
+					 StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getLast(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+
+                OperationStatus getData(StringDbt foundKey,
+					StringDbt foundData)
+                    throws DatabaseException {
+
+                    return cursor.getPrev(foundKey, foundData,
+                                          LockMode.DEFAULT);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        if (addedDataMap.size() > 0) {
+            System.out.println(addedDataMap);
+            fail("addedDataMap still has entries");
+        }
+        assertTrue(dw.nEntries == nKeys + dw.extraVisibleEntries);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Set a btreeComparison function.
+     * Iterate through the tree in ascending order.  Ensure that the elements
+     * are returned in ascending order.
+     */
+    public void testLargeGetForwardTraverseWithNormalComparisonFunction()
+        throws Throwable {
+
+        try {
+            tearDown();
+            btreeComparisonFunction = btreeComparator;
+            setUp();
+            initEnv(false);
+            doLargeGetForwardTraverseWithNormalComparisonFunction();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetForwardTraverseWithNormalComparisonFunction()
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        doLargePut(dataMap, N_KEYS);
+
+        DataWalker dw = new DataWalker(dataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    assertTrue(foundKey.compareTo(prevKey) >= 0);
+                    prevKey = foundKey;
+                    assertTrue(dataMap.get(foundKey) != null);
+                    dataMap.remove(foundKey);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        assertTrue(dw.nEntries == N_KEYS);
+    }
+
+    /**
+     * Insert N_KEYS data items into a tree.  Set a reverse order
+     * btreeComparison function.  Iterate through the tree in forward order.
+     * Ensure that the elements are returned in descending order, as dictated
+     * by the reverse comparator.
+     */
+    public void testLargeGetForwardTraverseWithReverseComparisonFunction()
+        throws Throwable {
+
+        try {
+            tearDown();
+            btreeComparisonFunction = reverseBtreeComparator;
+            setUp();
+            initEnv(false);
+            doLargeGetForwardTraverseWithReverseComparisonFunction();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doLargeGetForwardTraverseWithReverseComparisonFunction()
+        throws DatabaseException {
+
+        Hashtable dataMap = new Hashtable();
+        doLargePut(dataMap, N_KEYS);
+
+        DataWalker dw = new DataWalker(dataMap) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    if (prevKey.length() != 0) {
+                        assertTrue(foundKey.compareTo(prevKey) <= 0);
+                    }
+                    prevKey = foundKey;
+                    assertTrue(dataMap.get(foundKey) != null);
+                    dataMap.remove(foundKey);
+                }
+            };
+        dw.walkData();
+        if (dataMap.size() > 0) {
+            fail("dataMap still has entries");
+        }
+        assertTrue(dw.nEntries == N_KEYS);
+    }
+
+    public void testIllegalArgs()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            /* Put some data so that we can get a cursor. */
+            doSimpleCursorPuts();
+
+            DataWalker dw = new DataWalker(simpleDataMap) {
+                    void perData(String foundKey, String foundData) {
+
+                        /* getCurrent() */
+                        try {
+                            cursor.getCurrent(new StringDbt(""),
+                                              null,
+                                              LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        try {
+                            cursor.getCurrent(null,
+                                              new StringDbt(""),
+                                              LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        /* getFirst() */
+                        try {
+                            cursor.getFirst(new StringDbt(""),
+                                            null,
+                                            LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        try {
+                            cursor.getFirst(null,
+                                            new StringDbt(""),
+                                            LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        /* getNext(), getPrev(), getNextDup(),
+                           getNextNoDup(), getPrevNoDup() */
+                        try {
+                            cursor.getNext(new StringDbt(""),
+                                           null,
+                                           LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        try {
+                            cursor.getNext(null,
+                                           new StringDbt(""),
+                                           LockMode.DEFAULT);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        /* putXXX() */
+                        try {
+                            cursor.put(new StringDbt(""), null);
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+
+                        try {
+                            cursor.put(null, new StringDbt(""));
+                            fail("didn't throw NullPointerException");
+                        } catch (NullPointerException IAE) {
+                        } catch (DatabaseException DBE) {
+                            fail("threw DatabaseException not " +
+				 "NullPointerException");
+                        }
+                    }
+                };
+            dw.walkData();
+            assertTrue(dw.nEntries == simpleKeyStrings.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testCursorOutOfBoundsBackwards()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+
+            StringDbt foundKey = new StringDbt();
+            StringDbt foundData = new StringDbt();
+            OperationStatus status;
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals("aaa", foundKey.getString());
+            assertEquals("four", foundData.getString());
+
+            status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT);
+
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals("bar", foundKey.getString());
+            assertEquals("two", foundData.getString());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testCursorOutOfBoundsForwards()
+        throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+
+            StringDbt foundKey = new StringDbt();
+            StringDbt foundData = new StringDbt();
+            OperationStatus status;
+            status = cursor.getLast(foundKey, foundData, LockMode.DEFAULT);
+
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals("quux", foundKey.getString());
+            assertEquals("seven", foundData.getString());
+
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT);
+
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals("mumble", foundKey.getString());
+            assertEquals("eight", foundData.getString());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testTwiceClosedCursor()
+	throws Throwable {
+
+        try {
+            initEnv(false);
+            doSimpleCursorPuts();
+            Cursor cursor = exampleDb.openCursor(null, null);
+            cursor.close();
+            try {
+                cursor.close();
+                fail("didn't catch DatabaseException for twice closed cursor");
+            } catch (DatabaseException DBE) {
+            }
+            try {
+                cursor.put
+                    (new StringDbt("bogus"), new StringDbt("thingy"));
+                fail("didn't catch DatabaseException for re-use of cursor");
+            } catch (DatabaseException DBE) {
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testTreeSplittingWithDeletedIdKey()
+	throws Throwable {
+
+	treeSplittingWithDeletedIdKeyWorker();
+    }
+
+    public void testTreeSplittingWithDeletedIdKeyWithUserComparison()
+	throws Throwable {
+
+	tearDown();
+	btreeComparisonFunction = btreeComparator;
+	setUp();
+	treeSplittingWithDeletedIdKeyWorker();
+    }
+
+    static private Comparator btreeComparator = new BtreeComparator();
+
+    static private Comparator reverseBtreeComparator =
+        new ReverseBtreeComparator();
+
+    private void treeSplittingWithDeletedIdKeyWorker()
+	throws Throwable {
+
+	initEnv(false);
+	StringDbt data = new StringDbt("data");
+
+	Cursor cursor = exampleDb.openCursor(null, null);
+	cursor.put(new StringDbt("AGPFX"), data);
+	cursor.put(new StringDbt("AHHHH"), data);
+	cursor.put(new StringDbt("AIIII"), data);
+	cursor.put(new StringDbt("AAAAA"), data);
+	cursor.put(new StringDbt("AABBB"), data);
+	cursor.put(new StringDbt("AACCC"), data);
+	cursor.close();
+	exampleDb.delete(null, new StringDbt("AGPFX"));
+	exampleEnv.compress();
+	cursor = exampleDb.openCursor(null, null);
+	cursor.put(new StringDbt("AAAAB"), data);
+	cursor.put(new StringDbt("AAAAC"), data);
+	cursor.close();
+	validateDatabase();
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbCursorTestBase.java b/test/com/sleepycat/je/dbi/DbCursorTestBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..3de77a7cb9ee90e3ade8d97425a0fdabe1257020
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbCursorTestBase.java
@@ -0,0 +1,757 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbCursorTestBase.java,v 1.106.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.Hashtable;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DuplicateEntryException;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Base class providing shared setup, teardown and helper routines for the
+ * CursorImpl unit tests.
+ */
+public class DbCursorTestBase extends TestCase {
+    protected File envHome;
+    protected Cursor cursor;
+    protected Cursor cursor2;
+    protected Database exampleDb;
+    protected Environment exampleEnv;
+    protected Hashtable simpleDataMap;
+    protected Comparator<byte[]> btreeComparisonFunction = null;
+    protected Comparator<byte[]> duplicateComparisonFunction = null;
+    protected StringDbt[] simpleKeys;
+    protected StringDbt[] simpleData;
+    protected boolean duplicatesAllowed;
+    protected boolean keyPrefixing;
+
+    protected static final int N_KEY_BYTES = 10;
+    protected static final int N_ITERS = 2;
+    protected static final int N_KEYS = 5000;
+    protected static final int N_TOP_LEVEL_KEYS = 10;
+    protected static final int N_DUPLICATES_PER_KEY = 2500;
+    protected static final int N_COUNT_DUPLICATES_PER_KEY = 500;
+    protected static final int N_COUNT_TOP_KEYS = 1;
+
+    protected static int dbCnt = 0;
+
+    public DbCursorTestBase()
+        throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    protected void initEnv(boolean duplicatesAllowed)
+        throws DatabaseException {
+
+        initEnvInternal(duplicatesAllowed, false);
+    }
+
+    protected void initEnvTransactional(boolean duplicatesAllowed)
+        throws DatabaseException {
+
+        initEnvInternal(duplicatesAllowed, true);
+    }
+
+    private void initEnvInternal(boolean duplicatesAllowed,
+                                 boolean transactionalDatabase)
+        throws DatabaseException {
+
+        this.duplicatesAllowed = duplicatesAllowed;
+
+        /* Set up sample data. */
+        int nKeys = simpleKeyStrings.length;
+        simpleKeys = new StringDbt[nKeys];
+        simpleData = new StringDbt[nKeys];
+        for (int i = 0; i < nKeys; i++) {
+            simpleKeys[i] = new StringDbt(simpleKeyStrings[i]);
+            simpleData[i] = new StringDbt(simpleDataStrings[i]);
+        }
+
+        /* Set up an environment. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(),
+                                 Long.toString(1 << 24));
+        envConfig.setAllowCreate(true);
+        exampleEnv = new Environment(envHome, envConfig);
+
+        /* Set up a database. */
+        String databaseName = "simpleDb" + dbCnt++;
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        if (btreeComparisonFunction != null) {
+            dbConfig.setBtreeComparator(btreeComparisonFunction);
+        }
+        if (duplicateComparisonFunction != null) {
+            dbConfig.setDuplicateComparator(duplicateComparisonFunction);
+        }
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(duplicatesAllowed);
+	dbConfig.setTransactional(transactionalDatabase);
+        dbConfig.setKeyPrefixing(keyPrefixing);
+        exampleDb = exampleEnv.openDatabase(null, databaseName, dbConfig);
+
+        /* Set up cursors. */
+        cursor = exampleDb.openCursor(null, null);
+        cursor2 = exampleDb.openCursor(null, null);
+        simpleDataMap = new Hashtable();
+    }
+
+    void closeEnv() {
+        simpleKeys = null;
+        simpleData = null;
+        simpleDataMap = null;
+
+        try {
+            if (cursor != null) {
+                cursor.close();
+                cursor = null;
+            }
+        } catch (DatabaseException ignore) {
+            /* Ignore; the test may have closed this cursor already. */
+        }
+
+        try {
+            if (cursor2 != null) {
+                cursor2.close();
+                cursor2 = null;
+            }
+        } catch (DatabaseException ignore) {
+            /* Same as above. */
+        }
+
+        try {
+            if (exampleDb != null) {
+                exampleDb.close();
+                exampleDb = null;
+            }
+        } catch (Exception ignore) {
+        }
+
+        try {
+            if (exampleEnv != null) {
+                exampleEnv.close();
+                exampleEnv = null;
+            } 
+        } catch (Exception ignore) {
+
+            /*
+             * Ignore this exception.  It's caused by us calling
+             * tearDown() within the test.  Each tearDown() call
+             * forces the database closed.  So when the call from
+             * junit comes along, it's already closed.
+             */
+        }
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        closeEnv();
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    protected String[] simpleKeyStrings = {
+        "foo", "bar", "baz", "aaa", "fubar",
+        "foobar", "quux", "mumble", "froboy" };
+
+    protected String[] simpleDataStrings = {
+        "one", "two", "three", "four", "five",
+        "six", "seven", "eight", "nine" };
+
+    protected void doSimpleCursorPuts()
+        throws DatabaseException {
+
+        for (int i = 0; i < simpleKeyStrings.length; i++) {
+            putAndVerifyCursor(cursor, simpleKeys[i], simpleData[i], true);
+            simpleDataMap.put(simpleKeyStrings[i], simpleDataStrings[i]);
+        }
+    }
+
+    /**
+     * A class that performs cursor walking.  The walkData method iterates
+     * over all data in the database and calls the "perData()" method on
+     * each data item.  The perData() method is expected to be overridden
+     * by the user.
+     */
+    protected class DataWalker {
+        String prevKey = "";
+        String prevData = "";
+        int nEntries = 0;
+        int deletedEntries = 0;
+        int extraVisibleEntries = 0;
+        protected int nHandleEndOfSet = 0;
+        String whenFoundDoInsert;
+        String newKey;
+        String deletedEntry = null;
+        Hashtable dataMap;
+        Hashtable addedDataMap;
+        Random rnd = new Random();
+        /* True if the datamap processing should not happen in the walkData
+           routine. */
+        boolean ignoreDataMap = false;
+
+        DataWalker(Hashtable dataMap) {
+            this.dataMap = dataMap;
+            this.addedDataMap = null;
+        }
+
+        DataWalker(Hashtable dataMap,
+                   Hashtable addedDataMap) {
+            this.dataMap = dataMap;
+            this.addedDataMap = addedDataMap;
+        }
+
+        DataWalker() {
+            this.dataMap = simpleDataMap;
+            this.addedDataMap = null;
+        }
+
+        DataWalker(String whenFoundDoInsert,
+                   String newKey,
+                   Hashtable dataMap) {
+            this.whenFoundDoInsert = whenFoundDoInsert;
+            this.newKey = newKey;
+            this.dataMap = dataMap;
+            this.addedDataMap = null;
+        }
+
+        void setIgnoreDataMap(boolean ignoreDataMap) {
+            this.ignoreDataMap = ignoreDataMap;
+        }
+
+        OperationStatus getFirst(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getFirst(foundKey, foundData,
+                                   LockMode.DEFAULT);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getNext(foundKey, foundData,
+                                  LockMode.DEFAULT);
+        }
+
+        StringDbt foundKey = new StringDbt();
+        StringDbt foundData = new StringDbt();
+
+        void walkData()
+            throws DatabaseException {
+
+            /* get some data back */
+            OperationStatus status = getFirst(foundKey, foundData);
+
+            while (status == OperationStatus.SUCCESS) {
+                String foundKeyString = foundKey.getString();
+                String foundDataString = foundData.getString();
+
+                if (!ignoreDataMap) {
+                    if (dataMap.get(foundKeyString) != null) {
+                        assertEquals(dataMap.get(foundKeyString),
+                                     foundDataString);
+                    } else if (addedDataMap != null &&
+                               addedDataMap.get(foundKeyString) != null) {
+                        assertEquals(addedDataMap.get(foundKeyString),
+                                     foundDataString);
+                    } else {
+                        fail("didn't find key in either map (" +
+                             foundKeyString +
+                             ")");
+                    }
+                }
+
+                LockStats stat =
+                    DbTestProxy.dbcGetCursorImpl(cursor).getLockStats();
+                assertEquals(1, stat.getNReadLocks());
+                assertEquals(0, stat.getNWriteLocks());
+                perData(foundKeyString, foundDataString);
+                nEntries++;
+                status = getData(foundKey, foundData);
+                if (status != OperationStatus.SUCCESS) {
+                    nHandleEndOfSet++;
+                    status = handleEndOfSet(status);
+                }
+            }
+            TestUtils.validateNodeMemUsage(DbInternal.
+                                           envGetEnvironmentImpl(exampleEnv),
+                                           false);
+        }
+
+        void perData(String foundKey, String foundData)
+            throws DatabaseException {
+
+            /* to be overridden */
+        }
+
+        OperationStatus handleEndOfSet(OperationStatus status)
+            throws DatabaseException {
+
+            return status;
+        }
+
+        void close()
+            throws DatabaseException {
+
+            cursor.close();
+        }
+    }
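+
+    /*
+     * A minimal usage sketch (illustrative only, not referenced by the
+     * tests): a test typically creates an anonymous DataWalker subclass
+     * that overrides perData() to check each key/data pair, then calls
+     * walkData().  Note that "dataMap" below resolves to the DataWalker
+     * field, so the walker consumes the expected-values map as it visits
+     * entries.
+     */
+    protected void exampleDataWalkerUsage(Hashtable expected)
+        throws DatabaseException {
+
+        DataWalker walker = new DataWalker(expected) {
+                void perData(String foundKey, String foundData)
+                    throws DatabaseException {
+
+                    /* Each visited key must be present with matching data. */
+                    assertEquals(dataMap.get(foundKey), foundData);
+                    dataMap.remove(foundKey);
+                }
+            };
+        walker.walkData();
+        /* Every expected entry should have been visited exactly once. */
+        assertTrue(expected.isEmpty());
+    }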
+
+    protected class BackwardsDataWalker extends DataWalker {
+        BackwardsDataWalker(Hashtable dataMap) {
+            super(dataMap);
+        }
+
+        BackwardsDataWalker(Hashtable dataMap,
+                            Hashtable addedDataMap) {
+            super(dataMap, addedDataMap);
+        }
+
+        BackwardsDataWalker(String whenFoundDoInsert,
+                            String newKey,
+                            Hashtable dataMap) {
+            super(whenFoundDoInsert, newKey, dataMap);
+        }
+
+        OperationStatus getFirst(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getLast(foundKey, foundData,
+                                  LockMode.DEFAULT);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getPrev(foundKey, foundData,
+                                  LockMode.DEFAULT);
+        }
+    }
+
+    protected class DupDataWalker extends DataWalker {
+        DupDataWalker(Hashtable dataMap) {
+            super(dataMap);
+        }
+
+        DupDataWalker(Hashtable dataMap,
+                      Hashtable addedDataMap) {
+            super(dataMap, addedDataMap);
+        }
+
+        DupDataWalker(String whenFoundDoInsert,
+                      String newKey,
+                      Hashtable dataMap) {
+            super(whenFoundDoInsert, newKey, dataMap);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getNextDup(foundKey, foundData,
+                                     LockMode.DEFAULT);
+        }
+    }
+
+    protected class BackwardsDupDataWalker extends BackwardsDataWalker {
+        BackwardsDupDataWalker(Hashtable dataMap) {
+            super(dataMap);
+        }
+
+        BackwardsDupDataWalker(Hashtable dataMap,
+                               Hashtable addedDataMap) {
+            super(dataMap, addedDataMap);
+        }
+
+        BackwardsDupDataWalker(String whenFoundDoInsert,
+                               String newKey,
+                               Hashtable dataMap) {
+            super(whenFoundDoInsert, newKey, dataMap);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getPrevDup(foundKey, foundData,
+                                     LockMode.DEFAULT);
+        }
+    }
+
+    protected class NoDupDataWalker extends DataWalker {
+        NoDupDataWalker(Hashtable dataMap) {
+            super(dataMap);
+        }
+
+        NoDupDataWalker(Hashtable dataMap,
+                        Hashtable addedDataMap) {
+            super(dataMap, addedDataMap);
+        }
+
+        NoDupDataWalker(String whenFoundDoInsert,
+                        String newKey,
+                        Hashtable dataMap) {
+            super(whenFoundDoInsert, newKey, dataMap);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getNextNoDup(foundKey, foundData,
+                                       LockMode.DEFAULT);
+        }
+    }
+
+    protected class NoDupBackwardsDataWalker extends BackwardsDataWalker {
+        NoDupBackwardsDataWalker(Hashtable dataMap) {
+            super(dataMap);
+        }
+
+        NoDupBackwardsDataWalker(Hashtable dataMap,
+                                 Hashtable addedDataMap) {
+            super(dataMap, addedDataMap);
+        }
+
+        NoDupBackwardsDataWalker(String whenFoundDoInsert,
+                                 String newKey,
+                                 Hashtable dataMap) {
+            super(whenFoundDoInsert, newKey, dataMap);
+        }
+
+        OperationStatus getData(StringDbt foundKey, StringDbt foundData)
+            throws DatabaseException {
+
+            return cursor.getPrevNoDup(foundKey, foundData,
+                                       LockMode.DEFAULT);
+        }
+    }
+
+    /**
+     * Construct the next highest key.
+     */
+    protected String nextKey(String key) {
+        byte[] sb = key.getBytes();
+        sb[sb.length - 1]++;
+        return new String(sb);
+    }
+
+    /**
+     * Construct the next lowest key.
+     */
+    protected String prevKey(String key) {
+        byte[] sb = key.getBytes();
+        sb[sb.length - 1]--;
+        return new String(sb);
+    }
+
+    /**
+     * Helper routine for testLargeXXX routines.
+     */
+    protected void doLargePut(Hashtable dataMap, int nKeys)
+        throws DatabaseException {
+
+        for (int i = 0; i < nKeys; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            TestUtils.generateRandomAlphaBytes(key);
+            String keyString = new String(key);
+            String dataString = Integer.toString(i);
+            putAndVerifyCursor(cursor, new StringDbt(keyString),
+                               new StringDbt(dataString), true);
+            if (dataMap != null) {
+                dataMap.put(keyString, dataString);
+            }
+        }
+    }
+
+    /**
+     * Helper routine for testLargeXXX routines.
+     */
+    protected void doLargePutPerf(int nKeys)
+        throws DatabaseException {
+
+        byte[][] keys = new byte[nKeys][];
+        for (int i = 0; i < nKeys; i++) {
+            byte[] key = new byte[20];
+            keys[i] = key;
+            TestUtils.generateRandomAlphaBytes(key);
+            String keyString = new String(key);
+            byte[] dataBytes = new byte[120];
+            TestUtils.generateRandomAlphaBytes(dataBytes);
+            String dataString = new String(dataBytes);
+            putAndVerifyCursor(cursor, new StringDbt(keyString),
+                               new StringDbt(dataString), true);
+        }
+    }
+
+    /**
+     * Create some simple duplicate data.
+     */
+    protected void doSimpleDuplicatePuts()
+        throws DatabaseException {
+
+        for (int i = 0; i < simpleKeyStrings.length; i++) {
+            for (int j = 0; j < simpleKeyStrings.length; j++) {
+                putAndVerifyCursor(cursor, simpleKeys[i], simpleData[j], true);
+            }
+        }
+    }
+
+    /**
+     * Create a tree with N_TOP_LEVEL_KEYS keys and N_DUPLICATES_PER_KEY
+     * data items per key.
+     *
+     * @param dataMap A Hashtable of hashtables.  This routine adds entries
+     * to the top level hash for each key created.  Secondary hashes contain
+     * the duplicate data items for each key in the top level hash.
+     *
+     * @param putVariant a boolean for varying the way the data is put with
+     * the cursor; currently unused.
+     */
+    protected void createRandomDuplicateData(Hashtable dataMap,
+                                             boolean putVariant)
+        throws DatabaseException {
+
+        createRandomDuplicateData(N_TOP_LEVEL_KEYS,
+                                  N_DUPLICATES_PER_KEY,
+                                  dataMap,
+                                  putVariant,
+                                  false);
+    }
+
+    /**
+     * Create a tree with a given number of keys and nDup
+     * data items per key.
+     *
+     * @param nTopKeys the number of top level keys to create.  If negative,
+     * create that number of top level keys with dupes underneath and the
+     * same number of top level keys without any dupes.
+     *
+     * @param nDup The number of duplicates to create in the duplicate subtree.
+     *
+     * @param dataMap A Hashtable of hashtables.  This routine adds entries
+     * to the top level hash for each key created.  Secondary hashes contain
+     * the duplicate data items for each key in the top level hash.
+     *
+     * @param putVariant a boolean for varying the way the data is put with
+     * the cursor; currently unused.
+     */
+    protected void createRandomDuplicateData(int nTopKeys,
+                                             int nDup,
+                                             Hashtable dataMap,
+                                             boolean putVariant,
+                                             boolean verifyCount)
+        throws DatabaseException {
+
+        boolean createSomeNonDupes = false;
+        if (nTopKeys < 0) {
+            nTopKeys = Math.abs(nTopKeys);
+            nTopKeys <<= 1;
+            createSomeNonDupes = true;
+        }
+
+        byte[][] keys = new byte[nTopKeys][];
+        for (int i = 0; i < nTopKeys; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            keys[i] = key;
+            TestUtils.generateRandomAlphaBytes(key);
+            String keyString = new String(key);
+            Hashtable ht = new Hashtable();
+            if (dataMap != null) {
+                dataMap.put(keyString, ht);
+            }
+            int nDupesThisTime = nDup;
+            if (createSomeNonDupes && (i % 2) == 0) {
+                nDupesThisTime = 1;
+            }
+            for (int j = 1; j <= nDupesThisTime; j++) {
+                byte[] data = new byte[N_KEY_BYTES];
+                TestUtils.generateRandomAlphaBytes(data);
+                OperationStatus status =
+                    putAndVerifyCursor(cursor, new StringDbt(keyString),
+                                       new StringDbt(data), putVariant);
+
+                if (verifyCount) {
+                    assertTrue(cursor.count() == j);
+                }
+
+                if (status != OperationStatus.SUCCESS) {
+                    throw new DuplicateEntryException
+                        ("Duplicate Entry");
+                }
+                String dataString = new String(data);
+                ht.put(dataString, dataString);
+            }
+        }
+    }
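+
+    /*
+     * Illustrative sketch (not used by the tests): how the nested Hashtable
+     * built by createRandomDuplicateData can be walked.  The outer table
+     * maps each top level key to an inner Hashtable whose entries are the
+     * duplicate data strings stored under that key; each data string maps
+     * to itself, mirroring the ht.put(dataString, dataString) call above.
+     */
+    protected void exampleCheckDuplicateMap(Hashtable dataMap) {
+        Enumeration keys = dataMap.keys();
+        while (keys.hasMoreElements()) {
+            String key = (String) keys.nextElement();
+            Hashtable dups = (Hashtable) dataMap.get(key);
+            Enumeration dupData = dups.keys();
+            while (dupData.hasMoreElements()) {
+                String data = (String) dupData.nextElement();
+                /* Each duplicate data string is its own map value. */
+                assertEquals(data, dups.get(data));
+            }
+        }
+    }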
+
+    /**
+     * Debugging routine.  Iterate through the transient hashtable of
+     * key/data pairs and ensure that each key can be retrieved from
+     * the tree.
+     */
+    protected void verifyEntries(Hashtable dataMap)
+        throws DatabaseException {
+
+        Tree tree = DbInternal.dbGetDatabaseImpl(exampleDb).getTree();
+        Enumeration e = dataMap.keys();
+        while (e.hasMoreElements()) {
+            String key = (String) e.nextElement();
+            if (!retrieveData(tree, key.getBytes())) {
+                System.out.println("Couldn't find: " + key);
+            }
+        }
+    }
+
+    /* Throw assertion if the database is not valid. */
+    protected void validateDatabase()
+        throws DatabaseException {
+
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(exampleDb);
+        assertTrue(dbImpl.verify(new VerifyConfig(), dbImpl.getEmptyStats()));
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    protected boolean retrieveData(Tree tree, byte[] key)
+        throws DatabaseException {
+
+	TestUtils.checkLatchCount();
+	Node n =
+	    tree.search(key, Tree.SearchType.NORMAL, -1, null,
+                        CacheMode.DEFAULT);
+	if (!(n instanceof BIN)) {
+	    fail("search didn't return a BIN for key: " + key);
+	}
+	BIN bin = (BIN) n;
+	try {
+	    int index = bin.findEntry(key, false, true);
+	    if (index == -1) {
+		return false;
+	    }
+	    return true;
+	} finally {
+	    bin.releaseLatch();
+	    TestUtils.checkLatchCount();
+	}
+    }
+
+    protected OperationStatus putAndVerifyCursor(Cursor cursor,
+                                                 StringDbt key,
+                                                 StringDbt data,
+                                                 boolean putVariant)
+        throws DatabaseException {
+
+        OperationStatus status;
+        if (duplicatesAllowed) {
+            status = cursor.putNoDupData(key, data);
+        } else {
+            status = cursor.putNoOverwrite(key, data);
+        }
+
+        if (status == OperationStatus.SUCCESS) {
+            StringDbt keyCheck = new StringDbt();
+            StringDbt dataCheck = new StringDbt();
+
+            assertEquals(OperationStatus.SUCCESS, cursor.getCurrent
+                         (keyCheck, dataCheck, LockMode.DEFAULT));
+            assertEquals(key.getString(), keyCheck.getString());
+            assertEquals(data.getString(), dataCheck.getString());
+        }
+
+        return status;
+    }
+
+    @SuppressWarnings("serial")
+    protected static class BtreeComparator implements Comparator, 
+                                                      Serializable {
+        protected boolean ascendingComparison = true;
+
+        protected BtreeComparator() {
+        }
+
+        public int compare(Object o1, Object o2) {
+            byte[] arg1;
+            byte[] arg2;
+            if (ascendingComparison) {
+                arg1 = (byte[]) o1;
+                arg2 = (byte[]) o2;
+            } else {
+                arg1 = (byte[]) o2;
+                arg2 = (byte[]) o1;
+            }
+            int a1Len = arg1.length;
+            int a2Len = arg2.length;
+
+            int limit = Math.min(a1Len, a2Len);
+
+            for (int i = 0; i < limit; i++) {
+                byte b1 = arg1[i];
+                byte b2 = arg2[i];
+                if (b1 == b2) {
+                    continue;
+                } else {
+                    /* Remember, bytes are signed, so convert to shorts so that
+                       we effectively do an unsigned byte comparison. */
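+                    /* For example (illustrative values): b1 = (byte) 0xFF
+                       and b2 = (byte) 0x01 become s1 = 255 and s2 = 1, so
+                       0xFF sorts after 0x01, as an unsigned comparison
+                       requires. */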
+                    short s1 = (short) (b1 & 0x7F);
+                    short s2 = (short) (b2 & 0x7F);
+                    if (b1 < 0) {
+                        s1 |= 0x80;
+                    }
+                    if (b2 < 0) {
+                        s2 |= 0x80;
+                    }
+                    return (s1 - s2);
+                }
+            }
+
+            return (a1Len - a2Len);
+        }
+    }
+
+    @SuppressWarnings("serial")
+    protected static class ReverseBtreeComparator extends BtreeComparator {
+        protected ReverseBtreeComparator() {
+            ascendingComparison = false;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbEnvPoolTest.java b/test/com/sleepycat/je/dbi/DbEnvPoolTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..881da35e677f9574ad597076e2619c0ac8ba0878
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbEnvPoolTest.java
@@ -0,0 +1,68 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: DbEnvPoolTest.java,v 1.13 2008/07/01 03:21:04 tao Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+
+public class DbEnvPoolTest extends TestCase {
+
+    private File envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+
+    public DbEnvPoolTest() {
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+	TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testCanonicalEnvironmentName()
+        throws Throwable {
+
+        try {
+            File file2 = new File("build/test/classes");
+
+            /* Create an environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            Environment envA = new Environment(envHome, envConfig);
+
+            /* Look in the environment pool with the relative path name. */
+            EnvironmentImpl envImpl =
+                DbEnvPool.getInstance().getEnvironment
+                    (file2, TestUtils.initEnvConfig(),
+                     false /*checkImmutableParams*/,
+                     false /*openIfNeeded*/,
+                     false /*replicationIntended*/);
+            /* We should find this file in the pool without opening it. */
+            assertNotNull(envImpl);
+            envImpl.decReferenceCount();
+            envA.close();
+
+        } catch (Throwable t) {
+            /* Dump stack trace before trying to tear down. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/DbTreeTest.java b/test/com/sleepycat/je/dbi/DbTreeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f6b62eeeca23b256d70fe780fc7f0e6ddf33973c
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/DbTreeTest.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbTreeTest.java,v 1.29.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class DbTreeTest extends TestCase {
+    private File envHome;
+
+    public DbTreeTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp() throws IOException, DatabaseException {
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown() throws IOException, DatabaseException {
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testDbLookup() throws Throwable {
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setAllowCreate(true);
+            Environment env = new Environment(envHome, envConfig);
+
+            // Make two databases
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database dbHandleAbcd = env.openDatabase(null, "abcd", dbConfig);
+            Database dbHandleXyz = env.openDatabase(null, "xyz", dbConfig);
+
+            // Can we get them back?
+            dbConfig.setAllowCreate(false);
+            Database newAbcdHandle = env.openDatabase(null, "abcd", dbConfig);
+            Database newXyzHandle = env.openDatabase(null, "xyz", dbConfig);
+
+            dbHandleAbcd.close();
+            dbHandleXyz.close();
+            newAbcdHandle.close();
+            newXyzHandle.close();
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/INListTest.java b/test/com/sleepycat/je/dbi/INListTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..943b2ac63b3b7ef14c9ff2c9f92ddfb3cc955d6f
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/INListTest.java
@@ -0,0 +1,436 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INListTest.java,v 1.44.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+
+public class INListTest extends TestCase {
+    private static String DB_NAME = "INListTestDb";
+    private File envHome;
+    private volatile int sequencer = 0;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private DatabaseImpl dbImpl;
+
+    public INListTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws DatabaseException, IOException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        sequencer = 0;
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+        dbImpl = DbInternal.dbGetDatabaseImpl(db);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+        }
+        if (env != null) {
+            env.close();
+        }
+        db = null;
+        dbImpl = null;
+        env = null;
+        envImpl = null;
+    }
+
+    public void tearDown()
+        throws DatabaseException, IOException  {
+
+        try {
+            close();
+        } catch (Exception e) {
+            System.out.println("During tearDown: " + e);
+        }
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+
+        envHome = null;
+    }
+
+    /**
+     * This test was originally written when the INList had a major and minor
+     * latch.  It was used to test the addition of INs holding the minor latch
+     * while another thread holds the major latch.  Now that we're using
+     * ConcurrentHashMap this type of testing is not important, but I've left
+     * the test in place (without the latching) since it does exercise the
+     * INList API a little.
+     */
+    public void testConcurrentAdditions()
+	throws Throwable {
+
+        final INList inList1 = new INList(envImpl);
+        inList1.enable();
+
+        JUnitThread tester1 =
+            new JUnitThread("testConcurrentAdditions-Thread1") {
+                public void testBody() {
+
+                    try {
+                        /* Create two initial elements. */
+                        for (int i = 0; i < 2; i++) {
+                            IN in = new IN(dbImpl, null, 1, 1);
+                            inList1.add(in);
+                        }
+
+                        /* Wait for tester2 to try to acquire the
+                           minor latch. */
+                        sequencer = 1;
+                        while (sequencer <= 1) {
+                            Thread.yield();
+                        }
+
+                        /*
+                         * Sequencer is now 2. There should be three elements
+                         * in the list right now because thread 2 added a third
+                         * one.
+                         */
+                        int count = 0;
+                        Iterator iter = inList1.iterator();
+                        while (iter.hasNext()) {
+                            iter.next();
+                            count++;
+                        }
+
+                        assertEquals(3, count);
+
+                        /*
+                         * Allow thread2 to run again.  It will
+                         * add another element and throw control
+                         * back to thread 1.
+                         */
+                        sequencer++;   // now it's 3
+                        while (sequencer <= 3) {
+                            Thread.yield();
+                        }
+
+                        /*
+                         * Check that the entry added by tester2 was really
+                         * added.
+                         */
+                        count = 0;
+                        iter = inList1.iterator();
+                        while (iter.hasNext()) {
+                            iter.next();
+                            count++;
+                        }
+
+                        assertEquals(4, count);
+                    } catch (Throwable T) {
+                        T.printStackTrace(System.out);
+                        fail("Thread 1 caught some Throwable: " + T);
+                    }
+                }
+            };
+
+        JUnitThread tester2 =
+            new JUnitThread("testConcurrentAdditions-Thread2") {
+                public void testBody() {
+
+                    try {
+                        /* Wait for tester1 to start */
+                        while (sequencer < 1) {
+                            Thread.yield();
+                        }
+
+                        assertEquals(1, sequencer);
+
+                        inList1.add(new IN(dbImpl, null, 1, 1));
+                        sequencer++;
+
+                        /* Sequencer is now 2. */
+
+                        while (sequencer < 3) {
+                            Thread.yield();
+                        }
+
+                        assertEquals(3, sequencer);
+                        /* Add one more element. */
+                        inList1.add(new IN(dbImpl, null, 1, 1));
+                        sequencer++;
+                    } catch (Throwable T) {
+                        T.printStackTrace(System.out);
+                        fail("Thread 2 caught some Throwable: " + T);
+                    }
+                }
+            };
+
+        tester1.start();
+        tester2.start();
+        tester1.finishTest();
+        tester2.finishTest();
+    }
+
+    /*
+     * Variations of this loop are used in the following tests to simulate the
+     * INList memory budget recalculation that is performed by the same loop
+     * construct in DirtyINMap.selectDirtyINsForCheckpoint.
+     *
+     *  inList.memRecalcBegin();
+     *  boolean completed = false;
+     *  try {
+     *      for (IN in : inList) {
+     *          inList.memRecalcIterate(in);
+     *      }
+     *      completed = true;
+     *  } finally {
+     *      inList.memRecalcEnd(completed);
+     *  }
+     */
+
+    /**
+     * Scenario #1: IN size is unchanged during the iteration
+     *  begin
+     *   iterate -- add total IN size, mark processed
+     *  end
+     */
+    public void testMemBudgetReset1()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem, mb.getTreeMemoryUsage());
+
+        close();
+    }
+
+    /**
+     * Scenario #2: IN size is updated during the iteration
+     *  begin
+     *   update  -- do not add delta because IN is not yet processed
+     *   iterate -- add total IN size, mark processed
+     *   update  -- do add delta because IN was already processed
+     *  end
+     */
+    public void testMemBudgetReset2()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        /*
+         * Size changes must be greater than IN.ACCUMULATED_LIMIT to be
+         * counted in the budget, and byte array lengths should be a multiple
+         * of 4 to give predictable sizes, since array memory is accounted
+         * for in multiples of 4 bytes.
+         */
+        final int SIZE = IN.ACCUMULATED_LIMIT + 100;
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        db.put(null, key, new DatabaseEntry(new byte[SIZE * 1]));
+
+        /* Test increasing size. */
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            db.put(null, key, new DatabaseEntry(new byte[SIZE * 2]));
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            db.put(null, key, new DatabaseEntry(new byte[SIZE * 3]));
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem + SIZE * 2, mb.getTreeMemoryUsage());
+
+        /* Test decreasing size. */
+        inList.memRecalcBegin();
+        completed = false;
+        try {
+            db.put(null, key, new DatabaseEntry(new byte[SIZE * 2]));
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            db.put(null, key, new DatabaseEntry(new byte[SIZE * 1]));
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem, mb.getTreeMemoryUsage());
+
+        close();
+    }
+
+    /**
+     * Scenario #3: IN is added during the iteration but not iterated
+     *  begin
+     *   add -- add IN size, mark processed
+     *  end
+     */
+    public void testMemBudgetReset3()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        IN newIn = new IN(dbImpl, null, 1, 1);
+        long size = newIn.getBudgetedMemorySize();
+
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            inList.add(newIn);
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem + size, mb.getTreeMemoryUsage());
+
+        close();
+    }
+
+    /**
+     * Scenario #4: IN is added during the iteration and is iterated
+     *  begin
+     *   add     -- add IN size, mark processed
+     *   iterate -- do not add size because IN was already processed
+     *  end
+     */
+    public void testMemBudgetReset4()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        IN newIn = new IN(dbImpl, null, 1, 1);
+        long size = newIn.getBudgetedMemorySize();
+
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            inList.add(newIn);
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem + size, mb.getTreeMemoryUsage());
+
+        close();
+    }
+
+    /**
+     * Scenario #5: IN is removed during the iteration but not iterated
+     *  begin
+     *   remove  -- do not add delta because IN is not yet processed
+     *  end
+     */
+    public void testMemBudgetReset5()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        IN oldIn = inList.iterator().next();
+        long size = oldIn.getBudgetedMemorySize();
+
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            inList.remove(oldIn);
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem - size, mb.getTreeMemoryUsage());
+
+        close();
+    }
+
+    /**
+     * Scenario #6: IN is removed during the iteration and is iterated
+     *  begin
+     *   iterate -- add total IN size, mark processed
+     *   remove  -- add delta because IN was already processed
+     *  end
+     */
+    public void testMemBudgetReset6()
+        throws DatabaseException {
+
+        INList inList = envImpl.getInMemoryINs();
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        IN oldIn = inList.iterator().next();
+        long size = oldIn.getBudgetedMemorySize();
+
+        long origTreeMem = mb.getTreeMemoryUsage();
+        inList.memRecalcBegin();
+        boolean completed = false;
+        try {
+            for (IN in : inList) {
+                inList.memRecalcIterate(in);
+            }
+            inList.remove(oldIn);
+            completed = true;
+        } finally {
+            inList.memRecalcEnd(completed);
+        }
+        assertEquals(origTreeMem - size, mb.getTreeMemoryUsage());
+
+        close();
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/MemoryBudgetTest.java b/test/com/sleepycat/je/dbi/MemoryBudgetTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e4996dfeaa7e5a885dbae43364d8bbcb041f3b9
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/MemoryBudgetTest.java
@@ -0,0 +1,119 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MemoryBudgetTest.java,v 1.19.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Checks default sizing and explicit configuration of the JE memory budget.
+ */
+public class MemoryBudgetTest extends TestCase {
+    private File envHome;
+
+    public MemoryBudgetTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testDefaults()
+        throws Exception {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        MemoryBudget testBudget = envImpl.getMemoryBudget();
+
+	/*
+        System.out.println("max=    " + testBudget.getMaxMemory());
+        System.out.println("log=    " + testBudget.getLogBufferBudget());
+        System.out.println("thresh= " + testBudget.getEvictorCheckThreshold());
+	*/
+
+        assertTrue(testBudget.getMaxMemory() > 0);
+        assertTrue(testBudget.getLogBufferBudget() > 0);
+
+        assertTrue(testBudget.getMaxMemory() <=
+                   MemoryBudget.getRuntimeMaxMemory());
+
+        env.close();
+    }
+
+    /* Verify that the percentage-based cache sizing works. */
+    public void testCacheSizing()
+        throws Exception {
+
+        long jvmMemory = MemoryBudget.getRuntimeMaxMemory();
+
+	/*
+	 * Runtime.maxMemory() may return Long.MAX_VALUE if there is no
+	 * inherent limit.
+	 */
+	if (jvmMemory == Long.MAX_VALUE) {
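+	    /* 1 << 26 = 64 MB: a bounded stand-in when the JVM reports no limit. */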
+	    jvmMemory = 1 << 26;
+	}
+
+        /* The default cache size ought to be percentage based. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        long percentConfig = envImpl.getConfigManager().
+            getInt(EnvironmentParams.MAX_MEMORY_PERCENT);
+
+        EnvironmentConfig c = env.getConfig();
+        long expectedMem = (jvmMemory * percentConfig) / 100;
+        assertEquals(expectedMem, c.getCacheSize());
+        assertEquals(expectedMem, envImpl.getMemoryBudget().getMaxMemory());
+        env.close();
+
+        /* Try setting the percentage.*/
+        expectedMem = (jvmMemory * 30) / 100;
+        envConfig = TestUtils.initEnvConfig();
+        envConfig.setCachePercent(30);
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+        c = env.getConfig();
+        assertEquals(expectedMem, c.getCacheSize());
+        assertEquals(expectedMem, envImpl.getMemoryBudget().getMaxMemory());
+        env.close();
+
+        /* Try overriding */
+        envConfig = TestUtils.initEnvConfig();
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10);
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+        c = env.getConfig();
+        assertEquals(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10, c.getCacheSize());
+        assertEquals(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10,
+		     envImpl.getMemoryBudget().getMaxMemory());
+        env.close();
+    }
+}
+
diff --git a/test/com/sleepycat/je/dbi/NullCursor.java b/test/com/sleepycat/je/dbi/NullCursor.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e7a5cc5f8532f393f8e8e9e02b035538eec002f
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/NullCursor.java
@@ -0,0 +1,31 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NullCursor.java,v 1.17.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.txn.Locker;
+
+/**
+ * A NullCursor is a no-op stand-in used by tree unit tests that need to
+ * call Tree methods directly.
+ */
+public class NullCursor extends CursorImpl {
+    /**
+     * Cursor constructor.
+     */
+    public NullCursor(DatabaseImpl database, Locker txn)
+        throws DatabaseException {
+
+        super(database, txn);
+    }
+
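+    /*
+     * No-op overrides: these tests drive the Tree directly and do not need
+     * real cursor registration on BINs.
+     */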
+    public void addCursor(BIN bin) {}
+    public void addCursor() {}
+}
+
diff --git a/test/com/sleepycat/je/dbi/SR12641.java b/test/com/sleepycat/je/dbi/SR12641.java
new file mode 100644
index 0000000000000000000000000000000000000000..b57cbfb25f94d2e883dcec4fa4a806b52bfae650
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/SR12641.java
@@ -0,0 +1,197 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR12641.java,v 1.9.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This reproduces the bug described in SR [#12641], also related to SR [#9543].
+ *
+ * Note that although this is a JUnit test case, it is not run as part of the
+ * JUnit test suite.  It takes a long time, and when it fails it hangs.
+ * Therefore, it was only used for debugging and is not intended to be a
+ * regression test.
+ *
+ * For some reason the bug was not reproducible with a simple main program,
+ * which is why a JUnit test was used.
+ */
+public class SR12641 extends TestCase {
+
+    /* Use small NODE_MAX to cause lots of splits. */
+    private static final int NODE_MAX = 6;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private boolean dups;
+    private boolean writerStopped;
+
+    public SR12641()
+        throws Exception {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+	throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.err.println("TearDown: " + e);
+            }
+        }
+        env = null;
+        db = null;
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testSplitsWithScansDups()
+        throws Throwable {
+
+        dups = true;
+        testSplitsWithScans();
+    }
+
+    public void testSplitsWithScans()
+        throws Throwable {
+
+        open();
+
+        /* Cause splits in the last BIN. */
+        JUnitThread writer = new JUnitThread("writer") {
+            public void testBody() {
+                try {
+                    DatabaseEntry key = new DatabaseEntry(new byte[1]);
+                    DatabaseEntry data = new DatabaseEntry(new byte[1]);
+                    OperationStatus status;
+
+                    Cursor cursor = db.openCursor(null, null);
+
+                    for (int i = 0; i < 100000; i += 1) {
+                        IntegerBinding.intToEntry(i, dups ? data : key);
+                        if (dups) {
+                            status = cursor.putNoDupData(key, data);
+                        } else {
+                            status = cursor.putNoOverwrite(key, data);
+                        }
+                        assertEquals(OperationStatus.SUCCESS, status);
+
+                        if (i % 5000 == 0) {
+                            System.out.println("Iteration: " + i);
+                        }
+                    }
+
+                    cursor.close();
+                    writerStopped = true;
+
+                } catch (Exception e) {
+                    try {
+                        FileOutputStream os =
+                            new FileOutputStream(new File("./err.txt"));
+                        e.printStackTrace(new PrintStream(os));
+                        os.close();
+                    } catch (IOException ignored) {}
+                    System.exit(1);
+                }
+            }
+        };
+
+        /* Move repeatedly from the last BIN to the prior BIN. */
+        JUnitThread reader = new JUnitThread("reader") {
+            public void testBody() {
+                try {
+                    DatabaseEntry key = new DatabaseEntry();
+                    DatabaseEntry data = new DatabaseEntry();
+
+                    CursorConfig cursorConfig = new CursorConfig();
+                    cursorConfig.setReadUncommitted(true);
+                    Cursor cursor = db.openCursor(null, cursorConfig);
+
+                    while (!writerStopped) {
+                        cursor.getLast(key, data, null);
+                        for (int i = 0; i <= NODE_MAX; i += 1) {
+                            cursor.getPrev(key, data, null);
+                        }
+                    }
+
+                    cursor.close();
+
+                } catch (Exception e) {
+                    try {
+                        FileOutputStream os =
+                            new FileOutputStream(new File("./err.txt"));
+                        e.printStackTrace(new PrintStream(os));
+                        os.close();
+                    } catch (IOException ignored) {}
+                    System.exit(1);
+                }
+            }
+        };
+
+        writer.start();
+        reader.start();
+        writer.finishTest();
+        reader.finishTest();
+
+        close();
+        System.out.println("SUCCESS");
+    }
+
+    private void open()
+        throws Exception {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), String.valueOf(NODE_MAX));
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, "testDb", dbConfig);
+    }
+
+    private void close()
+        throws Exception {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java b/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..49b79139101f58d5d10ec5ad49349a00fda9dd13
--- /dev/null
+++ b/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java
@@ -0,0 +1,461 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SortedLSNTreeWalkerTest.java,v 1.17.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class SortedLSNTreeWalkerTest extends TestCase {
+    private static boolean DEBUG = false;
+
+    /* Use small NODE_MAX to cause lots of splits. */
+    private static final int NODE_MAX = 6;
+    private static final int N_RECS = 30;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SortedLSNTreeWalkerTest()
+        throws Exception {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+	throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.err.println("TearDown: " + e);
+            }
+        }
+        env = null;
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testSortedLSNTreeWalkerNoDupsReadingINList()
+        throws Throwable {
+
+	open(false);
+	writeData(false);
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	if (DEBUG) {
+	    System.out.println("***************");
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	}
+	close();
+	if (DEBUG) {
+	    System.out.println("***************");
+	}
+	open(false);
+	readData();
+	if (DEBUG) {
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	    System.out.println("***************");
+	}
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+	db.close();
+	db = null;
+	assertEquals(N_RECS, walkTree(dbImpl, false, stats, true));
+	close();
+    }
+
+    public void testSortedLSNTreeWalkerNoDupsLoadLNs()
+        throws Throwable {
+
+	doTestSortedLSNTreeWalkerNoDups(true);
+    }
+
+    public void testSortedLSNTreeWalkerNoDupsNoLoadLNs()
+        throws Throwable {
+
+	doTestSortedLSNTreeWalkerNoDups(false);
+    }
+
+    private void doTestSortedLSNTreeWalkerNoDups(boolean loadLNs)
+	throws Throwable {
+
+	open(false);
+	writeData(false);
+	if (DEBUG) {
+	    System.out.println("***************");
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	}
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	close();
+	if (DEBUG) {
+	    System.out.println("***************");
+	}
+	open(false);
+	readData();
+	if (DEBUG) {
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	    System.out.println("***************");
+	}
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        db.close();
+        db = null;
+	assertEquals(N_RECS, walkTree(dbImpl, false, stats, loadLNs));
+	close();
+    }
+
+    public void testSortedLSNTreeWalkerNoDupsDupsAllowed()
+        throws Throwable {
+
+	open(true);
+	writeData(false);
+	if (DEBUG) {
+	    System.out.println("***************");
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	}
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	close();
+	if (DEBUG) {
+	    System.out.println("***************");
+	}
+	open(true);
+	if (DEBUG) {
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+	    System.out.println("***************");
+	}
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        db.close();
+        db = null;
+	assertEquals(N_RECS, walkTree(dbImpl, false, stats, true));
+	close();
+    }
+
+    public void testSortedLSNTreeWalkerDups()
+        throws Throwable {
+
+	doTestSortedLSNTreeWalkerDups(true);
+    }
+
+    public void testSortedLSNTreeWalkerDupsNoLoadLNs()
+        throws Throwable {
+
+	doTestSortedLSNTreeWalkerDups(false);
+    }
+
+    private void doTestSortedLSNTreeWalkerDups(boolean loadLNs)
+	throws Throwable {
+
+	open(true);
+	writeData(true);
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	close();
+	open(true);
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        db.close();
+        db = null;
+	assertEquals(N_RECS * 2, walkTree(dbImpl, true, stats, loadLNs));
+	close();
+    }
+
+    public void testSortedLSNTreeWalkerDupsReadingINList()
+        throws Throwable {
+
+	open(true);
+	writeData(true);
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	close();
+	open(true);
+	readData();
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+	db.close();
+	db = null;
+	assertEquals(N_RECS * 2, walkTree(dbImpl, false, stats, true));
+	close();
+    }
+
+    public void testSortedLSNTreeWalkerPendingDeleted()
+        throws Throwable {
+
+	open(true);
+	int numRecs = writeDataWithDeletes();
+	BtreeStats stats = (BtreeStats) db.getStats(null);
+	close();
+	open(true);
+	readData();
+	DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+	db.close();
+	db = null;
+	assertEquals(numRecs, walkTree(dbImpl, false, stats, true));
+	close();
+    }
+
+    private void open(boolean allowDuplicates)
+        throws Exception {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), String.valueOf(NODE_MAX));
+	/*
+        envConfig.setConfigParam
+            (EnvironmentParams.MAX_MEMORY.getName(), "10000000");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+	*/
+
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(false);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        db = env.openDatabase(null, "testDb", dbConfig);
+    }
+
+    private void writeData(boolean dups)
+	throws DatabaseException {
+
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	for (int i = 0; i < N_RECS; i++) {
+	    IntegerBinding.intToEntry(i, key);
+	    data.setData(new byte[1000]);
+	    assertEquals(db.put(null, key, data),
+			 OperationStatus.SUCCESS);
+	    if (dups) {
+		IntegerBinding.intToEntry(i + N_RECS + N_RECS, data);
+		assertEquals(db.put(null, key, data),
+			     OperationStatus.SUCCESS);
+	    }
+	}
+    }
+
+    private int writeDataWithDeletes()
+	throws DatabaseException {
+
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+        int numInserted = 0;
+
+        data.setData(new byte[10]);
+
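+        /*
+         * Each key is inserted in its own txn.  Even keys are deleted in the
+         * same txn, and every third txn is aborted; only records that were
+         * committed and not deleted are counted in numInserted.
+         */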
+	for (int i = 0; i < N_RECS; i++) {
+	    IntegerBinding.intToEntry(i, key);
+            Transaction txn = env.beginTransaction(null, null);
+	    assertEquals(db.put(txn, key, data),
+			 OperationStatus.SUCCESS);
+            boolean deleted = false;
+            if ((i % 2) == 0) {
+                assertEquals(db.delete(txn, key),
+                             OperationStatus.SUCCESS);
+                deleted = true;
+            }
+            if ((i % 3) == 0) {
+                txn.abort();
+            } else {
+                txn.commit();
+                if (!deleted) {
+                    numInserted++;
+                }
+            }
+	}
+        return numInserted;
+    }
+
+    private void readData()
+	throws DatabaseException {
+
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	IntegerBinding.intToEntry(N_RECS - 1, key);
+	assertEquals(db.get(null, key, data, LockMode.DEFAULT),
+		     OperationStatus.SUCCESS);
+    }
+
+    private void scanData()
+	throws DatabaseException {
+
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	Cursor cursor = db.openCursor(null, null);
+	while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+	       OperationStatus.SUCCESS) {
+	}
+	cursor.close();
+    }
+
+    /* Return the number of keys seen in all BINs. */
+    private int walkTree(DatabaseImpl dbImpl,
+			 boolean dups,
+			 BtreeStats stats,
+			 final boolean loadLNNodes)
+	throws DatabaseException {
+
+	TestingTreeNodeProcessor tnp = new TestingTreeNodeProcessor() {
+		public void processLSN(long childLSN,
+				       LogEntryType childType,
+				       Node node,
+                                       byte[] lnKey)
+		    throws DatabaseException {
+
+		    if (DEBUG) {
+			System.out.println
+			    (childType + " " + DbLsn.toString(childLSN));
+		    }
+
+		    if (childType.equals(LogEntryType.LOG_DBIN)) {
+			dbinCount++;
+                        assertNull(lnKey);
+                        assertNotNull(node);
+		    } else if (childType.equals(LogEntryType.LOG_BIN)) {
+			binCount++;
+                        assertNull(lnKey);
+                        assertNotNull(node);
+		    } else if (childType.equals(LogEntryType.LOG_DIN)) {
+			dinCount++;
+                        assertNull(lnKey);
+                        assertNotNull(node);
+		    } else if (childType.equals(LogEntryType.LOG_IN)) {
+			inCount++;
+                        assertNull(lnKey);
+                        assertNotNull(node);
+		    } else if (childType.equals(LogEntryType.LOG_LN)) {
+			entryCount++;
+                        assertNotNull(lnKey);
+                        if (loadLNNodes) {
+                            assertNotNull(node);
+                        }
+		    } else if (childType.equals(LogEntryType.LOG_DUPCOUNTLN)) {
+			dupLNCount++;
+                        assertNotNull(lnKey);
+                        assertNotNull(node);
+		    } else {
+			throw new RuntimeException
+			    ("unknown entry type: " + childType);
+		    }
+		}
+
+		public void processDupCount(long ignore) {
+		}
+	    };
+
+	SortedLSNTreeWalker walker =
+	    new SortedLSNTreeWalker(dbImpl, false,
+                                    dbImpl.getTree().getRootLsn(), tnp,
+                                    null,  /* savedExceptions */
+				    null);
+
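+	/*
+	 * When loadLNNodes is true, processLSN above expects a non-null node
+	 * for LOG_LN entries; otherwise the walker may pass a null node.
+	 */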
+	walker.accumulateLNs = loadLNNodes;
+
+	walker.walk();
+
+	if (DEBUG) {
+	    System.out.println(stats);
+	}
+
+	/* Add one since the root LSN is not passed to the walker. */
+	assertEquals(stats.getInternalNodeCount(), tnp.inCount + 1);
+	assertEquals(stats.getBottomInternalNodeCount(), tnp.binCount);
+	assertEquals(stats.getDuplicateInternalNodeCount(), tnp.dinCount);
+	assertEquals(stats.getDuplicateBottomInternalNodeCount(),
+		     tnp.dbinCount);
+	assertEquals(stats.getLeafNodeCount(), tnp.entryCount);
+	assertEquals(stats.getDupCountLeafNodeCount(), tnp.dupLNCount);
+	if (DEBUG) {
+	    System.out.println("INs: " + tnp.inCount);
+	    System.out.println("BINs: " + tnp.binCount);
+	    System.out.println("DINs: " + tnp.dinCount);
+	    System.out.println("DBINs: " + tnp.dbinCount);
+	    System.out.println("entries: " + tnp.entryCount);
+	    System.out.println("dupLN: " + tnp.dupLNCount);
+	}
+
+	return tnp.entryCount;
+    }
+
+    private static class TestingTreeNodeProcessor
+	implements TreeNodeProcessor {
+
+	int binCount = 0;
+	int dbinCount = 0;
+	int dinCount = 0;
+	int inCount = 0;
+	int entryCount = 0;
+        int dupLNCount = 0;
+
+	public void processLSN(long childLSN,
+			       LogEntryType childType,
+			       Node ignore,
+                               byte[] ignore2)
+	    throws DatabaseException {
+
+	    throw new RuntimeException("override me please");
+	}
+	
+        public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey)
+	    throws DatabaseException {
+            /* Do nothing. */
+        }
+
+	public void processDupCount(int ignore) {
+	    throw new RuntimeException("override me please");
+	}
+    }
+
+    private void close()
+        throws Exception {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/evictor/EvictActionTest.java b/test/com/sleepycat/je/evictor/EvictActionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..93e2cd01c7ab789b7049dc1f21a39828572e5729
--- /dev/null
+++ b/test/com/sleepycat/je/evictor/EvictActionTest.java
@@ -0,0 +1,901 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvictActionTest.java,v 1.35.2.3 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This test exercises the act of eviction and determines whether the
+ * expected nodes have been evicted properly.
+ */
+public class EvictActionTest extends TestCase {
+
+    private static final boolean DEBUG = false;
+    private static final int NUM_KEYS = 60;
+    private static final int NUM_DUPS = 30;
+    private static final int BIG_CACHE_SIZE = 500000;
+    private static final int SMALL_CACHE_SIZE = (int)
+	MemoryBudget.MIN_MAX_MEMORY_SIZE;
+
+    private File envHome = null;
+    private Environment env = null;
+    private Database db = null;
+    private int actualLNs = 0;
+    private int actualINs = 0;
+
+    public EvictActionTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+	IN.ACCUMULATED_LIMIT = 0;
+	Txn.ACCUMULATED_LIMIT = 0;
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        db = null;
+    }
+
+    public void testEvict()
+        throws Throwable {
+
+        doEvict(50, SMALL_CACHE_SIZE, true);
+    }
+
+    public void testNoNeedToEvict()
+        throws Throwable {
+
+        doEvict(80, BIG_CACHE_SIZE, false);
+    }
+
+    /**
+     * Evict in very controlled circumstances. Check that we first strip
+     * BINs and later evict BINs.
+     */
+    private void doEvict(int floor,
+                         int maxMem,
+                         boolean shouldEvict)
+        throws Throwable {
+
+        openEnv(floor, maxMem);
+        insertData(NUM_KEYS);
+
+        /* Evict once after insert. */
+        evictAndCheck(shouldEvict, NUM_KEYS);
+
+        /* Evict again after verification. */
+        evictAndCheck(shouldEvict, NUM_KEYS);
+
+        closeEnv();
+    }
+
+    public void testSetCacheSize()
+        throws DatabaseException {
+
+        /* Start with large cache size. */
+        openEnv(80, BIG_CACHE_SIZE);
+        EnvironmentMutableConfig config = env.getMutableConfig();
+        insertData(NUM_KEYS);
+
+        /* No need to evict. */
+        verifyData(NUM_KEYS);
+        evictAndCheck(false, NUM_KEYS);
+
+        /* Set small cache size. */
+        config.setCacheSize(SMALL_CACHE_SIZE);
+        env.setMutableConfig(config);
+
+        /* Expect eviction. */
+        verifyData(NUM_KEYS);
+        evictAndCheck(true, NUM_KEYS);
+
+        /* Set large cache size. */
+        config.setCacheSize(BIG_CACHE_SIZE);
+        env.setMutableConfig(config);
+
+        /* Expect no eviction. */
+        verifyData(NUM_KEYS);
+        evictAndCheck(false, NUM_KEYS);
+
+        closeEnv();
+    }
+
+    public void testSetCachePercent()
+        throws DatabaseException {
+
+        int nKeys = NUM_KEYS * 500;
+
+        /* Start with large cache size. */
+        openEnv(80, BIG_CACHE_SIZE);
+        EnvironmentMutableConfig config = env.getMutableConfig();
+        config.setCacheSize(0);
+        config.setCachePercent(90);
+        env.setMutableConfig(config);
+        insertData(nKeys);
+
+        /* No need to evict. */
+        verifyData(nKeys);
+        evictAndCheck(false, nKeys);
+
+        /* Set small cache percent. */
+        config.setCacheSize(0);
+        config.setCachePercent(1);
+        env.setMutableConfig(config);
+
+        /* Expect eviction. */
+        verifyData(nKeys);
+        evictAndCheck(true, nKeys);
+
+        /* Set large cache percent. */
+        config.setCacheSize(0);
+        config.setCachePercent(90);
+        env.setMutableConfig(config);
+
+        /* Expect no eviction. */
+        verifyData(nKeys);
+        evictAndCheck(false, nKeys);
+
+        closeEnv();
+    }
+
+    public void testThreadedCacheSizeChanges()
+        throws DatabaseException {
+
+        final int N_ITERS = 10;
+        openEnv(80, BIG_CACHE_SIZE);
+        insertData(NUM_KEYS);
+
+        JUnitThread writer = new JUnitThread("Writer") {
+            public void testBody()
+                throws DatabaseException {
+                for (int i = 0; i < N_ITERS; i += 1) {
+                    env.evictMemory();
+                    /* insertData will update if data exists. */
+                    insertData(NUM_KEYS);
+                    env.evictMemory();
+                    EnvironmentMutableConfig config = env.getMutableConfig();
+                    config.setCacheSize(SMALL_CACHE_SIZE);
+                    env.setMutableConfig(config);
+                }
+            }
+        };
+
+        JUnitThread reader = new JUnitThread("Reader") {
+            public void testBody()
+                throws DatabaseException {
+                for (int i = 0; i < N_ITERS; i += 1) {
+                    env.evictMemory();
+                    verifyData(NUM_KEYS);
+                    env.evictMemory();
+                    EnvironmentMutableConfig config = env.getMutableConfig();
+                    config.setCacheSize(BIG_CACHE_SIZE);
+                    env.setMutableConfig(config);
+                }
+            }
+        };
+
+        writer.start();
+        reader.start();
+
+        try {
+            writer.finishTest();
+        } catch (Throwable e) {
+            try {
+                reader.finishTest();
+            } catch (Throwable ignore) { }
+            e.printStackTrace();
+            fail(e.toString());
+        }
+
+        try {
+            reader.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        }
+
+        closeEnv();
+    }
+
+    public void testSmallCacheSettings()
+        throws DatabaseException {
+
+        /*
+         * With a cache size > 600 KB, the min tree usage should be the default
+         * value.
+         */
+        openEnv(0, 1200 * 1024);
+        EnvironmentMutableConfig config = env.getMutableConfig();
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+        assertEquals(500 * 1024, mb.getMinTreeMemoryUsage());
+
+        /*
+         * With a cache size > 1000 KB, evict bytes may be > 500 KB but we
+         * should not evict over half the cache size.
+         */
+        putLargeData(1200, 1024);
+        env.evictMemory();
+        EnvironmentStats stats = env.getStats(null);
+        assertTrue(stats.getRequiredEvictBytes() > 500 * 1024);
+        assertTrue(stats.getCacheTotalBytes() >= 1200 * 1024 / 2);
+
+        /*
+         * With a cache size of 500 KB, the min tree usage should be the amount
+         * available in the cache after the buffer bytes are subtracted.
+         */
+        config.setCacheSize(500 * 1024);
+        env.setMutableConfig(config);
+        stats = env.getStats(null);
+        assertEquals(500 * 1024 - stats.getBufferBytes(),
+                     mb.getMinTreeMemoryUsage());
+
+        /*
+         * With a cache size of 500 KB, evict bytes may be < 500 KB but we
+         * should not evict over half the cache size.
+         */
+        putLargeData(500, 1024);
+        env.evictMemory();
+        stats = env.getStats(null);
+        assertTrue(stats.getCacheTotalBytes() >= 500 * 1024 / 2);
+
+        /*
+         * Even when using a large amount of non-tree memory, the tree memory
+         * usage should not go below the minimum.
+         */
+        mb.updateAdminMemoryUsage(500 * 1024);
+        env.evictMemory();
+        stats = env.getStats(null);
+        long treeBytes = stats.getDataBytes()  +
+                         50 * 1024 /* larger than any LN or IN */;
+        assertTrue(treeBytes >= mb.getMinTreeMemoryUsage());
+        mb.updateAdminMemoryUsage(-(500 * 1024));
+
+        /* Allow changing the min tree usage explicitly. */
+        config.setCacheSize(500 * 1024);
+        config.setConfigParam("je.tree.minMemory", String.valueOf(50 * 1024));
+        env.setMutableConfig(config);
+        assertEquals(50 * 1024, mb.getMinTreeMemoryUsage());
+
+        /* The min tree usage may not be larger than the cache. */
+        config.setCacheSize(500 * 1024);
+        config.setConfigParam("je.tree.minMemory", String.valueOf(900 * 1024));
+        env.setMutableConfig(config);
+        stats = env.getStats(null);
+        assertEquals(500 * 1024 - stats.getBufferBytes(),
+                     mb.getMinTreeMemoryUsage());
+
+        closeEnv();
+    }
+
+    /**
+     * We now allow eviction of the root IN of a DB, whether the DB is closed
+     * or not.  Check that basic root eviction works.  [#13415]
+     */
+    public void testRootINEviction()
+        throws DatabaseException {
+
+        DatabaseEntry entry = new DatabaseEntry(new byte[1]);
+        OperationStatus status;
+
+        openEnv(80, SMALL_CACHE_SIZE);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db1 = env.openDatabase(null, "db1", dbConfig);
+
+        /* Root starts out null. */
+        assertTrue(!isRootResident(db1));
+        /* It is created when we insert the first record. */
+        status = db1.put(null, entry, entry);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertTrue(isRootResident(db1));
+        /* It is evicted when necessary. */
+        forceEviction();
+        assertTrue(!isRootResident(db1));
+        /* And fetched again when needed. */
+        status = db1.get(null, entry, entry, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertTrue(isRootResident(db1));
+
+        /* Deferred write DBs have special rules. */
+        dbConfig.setDeferredWrite(true);
+        Database db2 = env.openDatabase(null, "db2", dbConfig);
+        status = db2.put(null, entry, entry);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertTrue(isRootResident(db2));
+        /* Root eviction is disallowed if the root is dirty. */
+        forceEviction();
+        assertTrue(isRootResident(db2));
+        db2.sync();
+        forceEviction();
+        assertTrue(!isRootResident(db2));
+
+        db2.close();
+        db1.close();
+        closeEnv();
+    }
+
+    /**
+     * We now allow eviction of the MapLN and higher level INs in the DB mapping
+     * tree when DBs are closed.  Check that basic mapping tree IN eviction
+     * works.  [#13415]
+     */
+    public void testMappingTreeEviction()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        DatabaseEntry entry = new DatabaseEntry(new byte[1]);
+        OperationStatus status;
+
+        openEnv(80, SMALL_CACHE_SIZE);
+
+        /* Baseline mapping tree LNs and INs. */
+        final int baseLNs = 2; // Utilization DB and test DB
+        final int baseINs = 2; // Root IN and BIN
+        checkMappingTree(baseLNs, baseINs);
+        forceEviction();
+        checkMappingTree(baseLNs, baseINs);
+
+        /*
+         * Create enough DBs to fill up a BIN in the mapping DB.  NODE_MAX is
+         * configured to be 4 in this test.  There are already 2 DBs open.
+         */
+        final int nDbs = 4;
+        Database[] dbs = new Database[nDbs];
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i] = env.openDatabase(null, "db" + i, dbConfig);
+            status = dbs[i].put(null, entry, entry);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertTrue(isRootResident(dbs[i]));
+        }
+        final int openLNs = baseLNs + nDbs; // Add 1 MapLN per open DB
+        final int openINs = baseINs + 1;    // Add 1 BIN in the mapping tree
+        checkMappingTree(openLNs, openINs);
+        forceEviction();
+        checkMappingTree(openLNs, openINs);
+
+        /* Close DBs and force eviction. */
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i].close();
+        }
+        forceEviction();
+        checkMappingTree(baseLNs, baseINs);
+
+        /* Re-open the DBs, opening each DB twice. */
+        Database[] dbs2 = new Database[nDbs];
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i] = env.openDatabase(null, "db" + i, dbConfig);
+            dbs2[i] = env.openDatabase(null, "db" + i, dbConfig);
+        }
+        checkMappingTree(openLNs, openINs);
+        forceEviction();
+        checkMappingTree(openLNs, openINs);
+
+        /* Close one handle only, MapLN eviction should not occur. */
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i].close();
+        }
+        forceEviction();
+        checkMappingTree(openLNs, openINs);
+
+        /* Close the other handles, eviction should occur. */
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs2[i].close();
+        }
+        forceEviction();
+        checkMappingTree(baseLNs, baseINs);
+
+        closeEnv();
+    }
+
+    /**
+     * Checks that a dirty root IN is not evicted in a read-only environment.
+     * [#16368]
+     */
+    public void testReadOnlyRootINEviction()
+        throws DatabaseException {
+
+        OperationStatus status;
+
+        openEnv(80, SMALL_CACHE_SIZE);
+
+        /* Big record will be used to force eviction. */
+        DatabaseEntry bigRecordKey = new DatabaseEntry(new byte[1]);
+        status = db.put(null, bigRecordKey,
+                        new DatabaseEntry(new byte[BIG_CACHE_SIZE]));
+        assertSame(OperationStatus.SUCCESS, status);
+
+        /* Open DB1 and insert a record to create the root IN. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db1 = env.openDatabase(null, "db1", dbConfig);
+
+        DatabaseEntry smallRecordKey = new DatabaseEntry(new byte[1]);
+        DatabaseEntry smallData = new DatabaseEntry(new byte[1]);
+        status = db1.put(null, smallRecordKey, smallData);
+        assertSame(OperationStatus.SUCCESS, status);
+
+        /* Close environment and re-open it read-only. */
+        db1.close();
+        closeEnv();
+
+        EnvironmentConfig envConfig = 
+            getEnvConfig(80, SMALL_CACHE_SIZE, true /*readOnly*/);
+        envConfig.setConfigParam
+            (EnvironmentParams.EVICTOR_NODES_PER_SCAN.getName(), "1");
+        openEnv(envConfig);
+
+        dbConfig.setReadOnly(true);
+        dbConfig.setAllowCreate(false);
+        db1 = env.openDatabase(null, "db1", dbConfig);
+
+        /* Load a record to load the root IN. */
+        status = db1.get(null, smallRecordKey, new DatabaseEntry(), null);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertTrue(isRootResident(db1));
+
+        /*
+         * Set the root dirty to prevent eviction.  In real life, this can only
+         * be done by recovery in a read-only environment, but that's very
+         * difficult to simulate precisely.
+         */
+        IN rootIN = DbInternal.dbGetDatabaseImpl(db1).
+                               getTree().
+                               getRootIN(CacheMode.DEFAULT);
+        rootIN.setDirty(true);
+        rootIN.releaseLatch();
+
+        /* Root should not be evicted while dirty. */
+        forceReadOnlyEviction(bigRecordKey);
+        assertTrue(isRootResident(db1));
+        forceReadOnlyEviction(bigRecordKey);
+        assertTrue(isRootResident(db1));
+
+        /* When made non-dirty, it can be evicted. */
+        rootIN.setDirty(false);
+        forceReadOnlyEviction(bigRecordKey);
+        assertTrue(!isRootResident(db1));
+
+        db1.close();
+        closeEnv();
+    }
+
+    /**
+     * Check that opening a database in a transaction and then aborting the
+     * transaction will decrement the database use count.  [#13415]
+     */
+    public void testAbortOpen()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_DB_EVICTION.getName(), "true");
+        env = new Environment(envHome, envConfig);
+
+        /* Abort the txn used to open a database. */
+        Transaction txn = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Database db1 = env.openDatabase(txn, "db1", dbConfig);
+        DatabaseImpl saveDb = DbInternal.dbGetDatabaseImpl(db1);
+        txn.abort();
+
+        /* DB should not be in use and does not have to be closed. */
+        assertEquals(false, saveDb.isInUse());
+
+        /*
+         * Environment.close will not throw an exception, even though the DB
+         * has not been closed.  The abort took care of cleaning up the handle.
+         */
+        closeEnv();
+
+        /*
+         * Try a non-transactional DB open that throws an exception because we
+         * create it exclusively and it already exists.  The use count should
+         * be decremented.
+         */
+        env = new Environment(envHome, envConfig);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(true);
+        dbConfig.setTransactional(false);
+        db1 = env.openDatabase(null, "db1", dbConfig);
+        saveDb = DbInternal.dbGetDatabaseImpl(db1);
+        try {
+            env.openDatabase(null, "db1", dbConfig);
+            fail();
+        } catch (DatabaseException e) {
+            assertTrue(e.getMessage().indexOf("already exists") >= 0);
+        }
+        db1.close();
+        assertEquals(false, saveDb.isInUse());
+
+        /*
+         * Try a non-transactional DB open that throws an exception because we
+         * change the duplicatesAllowed setting.  The use count should be
+         * decremented.
+         */
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setExclusiveCreate(false);
+        try {
+            env.openDatabase(null, "db1", dbConfig);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().indexOf("sortedDuplicates") >= 0);
+        }
+        assertEquals(false, saveDb.isInUse());
+
+        closeEnv();
+    }
+
+    /**
+     * Check for the expected number of nodes in the mapping DB.
+     */
+    private void checkMappingTree(int expectLNs, int expectINs)
+        throws DatabaseException {
+
+        IN root = DbInternal.envGetEnvironmentImpl(env).
+            getDbTree().getDb(DbTree.ID_DB_ID).getTree().
+            getRootIN(CacheMode.UNCHANGED);
+        actualLNs = 0;
+        actualINs = 0;
+        countMappingTree(root);
+        root.releaseLatch();
+        assertEquals("LNs", expectLNs, actualLNs);
+        assertEquals("INs", expectINs, actualINs);
+    }
+
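+    /* Recursively count the given IN and all resident child INs/LNs in the mapping tree. */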
+    private void countMappingTree(IN parent) {
+        actualINs += 1;
+        for (int i = 0; i < parent.getNEntries(); i += 1) {
+            if (parent.getTarget(i) != null) {
+                if (parent.getTarget(i) instanceof IN) {
+                    countMappingTree((IN) parent.getTarget(i));
+                } else {
+                    actualLNs += 1;
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns whether the root IN is currently resident for the given DB.
+     */
+    private boolean isRootResident(Database dbParam) {
+        return DbInternal.dbGetDatabaseImpl(dbParam)
+                         .getTree()
+                         .isRootResident();
+    }
+
+    /**
+     * Force eviction by inserting a large record in the pre-opened DB.
+     */
+    private void forceEviction()
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        OperationStatus status;
+
+        /*
+         * Repeat twice to cause a 2nd pass over the INList.  The second pass
+         * evicts BINs that were only stripped of LNs in the first pass.
+         */
+        for (int i = 0; i < 2; i += 1) {
+            Cursor c = db.openCursor(null, null);
+            status = c.put(new DatabaseEntry(new byte[1]),
+                           new DatabaseEntry(new byte[BIG_CACHE_SIZE]));
+            assertSame(OperationStatus.SUCCESS, status);
+
+            /*
+             * Evict while cursor pins LN memory, to ensure eviction of other
+             * DB INs, including the DB root.  When lruOnly=false, root IN
+             * eviction may not occur unless a cursor is used to pin the LN.
+             */
+            env.evictMemory();
+
+            status = c.delete();
+            assertSame(OperationStatus.SUCCESS, status);
+
+            c.close();
+        }
+
+        TestUtils.validateNodeMemUsage(envImpl, true);
+    }
+
+    /**
+     * Force eviction by reading a large record.
+     */
+    private void forceReadOnlyEviction(DatabaseEntry key)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        OperationStatus status;
+
+        /*
+         * Repeat twice to cause a 2nd pass over the INList.  The second pass
+         * evicts BINs that were only stripped of LNs in the first pass.
+         */
+        for (int i = 0; i < 2; i += 1) {
+            Cursor c = db.openCursor(null, null);
+            status = c.getSearchKey(key, new DatabaseEntry(), null);
+            assertSame(OperationStatus.SUCCESS, status);
+
+            /*
+             * Evict while cursor pins LN memory, to ensure eviction of other
+             * DB INs, including the DB root.  When lruOnly=false, root IN
+             * eviction may not occur unless a cursor is used to pin the LN.
+             */
+            env.evictMemory();
+
+            c.close();
+        }
+
+        TestUtils.validateNodeMemUsage(envImpl, true);
+    }
+
+    /**
+     * Open an environment and database.
+     */
+    private void openEnv(int floor,
+                         int maxMem)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig =
+            getEnvConfig(floor, maxMem, false /*readonly*/);
+        openEnv(envConfig);
+    } 
+
+    /**
+     * Build the environment config with the given eviction floor, cache
+     * size, and read-only mode.
+     */
+    private EnvironmentConfig getEnvConfig(int floor,
+                                           int maxMem,
+                                           boolean readOnly)
+        throws DatabaseException {
+
+        /* Convert floor percentage into bytes. */
+        long evictBytes = 0;
+        if (floor > 0) {
+            evictBytes = maxMem - ((maxMem * floor) / 100);
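+            /* e.g. floor=50 yields evictBytes = maxMem/2; floor=80 yields maxMem/5. */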
+        }
+
+        /* Make a non-transactional env with no daemons and small nodes. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(!readOnly);
+        envConfig.setReadOnly(readOnly);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CHECKPOINTER.getName(), "false");
+        if (evictBytes > 0) {
+            envConfig.setConfigParam(EnvironmentParams.
+                                     EVICTOR_EVICT_BYTES.getName(),
+                                     (new Long(evictBytes)).toString());
+        }
+        envConfig.setConfigParam(EnvironmentParams.
+                                 MAX_MEMORY.getName(),
+                                 new Integer(maxMem).toString());
+        /* Don't track detail with a tiny cache size. */
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+				 EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+        envConfig.setConfigParam(EnvironmentParams.NUM_LOG_BUFFERS.getName(),
+				 "2");
+        /* Enable DB (MapLN) eviction for eviction tests. */
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_DB_EVICTION.getName(), "true");
+
+        /*
+         * Disable critical eviction, we want to test under controlled
+         * circumstances.
+         */
+        envConfig.setConfigParam(EnvironmentParams.
+                                 EVICTOR_CRITICAL_PERCENTAGE.getName(),
+                                 "1000");
+
+        /* Make small nodes */
+        envConfig.setConfigParam(EnvironmentParams.
+                                 NODE_MAX.getName(), "4");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 NODE_MAX_DUPTREE.getName(), "4");
+
+        return envConfig;
+    }
+
+    private void openEnv(EnvironmentConfig envConfig)
+        throws DatabaseException {
+
+        env = new Environment(envHome, envConfig);
+        boolean readOnly = envConfig.getReadOnly();
+
+        /* Open database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(!readOnly);
+        dbConfig.setReadOnly(readOnly);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    private void insertData(int nKeys)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < nKeys; i++) {
+
+            IntegerBinding.intToEntry(i, key);
+
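+            /* Every 5th key gets NUM_DUPS duplicates; other keys get one record. */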
+            if ((i % 5) == 0) {
+                for (int j = 10; j < (NUM_DUPS + 10); j++) {
+                    IntegerBinding.intToEntry(j, data);
+                    db.put(null, key, data);
+                }
+            } else {
+                IntegerBinding.intToEntry(i+1, data);
+                db.put(null, key, data);
+            }
+        }
+    }
+
+    private void putLargeData(int nKeys, int dataSize) 
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+        for (int i = 0; i < nKeys; i++) {
+            IntegerBinding.intToEntry(i, key);
+            db.put(null, key, data);
+        }
+    }
+
+    private void verifyData(int nKeys)
+        throws DatabaseException {
+
+        /* Full scan of data, make sure we can bring everything back in. */
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+
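+        /* The cursor scan should return exactly the records insertData wrote, in order. */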
+        for (int i = 0; i < nKeys; i++) {
+            if ((i % 5) == 0) {
+                for (int j = 10; j < (NUM_DUPS + 10); j++) {
+                    assertEquals(OperationStatus.SUCCESS,
+                                 cursor.getNext(key, data, LockMode.DEFAULT));
+                    assertEquals(i, IntegerBinding.entryToInt(key));
+                    assertEquals(j, IntegerBinding.entryToInt(data));
+                }
+            } else {
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.getNext(key, data, LockMode.DEFAULT));
+                assertEquals(i, IntegerBinding.entryToInt(key));
+                assertEquals(i+1, IntegerBinding.entryToInt(data));
+            }
+        }
+
+        assertEquals(OperationStatus.NOTFOUND,
+                     cursor.getNext(key, data, LockMode.DEFAULT));
+        cursor.close();
+    }
+
+    private void evictAndCheck(boolean shouldEvict, int nKeys)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        /*
+         * The following batches are run in a single evictMemory() call:
+         * 1st eviction will strip DBINs.
+         * 2nd will evict DBINs.
+         * 3rd will evict DINs.
+         * 4th will strip BINs.
+         * 5th will evict BINs.
+         * 6th will evict INs.
+         * 7th will evict INs.
+         */
+        long preEvictMem = mb.getCacheMemoryUsage();
+        TestUtils.validateNodeMemUsage(envImpl, true);
+        env.evictMemory();
+        long postEvictMem = mb.getCacheMemoryUsage();
+
+        TestUtils.validateNodeMemUsage(envImpl, true);
+        if (DEBUG) {
+            System.out.println("preEvict=" + preEvictMem +
+                               " postEvict=" + postEvictMem);
+        }
+
+        if (shouldEvict) {
+            assertTrue("preEvict=" + preEvictMem +
+                       " postEvict=" + postEvictMem +
+                       " maxMem=" + mb.getMaxMemory(),
+                       (preEvictMem > postEvictMem));
+        } else {
+            assertTrue("preEvict=" + preEvictMem +
+                       " postEvict=" + postEvictMem,
+                       (preEvictMem == postEvictMem));
+        }
+
+        verifyData(nKeys);
+        TestUtils.validateNodeMemUsage(envImpl, true);
+    }
+}
diff --git a/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java b/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a5cc90a3d926d6e01367662b4dee4af202fc62fc
--- /dev/null
+++ b/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java
@@ -0,0 +1,359 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvictNNodesStatsTest.java,v 1.3.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This test exercises eviction and checks that the expected nodes are
+ * evicted properly.
+ */
+public class EvictNNodesStatsTest extends TestCase {
+
+    private static final boolean DEBUG = false;
+    private static final int BIG_CACHE_SIZE = 500000;
+    private static final int SMALL_CACHE_SIZE =
+        (int) MemoryBudget.MIN_MAX_MEMORY_SIZE;
+
+    private File envHome = null;
+    private Environment env = null;
+    private Database db = null;
+    private int actualLNs = 0;
+    private int actualINs = 0;
+
+    public EvictNNodesStatsTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        IN.ACCUMULATED_LIMIT = 0;
+        Txn.ACCUMULATED_LIMIT = 0;
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        db = null;
+    }
+
+    /**
+     * Check that the counter of evicted MapLNs in the DB mapping tree and
+     * the counter of evicted BINs work correctly for a regular DB.  [#13415]
+     */
+    public void testRegularDB()
+        throws DatabaseException {
+
+        /* Initialize an environment and open a test DB. */
+        openEnv(80, SMALL_CACHE_SIZE);
+
+        EnvironmentStats stats = new EnvironmentStats();
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        DatabaseEntry entry = new DatabaseEntry(new byte[1]);
+        OperationStatus status;
+
+        /* Baseline mapping tree LNs and INs. */
+        final int baseLNs = 2; // Utilization DB and test DB
+        final int baseINs = 2; // Root IN and BIN
+        checkMappingTree(baseLNs, baseINs);
+
+        /*
+         * Create enough DBs to fill up a BIN in the mapping DB.  NODE_MAX is
+         * configured to be 4 in this test.  There are already 2 DBs open.
+         */
+        final int nDbs = 4;
+        Database[] dbs = new Database[nDbs];
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i] = env.openDatabase(null, "db" + i, dbConfig);
+            status = dbs[i].put(null, entry, entry);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertTrue(isRootResident(dbs[i]));
+        }
+        checkMappingTree(baseLNs + nDbs /*Add 1 MapLN per open DB*/,
+                         baseINs + 1 /*Add 1 BIN in the mapping tree*/);
+
+        /* Close DBs and force eviction. */
+        for (int i = 0; i < nDbs; i += 1) {
+            dbs[i].close();
+        }
+
+        forceEviction();
+        /* Load Stats. */
+        DbInternal.envGetEnvironmentImpl(env).
+                   getEvictor().
+                   loadStats(statsConfig, stats);
+        assertEquals("Evicted MapLNs",
+                     nDbs + 1, // nDbs and Utilization DB
+                     stats.getNRootNodesEvicted());
+        assertEquals("Evicted BINs",
+                     nDbs + 4, // 2 BINs for Name DB, 1 for Mapping DB,
+                               // 1 for Utilization DB, and 1 for each nDb
+                     stats.getNNodesExplicitlyEvicted());
+        checkMappingTree(baseLNs, baseINs);
+
+        closeEnv();
+    }
+
+    /**
+     * Check that the counter of evicted MapLNs in the DB mapping tree and
+     * the counter of evicted BINs work correctly for a deferred write DB.
+     * [#13415]
+     */
+    public void testDeferredWriteDB()
+        throws DatabaseException {
+
+        /* Initialize an environment and open a test DB. */
+        openEnv(80, SMALL_CACHE_SIZE);
+
+        EnvironmentStats stats = new EnvironmentStats();
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        DatabaseEntry entry = new DatabaseEntry(new byte[1]);
+        OperationStatus status;
+
+        /* Baseline mapping tree LNs and INs. */
+        final int baseLNs = 2; // Utilization DB and test DB
+        final int baseINs = 2; // Root IN and BIN
+
+        checkMappingTree(baseLNs, baseINs);
+
+        /* Deferred write DBs have special rules. */
+        dbConfig.setDeferredWrite(true);
+        Database db2 = env.openDatabase(null, "db2", dbConfig);
+        status = db2.put(null, entry, entry);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertTrue(isRootResident(db2));
+        checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB.
+
+        /* Root eviction is disallowed if the root is dirty. */
+        forceEviction();
+        /* Load Stats. */
+        DbInternal.envGetEnvironmentImpl(env).
+                   getEvictor().
+                   loadStats(statsConfig, stats);
+        assertEquals("Evicted MapLNs",
+                     1, // Utilization DB.
+                     stats.getNRootNodesEvicted());
+        assertEquals("Evicted BINs",
+                     3, // 1 BIN for Name DB, 1 for Utilization DB,
+                        // and 1 for Deferred Write DB.
+                     stats.getNNodesExplicitlyEvicted());
+        assertTrue(isRootResident(db2));
+        checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB.
+
+        db2.sync();
+        forceEviction();
+        /* Load Stats. */
+        DbInternal.envGetEnvironmentImpl(env).
+                   getEvictor().
+                   loadStats(statsConfig, stats);
+        assertEquals("Evicted MapLNs",
+                     1, // Root eviction.
+                     stats.getNRootNodesEvicted());
+        assertEquals("Evicted BINs",
+                     0,
+                     stats.getNNodesExplicitlyEvicted());
+        assertTrue(!isRootResident(db2));
+        checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB.
+
+        db2.close();
+        forceEviction();
+        /* Load Stats. */
+        DbInternal.envGetEnvironmentImpl(env).
+                   getEvictor().
+                   loadStats(statsConfig, stats);
+        assertEquals("Evicted MapLNs",
+                     1, // Root eviction.
+                     stats.getNRootNodesEvicted());
+        assertEquals("Evicted BINs",
+                     0,
+                     stats.getNNodesExplicitlyEvicted());
+
+        checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB.
+
+        closeEnv();
+    }
+
+    private void forceEviction()
+        throws DatabaseException {
+
+        OperationStatus status;
+
+        /*
+         * Repeat twice to cause a 2nd pass over the INList.  The second pass
+         * evicts BINs that were only stripped of LNs in the first pass.
+         */
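+        /*
+         * Each put below inserts a record of BIG_CACHE_SIZE (500K) bytes,
+         * well above the SMALL_CACHE_SIZE budget used by these tests, so the
+         * cache overflows and evictMemory() has real work to do.
+         */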
+        for (int i = 0; i < 2; i += 1) {
+            /* Fill up the cache to trigger eviction. */
+            status = db.put(null, new DatabaseEntry(new byte[1]),
+                                  new DatabaseEntry(new byte[BIG_CACHE_SIZE]));
+            assertSame(OperationStatus.SUCCESS, status);
+
+            /* Do a manual eviction call. */
+            env.evictMemory();
+
+            status = db.delete(null, new DatabaseEntry(new byte[1]));
+            assertSame(OperationStatus.SUCCESS, status);
+        }
+    }
+
+    /**
+     * Check for the expected number of nodes in the mapping DB.
+     */
+    private void checkMappingTree(int expectLNs, int expectINs)
+        throws DatabaseException {
+
+        IN root = DbInternal.envGetEnvironmentImpl(env).
+            getDbTree().getDb(DbTree.ID_DB_ID).getTree().
+            getRootIN(CacheMode.UNCHANGED);
+        actualLNs = 0;
+        actualINs = 0;
+        countMappingTree(root);
+        root.releaseLatch();
+        assertEquals("LNs", expectLNs, actualLNs);
+        assertEquals("INs", expectINs, actualINs);
+    }
+
+    private void countMappingTree(IN parent) {
+        actualINs += 1;
+        for (int i = 0; i < parent.getNEntries(); i += 1) {
+            if (parent.getTarget(i) != null) {
+                if (parent.getTarget(i) instanceof IN) {
+                    countMappingTree((IN) parent.getTarget(i));
+                } else {
+                    actualLNs += 1;
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns whether the root IN is currently resident for the given DB.
+     */
+    private boolean isRootResident(Database dbParam) {
+        return DbInternal.dbGetDatabaseImpl(dbParam).
+                          getTree().
+                          isRootResident();
+    }
+
+    /**
+     * Open an environment and database.
+     */
+    private void openEnv(int floor,
+                         int maxMem)
+        throws DatabaseException {
+
+        /* Convert floor percentage into bytes. */
+        long evictBytes = maxMem - ((maxMem * floor) / 100);
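+        /*
+         * For example, openEnv(80, SMALL_CACHE_SIZE) asks the evictor to
+         * free 20% of the cache on each overflow, since evictBytes is
+         * maxMem minus 80% of maxMem.
+         */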
+
+        /* Make a non-transactional env with no daemons and small nodes. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 EVICTOR_EVICT_BYTES.getName(),
+                                 (new Long(evictBytes)).toString());
+        envConfig.setConfigParam(EnvironmentParams.
+                                 MAX_MEMORY.getName(),
+                                 new Integer(maxMem).toString());
+        /* Enable DB (MapLN) eviction for eviction tests. */
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_DB_EVICTION.getName(), "true");
+
+        /* Make small nodes. */
+        envConfig.setConfigParam(EnvironmentParams.
+                                 NODE_MAX.getName(), "4");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 NODE_MAX_DUPTREE.getName(), "4");
+        if (DEBUG) {
+            envConfig.setConfigParam(EnvironmentParams.
+                                     JE_LOGGING_CONSOLE.getName(), "true");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     JE_LOGGING_LEVEL_EVICTOR.getName(),
+                                     "SEVERE");
+        }
+        env = new Environment(envHome, envConfig);
+
+        /* Open a database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/evictor/EvictSelectionTest.java b/test/com/sleepycat/je/evictor/EvictSelectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e27a9a8ff77fd7c2bc5afbab94da40cad47f095b
--- /dev/null
+++ b/test/com/sleepycat/je/evictor/EvictSelectionTest.java
@@ -0,0 +1,411 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EvictSelectionTest.java,v 1.22.2.3 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+
+public class EvictSelectionTest extends TestCase {
+    private static boolean DEBUG = false;
+    private static String DB_NAME = "EvictionSelectionTestDb";
+    private File envHome;
+    private int scanSize = 5;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+
+    public EvictSelectionTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        env = null;
+        envImpl = null;
+
+        try {
+            TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+    }
+
+    public void testEvictPass()
+        throws Throwable {
+
+        /* Create an environment, database, and insert some data. */
+        initialize(true);
+
+        /* The SharedEvictor is not testable using getExpectedCandidates. */
+        if (env.getConfig().getSharedCache()) {
+            env.close();
+            env = null;
+            return;
+        }
+
+        EnvironmentStats stats = new EnvironmentStats();
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+
+        /*
+         * Set up the test w/a number of INs that doesn't divide evenly
+         * into scan sets.
+         */
+        int startingNumINs = envImpl.getInMemoryINs().getSize();
+        assertTrue((startingNumINs % scanSize) != 0);
+
+        Evictor evictor = envImpl.getEvictor();
+        /* Evict once to initialize the scan iterator. */
+        evictor.evictBatch("test", false, 1);
+        evictor.loadStats(statsConfig, stats);
+
+        /*
+         * Test evictBatch, where each batch only evicts one node because
+         * we are passing one byte for the currentRequiredEvictBytes
+         * parameter.  To predict the evicted nodes when more than one
+         * target is selected, we would have to simulate eviction and
+         * maintain a parallel IN tree, which is too complex.
+         */
+        for (int batch = 1;; batch += 1) {
+
+            List<Long> expectedCandidates = new ArrayList<Long>();
+            int expectedNScanned = getExpectedCandidates
+                (envImpl, evictor, expectedCandidates);
+
+            evictor.evictBatch("test", false, 1);
+
+            evictor.loadStats(statsConfig, stats);
+            assertEquals(1, stats.getNEvictPasses());
+            assertEquals(expectedNScanned, stats.getNNodesScanned());
+
+            List<Long> candidates = evictor.evictProfile.getCandidates();
+            assertEquals(expectedCandidates, candidates);
+
+            /* Stop when no more nodes are evictable. */
+            if (expectedCandidates.isEmpty()) {
+                break;
+            }
+        }
+
+        env.close();
+        env = null;
+    }
+
+    /*
+     * We might call evict on an empty INList if the cache is set very low
+     * at recovery time.
+     */
+    public void testEmptyINList()
+        throws Throwable {
+
+        /* Create an environment, database, and insert some data. */
+        initialize(true);
+
+        env.close();
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        env = new Environment(envHome, envConfig);
+        env.close();
+        env = null;
+    }
+
+    /*
+     * Create an environment, database, and insert some data.
+     */
+    private void initialize(boolean makeDatabase)
+        throws DatabaseException {
+
+        /* Environment */
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CLEANER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CHECKPOINTER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 NODE_MAX.getName(), "4");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 EVICTOR_NODES_PER_SCAN.getName(), "5");
+        if (DEBUG) {
+            envConfig.setConfigParam(EnvironmentParams.
+                                     JE_LOGGING_CONSOLE.getName(), "true");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     JE_LOGGING_LEVEL_EVICTOR.getName(),
+                                     "SEVERE");
+        }
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        if (makeDatabase) {
+            /* Database */
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            Database db = env.openDatabase(null, "foo", dbConfig);
+
+            /* Insert enough keys to get an odd number of nodes */
+
+            DatabaseEntry keyAndData = new DatabaseEntry();
+            for (int i = 0; i < 110; i++) {
+                IntegerBinding.intToEntry(i, keyAndData);
+                db.put(null, keyAndData, keyAndData);
+            }
+
+            db.close();
+        }
+    }
+
+    /**
+     * Returns the number of INs selected (examined) and fills the expected
+     * list with the selected targets.  Currently only one target is selected.
+     */
+    private int getExpectedCandidates(EnvironmentImpl envImpl,
+                                      Evictor evictor,
+                                      List<Long> expected)
+        throws DatabaseException {
+
+        if (!envImpl.getMemoryBudget().isTreeUsageAboveMinimum()) {
+            return 0;
+        }
+
+        boolean evictByLruOnly = envImpl.getConfigManager().getBoolean
+            (EnvironmentParams.EVICTOR_LRU_ONLY);
+        INList inList = envImpl.getInMemoryINs();
+
+        Iterator<IN> inIter = evictor.getScanIterator();
+        IN firstScanned = null;
+        boolean firstWrapped = false;
+
+        long targetGeneration = Long.MAX_VALUE;
+        int targetLevel = Integer.MAX_VALUE;
+        boolean targetDirty = true;
+        IN target = null;
+
+        boolean wrapped = false;
+        int nIterated = 0;
+        int maxNodesToIterate = evictor.getMaxINsPerBatch();
+        int nCandidates = 0;
+
+        /* Simulate the eviction algorithm. */
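+        /*
+         * Selection priority, mirroring the loop below: with LRU-only, the
+         * lowest generation wins; otherwise the lowest normalized level wins
+         * first, then a clean node beats a dirty one at the same level, and
+         * generation breaks the remaining ties.
+         */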
+        while (nIterated < maxNodesToIterate && nCandidates < scanSize) {
+
+            if (!inIter.hasNext()) {
+                inIter = inList.iterator();
+                wrapped = true;
+            }
+
+            IN in = inIter.next();
+            nIterated += 1;
+
+            if (firstScanned == null) {
+                firstScanned = in;
+                firstWrapped = wrapped;
+            }
+
+            if (in.getDatabase() == null || in.getDatabase().isDeleted()) {
+                continue;
+            }
+
+            int evictType = in.getEvictionType();
+            if (evictType == IN.MAY_NOT_EVICT) {
+                continue;
+            }
+
+            if (evictByLruOnly) {
+                if (in.getGeneration() < targetGeneration) {
+                    targetGeneration = in.getGeneration();
+                    target = in;
+                }
+            } else {
+                int level = evictor.normalizeLevel(in, evictType);
+                if (targetLevel != level) {
+                    if (targetLevel > level) {
+                        targetLevel = level;
+                        targetDirty = in.getDirty();
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                } else if (targetDirty != in.getDirty()) {
+                    if (targetDirty) {
+                        targetDirty = false;
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                } else {
+                    if (targetGeneration > in.getGeneration()) {
+                        targetGeneration = in.getGeneration();
+                        target = in;
+                    }
+                }
+            }
+
+            nCandidates++;
+        }
+
+        /*
+         * Restore the Evictor's iterator position to just before the
+         * firstScanned IN.  There is no way to clone an iterator and we can't
+         * create a tailSet iterator because the map is unsorted.
+         */
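+        /*
+         * When the first scanned IN was reached after wrapping, the restored
+         * iterator is advanced past the whole list; the next scan will wrap
+         * to the front again, just as the simulated scan did.
+         */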
+        int prevPosition = 0;
+        if (firstWrapped) {
+            for (IN in : inList) {
+                prevPosition += 1;
+            }
+        } else {
+            boolean firstScannedFound = false;
+            for (IN in : inList) {
+                if (in == firstScanned) {
+                    firstScannedFound = true;
+                    break;
+                } else {
+                    prevPosition += 1;
+                }
+            }
+            assertTrue(firstScannedFound);
+        }
+        inIter = inList.iterator();
+        while (prevPosition > 0) {
+            inIter.next();
+            prevPosition -= 1;
+        }
+        evictor.setScanIterator(inIter);
+
+        /* Return the expected IN. */
+        expected.clear();
+        if (target != null) {
+            expected.add(new Long(target.getNodeId()));
+        }
+        return nIterated;
+    }
+
+    /**
+     * Tests a fix for an eviction bug that could cause an OOME in a read-only
+     * environment.  [#17590]
+     *
+     * Before the bug fix, a dirty IN prevented eviction from working if the
+     * dirty IN is returned by Evictor.selectIN repeatedly, only to be rejected
+     * by Evictor.evictIN because it is dirty.  A dirty IN was considered as a
+     * target and sometimes selected by selectIN as a way to avoid an infinite
+     * loop when all INs are dirty.  This is unnecessary, since a condition was
+     * added to cause the selectIN loop to terminate when all INs in the INList
+     * have been iterated.  Now, with the fix, a dirty IN in a read-only
+     * environment is never considered as a target or returned by selectIN.
+     *
+     * The OOME was reproduced with a simple test that uses a cursor to iterate
+     * through 100k records, each 100k in size, in a read-only environment with
+     * a 16m heap.  However, reproducing the problem in a fast-running unit
+     * test is very difficult.  Instead, since the code change only impacts a
+     * read-only environment, this unit test only ensures that the fix does not
+     * cause an infinite loop when all nodes are dirty.
+     */
+    public void testReadOnlyAllDirty()
+        throws Throwable {
+
+        /* Create an environment, database, and insert some data. */
+        initialize(true /*makeDatabase*/);
+        env.close();
+        env = null;
+        envImpl = null;
+
+        /* Open the environment read-only. */
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setReadOnly(true);
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /* Load everything into cache. */
+        {
+            final DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            final Database db = env.openDatabase(null, "foo", dbConfig);
+            final Cursor cursor = db.openCursor(null, null);
+            final DatabaseEntry key = new DatabaseEntry();
+            final DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status = cursor.getFirst(key, data, null);
+            while (status == OperationStatus.SUCCESS) {
+                status = cursor.getNext(key, data, null);
+            }
+            cursor.close();
+            db.close();
+        }
+
+        /* Artificially make all nodes dirty in a read-only environment. */
+        for (IN in : envImpl.getInMemoryINs()) {
+            in.setDirty(true);
+        }
+
+        /*
+         * Force an eviction.  No nodes will be selected for an eviction,
+         * because all nodes are dirty.  If the (nIterated < maxNodesToIterate)
+         * condition is removed from the selectIN loop, an infinite loop will
+         * occur.
+         */
+        final EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+        mutableConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        env.setMutableConfig(mutableConfig);
+        final StatsConfig clearStats = new StatsConfig();
+        clearStats.setClear(true);
+        EnvironmentStats stats = env.getStats(clearStats);
+        env.evictMemory();
+        stats = env.getStats(clearStats);
+        assertEquals(0, stats.getNNodesSelected());
+
+        env.close();
+        env = null;
+        envImpl = null;
+    }
+}
diff --git a/test/com/sleepycat/je/evictor/LRUTest.java b/test/com/sleepycat/je/evictor/LRUTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..47d33c326283ca8aa33356326eac9204be5288fd
--- /dev/null
+++ b/test/com/sleepycat/je/evictor/LRUTest.java
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LRUTest.java,v 1.3.2.4 2010/03/26 13:23:55 mark Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests that the LRU algorithm is accurate.
+ */
+public class LRUTest extends TestCase {
+
+    private static final int N_DBS = 5;
+    private static final int ONE_MB = 1 << 20;
+    private static final int DB_CACHE_SIZE = ONE_MB;
+    private static final int ENV_CACHE_SIZE = N_DBS * DB_CACHE_SIZE;
+    private static final int MIN_DATA_SIZE = 50 * 1024;
+    private static final int LRU_ACCURACY_PCT = 60;
+    private static final int ENTRY_DATA_SIZE = 500;
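+
+    /*
+     * Accuracy criterion used by the checks below: the smallest per-DB
+     * cache usage must be at least LRU_ACCURACY_PCT percent of the largest,
+     * i.e. (low * 100) / high >= LRU_ACCURACY_PCT.
+     */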
+
+    private File envHome;
+    private Environment env;
+    private Database[] dbs = new Database[N_DBS];
+
+    public LRUTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown() {
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        dbs = null;
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setCacheSize(ENV_CACHE_SIZE);
+        envConfig.setConfigParam("je.tree.minMemory",
+                                 String.valueOf(MIN_DATA_SIZE));
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        for (int i = 0; i < dbs.length; i += 1) {
+            dbs[i] = env.openDatabase(null, "foo-" + i, dbConfig);
+        }
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        for (int i = 0; i < N_DBS; i += 1) {
+            if (dbs[i] != null) {
+                dbs[i].close();
+                dbs[i] = null;
+            }
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    public void testBaseline()
+        throws DatabaseException {
+
+        open();
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(dbs[i], DB_CACHE_SIZE);
+        }
+        long[] results = new long[100];
+        for (int repeat = 0; repeat < 100; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (dbs[j].get(null, key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            /*
+             * Check that each DB uses approximately equal portions of the
+             * cache.
+             */
+            StringBuffer buf = new StringBuffer();
+            long low = Long.MAX_VALUE;
+            long high = 0;
+            for (int i = 0; i < N_DBS; i += 1) {
+                long val = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + val);
+                if (low > val) {
+                    low = val;
+                }
+                if (high < val) {
+                    high = val;
+                }
+            }
+            long pct = (low * 100) / high;
+            assertTrue("failed with pct=" + pct + buf,
+                       pct >= LRU_ACCURACY_PCT);
+            results[repeat] = pct;
+        }
+        Arrays.sort(results);
+        //System.out.println(Arrays.toString(results));
+
+        close();
+    }
+
+    public void testCacheMode_KEEP_HOT()
+        throws DatabaseException {
+
+        open();
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(dbs[i], DB_CACHE_SIZE);
+        }
+        long[] results = new long[100];
+        for (int repeat = 0; repeat < 100; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            Cursor[] cursors = new Cursor[N_DBS];
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j] = dbs[j].openCursor(null, null);
+            }
+            cursors[0].setCacheMode(CacheMode.KEEP_HOT);
+            cursors[1].setCacheMode(CacheMode.KEEP_HOT);
+            cursors[2].setCacheMode(CacheMode.DEFAULT);
+            cursors[3].setCacheMode(CacheMode.DEFAULT);
+            cursors[4].setCacheMode(CacheMode.DEFAULT);
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (cursors[j].getSearchKey(key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j].close();
+            }
+
+            /*
+             * Check that db[0] and db[1] use more than the other three.
+             */
+            StringBuffer buf = new StringBuffer();
+            long[] dbBytes = new long[N_DBS];
+            for (int i = 0; i < N_DBS; i += 1) {
+                dbBytes[i] = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + dbBytes[i]);
+            }
+            assertTrue(dbBytes[0] > dbBytes[2]);
+            assertTrue(dbBytes[0] > dbBytes[3]);
+            assertTrue(dbBytes[0] > dbBytes[4]);
+            assertTrue(dbBytes[1] > dbBytes[2]);
+            assertTrue(dbBytes[1] > dbBytes[3]);
+            assertTrue(dbBytes[1] > dbBytes[4]);
+        }
+        Arrays.sort(results);
+        //System.out.println(Arrays.toString(results));
+
+        close();
+    }
+
+    public void testCacheMode_UNCHANGED()
+        throws DatabaseException {
+
+        open();
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(dbs[i], DB_CACHE_SIZE);
+        }
+        long[] results = new long[100];
+        for (int repeat = 0; repeat < 100; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            Cursor[] cursors = new Cursor[N_DBS];
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j] = dbs[j].openCursor(null, null);
+            }
+            cursors[0].setCacheMode(CacheMode.UNCHANGED);
+            cursors[1].setCacheMode(CacheMode.UNCHANGED);
+            cursors[2].setCacheMode(CacheMode.DEFAULT);
+            cursors[3].setCacheMode(CacheMode.DEFAULT);
+            cursors[4].setCacheMode(CacheMode.DEFAULT);
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (cursors[j].getSearchKey(key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j].close();
+            }
+
+            /*
+             * Check that db[0] and db[1] use less than the other three.
+             */
+            StringBuffer buf = new StringBuffer();
+            long[] dbBytes = new long[N_DBS];
+            for (int i = 0; i < N_DBS; i += 1) {
+                dbBytes[i] = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + dbBytes[i]);
+            }
+            assertTrue(dbBytes[0] < dbBytes[2]);
+            assertTrue(dbBytes[0] < dbBytes[3]);
+            assertTrue(dbBytes[0] < dbBytes[4]);
+            assertTrue(dbBytes[1] < dbBytes[2]);
+            assertTrue(dbBytes[1] < dbBytes[3]);
+            assertTrue(dbBytes[1] < dbBytes[4]);
+            //System.out.println(buf);
+        }
+        Arrays.sort(results);
+        //System.out.println(Arrays.toString(results));
+
+        close();
+    }
+
+    public void testCacheMode_MAKE_COLD()
+        throws DatabaseException {
+
+        open();
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(dbs[i], DB_CACHE_SIZE);
+        }
+        long[] results = new long[100];
+        for (int repeat = 0; repeat < 100; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            Cursor[] cursors = new Cursor[N_DBS];
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j] = dbs[j].openCursor(null, null);
+            }
+            cursors[0].setCacheMode(CacheMode.MAKE_COLD);
+            cursors[1].setCacheMode(CacheMode.MAKE_COLD);
+            cursors[2].setCacheMode(CacheMode.UNCHANGED);
+            cursors[3].setCacheMode(CacheMode.UNCHANGED);
+            cursors[4].setCacheMode(CacheMode.UNCHANGED);
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (cursors[j].getSearchKey(key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j].close();
+            }
+
+            /*
+             * Check that db[0] and db[1] use less than the other three.
+             */
+            StringBuffer buf = new StringBuffer();
+            long[] dbBytes = new long[N_DBS];
+            for (int i = 0; i < N_DBS; i += 1) {
+                dbBytes[i] = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + dbBytes[i]);
+            }
+            assertTrue(dbBytes[0] < dbBytes[2]);
+            assertTrue(dbBytes[0] < dbBytes[3]);
+            assertTrue(dbBytes[0] < dbBytes[4]);
+            assertTrue(dbBytes[1] < dbBytes[2]);
+            assertTrue(dbBytes[1] < dbBytes[3]);
+            assertTrue(dbBytes[1] < dbBytes[4]);
+            //System.out.println(buf);
+        }
+        Arrays.sort(results);
+        //System.out.println(Arrays.toString(results));
+
+        close();
+    }
+
+    public void testCacheMode_EVICT_LN()
+        throws DatabaseException {
+
+        open();
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(dbs[i], DB_CACHE_SIZE);
+        }
+        long[] results = new long[100];
+        for (int repeat = 0; repeat < 100; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            Cursor[] cursors = new Cursor[N_DBS];
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j] = dbs[j].openCursor(null, null);
+            }
+            cursors[0].setCacheMode(CacheMode.EVICT_LN);
+            cursors[1].setCacheMode(CacheMode.EVICT_LN);
+            cursors[2].setCacheMode(CacheMode.MAKE_COLD);
+            cursors[3].setCacheMode(CacheMode.MAKE_COLD);
+            cursors[4].setCacheMode(CacheMode.MAKE_COLD);
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (cursors[j].getSearchKey(key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            for (int j = 0; j < N_DBS; j++) {
+                cursors[j].close();
+            }
+
+            /*
+             * Check that db[0] and db[1] use less than the other three.
+             */
+            StringBuffer buf = new StringBuffer();
+            long[] dbBytes = new long[N_DBS];
+            for (int i = 0; i < N_DBS; i += 1) {
+                dbBytes[i] = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + dbBytes[i]);
+            }
+            assertTrue(dbBytes[0] < dbBytes[2]);
+            assertTrue(dbBytes[0] < dbBytes[3]);
+            assertTrue(dbBytes[0] < dbBytes[4]);
+            assertTrue(dbBytes[1] < dbBytes[2]);
+            assertTrue(dbBytes[1] < dbBytes[3]);
+            assertTrue(dbBytes[1] < dbBytes[4]);
+            //System.out.println(buf);
+        }
+        Arrays.sort(results);
+        //System.out.println(Arrays.toString(results));
+
+        close();
+    }
+
+    private long getDatabaseCacheBytes(Database db)
+        throws DatabaseException {
+
+        long total = 0;
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        INList ins = dbImpl.getDbEnvironment().getInMemoryINs();
+        Iterator i = ins.iterator();
+        while (i.hasNext()) {
+            IN in = (IN) i.next();
+            if (in.getDatabase() == dbImpl) {
+                total += in.getInMemorySize();
+            }
+        }
+        return total;
+    }
+
+    /**
+     * Writes enough records to the given database to cause at least
+     * minSizeToWrite bytes to be used in the cache.
+     */
+    private int write(Database db, int minSizeToWrite)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]);
+        int i;
+        for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            db.put(null, key, data);
+        }
+        return i;
+    }
+}
diff --git a/test/com/sleepycat/je/evictor/SharedCacheTest.java b/test/com/sleepycat/je/evictor/SharedCacheTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..025377e30426d70bd2e7d38545dfe791f68d8221
--- /dev/null
+++ b/test/com/sleepycat/je/evictor/SharedCacheTest.java
@@ -0,0 +1,510 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SharedCacheTest.java,v 1.11.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests the shared cache feature enabled via Environment.setSharedCache(true).
+ */
+public class SharedCacheTest extends TestCase {
+
+    private static final int N_ENVS = 5;
+    private static final int ONE_MB = 1 << 20;
+    private static final int ENV_CACHE_SIZE = ONE_MB;
+    private static final int TOTAL_CACHE_SIZE = N_ENVS * ENV_CACHE_SIZE;
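+    /* Assumed to mirror JE's default log buffer budget of ~7% of cache. */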
+    private static final int LOG_BUFFER_SIZE = (ENV_CACHE_SIZE * 7) / 100;
+    private static final int MIN_DATA_SIZE = 50 * 1024;
+    private static final int LRU_ACCURACY_PCT = 60;
+    private static final int ENTRY_DATA_SIZE = 500;
+    private static final String TEST_PREFIX = "SharedCacheTest_";
+    private static final StatsConfig CLEAR_CONFIG = new StatsConfig();
+    static {
+        CLEAR_CONFIG.setClear(true);
+    }
+
+    private File envHome;
+    private File[] dirs;
+    private Environment[] envs;
+    private Database[] dbs;
+    private boolean sharedCache = true;
+
+    public SharedCacheTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        dirs = new File[N_ENVS];
+        envs = new Environment[N_ENVS];
+        dbs = new Database[N_ENVS];
+
+        for (int i = 0; i < N_ENVS; i += 1) {
+            dirs[i] = new File(envHome, TEST_PREFIX + i);
+            dirs[i].mkdir();
+            assertTrue(dirs[i].isDirectory());
+            TestUtils.removeLogFiles("Setup", dirs[i], false);
+        }
+    }
+
+    public void tearDown() {
+        for (int i = 0; i < N_ENVS; i += 1) {
+            if (dbs[i] != null) {
+                try {
+                    dbs[i].close();
+                } catch (Throwable e) {
+                    System.out.println("tearDown: " + e);
+                }
+                dbs[i] = null;
+            }
+            if (envs[i] != null) {
+                try {
+                    envs[i].close();
+                } catch (Throwable e) {
+                    System.out.println("tearDown: " + e);
+                }
+                envs[i] = null;
+            }
+            if (dirs[i] != null) {
+                try {
+                    TestUtils.removeLogFiles("TearDown", dirs[i], false);
+                } catch (Throwable e) {
+                    System.out.println("tearDown: " + e);
+                }
+                dirs[i] = null;
+            }
+        }
+        envHome = null;
+        dirs = null;
+        envs = null;
+        dbs = null;
+    }
+
+    public void testBaseline()
+        throws DatabaseException {
+
+        /* Open all DBs in the same environment. */
+        final int N_DBS = N_ENVS;
+        sharedCache = false;
+        openOne(0);
+        DatabaseConfig dbConfig = dbs[0].getConfig();
+        for (int i = 1; i < N_DBS; i += 1) {
+            dbs[i] = envs[0].openDatabase(null, "foo" + i, dbConfig);
+        }
+        for (int i = 0; i < N_DBS; i += 1) {
+            write(i, ENV_CACHE_SIZE);
+        }
+
+        for (int repeat = 0; repeat < 50; repeat += 1) {
+
+            /* Read all DBs evenly. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            boolean done = false;
+            for (int i = 0; !done; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_DBS; j += 1) {
+                    if (dbs[j].get(null, key, data, null) !=
+                        OperationStatus.SUCCESS) {
+                        done = true;
+                    }
+                }
+            }
+
+            /*
+             * Check that each DB uses approximately equal portions of the
+             * cache.
+             */
+            StringBuffer buf = new StringBuffer();
+            long low = Long.MAX_VALUE;
+            long high = 0;
+            for (int i = 0; i < N_DBS; i += 1) {
+                long val = getDatabaseCacheBytes(dbs[i]);
+                buf.append(" db=" + i + " bytes=" + val);
+                if (low > val) {
+                    low = val;
+                }
+                if (high < val) {
+                    high = val;
+                }
+            }
+            long pct = (low * 100) / high;
+            assertTrue("failed with pct=" + pct + buf,
+                       pct >= LRU_ACCURACY_PCT);
+        }
+
+        for (int i = 1; i < N_DBS; i += 1) {
+            dbs[i].close();
+            dbs[i] = null;
+        }
+        closeOne(0);
+    }
+
+    private long getDatabaseCacheBytes(Database db) {
+        long total = 0;
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        for (IN in : dbImpl.getDbEnvironment().getInMemoryINs()) {
+            if (in.getDatabase() == dbImpl) {
+                total += in.getInMemorySize();
+            }
+        }
+        return total;
+    }
+
+    /**
+     * Writes to each env one at a time, writing enough data in each env to
+     * fill the entire cache.  Each env in turn takes up a large majority of
+     * the cache.
+     */
+    public void testWriteOneEnvAtATime()
+        throws DatabaseException {
+
+        final int SMALL_DATA_SIZE = MIN_DATA_SIZE + (20 * 1024);
+        final int SMALL_TOTAL_SIZE = SMALL_DATA_SIZE + LOG_BUFFER_SIZE;
+        final int BIG_TOTAL_SIZE = ENV_CACHE_SIZE -
+                                   ((N_ENVS - 1) * SMALL_TOTAL_SIZE);
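+        /*
+         * Rough arithmetic: SMALL_TOTAL_SIZE is ~145K bytes (70K of data
+         * plus a ~73K log buffer), so BIG_TOTAL_SIZE leaves ~468K bytes of
+         * the 1MB env cache for the env currently being written.
+         */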
+        openAll();
+        for (int i = 0; i < N_ENVS; i += 1) {
+            write(i, TOTAL_CACHE_SIZE);
+            EnvironmentStats stats = envs[i].getStats(null);
+            String msg = "env=" + i +
+                         " total=" + stats.getCacheTotalBytes() +
+                         " shared=" + stats.getSharedCacheTotalBytes();
+            assertTrue(stats.getSharedCacheTotalBytes() >= BIG_TOTAL_SIZE);
+            assertTrue(msg, stats.getCacheTotalBytes() >= BIG_TOTAL_SIZE);
+        }
+        closeAll();
+    }
+
+    /**
+     * Writes alternating records to each env, writing enough data to fill the
+     * entire cache.  Each env takes up roughly equal portions of the cache.
+     */
+    public void testWriteAllEnvsEvenly()
+        throws DatabaseException {
+
+        openAll();
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]);
+        for (int i = 0; i < ENV_CACHE_SIZE / ENTRY_DATA_SIZE; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            for (int j = 0; j < N_ENVS; j += 1) {
+                dbs[j].put(null, key, data);
+            }
+            checkStatsConsistency();
+        }
+        checkEvenCacheUsage();
+        closeAll();
+    }
+
+    /**
+     * Checks that the cache usage changes appropriately as environments are
+     * opened and closed.
+     */
+    public void testOpenClose()
+        throws DatabaseException {
+
+        openAll();
+        int nRecs = 0;
+        for (int i = 0; i < N_ENVS; i += 1) {
+            int n = write(i, TOTAL_CACHE_SIZE);
+            if (nRecs < n) {
+                nRecs = n;
+            }
+        }
+        closeAll();
+        openAll();
+        readEvenly(nRecs);
+        /* Close only one. */
+        for (int i = 0; i < N_ENVS; i += 1) {
+            closeOne(i);
+            readEvenly(nRecs);
+            openOne(i);
+            readEvenly(nRecs);
+        }
+        /* Close all but one. */
+        for (int i = 0; i < N_ENVS; i += 1) {
+            for (int j = 0; j < N_ENVS; j += 1) {
+                if (j != i) {
+                    closeOne(j);
+                }
+            }
+            readEvenly(nRecs);
+            for (int j = 0; j < N_ENVS; j += 1) {
+                if (j != i) {
+                    openOne(j);
+                }
+            }
+            readEvenly(nRecs);
+        }
+        closeAll();
+    }
+
+    /**
+     * Checks that an environment with hot data uses more of the cache.
+     */
+    public void testHotness()
+        throws DatabaseException {
+
+        final int HOT_CACHE_SIZE = (int) (1.5 * ENV_CACHE_SIZE);
+        openAll();
+        int nRecs = Integer.MAX_VALUE;
+        for (int i = 0; i < N_ENVS; i += 1) {
+            int n = write(i, TOTAL_CACHE_SIZE);
+            if (nRecs > n) {
+                nRecs = n;
+            }
+        }
+        readEvenly(nRecs);
+        /* Keep one env "hot". */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int i = 0; i < N_ENVS; i += 1) {
+            for (int j = 0; j < N_ENVS; j += 1) {
+                for (int k = 0; k < nRecs; k += 1) {
+                    IntegerBinding.intToEntry(k, key);
+                    dbs[i].get(null, key, data, null);
+                    dbs[j].get(null, key, data, null);
+                }
+                checkStatsConsistency();
+                EnvironmentStats iStats = envs[i].getStats(null);
+                EnvironmentStats jStats = envs[j].getStats(null);
+
+                if (iStats.getCacheTotalBytes() < HOT_CACHE_SIZE ||
+                    jStats.getCacheTotalBytes() < HOT_CACHE_SIZE) {
+
+                    StringBuilder msg = new StringBuilder();
+                    msg.append("Hot cache size is below " + HOT_CACHE_SIZE +
+                               " for env " + i + " or " + j);
+                    for (int k = 0; k < N_ENVS; k += 1) {
+                        msg.append("\n**** ENV " + k + " ****\n");
+                        msg.append(envs[k].getStats(null));
+                    }
+                    fail(msg.toString());
+                }
+            }
+        }
+        closeAll();
+    }
+
+    /**
+     * Tests changing the cache size.
+     */
+    public void testMutateCacheSize()
+        throws DatabaseException {
+
+        final int HALF_CACHE_SIZE = TOTAL_CACHE_SIZE / 2;
+        openAll();
+        int nRecs = 0;
+        for (int i = 0; i < N_ENVS; i += 1) {
+            int n = write(i, ENV_CACHE_SIZE);
+            if (nRecs < n) {
+                nRecs = n;
+            }
+        }
+        /* Full cache size. */
+        readEvenly(nRecs);
+        EnvironmentStats stats = envs[0].getStats(null);
+        assertTrue(Math.abs
+                   (TOTAL_CACHE_SIZE - stats.getSharedCacheTotalBytes())
+                   < (TOTAL_CACHE_SIZE / 10));
+        /* Halve cache size. */
+        EnvironmentMutableConfig config = envs[0].getMutableConfig();
+        config.setCacheSize(HALF_CACHE_SIZE);
+        envs[0].setMutableConfig(config);
+        readEvenly(nRecs);
+        stats = envs[0].getStats(null);
+        assertTrue(Math.abs
+                   (HALF_CACHE_SIZE - stats.getSharedCacheTotalBytes())
+                   < (HALF_CACHE_SIZE / 10));
+        /* Full cache size. */
+        config = envs[0].getMutableConfig();
+        config.setCacheSize(TOTAL_CACHE_SIZE);
+        envs[0].setMutableConfig(config);
+        readEvenly(nRecs);
+        stats = envs[0].getStats(null);
+        assertTrue(Math.abs
+                   (TOTAL_CACHE_SIZE - stats.getSharedCacheTotalBytes())
+                   < (TOTAL_CACHE_SIZE / 10));
+        closeAll();
+    }
+
+    private void openAll()
+        throws DatabaseException {
+
+        for (int i = 0; i < N_ENVS; i += 1) {
+            openOne(i);
+        }
+    }
+
+    private void openOne(int i)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setSharedCache(sharedCache);
+        envConfig.setCacheSize(TOTAL_CACHE_SIZE);
+        envConfig.setConfigParam("je.tree.minMemory",
+                                 String.valueOf(MIN_DATA_SIZE));
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        envs[i] = new Environment(dirs[i], envConfig);
+        dbs[i] = envs[i].openDatabase(null, "foo", dbConfig);
+    }
+
+    private void closeAll()
+        throws DatabaseException {
+
+        for (int i = 0; i < N_ENVS; i += 1) {
+            closeOne(i);
+        }
+    }
+
+    private void closeOne(int i)
+        throws DatabaseException {
+
+        if (dbs[i] != null) {
+            dbs[i].close();
+            dbs[i] = null;
+        }
+        if (envs[i] != null) {
+            envs[i].close();
+            envs[i] = null;
+        }
+    }
+
+    /**
+     * Writes enough records in the given envIndex environment to cause at
+     * least minSizeToWrite bytes to be used in the cache.
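+     * Returns the number of records written, i.e. minSizeToWrite /
+     * ENTRY_DATA_SIZE.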
+     */
+    private int write(int envIndex, int minSizeToWrite)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]);
+        int i;
+        for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            dbs[envIndex].put(null, key, data);
+        }
+        checkStatsConsistency();
+        return i;
+    }
+
+    /**
+     * Reads alternating records from each env, reading all records from each
+     * env.  Checks that all environments use roughly equal portions of the
+     * cache.
+     */
+    private void readEvenly(int nRecs)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Repeat reads twice to give the LRU a fighting chance. */
+        for (int repeat = 0; repeat < 2; repeat += 1) {
+            for (int i = 0; i < nRecs; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                for (int j = 0; j < N_ENVS; j += 1) {
+                    if (dbs[j] != null) {
+                        dbs[j].get(null, key, data, null);
+                    }
+                }
+                checkStatsConsistency();
+            }
+        }
+        checkEvenCacheUsage();
+    }
+
+    /**
+     * Checks that each env uses approximately equal portions of the cache.
+     * How equal the portions are depends on the accuracy of the LRU.
+     */
+    private void checkEvenCacheUsage()
+        throws DatabaseException {
+
+        StringBuilder buf = new StringBuilder();
+        long low = Long.MAX_VALUE;
+        long high = 0;
+        for (int i = 0; i < N_ENVS; i += 1) {
+            if (envs[i] != null) {
+                EnvironmentStats stats = envs[i].getStats(null);
+                long val = stats.getCacheTotalBytes();
+                buf.append(" env=" + i + " bytes=" + val);
+                if (low > val) {
+                    low = val;
+                }
+                if (high < val) {
+                    high = val;
+                }
+            }
+        }
+        long pct = (low * 100) / high;
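+        /*
+         * Example: low=90000 and high=100000 gives pct=90; the check passes
+         * when the least-used env holds at least LRU_ACCURACY_PCT percent of
+         * the most-used env's cache bytes.
+         */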
+        assertTrue("failed with pct=" + pct + buf, pct >= LRU_ACCURACY_PCT);
+    }
+
+    /**
+     * Checks that the sum of all env cache usages is the total cache usage,
+     * and other self-consistency checks.
+     */
+    private void checkStatsConsistency()
+        throws DatabaseException {
+
+        if (!sharedCache) {
+            return;
+        }
+        long total = 0;
+        long sharedTotal = -1;
+        int nShared = 0;
+        EnvironmentStats stats = null;
+
+        for (int i = 0; i < N_ENVS; i += 1) {
+            if (envs[i] != null) {
+                stats = envs[i].getStats(null);
+                total += stats.getCacheTotalBytes();
+                nShared += 1;
+                if (sharedTotal == -1) {
+                    sharedTotal = stats.getSharedCacheTotalBytes();
+                } else {
+                    assertEquals(sharedTotal, stats.getSharedCacheTotalBytes());
+                }
+            }
+        }
+        assertEquals(sharedTotal, total);
+        assertTrue(sharedTotal < TOTAL_CACHE_SIZE + (TOTAL_CACHE_SIZE / 10));
+        assertEquals(nShared, stats.getNSharedCacheEnvironments());
+    }
+}
diff --git a/test/com/sleepycat/je/incomp/EmptyBINTest.java b/test/com/sleepycat/je/incomp/EmptyBINTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8c420b423cf3730a968bcea86d78355c7aae4ea4
--- /dev/null
+++ b/test/com/sleepycat/je/incomp/EmptyBINTest.java
@@ -0,0 +1,453 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EmptyBINTest.java,v 1.11.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.incomp;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+/**
+ * Test that searches and cursor traversals execute correctly in the face of
+ * a BIN with 0 entries, and with tree pruning at key points.
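+ *
+ * <p>All scan tests below run against the same tree, built by
+ * openAndInitEmptyMiddleBIN: four BINs holding {0,1} {2,3,4} {empty}
+ * {8,9,10}, where the middle BIN is emptied by deleting values 5-7.</p>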
+ */
+public class EmptyBINTest extends TestCase {
+    private static final boolean DEBUG = false;
+
+    private static final byte DEFAULT_VAL = 100;
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    private boolean useDups;
+    private boolean doPruningAtCursorLevel;
+    private boolean doPruningAtTreeLevel;
+
+    public EmptyBINTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (db != null) {
+            try {
+		db.close();
+	    } catch (DatabaseException ignore) {
+	    }
+        }
+
+        if (env != null) {
+            try {
+		env.close();
+	    } catch (DatabaseException ignore) {
+	    }
+        }
+        env = null;
+        db = null;
+        setName(getName() + "-" +
+		(useDups ? "DUPS" : "!DUPS") +
+		"/" +
+		(doPruningAtCursorLevel ? "CURSORPRUNE" : "!CURSORPRUNE") +
+		"/" +
+		(doPruningAtTreeLevel ? "TREEPRUNE" : "!TREEPRUNE"));
+	super.tearDown();
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /*
+     * Run all tests in six combinations: with and without dups, crossed with
+     * BIN pruning at the cursor level, at the tree level, or not at all
+     * (pruning at both levels at once is skipped below).
+     */
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        boolean[] dupCombo = new boolean[] {true, false};
+        boolean[] pruneCombo = new boolean[] {true, false};
+        for (int dup = 0; dup < dupCombo.length; dup++) {
+            for (int pruneCursor = 0;
+		 pruneCursor < pruneCombo.length;
+		 pruneCursor++) {
+		for (int pruneTree = 0;
+		     pruneTree < pruneCombo.length;
+		     pruneTree++) {
+		    TestSuite suite = new TestSuite(EmptyBINTest.class);
+		    Enumeration e = suite.tests();
+		    while (e.hasMoreElements()) {
+			EmptyBINTest test = (EmptyBINTest) e.nextElement();
+			boolean pruneC = pruneCombo[pruneCursor];
+			boolean pruneT = pruneCombo[pruneTree];
+			if (pruneC && pruneT) {
+			    /* Only do one hook at a time. */
+			    break;
+			}
+			test.init(dupCombo[dup], pruneC, pruneT);
+			allTests.addTest(test);
+		    }
+		}
+	    }
+        }
+        return allTests;
+    }
+
+    private void init(boolean useDups,
+		      boolean doPruningAtCursorLevel,
+		      boolean doPruningAtTreeLevel) {
+        this.useDups = useDups;
+        this.doPruningAtCursorLevel = doPruningAtCursorLevel;
+        this.doPruningAtTreeLevel = doPruningAtTreeLevel;
+        if (DEBUG) {
+            System.out.println("useDups=" + useDups +
+                               " doPruningAtCursorLevel=" +
+			       doPruningAtCursorLevel +
+                               " doPruningAtTreeLevel=" +
+			       doPruningAtTreeLevel);
+        }
+    }
+
+    /* Non-dupes scans across an empty BIN. */
+    public void testScanFromEndOfFirstBin()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         *                        |
+         *   fwd scan starts --- -+
+         * Fwd scan starting at 4.  Expect 4, 8, 9, 10
+         */
+        doScanAcrossEmptyBin(true,      // forward
+                             (byte) 4,  // start
+                             new byte[] {4,8,9,10}); // expected
+    }
+
+    public void testScanFromLeftSideOfEmptyBin()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         *                            |
+         *   scan starts -------------+
+         * Fwd scan starting at 5 (deleted).  Expect 8, 9, 10
+         */
+        doScanAcrossEmptyBin(true,       // forward
+                             (byte) 5,   // start
+                             new byte[] {8,9,10}); // expected
+    }
+
+    public void testScanFromRightSideOfEmptyBin()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         *                                |
+         *   backwards scan starts ------+
+         * Backwards scan starting at 7 (deleted).  Expect 8,4,3,2,1,0
+         */
+        doScanAcrossEmptyBin(false,      // backwards
+                             (byte) 7,   // start
+                             new byte[] {8,4,3,2,1,0}); // expected
+    }
+
+    public void testScanFromBeginningOfLastBin()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         *                                    |
+         *   backwards scan starts -----------+
+         */
+        doScanAcrossEmptyBin(false,      // backwards
+                             (byte) 8,   // start
+                             new byte[] {8,4,3,2,1,0});  // expected vals
+    }
+
+    public void testScanForward()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         * Fwd scan starting with first.  Expect 0, 1, 2, 3, 4, 8, 9, 10.
+         */
+        doScanAcrossEmptyBin(true,    // forward
+                             (byte) -1,
+                             new byte[] {0,1,2,3,4,8,9,10});
+    }
+
+    public void testScanBackwards()
+        throws DatabaseException {
+
+	/*
+         * Tree holds <0,1>  <2,3,4> <empty> <8,9,10>.
+         * Bwd scan starting with last.  Expect 10, 9, 8, 4, 3, 2, 1, 0.
+         */
+        doScanAcrossEmptyBin(false,   // backwards
+                             (byte) -1,
+                             new byte[] {10,9,8,4,3,2,1,0});
+    }
+
+    /**
+     * Scan over an empty BIN that is in the middle of the tree. [#11778]
+     * The tree holds values from 0 - 10. Values 5, 6, 7 have been deleted.
+     * @param forward if true, scan forward with getNext(); otherwise scan
+     * backward with getPrev().
+     * @param startKey if >= 0, initialize the cursor with a search
+     * (getSearchKey[Range], or getSearchBoth[Range] for dups); otherwise
+     * start from getFirst()/getLast().
+     * @param expectVals the values expected to be found, in scan order.
+     */
+    private void doScanAcrossEmptyBin(boolean forward,
+				      byte startKey,
+                                      byte[] expectVals)
+        throws DatabaseException {
+
+        int deleteStartVal = 5;
+        int deleteEndVal = 7;
+        openAndInitEmptyMiddleBIN(deleteStartVal, deleteEndVal);
+
+        if (DEBUG) {
+	    DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+        }
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+	/*
+	 * Position a cursor and check that we get the expected values.
+	 */
+	int cnt = 0;
+        Cursor cursor = db.openCursor(null, null);
+        CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor);
+
+        if (doPruningAtCursorLevel) {
+            cursorImpl.setTestHook(new PruningHook(env));
+        }
+
+        if (doPruningAtTreeLevel) {
+            DbInternal.dbGetDatabaseImpl(db).getTree().
+		setSearchHook(new PruningHook(env));
+        }
+
+        int expectIndex = 0;
+	if (startKey < 0) {
+	    if (forward) {
+		assertEquals(OperationStatus.SUCCESS,
+                             cursor.getFirst(key, data, null));
+	    } else {
+		assertEquals(OperationStatus.SUCCESS,
+                             cursor.getLast(key, data, null));
+	    }
+	} else {
+            if (useDups) {
+                key.setData(new byte[] {DEFAULT_VAL});
+                data.setData(new byte[] {startKey});
+            } else {
+                key.setData(new byte[] { startKey });
+            }
+
+	    if ((startKey >= deleteStartVal) &&
+		(startKey <= deleteEndVal)) {
+		/* Test range query. */
+                if (useDups) {
+                    assertEquals(OperationStatus.SUCCESS,
+                                 cursor.getSearchBothRange(key, data, null));
+                } else {
+                    assertEquals(OperationStatus.SUCCESS,
+                                 cursor.getSearchKeyRange(key, data, null));
+                }
+	    } else {
+		/* Test from getSearchKey(). */
+                if (useDups) {
+                    assertEquals(OperationStatus.SUCCESS,
+                                 cursor.getSearchBoth(key, data, null));
+                } else {
+                    assertEquals(OperationStatus.SUCCESS,
+                                 cursor.getSearchKey(key, data, null));
+                }
+	    }
+	}
+
+        OperationStatus status;
+        do {
+            cnt++;
+
+            /* check value. */
+            if (DEBUG) {
+                System.out.println("=>key=" + key.getData()[0] +
+                                   " data=" + data.getData()[0]);
+            }
+            if (useDups) {
+                assertEquals(expectVals[expectIndex++], data.getData()[0]);
+            } else {
+                assertEquals(expectVals[expectIndex++], key.getData()[0]);
+            }
+
+	    if (forward) {
+		status = cursor.getNext(key, data, null);
+	    } else {
+		status = cursor.getPrev(key, data, null);
+            }
+        } while (status == OperationStatus.SUCCESS);
+
+	assertEquals(expectVals.length, cnt);
+	cursor.close();
+        closeEnv();
+    }
+
+    /**
+     * Create a tree with:
+     *                         IN
+     *                      /     \
+     *                    IN       IN
+     *                    / \     /   \
+     *                BIN1 BIN2  BIN3 BIN4
+     *
+     * where BIN1 has values 0,1
+     *       BIN2 has values 2,3,4
+     *       BIN3 has values 5,6,7
+     *       BIN4 has values 8,9,10
+     * Depending on configuration (useDups), the values live in the main tree
+     * or in a duplicate tree.  The entries from deleteStartVal through
+     * deleteEndVal (5-7 here, i.e. all of BIN3) are then deleted, leaving an
+     * empty BIN in the middle of the tree.
+     */
+    private void openAndInitEmptyMiddleBIN(int deleteStartVal,
+                                           int deleteEndVal)
+        throws DatabaseException {
+
+        openEnv(false, "4");
+        DatabaseEntry data = new DatabaseEntry();
+        data.setData(new byte[] {DEFAULT_VAL});
+        DatabaseEntry key = new DatabaseEntry();
+        key.setData(new byte[] {DEFAULT_VAL});
+
+        /* Create four BINs */
+        OperationStatus status;
+        for (int i = 0; i < 11; i++) {
+            if (useDups) {
+                data = new DatabaseEntry(new byte[] { (byte) i });
+            } else {
+                key = new DatabaseEntry(new byte[] { (byte) i });
+            }
+            status = db.put(null, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+	}
+
+        /* Empty out one of the middle ones. */
+        if (useDups) {
+            Cursor cursor = db.openCursor(null, null);
+            data = new DatabaseEntry(new byte[] { (byte) deleteStartVal });
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getSearchBoth(key, data, LockMode.DEFAULT));
+            for (int i = deleteStartVal; i <= deleteEndVal; i++) {
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.delete());
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.getNext(key, data, LockMode.DEFAULT));
+            }
+            cursor.close();
+        } else {
+            for (int i = deleteStartVal; i <= deleteEndVal; i++) {
+                key = new DatabaseEntry(new byte[] { (byte) i });
+                status = db.delete(null, key);
+                assertEquals(OperationStatus.SUCCESS, status);
+            }
+        }
+
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+    }
+
+    /**
+     * Opens the environment and db.
+     */
+    private void openEnv(boolean transactional, String nodeMax)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(transactional);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "true");
+	if (nodeMax != null) {
+	    envConfig.setConfigParam
+		(EnvironmentParams.NODE_MAX.getName(), nodeMax);
+	    envConfig.setConfigParam
+		(EnvironmentParams.NODE_MAX_DUPTREE.getName(), nodeMax);
+	}
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(transactional);
+        dbConfig.setSortedDuplicates(useDups);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    /**
+     * Closes the db and environment.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+
+    private static class PruningHook implements TestHook {
+        Environment env;
+
+        PruningHook(Environment env) {
+            this.env = env;
+        }
+
+        public void doHook() {
+	    DbInternal.envGetEnvironmentImpl(env).getINCompressor().
+		wakeup();
+	    Thread.yield();
+	    try {
+		Thread.sleep(100);
+	    } catch (Throwable ignored) {
+		/* Best-effort delay only; interruption is harmless here. */
+	    }
+        }
+
+        public Object getHookValue() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void doIOHook() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        public void hookSetup() {
+            throw new UnsupportedOperationException();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/incomp/INCompressorTest.java b/test/com/sleepycat/je/incomp/INCompressorTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f44a78ed12c2fb303f218e08b0d6756a03affa79
--- /dev/null
+++ b/test/com/sleepycat/je/incomp/INCompressorTest.java
@@ -0,0 +1,923 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INCompressorTest.java,v 1.20.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.incomp;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Test that BIN compression occurs in the various ways it is supposed to.
+ * <p>These are:</p>
+ * <ul>
+ * <li>transactional and non-transactional delete,</li>
+ * <li>delete duplicates and non-duplicates,</li>
+ * <li>removal of empty sub-trees (duplicates and non-duplicates),</li>
+ * <li>compression of BIN for deleted DIN subtree,</li>
+ * <li>removal of empty BIN after deleting a DIN subtree,</li>
+ * <li>undo causes compression of inserted LN during abort and recovery,</li>
+ * <li>redo causes compression of deleted LN during recovery.</li>
+ * </ul>
+ *
+ * <p>Also test that compression retries occur after we attempt to compress but
+ * cannot because:</p>
+ * <ul>
+ * <li>cursors are open on the BIN when the compressor dequeues them,</li>
+ * <li>cursors are open when attempting to delete a sub-tree (dup and non-dup
+ * are two separate code paths),</li>
+ * <li>a deleted key is locked during compression (NOT TESTED - this is very
+ * difficult to reproduce).</li>
+ * </ul>
+ *
+ * <p>Possible problem:  When we attempt to delete a subtree because the BIN is
+ * empty, we give up when NodeNotEmptyException is thrown by the search.
+ * However, this is thrown not only when entries have been added but also when
+ * there are cursors on the BIN; it seems like we should retry in the latter
+ * case.  Or is it impossible to have a cursor on an empty BIN?</p>
+ *
+ * <p>We do not test here the last-ditch effort to compress to make room in
+ * IN.insertEntry1; that should never happen in theory, so I don't think it
+ * is worthwhile to try to reproduce it.</p>
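+ *
+ * <p>A minimal sketch of the pattern these tests repeat (illustrative only,
+ * not itself a test): delete a record, release all locks, then compress
+ * explicitly:</p>
+ * <pre>
+ *     status = cursor.delete();
+ *     cursor.close();
+ *     txn.commit();    // or txn.abort() for the insert-undo cases
+ *     env.compress();  // only now can the BIN slot be removed
+ * </pre>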
+ */
+public class INCompressorTest extends TestCase {
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private IN in;
+    private BIN bin;
+    private DBIN dbin;
+    /* Keys/data 0-2; keys 0 and 1 are the two records kept in the first BIN. */
+    private DatabaseEntry entry0 = new DatabaseEntry(new byte[] {0});
+    private DatabaseEntry entry1 = new DatabaseEntry(new byte[] {1});
+    private DatabaseEntry entry2 = new DatabaseEntry(new byte[] {2});
+    private DatabaseEntry keyFound = new DatabaseEntry();
+    private DatabaseEntry dataFound = new DatabaseEntry();
+
+    public INCompressorTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try { env.close(); } catch (DatabaseException ignored) { }
+        }
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        env = null;
+        db = null;
+        in = null;
+        bin = null;
+        dbin = null;
+        entry0 = null;
+        entry1 = null;
+        entry2 = null;
+        keyFound = null;
+        dataFound = null;
+    }
+
+    public void testDeleteTransactional()
+        throws DatabaseException {
+
+        /* Transactional no-dups, 2 keys. */
+        openAndInit(true, false);
+        OperationStatus status;
+
+        /* Cursor appears on BIN. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Closing the cursor without commit does not compress. */
+        cursor.close();
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Commit without calling compress does not compress. */
+        txn.commit();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 1, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testDeleteNonTransactional()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+        OperationStatus status;
+
+        /* Cursor appears on BIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 1, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testDeleteDuplicate()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+        OperationStatus status;
+
+        /* Cursor appears on DBIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 1, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        closeEnv();
+    }
+
+    public void testRemoveEmptyBIN()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+        OperationStatus status;
+
+        /* Cursor appears on BIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.getNext(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 0, 0);
+
+        /* BIN is empty so parent entry should be gone also. */
+        assertEquals(1, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testRemoveEmptyDBIN()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+        OperationStatus status;
+
+        /* Cursor appears on DBIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.getNext(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 0, 0);
+
+        /* BIN parent should have one less entry. */
+        assertEquals(2, in.getNEntries());
+        checkBinEntriesAndCursors(bin, 1, 0);
+
+        closeEnv();
+    }
+
+    public void testRemoveEmptyDBINandBIN()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+        OperationStatus status;
+
+        /* Delete key 1, cursor appears on BIN, no compression yet. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getSearchKey(entry1, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Move cursor to 1st dup, cursor moves to DBIN, no compression yet. */
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Delete the duplicates for key 0, no compression yet. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.getNext(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+	/*
+	 * Do this twice.  The test is depending on the iterator in
+	 * doCompress() getting the DBINReference first and the BINReference
+	 * second.  In JRockit, it's the opposite so the compress of the BIN
+	 * doesn't do any good on the first time around.  So take two
+	 * iterations to get the job done.
+	 */
+        env.compress();
+
+        checkBinEntriesAndCursors(bin, 0, 0);
+        checkBinEntriesAndCursors(dbin, 0, 0);
+
+        /* BIN is empty so parent entry should be gone also. */
+        assertEquals(1, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testAbortInsert()
+        throws DatabaseException {
+
+        /* Transactional no-dups, 2 keys. */
+        openAndInit(true, false);
+
+        /* Add key 2, cursor appears on BIN. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        cursor.put(entry2, entry0);
+        checkBinEntriesAndCursors(bin, 3, 1);
+
+        /* Closing the cursor without abort does not compress. */
+        cursor.close();
+        env.compress();
+        checkBinEntriesAndCursors(bin, 3, 0);
+
+        /* Abort without calling compress does not compress. */
+        txn.abort();
+        checkBinEntriesAndCursors(bin, 3, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testAbortInsertDuplicate()
+        throws DatabaseException {
+
+        /* Transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(true, true);
+
+        /* Add datum 2 for key 0, cursor appears on DBIN. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        cursor.put(entry0, entry2);
+        checkBinEntriesAndCursors(bin, 2, 1);
+        checkBinEntriesAndCursors(dbin, 3, 1);
+
+        /* Closing the cursor without abort does not compress. */
+        cursor.close();
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 3, 0);
+
+        /* Abort without calling compress does not compress. */
+        txn.abort();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 3, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testRollBackInsert()
+        throws DatabaseException {
+
+        /* Transactional no-dups, 2 keys. */
+        openAndInit(true, false);
+
+        /* Add key 2, cursor appears on BIN. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        cursor.put(entry2, entry0);
+        checkBinEntriesAndCursors(bin, 3, 1);
+
+        /* Closing the cursor without abort does not compress. */
+        cursor.close();
+        env.compress();
+        checkBinEntriesAndCursors(bin, 3, 0);
+
+        /* Checkpoint to preserve internal nodes through recovery. */
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+
+        /* Abort without calling compress does not compress. */
+        txn.abort();
+        checkBinEntriesAndCursors(bin, 3, 0);
+
+        /*
+         * Shutdown and reopen to run recovery. This will call a checkpoint,
+         * but it doesn't compress because the child is not resident.
+         */
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        env = null;
+        openEnv(true, false, null);
+        initInternalNodes();
+        checkBinEntriesAndCursors(bin, 3, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        closeEnv();
+    }
+
+    public void testRollBackInsertDuplicate()
+        throws DatabaseException {
+
+        /* Transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(true, true);
+
+        /* Add datum 2 for key 0, cursor appears on DBIN. */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        cursor.put(entry0, entry2);
+        checkBinEntriesAndCursors(bin, 2, 1);
+        checkBinEntriesAndCursors(dbin, 3, 1);
+
+        /* Closing the cursor without abort does not compress. */
+        cursor.close();
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 3, 0);
+
+        /* Checkpoint to preserve internal nodes through recovery. */
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+
+        /* Abort without calling compress does not compress. */
+        txn.abort();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 3, 0);
+
+        /*
+         * Shutdown and reopen to run recovery. This will call a checkpoint,
+         * but it doesn't compress because the child is not resident.
+         */
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        env = null;
+        openEnv(true, true, null);
+        initInternalNodes();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 3, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 0);
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testRollForwardDelete()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+        OperationStatus status;
+
+        /* Checkpoint to preserve internal nodes through recovery. */
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+
+        /* Cursor appears on BIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /*
+         * Shutdown and reopen to run recovery. This will call a checkpoint,
+         * but it doesn't compress because the child is not resident.
+         */
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        openEnv(false, false, null);
+        initInternalNodes();
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(bin, 1, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+
+        closeEnv();
+    }
+
+    public void testRollForwardDeleteDuplicate()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+        OperationStatus status;
+
+        /* Checkpoint to preserve internal nodes through recovery. */
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+
+        /* Cursor appears on DBIN. */
+        Cursor cursor = db.openCursor(null, null);
+        status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Delete without closing the cursor does not compress. */
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 2, 1);
+
+        /* Closing the cursor without calling compress does not compress. */
+        cursor.close();
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /*
+         * Shutdown and reopen to run recovery. This will call a checkpoint,
+         * but it doesn't compress because the child is not resident.
+         */
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        openEnv(false, true, null);
+        initInternalNodes();
+        checkBinEntriesAndCursors(dbin, 2, 0);
+
+        /* Finally compress can compress. */
+        env.compress();
+        checkBinEntriesAndCursors(dbin, 1, 0);
+
+        /* Should be no change in parent nodes. */
+        assertEquals(2, in.getNEntries());
+        checkBinEntriesAndCursors(bin, 2, 0);
+
+        closeEnv();
+    }
+
+    /**
+     * Test that we can handle cases where lazy compression runs first, but the
+     * daemon handles pruning.  Testing against BINs.
+     */
+    public void testLazyPruning()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+
+        deleteAndLazyCompress(false);
+
+        /* Now compress, empty BIN should disappear. */
+        env.compress();
+        checkINCompQueueSize(0);
+        assertEquals(1, in.getNEntries());
+
+        closeEnv();
+    }
+
+    /**
+     * Test that we can handle cases where lazy compression runs first, but the
+     * daemon handles pruning.  Testing against DBINs.  [#11778]
+     */
+    public void testLazyPruningDups()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+
+        deleteAndLazyCompress(true);
+
+        /* Now compress, empty DBIN should disappear. */
+        env.compress();
+	/* Compress again. Empty BIN should disappear. */
+	env.compress();
+        checkINCompQueueSize(0);
+        assertEquals(1, in.getNEntries());
+
+        closeEnv();
+    }
+
+    /**
+     * Scan over an empty DBIN.  [#11778]
+     */
+    public void testEmptyInitialDBINScan()
+        throws DatabaseException {
+
+        /* Non-transactional dups, 2 keys and 2 dups for 1st key. */
+        openAndInit(false, true);
+
+        deleteAndLazyCompress(true);
+
+	/*
+	 * Have IN with two entries, first entry is BIN with 1 entry.  That
+	 * entry is DIN with 1 entry.  That entry is a DBIN with 0 entries.
+	 * Position the cursor at the first entry so that we move over that
+	 * zero-entry DBIN.
+	 */
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
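+	/* The first surviving record is key 64, the first entry of the
+	   second BIN created by the split in openAndInit. */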
+	assertTrue(keyFound.getData()[0] == 64);
+	cursor.close();
+        closeEnv();
+    }
+
+    /**
+     * Scan over an empty BIN.  This looks very similar to
+     * com.sleepycat.je.test.SR11297Test. [#11778]
+     */
+    public void testEmptyInitialBINScan()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+
+        deleteAndLazyCompress(false);
+
+	/*
+	 * Have IN with two entries, first entry is BIN with 0 entries.
+	 * Position the cursor at the first entry so that we move over that
+	 * zero-entry BIN.
+	 */
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+	assertTrue(keyFound.getData()[0] == 64);
+	cursor.close();
+        closeEnv();
+    }
+
+    /**
+     * Test that pruning copes with a record being re-inserted into the empty
+     * BIN before the compressor runs (the NodeNotEmptyException case).
+     */
+    public void testNodeNotEmpty()
+        throws DatabaseException {
+
+        /* Non-transactional no-dups, 2 keys. */
+        openAndInit(false, false);
+
+        deleteAndLazyCompress(false);
+
+        /*
+         * We now have an entry on the compressor queue, but let's re-insert a
+         * value to make pruning hit the NodeNotEmptyException case.
+         */
+        assertEquals(OperationStatus.SUCCESS, db.put(null, entry0, entry0));
+        checkBinEntriesAndCursors(bin, 1, 0);
+
+        env.compress();
+        assertEquals(2, in.getNEntries());
+        checkINCompQueueSize(0);
+
+        closeEnv();
+    }
+
+    /* TODO: Check cursor movement across an empty BIN. */
+
+    /* Delete all records from the first bin and invoke lazy compression. */
+    private void deleteAndLazyCompress(boolean doDups)
+        throws DatabaseException {
+
+        /* Position the cursor at the first BIN and delete both keys. */
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        checkBinEntriesAndCursors(bin, 2, 1);
+
+        status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = cursor.getNext(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+	status = cursor.delete();
+	assertEquals(OperationStatus.SUCCESS, status);
+	if (doDups) {
+	    status = cursor.getNext(keyFound, dataFound, null);
+	    assertEquals(OperationStatus.SUCCESS, status);
+	    status = cursor.delete();
+	    assertEquals(OperationStatus.SUCCESS, status);
+	}
+        cursor.close();
+
+        /*
+	 * Do lazy compression, leaving behind an empty BIN (and DBIN if dups.)
+	 */
+        checkINCompQueueSize(doDups ? 2 : 1);
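+        /* Queue holds a BINReference, plus a DBINReference for dups. */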
+        CheckpointConfig config = new CheckpointConfig();
+        config.setForce(true);
+        env.checkpoint(config);
+        checkBinEntriesAndCursors((doDups ? dbin : bin), 0, 0);
+
+        /* BIN is empty but tree pruning hasn't happened. */
+        assertEquals(2, in.getNEntries());
+        checkINCompQueueSize(1);
+    }
+
+    /**
+     * Checks for expected entry and cursor counts on the given BIN or DBIN.
+     */
+    private void checkBinEntriesAndCursors(BIN checkBin,
+                                           int nEntries,
+                                           int nCursors)
+        throws DatabaseException {
+
+        assertEquals("nEntries", nEntries, checkBin.getNEntries());
+        assertEquals("nCursors", nCursors, checkBin.nCursors());
+    }
+
+    /**
+     * Check expected size of the INCompressor queue.
+     */
+    private void checkINCompQueueSize(int expected)
+        throws DatabaseException {
+
+        assertEquals(expected,
+           DbInternal.envGetEnvironmentImpl(env).getINCompressorQueueSize());
+    }
+
+    /**
+     * Opens the environment and db and writes 2 records (3 if dups are used).
+     *
+     * <p>Without dups: {0,0}, {1,0}. This gives two LNs in the main tree.</p>
+     *
+     * <p>With dups: {0,0}, {0,1}, {1,0}. This gives one LN in the main tree,
+     * and a dup tree with two LNs.</p>
+     */
+    private void openAndInit(boolean transactional, boolean dups)
+        throws DatabaseException {
+
+        openEnv(transactional, dups, null);
+
+        /*
+         * We need at least 2 BINs, otherwise empty BINs won't be deleted.  So
+         * we add keys until the BIN splits, then delete everything in the
+         * first BIN except the first two keys.  Those are the keys we'll use
+         * for testing, and are key values 0 and 1.
+         */
+        BIN firstBin = null;
+        OperationStatus status;
+        for (int i = 0;; i += 1) {
+            DatabaseEntry key = new DatabaseEntry(new byte[] { (byte) i });
+            status = db.put(null, key, entry0);
+            assertEquals(OperationStatus.SUCCESS, status);
+            Cursor cursor = db.openCursor(null, null);
+            status = cursor.getLast(keyFound, dataFound, null);
+            assertEquals(OperationStatus.SUCCESS, status);
+            BIN b = DbInternal.getCursorImpl(cursor).getBIN();
+            cursor.close();
+            if (firstBin == null) {
+                firstBin = b;
+            } else if (firstBin != b) {
+                /* Now delete all but the first two keys in the first BIN. */
+                while (firstBin.getNEntries() > 2) {
+                    cursor = db.openCursor(null, null);
+                    keyFound.setData(entry2.getData());
+                    status =
+			cursor.getSearchKeyRange(keyFound, dataFound, null);
+                    assertEquals(OperationStatus.SUCCESS, status);
+                    cursor.close();
+                    status = db.delete(null, keyFound);
+                    assertEquals(OperationStatus.SUCCESS, status);
+                    env.compress();
+                }
+                break;
+            }
+        }
+
+        /* Write dup records. */
+        if (dups) {
+            status = db.put(null, entry0, entry1);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /* Set in, bin, dbin. */
+        initInternalNodes();
+        assertSame(bin, firstBin);
+
+        /* Check that all tree nodes are populated. */
+        assertEquals(2, in.getNEntries());
+        checkBinEntriesAndCursors(bin, 2, 0);
+        if (dups) {
+            checkBinEntriesAndCursors(dbin, 2, 0);
+        } else {
+            assertNull(dbin);
+        }
+    }
+
+    /**
+     * Initialize in, bin, dbin.
+     */
+    private void initInternalNodes()
+        throws DatabaseException {
+
+        /* Find the BIN/DBIN. */
+        Cursor cursor = db.openCursor(null, null);
+        OperationStatus status = cursor.getFirst(keyFound, dataFound, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        bin = DbInternal.getCursorImpl(cursor).getBIN();
+        dbin = DbInternal.getCursorImpl(cursor).getDupBIN();
+        cursor.close();
+
+        /* Find the IN parent of the BIN. */
+        bin.latch();
+        in = DbInternal.dbGetDatabaseImpl(db).
+            getTree().getParentINForChildIN(bin, true, CacheMode.DEFAULT).
+            parent;
+        assertNotNull(in);
+        in.releaseLatch();
+    }
+
+    /**
+     * Opens the environment and db.
+     */
+    private void openEnv(boolean transactional, boolean dups, String nodeMax)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(transactional);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+	if (nodeMax != null) {
+	    envConfig.setConfigParam
+		(EnvironmentParams.NODE_MAX.getName(), nodeMax);
+	    envConfig.setConfigParam
+		(EnvironmentParams.NODE_MAX_DUPTREE.getName(), nodeMax);
+	}
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(transactional);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    /**
+     * Closes the db and environment.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/je.properties b/test/com/sleepycat/je/je.properties
new file mode 100644
index 0000000000000000000000000000000000000000..7e85a8d7e85bd18a1bb849c766af9ef90d508d92
--- /dev/null
+++ b/test/com/sleepycat/je/je.properties
@@ -0,0 +1,4 @@
+# $Id: je.properties,v 1.3 2004/03/30 17:20:54 linda Exp $
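+# Test-only overrides: je.env.recovery=false skips recovery at environment
+# open; the tiny log buffer budget split across many buffers is presumably
+# meant to exercise buffer-boundary code paths.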
+je.env.recovery = false
+je.log.totalBufferBytes=7001
+je.log.numBuffers=200
diff --git a/test/com/sleepycat/je/jmx/MBeanTest.java b/test/com/sleepycat/je/jmx/MBeanTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..552d468ea24dfba6c3a2dc2c329b75d0aba8b980
--- /dev/null
+++ b/test/com/sleepycat/je/jmx/MBeanTest.java
@@ -0,0 +1,503 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MBeanTest.java,v 1.18.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.jmx;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.management.Attribute;
+import javax.management.DynamicMBean;
+import javax.management.JMException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanConstructorInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanNotificationInfo;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanParameterInfo;
+
+import jmx.JEApplicationMBean;
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Instantiate and exercise the JEMonitor.
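+ *
+ * <p>For context, a sketch (assumed usage, not something this test does) of
+ * how an application could publish the monitor through the platform MBean
+ * server:</p>
+ * <pre>
+ *     MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+ *     server.registerMBean(new JEMonitor(envHomeDir),  // envHomeDir: your env path
+ *                          new ObjectName("com.sleepycat.je:type=JEMonitor"));
+ * </pre>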
+ */
+public class MBeanTest extends TestCase {
+
+    private static final boolean DEBUG = true;
+    private File envHome;
+    private String environmentDir;
+
+    public MBeanTest() {
+        environmentDir = System.getProperty(TestUtils.DEST_DIR);
+        envHome = new File(environmentDir);
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("tearDown", envHome, true);
+    }
+
+    /**
+     * Test an mbean which is prohibited from configuring and opening an
+     * environment.
+     */
+    public void testNoOpenMBean()
+        throws Throwable {
+
+        Environment env = null;
+        try {
+
+            /* Environment is not open, and we can't open. */
+            DynamicMBean mbean = new JEMonitor(environmentDir);
+            validateGetters(mbean, 2);
+            validateOperations(mbean, 0, true, null, null);
+
+            /* Now open the environment transactionally by other means. */
+            env = openEnv(true);
+            validateGetters(mbean, 2); // alas, takes two refreshes to
+            validateGetters(mbean, 9); // see the change.
+            validateOperations(mbean, 8, true, null, null);
+
+            /* Close the environment. */
+            env.close();
+            validateGetters(mbean, 2);
+            validateOperations(mbean, 0, true, null, null);
+
+            /*
+             * Try this kind of mbean against an environment that's already
+             * open.
+             */
+            env = openEnv(true);
+            mbean = new JEMonitor(environmentDir);
+            validateGetters(mbean, 9);
+            validateOperations(mbean, 8, true, null, null);
+
+            /*
+             * Getting database stats against a non-existing db ought to
+             * throw an exception.
+             */
+            try {
+                validateOperations(mbean, 8, true, "bozo", null);
+                fail("Should not have run stats on a non-existent db");
+            } catch (MBeanException expected) {
+                // ignore
+            }
+
+            /*
+             * Make sure the vanilla db open within the helper can open
+             * a db created with a non-default configuration.
+             */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database db = env.openDatabase(null, "bozo", dbConfig);
+
+            /* insert a record. */
+            DatabaseEntry entry = new DatabaseEntry();
+            IntegerBinding.intToEntry(1, entry);
+            db.put(null, entry, entry);
+
+            validateOperations(mbean, 8, true, "bozo", new String[] {"bozo"});
+            db.close();
+
+            env.close();
+            validateGetters(mbean, 2);
+            validateOperations(mbean, 0, true, null, null);
+
+            checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            if (env != null) {
+                env.close();
+            }
+            throw t;
+        }
+    }
+
+    /**
+     * MBean which can configure and open an environment.
+     */
+    public void testOpenableBean()
+        throws Throwable {
+
+        Environment env = null;
+        try {
+            /* Environment is not open, and we can open. */
+            env = openEnv(false);
+            env.close();
+
+            DynamicMBean mbean = new JEApplicationMBean(environmentDir);
+            validateGetters(mbean, 5);
+            validateOperations(mbean, 1, false, null, null); // don't invoke
+
+            /* Open the environment. */
+            mbean.invoke(JEApplicationMBean.OP_OPEN, null, null);
+
+            validateGetters(mbean, 7);
+            validateOperations(mbean, 8, true, null, null);
+
+            /*
+             * The last call to validateOperations ended up closing the
+             * environment.
+             */
+            validateGetters(mbean, 5);
+            validateOperations(mbean, 1, false, null, null);
+
+            /* Should be no open handles. */
+            checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+
+            if (env != null) {
+                env.close();
+            }
+            throw t;
+        }
+    }
+
+    /**
+     * Exercise setters.
+     */
+    public void testMBeanSetters()
+        throws Throwable {
+
+        Environment env = null;
+        try {
+            /* Mimic an application by opening an environment. */
+            env = openEnv(false);
+
+            /* Open an mbean and set the environment home. */
+            DynamicMBean mbean = new JEMonitor(environmentDir);
+
+            /*
+             * Try setting different attributes. Check against the
+             * initial value, and the value after setting.
+             */
+            EnvironmentConfig config = env.getConfig();
+            Class configClass = config.getClass();
+
+            Method getCacheSize =
+                configClass.getMethod("getCacheSize", (Class[]) null);
+            checkAttribute(env,
+                           mbean,
+                           getCacheSize,
+                           JEMBeanHelper.ATT_CACHE_SIZE,
+                           new Long(100000)); // new value
+
+            Method getCachePercent =
+                configClass.getMethod("getCachePercent", (Class[]) null);
+            checkAttribute(env,
+                           mbean,
+                           getCachePercent,
+                           JEMBeanHelper.ATT_CACHE_PERCENT,
+                           new Integer(10));
+            env.close();
+
+            checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+
+            if (env != null) {
+                env.close();
+            }
+
+            throw t;
+        }
+    }
+
+    private void checkAttribute(Environment env,
+                                DynamicMBean mbean,
+                                Method configMethod,
+                                String attributeName,
+                                Object newValue)
+        throws Exception {
+        /* check starting value. */
+        EnvironmentConfig config = env.getConfig();
+        Object result = configMethod.invoke(config, (Object[]) null);
+        assertTrue(!result.toString().equals(newValue.toString()));
+
+        /* set through mbean */
+        mbean.setAttribute(new Attribute(attributeName, newValue));
+
+        /* check present environment config. */
+        config = env.getConfig();
+        assertEquals(newValue.toString(),
+                     configMethod.invoke(config, (Object[]) null).toString());
+
+        /* check through mbean. */
+        Object mbeanNewValue = mbean.getAttribute(attributeName);
+        assertEquals(newValue.toString(), mbeanNewValue.toString());
+    }
+
+    /*
+     * Check that the expected number of attributes have non-null values,
+     * and that each value is of its declared type.
+     */
+    private void validateGetters(DynamicMBean mbean,
+                                 int numExpectedAttributes)
+        throws Throwable {
+
+        MBeanInfo info = mbean.getMBeanInfo();
+
+        MBeanAttributeInfo[] attrs = info.getAttributes();
+
+        /* test getters. */
+        int attributesWithValues = 0;
+        for (int i = 0; i < attrs.length; i++) {
+            String name = attrs[i].getName();
+            Object result = mbean.getAttribute(name);
+            if (DEBUG) {
+                System.out.println("Attribute " + i +
+                                   " name=" + name +
+                                   " result=" + result);
+            }
+            if (result != null) {
+                attributesWithValues++;
+                checkObjectType
+                    ("Attribute", name, attrs[i].getType(), result);
+            }
+        }
+
+        assertEquals(numExpectedAttributes, attributesWithValues);
+    }
+
+    /*
+     * Check that there are the expected number of operations.
+     * If specified, invoke and check the results.
+     * @param tryInvoke if true, invoke the operations.
+     * @param databaseName if not null, execute the database specific
+     * operations using the database name.
+     */
+    private void validateOperations(DynamicMBean mbean,
+                                    int numExpectedOperations,
+                                    boolean tryInvoke,
+                                    String databaseName,
+                                    String[] expectedDatabases)
+        throws Throwable {
+
+        MBeanInfo info = mbean.getMBeanInfo();
+
+        MBeanOperationInfo[] ops = info.getOperations();
+        if (DEBUG) {
+            for (int i = 0; i < ops.length; i++) {
+                System.out.println("op: " + ops[i].getName());
+            }
+        }
+        assertEquals(numExpectedOperations, ops.length);
+
+        if (tryInvoke) {
+            for (int i = 0; i < ops.length; i++) {
+                String opName = ops[i].getName();
+
+                /* Try the per-database operations if specified. */
+                if ((databaseName != null) &&
+                    opName.equals(JEMBeanHelper.OP_DB_STAT)) {
+                    /* invoke with the name of the database. */
+                    Object result = mbean.invoke
+                        (opName,
+                         new Object[] {null, null, databaseName},
+                         null);
+                    assertTrue(result instanceof BtreeStats);
+                    checkObjectType
+                        ("Operation", opName, ops[i].getReturnType(), result);
+                }
+
+                if ((expectedDatabases != null) &&
+                    opName.equals(JEMBeanHelper.OP_DB_NAMES)) {
+                    Object result = mbean.invoke(opName, null, null);
+                    List names = (List) result;
+                    assertTrue(Arrays.equals(expectedDatabases,
+                                             names.toArray()));
+                    checkObjectType
+                        ("Operation", opName, ops[i].getReturnType(), result);
+                }
+
+                /*
+                 * Also invoke all operations with null params, to sanity
+                 * check.
+                 */
+                Object result = mbean.invoke(opName, null, null);
+                if (result != null) {
+                    checkObjectType
+                        ("Operation", opName, ops[i].getReturnType(), result);
+                }
+            }
+        }
+    }
+
+    /**
+     * Checks that all parameters and return values are Serializable to
+     * support JMX over RMI.
+     */
+    public void testSerializable()
+        throws JMException, DatabaseException {
+
+        /* Create and close the environment. */
+        Environment env = openEnv(false);
+        env.close();
+
+        /* Test without an open environment. */
+        DynamicMBean mbean = new JEApplicationMBean(environmentDir);
+        doTestSerializable(mbean);
+
+        /* Test with an open environment. */
+        mbean.invoke(JEApplicationMBean.OP_OPEN, null, null);
+        doTestSerializable(mbean);
+
+        /* Close. */
+        mbean.invoke(JEApplicationMBean.OP_CLOSE, null, null);
+    }
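+
+    /*
+     * For background (an illustrative sketch only, not part of this test):
+     * the Serializable requirement checked here exists because JMX clients
+     * may access these mbeans remotely, in which case attribute values and
+     * operation results are serialized across the connection.  A
+     * hypothetical registration might look like:
+     *
+     *   MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+     *   server.registerMBean(new JEMonitor(environmentDir),
+     *                        new ObjectName("com.sleepycat.je:type=JEMonitor"));
+     *
+     * A remote client reading attributes through an RMI connector would
+     * then receive serialized copies of the values validated below.
+     */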
+
+    /**
+     * Checks that all types for the given mbean are serializable.
+     */
+    private void doTestSerializable(DynamicMBean mbean) {
+
+        MBeanInfo info = mbean.getMBeanInfo();
+
+        MBeanAttributeInfo[] attrs = info.getAttributes();
+        for (int i = 0; i < attrs.length; i++) {
+            checkSerializable
+                ("Attribute", attrs[i].getName(), attrs[i].getType());
+        }
+
+        MBeanOperationInfo[] ops = info.getOperations();
+        for (int i = 0; i < ops.length; i += 1) {
+            checkSerializable
+                ("Operation",
+                 ops[i].getName() + " return type",
+                 ops[i].getReturnType());
+            MBeanParameterInfo[] params = ops[i].getSignature();
+            for (int j = 0; j < params.length; j += 1) {
+                checkSerializable
+                    ("Operation",
+                     ops[i].getName() + " parameter " + j,
+                     params[j].getType());
+            }
+        }
+
+        MBeanConstructorInfo[] ctors = info.getConstructors();
+        for (int i = 0; i < ctors.length; i++) {
+            MBeanParameterInfo[] params = ctors[i].getSignature();
+            for (int j = 0; j < params.length; j += 1) {
+                checkSerializable
+                    ("Constructor",
+                     ctors[i].getName() + " parameter " + j,
+                     params[j].getType());
+            }
+        }
+
+        MBeanNotificationInfo[] notifs = info.getNotifications();
+        for (int i = 0; i < notifs.length; i++) {
+            String[] types = notifs[i].getNotifTypes();
+            for (int j = 0; j < types.length; j += 1) {
+                checkSerializable
+                    ("Notification", notifs[i].getName(), types[j]);
+            }
+        }
+    }
+
+    /**
+     * Checks that a given type is serializable.
+     */
+    private void checkSerializable(String identifier,
+                                   String name,
+                                   String type) {
+
+        if ("void".equals(type)) {
+            return;
+        }
+        String msg = identifier + ' ' + name + " is type " + type;
+        try {
+            Class cls = Class.forName(type);
+            if (!Serializable.class.isAssignableFrom(cls)) {
+                fail(msg + " -- not Serializable");
+            }
+        } catch (Exception e) {
+            fail(msg + " -- " + e);
+        }
+    }
+
+    /**
+     * Checks that an object (parameter or return value) is of the type
+     * specified in the BeanInfo.
+     */
+    private void checkObjectType(String identifier,
+                                 String name,
+                                 String type,
+                                 Object object) {
+
+        String msg = identifier + ' ' + name + " is type " + type;
+        if ("void".equals(type)) {
+            assertNull(msg + "-- should be null", object);
+            return;
+        }
+        try {
+            Class cls = Class.forName(type);
+            assertTrue
+                (msg + " -- object class is " + object.getClass().getName(),
+                 cls.isAssignableFrom(object.getClass()));
+        } catch (Exception e) {
+            fail(msg + " -- " + e);
+        }
+
+        /*
+         * The true test of serializability is to serialize.  This checks
+         * the elements of a list, for example.
+         */
+        try {
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            ObjectOutputStream oos = new ObjectOutputStream(baos);
+            oos.writeObject(object);
+        } catch (Exception e) {
+            fail(msg + " -- " + e);
+        }
+    }
+
+    private void checkForNoOpenHandles(String environmentDir) {
+        File envFile = new File(environmentDir);
+        Environment testEnv = DbInternal.getEnvironmentShell(envFile);
+        assertTrue(testEnv == null);
+    }
+
+    /*
+     * Helper to open an environment.
+     */
+    private Environment openEnv(boolean openTransactionally)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(openTransactionally);
+        return new Environment(envHome, envConfig);
+    }
+
+}
diff --git a/test/com/sleepycat/je/junit/JUnitMethodThread.java b/test/com/sleepycat/je/junit/JUnitMethodThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..47ae6020a948d9d480711a3855c224c72c1ada6f
--- /dev/null
+++ b/test/com/sleepycat/je/junit/JUnitMethodThread.java
@@ -0,0 +1,50 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JUnitMethodThread.java,v 1.8.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.junit;
+
+import java.lang.reflect.Method;
+
+import junit.framework.TestCase;
+
+/**
+ * A JUnitThread whose testBody calls a given TestCase method.
+ */
+public class JUnitMethodThread extends JUnitThread {
+
+    private TestCase testCase;
+    private Method method;
+    private Object param;
+
+    public JUnitMethodThread(String threadName, String methodName,
+                             TestCase testCase)
+        throws NoSuchMethodException {
+
+        this(threadName, methodName, testCase, null);
+    }
+
+    public JUnitMethodThread(String threadName, String methodName,
+                             TestCase testCase, Object param)
+        throws NoSuchMethodException {
+
+        super(threadName);
+        this.testCase = testCase;
+        this.param = param;
+        method = testCase.getClass().getMethod(methodName, new Class[0]);
+    }
+
+    public void testBody()
+        throws Exception {
+
+        if (param != null) {
+            method.invoke(testCase, new Object[] { param });
+        } else {
+            method.invoke(testCase, new Object[0]);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/junit/JUnitThread.java b/test/com/sleepycat/je/junit/JUnitThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..fbc63722cfe6e45a32b1a82b4834dc2f25134406
--- /dev/null
+++ b/test/com/sleepycat/je/junit/JUnitThread.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JUnitThread.java,v 1.21.2.2 2010/01/04 15:30:43 cwl Exp $
+ */
+
+package com.sleepycat.je.junit;
+
+import junit.framework.Assert;
+
+/**
+ * JUnitThread is a utility class that allows JUnit assertions to be
+ * run in other threads.  A JUnit assertion thrown from a
+ * thread other than the invoking one cannot be caught by JUnit.
+ * This class allows these AssertionFailedErrors to be caught and
+ * passed back to the original thread.
+ * <p>
+ * To use, create a JUnitThread and override the testBody() method with
+ * the test code.  Then call doTest() on the thread to run the test
+ * and re-throw any assertion failures that were thrown by the
+ * subthread.
+ * <p>
+ * Example:
+ * <pre>
+    public void testEquality() {
+        JUnitThread tester =
+            new JUnitThread("testEquality") {
+                public void testBody() {
+                    int one = 1;
+                    assertTrue(one == 1);
+                }
+            };
+        tester.doTest();
+    }
+ * </pre>
+ */
+public class JUnitThread extends Thread {
+    private Throwable errorReturn;
+
+    /**
+     * Construct a new JUnitThread.
+     */
+    public JUnitThread(String name) {
+	super(name);
+    }
+
+    public void run() {
+	try {
+	    testBody();
+	} catch (Throwable T) {
+	    errorReturn = T;
+	}
+    }
+
+    /**
+     * Method that is to be overridden by the user.  Code should be
+     * the guts of the test.  assertXXXX() methods may be called in
+     * this method.
+     */
+    public void testBody()
+	throws Throwable {
+
+    }
+
+    /**
+     * This method should be called after the JUnitThread has been
+     * constructed to cause the actual test to be run and any failures
+     * to be returned.
+     */
+    public void doTest()
+	throws Throwable {
+
+	start();
+        finishTest();
+    }
+
+    /**
+     * This method should be called after the JUnitThread has been
+     * started to cause the test to report any failures.
+     */
+    public void finishTest()
+	throws Throwable {
+
+	try {
+	    join();
+	} catch (InterruptedException IE) {
+	    Assert.fail("caught unexpected InterruptedException");
+	}
+	if (errorReturn != null) {
+	    throw errorReturn;
+	}
+    }
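+
+    /*
+     * Typical multi-thread usage (illustrative, mirroring the tests in this
+     * package): start each thread, then call finishTest() on each one so
+     * that an assertion failure raised in a subthread is re-thrown in the
+     * main JUnit thread:
+     *
+     *   tester1.start();
+     *   tester2.start();
+     *   tester1.finishTest();
+     *   tester2.finishTest();
+     */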
+
+    public String toString() {
+	return "<JUnitThread: " + super.toString() + ">";
+    }
+}
diff --git a/test/com/sleepycat/je/latch/LatchTest.java b/test/com/sleepycat/je/latch/LatchTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7cd81d3e82ab28b8e719f75237eaa58e92231f49
--- /dev/null
+++ b/test/com/sleepycat/je/latch/LatchTest.java
@@ -0,0 +1,485 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LatchTest.java,v 1.37.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.latch;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.junit.JUnitThread;
+
+public class LatchTest extends TestCase {
+    private Latch latch1 = null;
+    private Latch latch2 = null;
+    private JUnitThread tester1 = null;
+    private JUnitThread tester2 = null;
+
+    static private final boolean DEBUG = false;
+
+    private void debugMsg(String message) {
+	if (DEBUG) {
+	    System.out.println(Thread.currentThread().toString()
+			       + " " +  message);
+	}
+    }
+
+    public void setUp() {
+    }
+
+    private void initExclusiveLatches() {
+	latch1 = new Latch("LatchTest-latch1");
+	latch2 = new Latch("LatchTest-latch2");
+    }
+
+    public void tearDown() {
+	latch1 = null;
+	latch2 = null;
+    }
+
+    public void testDebugOutput()
+	throws Throwable {
+
+	/* Stupid test solely for the sake of code coverage. */
+	initExclusiveLatches();
+	/* Acquire a latch. */
+	try {
+	    latch1.acquire();
+	} catch (DatabaseException LE) {
+	    fail("caught DatabaseException");
+	}
+
+	LatchSupport.latchesHeldToString();
+    }
+
+    public void testAcquireAndReacquire()
+	throws Throwable {
+
+	initExclusiveLatches();
+	JUnitThread tester =
+	    new JUnitThread("testAcquireAndReacquire") {
+		public void testBody() {
+		    /* Acquire a latch. */
+		    try {
+			latch1.acquire();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    /* Try to acquire it again -- should fail. */
+		    try {
+			latch1.acquire();
+			fail("didn't catch LatchException");
+		    } catch (LatchException LE) {
+			assertTrue
+			    (latch1.getLatchStats().nAcquiresSelfOwned == 1);
+		    } catch (DatabaseException DE) {
+			fail("didn't catch LatchException-caught DE instead");
+		    }
+
+		    /* Release it. */
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+
+		    /* Release it again -- should fail. */
+		    try {
+			latch1.release();
+			fail("didn't catch LatchNotHeldException");
+		    } catch (LatchNotHeldException LNHE) {
+		    }
+		}
+	    };
+
+	tester.doTest();
+    }
+
+    public void testAcquireAndReacquireShared()
+	throws Throwable {
+
+	final SharedLatch latch = new SharedLatch("LatchTest-latch2");
+
+	JUnitThread tester =
+	    new JUnitThread("testAcquireAndReacquireShared") {
+		public void testBody() {
+		    /* Acquire a shared latch. */
+		    try {
+			latch.acquireShared();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    assert latch.isOwner();
+
+		    /* Try to acquire it again -- should succeed. */
+		    try {
+			latch.acquireShared();
+		    } catch (LatchException LE) {
+			fail("didn't catch LatchException");
+		    } catch (DatabaseException DE) {
+			fail("didn't catch LatchException-caught DE instead");
+		    }
+
+		    assert latch.isOwner();
+
+		    /* Release it. */
+		    try {
+			latch.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+
+		    /* Release it again -- should succeed. */
+		    try {
+			latch.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("didn't catch LatchNotHeldException");
+		    }
+
+		    /* Release it again -- should fail. */
+		    try {
+			latch.release();
+			fail("didn't catch LatchNotHeldException");
+		    } catch (LatchNotHeldException LNHE) {
+		    }
+		}
+	    };
+
+	tester.doTest();
+    }
+
+    /*
+     * Do a million acquire/release pairs.  The junit output will tell us how
+     * long it took.
+     */
+    public void testAcquireReleasePerformance()
+	throws Throwable {
+
+	initExclusiveLatches();
+	JUnitThread tester =
+	    new JUnitThread("testAcquireReleasePerformance") {
+		public void testBody() {
+		    final int N_PERF_TESTS = 1000000;
+		    for (int i = 0; i < N_PERF_TESTS; i++) {
+			/* Acquire a latch */
+			try {
+			    latch1.acquire();
+			} catch (DatabaseException LE) {
+			    fail("caught DatabaseException");
+			}
+
+			/* Release it. */
+			try {
+			    latch1.release();
+			} catch (LatchNotHeldException LNHE) {
+			    fail("unexpected LatchNotHeldException");
+			}
+		    }
+		    LatchStats stats = latch1.getLatchStats();
+		    stats.toString();
+		    assertTrue(stats.nAcquiresNoWaiters == N_PERF_TESTS);
+		    assertTrue(stats.nReleases == N_PERF_TESTS);
+		}
+	    };
+
+	tester.doTest();
+    }
+
+    /* Test latch waiting. */
+
+    public void testWait()
+	throws Throwable {
+
+	initExclusiveLatches();
+	for (int i = 0; i < 10; i++) {
+	    doTestWait();
+	}
+    }
+
+    private int nAcquiresWithContention = 0;
+
+    public void doTestWait()
+	throws Throwable {
+
+	tester1 =
+	    new JUnitThread("testWait-Thread1") {
+		public void testBody() {
+		    /* Acquire a latch. */
+		    try {
+			latch1.acquire();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    /* Wait for tester2 to try to acquire the latch. */
+		    while (latch1.nWaiters() == 0) {
+			Thread.yield();
+		    }
+
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+		}
+	    };
+
+	tester2 =
+	    new JUnitThread("testWait-Thread2") {
+		public void testBody() {
+		    /* Wait for tester1 to start. */
+
+		    while (latch1.owner() != tester1) {
+			Thread.yield();
+		    }
+
+		    /* Acquire a latch. */
+		    try {
+			latch1.acquire();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    assertTrue(latch1.getLatchStats().nAcquiresWithContention
+			       == ++nAcquiresWithContention);
+
+		    /* Release it. */
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+
+    /* Test acquireNoWait(). */
+
+    private volatile boolean attemptedAcquireNoWait;
+
+    public void testAcquireNoWait()
+	throws Throwable {
+
+	initExclusiveLatches();
+	tester1 =
+	    new JUnitThread("testWait-Thread1") {
+		public void testBody() {
+		    debugMsg("Acquiring Latch");
+		    /* Acquire a latch. */
+		    try {
+			latch1.acquire();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    /* Wait for tester2 to try to acquire the latch. */
+
+		    debugMsg("Waiting for other thread");
+		    while (!attemptedAcquireNoWait) {
+			Thread.yield();
+		    }
+
+		    debugMsg("Releasing the latch");
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+		}
+	    };
+
+	tester2 =
+	    new JUnitThread("testWait-Thread2") {
+		public void testBody() {
+		    /* Wait for tester1 to start. */
+
+		    debugMsg("Waiting for T1 to acquire latch");
+		    while (latch1.owner() != tester1) {
+			Thread.yield();
+		    }
+
+		    /*
+		     * Attempt Acquire with no wait -- should fail since
+		     * tester1 has it.
+		     */
+		    debugMsg("Acquiring no wait");
+		    try {
+			assertFalse(latch1.acquireNoWait());
+			assertTrue(latch1.getLatchStats().
+				   nAcquireNoWaitUnsuccessful == 1);
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    attemptedAcquireNoWait = true;
+
+		    debugMsg("Waiting for T1 to release latch");
+		    while (latch1.owner() != null) {
+			Thread.yield();
+		    }
+
+		    /*
+		     * Attempt Acquire with no wait -- should succeed now that
+		     * tester1 is done.
+		     */
+		    debugMsg("Acquiring no wait - 2");
+		    try {
+			assertTrue(latch1.acquireNoWait());
+			assertTrue(latch1.getLatchStats().
+				   nAcquireNoWaitSuccessful == 1);
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    /*
+		     * Attempt Acquire with no wait again -- should throw
+		     * exception since we already have it.
+		     */
+		    debugMsg("Acquiring no wait - 3");
+		    try {
+			latch1.acquireNoWait();
+			fail("didn't throw LatchException");
+		    } catch (LatchException LE) {
+		    	// expected
+		    } catch (Exception e) {
+			fail("caught Exception");
+		    }
+
+		    /* Release it. */
+		    debugMsg("releasing the latch");
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+
+    /* State for testMultipleWaiters. */
+    private final int N_WAITERS = 5;
+
+    /* A JUnitThread that holds the waiter number. */
+    private class MultiWaiterTestThread extends JUnitThread {
+	private int waiterNumber;
+	public MultiWaiterTestThread(String name, int waiterNumber) {
+	    super(name);
+	    this.waiterNumber = waiterNumber;
+	}
+    }
+
+    public void testMultipleWaiters()
+	throws Throwable {
+
+	initExclusiveLatches();
+	JUnitThread[] waiterThreads =
+	    new JUnitThread[N_WAITERS];
+
+	tester1 =
+	    new JUnitThread("testWait-Thread1") {
+		public void testBody() {
+
+		    debugMsg("About to acquire latch");
+
+		    /* Acquire a latch. */
+		    try {
+			latch1.acquire();
+		    } catch (DatabaseException LE) {
+			fail("caught DatabaseException");
+		    }
+
+		    debugMsg("acquired latch");
+
+		    /*
+		     * Wait for all other testers to be waiting on the latch.
+		     */
+		    while (latch1.nWaiters() < N_WAITERS) {
+			Thread.yield();
+		    }
+
+		    debugMsg("About to release latch");
+
+		    try {
+			latch1.release();
+		    } catch (LatchNotHeldException LNHE) {
+			fail("unexpected LatchNotHeldException");
+		    }
+		}
+	    };
+
+	for (int i = 0; i < N_WAITERS; i++) {
+	    waiterThreads[i] =
+		new MultiWaiterTestThread("testWait-Waiter" + i, i) {
+		    public void testBody() {
+
+			int waiterNumber =
+			    ((MultiWaiterTestThread)
+			     Thread.currentThread()).waiterNumber;
+
+			/* Wait for tester1 to start. */
+			debugMsg("Waiting for main to acquire latch");
+
+			while (latch1.owner() != tester1) {
+			    Thread.yield();
+			}
+
+			/*
+			 * Wait until it's our turn to try to acquire the
+			 * latch.
+			 */
+			debugMsg("Waiting for our turn to acquire latch");
+			while (latch1.nWaiters() < waiterNumber) {
+			    Thread.yield();
+			}
+
+			debugMsg("About to acquire latch");
+			/* Try to acquire the latch */
+			try {
+			    latch1.acquire();
+			} catch (DatabaseException LE) {
+			    fail("caught DatabaseException");
+			}
+
+			debugMsg("nWaiters: " + latch1.nWaiters());
+			assertTrue(latch1.nWaiters() ==
+				   (N_WAITERS - waiterNumber - 1));
+
+			debugMsg("About to release latch");
+			/* Release it. */
+			try {
+			    latch1.release();
+			} catch (LatchNotHeldException LNHE) {
+			    fail("unexpected LatchNotHeldException");
+			}
+		    }
+		};
+	}
+
+	tester1.start();
+
+	for (int i = 0; i < N_WAITERS; i++) {
+	    waiterThreads[i].start();
+	}
+
+	tester1.finishTest();
+	for (int i = 0; i < N_WAITERS; i++) {
+	    waiterThreads[i].finishTest();
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/log/FSyncManagerTest.java b/test/com/sleepycat/je/log/FSyncManagerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a2a48e8e64adc640cc9e853938e2cea47a1c9fd8
--- /dev/null
+++ b/test/com/sleepycat/je/log/FSyncManagerTest.java
@@ -0,0 +1,143 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FSyncManagerTest.java,v 1.13.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Exercise the synchronization aspects of the sync manager.
+ */
+public class FSyncManagerTest extends TestCase {
+    private File envHome;
+
+    public FSyncManagerTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    protected void setUp()
+        throws Exception {
+        /* Remove files to start with a clean slate. */
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    protected void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testBasic()
+        throws Throwable {
+
+        Environment env = null;
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setConfigParam(EnvironmentParams.LOG_FSYNC_TIMEOUT.getName(),
+                                     "50000000");
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            WaitVal waitVal = new WaitVal(0);
+
+            FSyncManager syncManager =
+                new TestSyncManager(DbInternal.envGetEnvironmentImpl(env),
+                                    waitVal);
+            JUnitThread t1 = new TestSyncThread(syncManager);
+            JUnitThread t2 = new TestSyncThread(syncManager);
+            JUnitThread t3 = new TestSyncThread(syncManager);
+            t1.start();
+            t2.start();
+            t3.start();
+
+            /* Wait for all threads to request a sync, so they form a group.*/
+            Thread.sleep(500);
+
+            /* Free thread 1. */
+            synchronized (waitVal) {
+                waitVal.value = 1;
+                waitVal.notify();
+            }
+
+            t1.join();
+            t2.join();
+            t3.join();
+
+            /*
+             * All three threads ask for fsyncs.
+             * 2 do fsyncs -- the initial leader, and the leader of the
+             * waiting group of 2.
+             * The last thread gets a free ride.
+             */
+            assertEquals(3, syncManager.getNFSyncRequests());
+            assertEquals(2, syncManager.getNFSyncs());
+            assertEquals(0, syncManager.getNTimeouts());
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
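+
+    /*
+     * Reading of the grouping behavior exercised above (explanatory note
+     * only): the first requester becomes the leader and performs an fsync;
+     * requesters that arrive while that fsync is in progress wait as a
+     * group, one of them leads the follow-on fsync, and the rest share its
+     * result.  That is why three requests are satisfied by two fsyncs and
+     * the last thread gets a free ride.
+     */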
+
+    /* This test class waits for an object instead of executing a sync.
+     * This way, we can manipulate grouping behavior.
+     */
+    class TestSyncManager extends FSyncManager {
+        private WaitVal waitVal;
+        TestSyncManager(EnvironmentImpl env, WaitVal waitVal)
+            throws DatabaseException {
+            super(env);
+            this.waitVal = waitVal;
+        }
+        protected void executeFSync()
+            throws DatabaseException {
+            try {
+                synchronized (waitVal) {
+                    if (waitVal.value < 1) {
+                        waitVal.wait();
+                    }
+                }
+            } catch (InterruptedException e) {
+                // woken up.
+            }
+        }
+    }
+
+    class TestSyncThread extends JUnitThread {
+        private FSyncManager syncManager;
+        TestSyncThread(FSyncManager syncManager) {
+            super("syncThread");
+            this.syncManager = syncManager;
+        }
+
+        public void testBody()
+            throws Throwable {
+            syncManager.fsync();
+        }
+    }
+
+    class WaitVal {
+        public int value;
+
+        WaitVal(int value) {
+            this.value = value;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/FileEdgeCaseTest.java b/test/com/sleepycat/je/log/FileEdgeCaseTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..40f22d023051072482999a3ec9f874e622e7926f
--- /dev/null
+++ b/test/com/sleepycat/je/log/FileEdgeCaseTest.java
@@ -0,0 +1,142 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileEdgeCaseTest.java,v 1.6.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.recovery.NoRootException;
+import com.sleepycat.je.util.TestUtils;
+
+public class FileEdgeCaseTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private String firstFile;
+
+    public FileEdgeCaseTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        /*
+         * Close down environments in case the unit test failed, so that the
+         * log files can be removed.
+         */
+        try {
+            if (env != null) {
+                env.close();
+                env = null;
+            }
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            // ok, the test closed it
+        }
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * SR #15133
+     * Create a JE environment with a single log file and a checksum
+     * exception in the second entry in the log file.
+     *
+     * When an application attempts to open this JE environment, JE truncates
+     * the log file at the point before the bad checksum, because it assumes
+     * that bad entries at the end of the log are the result of incompletely
+     * flushed writes from the last environment use.  However, the truncated
+     * log doesn't have a valid environment root, so JE complains and asks the
+     * application to move aside the existing log file (via the exception
+     * message). The resulting environment has a single log file, with
+     * a single valid entry, which is the file header.
+     *
+     * Any subsequent attempts to open the environment should also fail at the
+     * same point. In the error case reported by this SR, we didn't handle this
+     * single log file/single file header case right, and subsequent opens
+     * first truncated before the file header, leaving a 0 length log, and
+     * then proceeded to write error trace messages into the log. This
+     * resulted in a log file with no file header, (but with trace messages)
+     * and any following opens got unpredictable errors like
+     * ClassCastExceptions and BufferUnderflows.
+     *
+     * The correct situation is to continue to get the same exception.
+     */
+    public void testPostChecksumError()
+        throws IOException, DatabaseException {
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setAllowCreate(true);
+        env = new Environment(envHome, config);
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        FileManager fm = envImpl.getFileManager();
+        firstFile = fm.getFullFileName(0, FileManager.JE_SUFFIX);
+
+        env.close();
+        env = null;
+
+        /* Intentionally corrupt the second entry. */
+        corruptSecondEntry();
+
+        /*
+         * Next attempt to open the environment should fail with a
+         * NoRootException
+         */
+        try {
+            env = new Environment(envHome, config);
+            fail("Expected NoRootException");
+        } catch (NoRootException expected) {
+        }
+
+        /*
+         * A subsequent attempt to open the environment should fail the same
+         * way, with a NoRootException.
+         */
+        try {
+            env = new Environment(envHome, config);
+            fail("Expected NoRootException");
+        } catch (NoRootException expected) {
+        }
+    }
+
+    /**
+     * Write junk into the second log entry, after the file header.
+     */
+    private void corruptSecondEntry()
+        throws IOException {
+
+        RandomAccessFile file =
+            new RandomAccessFile(firstFile,
+                                 FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+
+        try {
+            byte[] junk = new byte[20];
+            file.seek(FileManager.firstLogEntryOffset());
+            file.write(junk);
+        } finally {
+            file.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/FileManagerTest.java b/test/com/sleepycat/je/log/FileManagerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..803ea2a88a7dab613078b6b6ec1d53d4327960ce
--- /dev/null
+++ b/test/com/sleepycat/je/log/FileManagerTest.java
@@ -0,0 +1,589 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileManagerTest.java,v 1.72.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Tests for the FileManager: LSN bumping, file naming and creation, and the
+ * file handle cache.
+ */
+public class FileManagerTest extends TestCase {
+
+    private static final int FILE_SIZE = 120;
+
+    private EnvironmentImpl envImpl;
+    private FileManager fileManager;
+    private File envHome;
+
+    public FileManagerTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    protected void setUp()
+        throws DatabaseException, IOException {
+
+        /* Remove files to start with a clean slate. */
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 new Integer(FILE_SIZE).toString());
+        /* Yank the cache size way down. */
+        envConfig.setConfigParam
+	    (EnvironmentParams.LOG_FILE_CACHE_SIZE.getName(), "3");
+        envConfig.setAllowCreate(true);
+        envImpl = new EnvironmentImpl(envHome, envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+
+        /* Make a standalone file manager for this test. */
+        envImpl.close();
+        envImpl.open(); /* Just sets state to OPEN. */
+        fileManager = new FileManager(envImpl, envHome, false);
+
+        /*
+         * Remove the files again, now that the environment has been created.
+         * We want to remove the files made by recovery, so we can test the
+         * file manager in controlled cases.
+         */
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    protected void tearDown()
+        throws IOException, DatabaseException {
+
+        if (fileManager != null) {
+            fileManager.clear();
+            fileManager.close();
+        }
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Test LSN administration.
+     */
+    public void testLsnBumping()
+        throws Exception {
+
+        /*
+           We are adding these entries:
+
+                    +----+------+---------+--------+
+           file 0:  |hdr | 30   |   50    |empty   |
+                    +----+------+---------+--------+
+                    0    hdr  hdr+30   hdr+80     99
+
+                    +----+--------+-------+-------+-----+-------+
+           file 1:  |hdr | 40     | 20    | 10    | 5   | empty |
+                    +----+--------+-------+-------+-----+-------+
+                    0    hdr   hdr+40  hdr+60  hdr+70  hdr+75
+
+                    +-----+-----+--------+
+           file 2:  | hdr | 75  |  empty |
+                    +-----+-----+--------+
+                    0     hdr   hdr+75
+
+                    +-----+-------------------------------+
+           file 3:  | hdr | 125                           |
+                    +-----+-------------------------------+
+                    0     hdr
+
+                    +-----+-----+------+-----+--------------+
+           file 4:  | hdr | 10  | 20   | 30  | empty
+                    +-----+-----+------+-----+--------------+
+                    0     hdr  hdr+10 hdr+30
+
+        */
+
+        try {
+            /* Should start out at LSN 0. */
+
+            /* "add" some entries to the log. */
+            long hdrSize = FileManager.firstLogEntryOffset();
+
+            fileManager.bumpLsn(30L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(0, hdrSize),
+                         fileManager.getLastUsedLsn());
+            /* prev entry. */
+            assertEquals(0, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(50L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(0, (hdrSize + 30)),
+                         fileManager.getLastUsedLsn());
+            assertEquals(hdrSize, fileManager.getPrevEntryOffset());
+
+            /* bump over to file 1. */
+            fileManager.bumpLsn(40L);
+            /* item placed here. */
+            assertEquals(DbLsn.makeLsn(1, hdrSize),
+                         fileManager.getLastUsedLsn());
+            assertEquals(0, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(20L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(1,(hdrSize+40)),
+                         fileManager.getLastUsedLsn());
+            assertEquals(hdrSize, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(10L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(1,(hdrSize+60)),
+                         fileManager.getLastUsedLsn());
+            assertEquals(hdrSize+40, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(5L);
+            /* item placed here. */
+            assertEquals(DbLsn.makeLsn(1,(hdrSize+70)),
+                         fileManager.getLastUsedLsn());
+            assertEquals(hdrSize+60, fileManager.getPrevEntryOffset());
+
+            /* bump over to file 2. */
+            fileManager.bumpLsn(75L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(2, hdrSize),
+                         fileManager.getLastUsedLsn());
+            assertEquals(0, fileManager.getPrevEntryOffset());
+
+            /* Ask for something bigger than a file: bump over to file 3. */
+            fileManager.bumpLsn(125L);
+            /* item placed here. */
+            assertEquals(DbLsn.makeLsn(3, hdrSize),
+                         fileManager.getLastUsedLsn());
+            assertEquals(0, fileManager.getPrevEntryOffset());
+
+            /* bump over to file 4. */
+            fileManager.bumpLsn(10L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(4, hdrSize),
+			 fileManager.getLastUsedLsn());
+            assertEquals(0, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(20L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(4, (hdrSize+10)),
+			 fileManager.getLastUsedLsn());
+            assertEquals(hdrSize, fileManager.getPrevEntryOffset());
+
+            fileManager.bumpLsn(30L);
+            /* Item placed here. */
+            assertEquals(DbLsn.makeLsn(4, (hdrSize+30)),
+			 fileManager.getLastUsedLsn());
+            assertEquals((hdrSize+10), fileManager.getPrevEntryOffset());
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /**
+     * Test initializing the last position in the logs.
+     */
+    public void testSetLastPosition()
+        throws DatabaseException {
+
+        /*
+         * Pretend that the last file is file 79, with (79, 88) as the next
+         * available LSN and (79, 77) as the last entry written.
+         */
+        fileManager.setLastPosition(// next available LSN
+				    DbLsn.makeLsn(79L, 88L),
+                                    DbLsn.makeLsn(79L, 77),
+                                    66L);
+
+        /* Put an entry down, should fit within file 79. */
+        fileManager.bumpLsn(11L);
+        assertEquals(DbLsn.makeLsn(79L, 88L), fileManager.getLastUsedLsn());
+        assertEquals(77L, fileManager.getPrevEntryOffset());
+
+        /* Put another entry in, should go to the next file. */
+        fileManager.bumpLsn(22L);
+        assertEquals(DbLsn.makeLsn(80L, FileManager.firstLogEntryOffset()),
+                     fileManager.getLastUsedLsn());
+        assertEquals(0, fileManager.getPrevEntryOffset());
+    }
+
+    /**
+     * Test log file naming.
+     */
+    public void testFileNameFormat()
+        throws DatabaseException {
+
+        String filePrefix = envHome + File.separator;
+        assertEquals(filePrefix + "00000001.jdb",
+                     fileManager.getFullFileNames(1L)[0]);
+        assertEquals(filePrefix + "0000007b.jdb",
+                     fileManager.getFullFileNames(123L)[0]);
+    }
+
+    /**
+     * Test log file creation.
+     */
+    public void testFileCreation()
+        throws IOException, DatabaseException {
+
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+
+        String[] jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+
+        assertEquals("Should have two files", 2, jeFiles.length);
+
+        /* Make fake files with confusing names. */
+        File fakeFile1 = new File(envHome, "00000abx.jdb");
+        File fakeFile2 = new File(envHome, "10.10.jdb");
+        fakeFile1.createNewFile();
+        fakeFile2.createNewFile();
+
+        jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+        assertEquals("Should have two files", 2, jeFiles.length);
+
+        /* Open the two existing log files. */
+        FileHandle file0Handle = fileManager.getFileHandle(0L);
+        FileHandle file1Handle = fileManager.getFileHandle(1L);
+
+        jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+        assertEquals("Should have two files", 2, jeFiles.length);
+        file0Handle.release();
+        file1Handle.release();
+
+        /* Empty the cache and get them again. */
+        fileManager.clear();
+        file0Handle = fileManager.getFileHandle(0L);
+        file1Handle = fileManager.getFileHandle(1L);
+
+        jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+        assertEquals("Should have two files", 2, jeFiles.length);
+        file0Handle.close();
+        file1Handle.close();
+        file0Handle.release();
+        file1Handle.release();
+
+        fakeFile1.delete();
+        fakeFile2.delete();
+    }
+
+    /**
+     * Make sure we can find the last file.
+     */
+    public void testLastFile()
+        throws IOException, DatabaseException {
+
+        /* There shouldn't be any files here anymore. */
+        String[] jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+        assertTrue(jeFiles.length == 0);
+
+        /* No files exist, should get null. */
+        assertNull("No last file", fileManager.getLastFileNum());
+
+        /* Create some files, ask for the largest. */
+        File fakeFile1 = new File(envHome, "108.cif");
+        fakeFile1.createNewFile();
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+
+        assertEquals("Should have 2 as last file", 2L,
+                     fileManager.getLastFileNum().longValue());
+        fakeFile1.delete();
+    }
+
+    /**
+     * Make sure we can find the next file in a set of files.
+     */
+    public void testFollowingFile()
+        throws IOException, DatabaseException {
+
+        /* There shouldn't be any files here anymore. */
+        String[] jeFiles = fileManager.listFiles(FileManager.JE_SUFFIXES);
+        assertTrue(jeFiles.length == 0);
+
+        /* No files exist, should get null. */
+        assertNull("No last file", fileManager.getFollowingFileNum(0, true));
+        assertNull("No last file", fileManager.getFollowingFileNum(0, false));
+        assertNull("No last file", fileManager.getFollowingFileNum(1, true));
+        assertNull("No last file", fileManager.getFollowingFileNum(-1, false));
+
+        /* Create some files. */
+        File okFile1 = new File(envHome, "00000001.jdb");
+        okFile1.createNewFile();
+
+        File fakeFile3 = new File(envHome, "003.jdb");
+        fakeFile3.createNewFile();
+
+        File okFile6 = new File(envHome, "00000006.jdb");
+        okFile6.createNewFile();
+
+        File okFile9 = new File(envHome, "00000009.jdb");
+        okFile9.createNewFile();
+
+
+        /* Test forward */
+        assertEquals("Should get 6 next", 6L,
+                     fileManager.getFollowingFileNum(2, true).longValue());
+        assertEquals("Should get 9 next, testing non-existent file", 9L,
+                     fileManager.getFollowingFileNum(8, true).longValue());
+        assertNull("Should get null next",
+		   fileManager.getFollowingFileNum(9, true));
+        assertNull("Should get null next",
+		   fileManager.getFollowingFileNum(10, true));
+
+        /* Test prev */
+        assertEquals("Should get 6 next, testing non-existent file", 6L,
+                     fileManager.getFollowingFileNum(8, false).longValue());
+        assertEquals("Should get 6 next", 6L,
+                     fileManager.getFollowingFileNum(9, false).longValue());
+        assertNull("Should get null next",
+		   fileManager.getFollowingFileNum(1, false));
+        assertNull("Should get null next",
+		   fileManager.getFollowingFileNum(0, false));
+
+        okFile1.delete();
+        fakeFile3.delete();
+        okFile6.delete();
+        okFile9.delete();
+    }
+
+    /**
+     * See if we can catch a file with an invalid header.
+     */
+    public void testBadHeader()
+        throws IOException, DatabaseException {
+
+        /* First try a bad environment r/w. */
+        try {
+            FileManager test =
+                new FileManager(envImpl, new File("xxyy"), true);
+            fail("expect creation of " + test + "to fail.");
+        } catch (LogException e) {
+            /* should throw */
+        }
+
+        /* Next try a bad environment r/o. */
+        try {
+            FileManager test =
+                new FileManager(envImpl, new File("xxyy"), false);
+            fail("expect creation of " + test + "to fail.");
+        } catch (DatabaseException e) {
+            /* should throw */
+        }
+
+        /* Now create a file, but mess up the header. */
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+
+        byte[] badData = new byte[]{1,1};
+        RandomAccessFile file0 =
+            new RandomAccessFile
+		(fileManager.getFullFileName(0, FileManager.JE_SUFFIX),
+		 FileManager.FileMode.READWRITE_MODE.getModeValue());
+        file0.write(badData);
+        file0.close();
+        fileManager.clear();
+
+        try {
+            fileManager.getFileHandle(0L);
+            fail("expect to catch a checksum error");
+        } catch (DbChecksumException e) {
+        }
+    }
+
+    public void testTruncatedHeader()
+        throws IOException, DatabaseException {
+
+        /* Create a log file */
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE);
+
+        /* Truncate the header */
+        RandomAccessFile file0 =
+            new RandomAccessFile
+		(fileManager.getFullFileName(0, FileManager.JE_SUFFIX),
+		 FileManager.FileMode.READWRITE_MODE.getModeValue());
+        file0.getChannel().truncate(FileManager.firstLogEntryOffset()/2);
+        file0.close();
+
+        try {
+            fileManager.getFileHandle(0);
+            fail("Should see assertion");
+        } catch (DatabaseException e) {
+        }
+    }
+
+    /**
+     * Test the file cache.
+     */
+    public void testCache()
+        throws Throwable {
+
+        try {
+
+            /*
+             * Make five log files. The file descriptor cache should be empty.
+             */
+            FileManagerTestUtils.createLogFile
+		(fileManager, envImpl, FILE_SIZE);
+            FileManagerTestUtils.createLogFile
+		(fileManager, envImpl, FILE_SIZE);
+            FileManagerTestUtils.createLogFile
+		(fileManager, envImpl, FILE_SIZE);
+            FileManagerTestUtils.createLogFile
+		(fileManager, envImpl, FILE_SIZE);
+            FileManagerTestUtils.createLogFile
+		(fileManager, envImpl, FILE_SIZE);
+
+            Long f0 = new Long(0L);
+            Long f1 = new Long(1L);
+            Long f2 = new Long(2L);
+            Long f3 = new Long(3L);
+            Long f4 = new Long(4L);
+
+            Set<Long> keySet = fileManager.getCacheKeys();
+            assertEquals("should have 0 keys", 0, keySet.size());
+
+            /*
+             * Get file descriptors for three files, expect 3 handles in the
+             * cache.
+             */
+            FileHandle f0Handle = fileManager.getFileHandle(0);
+            FileHandle f1Handle = fileManager.getFileHandle(1);
+            FileHandle f2Handle = fileManager.getFileHandle(2);
+            keySet = fileManager.getCacheKeys();
+            assertEquals("should have 3 keys", 3, keySet.size());
+            assertTrue(keySet.contains(f0));
+            assertTrue(keySet.contains(f1));
+            assertTrue(keySet.contains(f2));
+
+            /*
+             * Ask for a fourth handle, the cache should grow even though it
+             * was set to 3 as a starting size, because all handles are
+             * locked. Do it within another thread, otherwise we'll get a
+             * latch-already-held exception when we test the other handles in
+             * the cache. The other thread will get the handle and then release
+             * it.
+             */
+            CachingThread otherThread = new CachingThread(fileManager, 3);
+            otherThread.start();
+            otherThread.join();
+
+            keySet = fileManager.getCacheKeys();
+            assertEquals("should have 4 keys", 4, keySet.size());
+            assertTrue(keySet.contains(f0));
+            assertTrue(keySet.contains(f1));
+            assertTrue(keySet.contains(f2));
+            assertTrue(keySet.contains(f3));
+
+            /*
+             * Now ask for another file. The cache should not grow, because no
+             * handles are locked and there's room to evict one.
+             */
+            f0Handle.release();
+            f1Handle.release();
+            f2Handle.release();
+            FileHandle f4Handle = fileManager.getFileHandle(4);
+            keySet = fileManager.getCacheKeys();
+            assertEquals("should have 4 keys", 4, keySet.size());
+            assertTrue(keySet.contains(f4));
+
+            f4Handle.release();
+
+            /* Clearing should release all file descriptors. */
+            fileManager.clear();
+            assertEquals("should have 0 keys", 0,
+                         fileManager.getCacheKeys().size());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testFlipFile()
+	throws Throwable {
+
+        /*
+         * The setUp() method opens a standalone FileManager, but in this test
+         * case we need a regular Environment.  On Windows, we can't lock the
+         * file range twice in FileManager.lockEnvironment, so we must close
+         * the standalone FileManager here before opening a regular
+         * environment.
+         */
+        fileManager.clear();
+        fileManager.close();
+        fileManager = null;
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        Environment env = new Environment(envHome, envConfig);
+	EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+	FileManager fileManager = envImpl.getFileManager();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database exampleDb =
+	    env.openDatabase(null, "simpleDb", dbConfig);
+
+        assertEquals("Should have 0 as current file", 0L,
+                     fileManager.getCurrentFileNum());
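+	/*
+	 * forceLogFileFlip() starts a new log file and returns an LSN in the
+	 * newly flipped file, so its file number should be 1.
+	 */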
+	long flipLsn = envImpl.forceLogFileFlip();
+	assertEquals("LSN should be 1 post-flip", 1L,
+		     DbLsn.getFileNumber(flipLsn));
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	key.setData("key".getBytes());
+	data.setData("data".getBytes());
+	exampleDb.put(null, key, data);
+        assertEquals("Should have 1 as last file", 1L,
+                     fileManager.getCurrentFileNum());
+	exampleDb.close();
+	env.close();
+    }	
+
+    class CachingThread extends Thread {
+        private FileManager fileManager;
+        private long fileNum;
+
+        private FileHandle handle;
+
+        CachingThread(FileManager fileManager, long fileNum) {
+            this.fileManager = fileManager;
+            this.fileNum = fileNum;
+        }
+
+        public void run() {
+            try {
+                handle = fileManager.getFileHandle(fileNum);
+                handle.release();
+            } catch (Exception e) {
+                fail(e.getMessage());
+            }
+        }
+
+        FileHandle getHandle() {
+            return handle;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/FileManagerTestUtils.java b/test/com/sleepycat/je/log/FileManagerTestUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..07a92d280dcc1fde7267ed961b9f0c0875f379a0
--- /dev/null
+++ b/test/com/sleepycat/je/log/FileManagerTestUtils.java
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileManagerTestUtils.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+public class FileManagerTestUtils {
+    public static void createLogFile(FileManager fileManager,
+    	                             EnvironmentImpl envImpl,
+    	                             long logFileSize)
+        throws DatabaseException, IOException {
+
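+        /*
+         * Bump the LSN by (logFileSize - header size), write an empty log
+         * buffer registered at that LSN, and then sync; the net effect is to
+         * finish off a log file of roughly the requested size without
+         * writing any real entries.
+         */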
+        LogBuffer logBuffer = new LogBuffer(50, envImpl);
+        logBuffer.getDataBuffer().flip();
+        fileManager.bumpLsn(logFileSize - FileManager.firstLogEntryOffset());
+        logBuffer.registerLsn(fileManager.getLastUsedLsn());
+        fileManager.writeLogBuffer(logBuffer);
+        fileManager.syncLogEndAndFinishFile();
+    }
+}
+
diff --git a/test/com/sleepycat/je/log/FileReaderBufferingTest.java b/test/com/sleepycat/je/log/FileReaderBufferingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1f9dee7fb30ff24ebd8cab70dcccbbecad4b1d4a
--- /dev/null
+++ b/test/com/sleepycat/je/log/FileReaderBufferingTest.java
@@ -0,0 +1,175 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FileReaderBufferingTest.java,v 1.18.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Check our ability to adjust the file reader buffer size.
+ */
+public class FileReaderBufferingTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private ArrayList<Long> expectedLsns;
+    private ArrayList<String> expectedVals;
+
+    public FileReaderBufferingTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * The read buffer should grow to hold the entries without any repeat reads.
+     */
+    public void testBasic()
+        throws Exception {
+
+        readLog(1050,   // starting size of object in entry
+                0,      // object growth increment
+                100,    // starting read buffer size
+                "3000", // max read buffer size
+                0);     // expected number of overflows.
+    }
+
+    /**
+     * The read buffer cannot grow large enough for the entries; expect an
+     * overflow (repeat read) for every entry.
+     */
+    public void testCantGrow()
+        throws Exception {
+
+        readLog(2000,   // starting size of object in entry
+                0,      // object growth increment
+                100,    // starting read buffer size
+                "1000", // max read buffer size
+                10);    // expected number of overflows.
+    }
+
+    /**
+     * Should overflow, grow, and then reach the max.
+     */
+    public void testReachMax()
+        throws Exception {
+
+        readLog(1000,   // size of object in entry
+                1000,      // object growth increment
+                100,    // starting read buffer size
+                "3500", // max read buffer size
+                7);     // expected number of overflows.
+    }
+
+    /**
+     * Write a log of trace entries and read it back with the given read
+     * buffer sizes, checking the number of repeat-iterator reads (overflows).
+     */
+    private void readLog(int entrySize,
+                         int entrySizeIncrement,
+                         int readBufferSize,
+                         String bufferMaxSize,
+                         int expectedOverflows)
+        throws Exception {
+
+        try {
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_ITERATOR_MAX_SIZE.getName(),
+                 bufferMaxSize);
+            env = new Environment(envHome, envConfig);
+
+            envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+            /* Make a log file */
+            createLogFile(10, entrySize, entrySizeIncrement);
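+
+            /*
+             * Scan the log with a SearchFileReader that targets only the
+             * trace (LOG_TRACE) entries written above, checking each LSN and
+             * message against the expected lists.
+             */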
+            SearchFileReader reader =
+                new SearchFileReader(envImpl,
+                                     readBufferSize,
+                                     true,
+                                     DbLsn.longToLsn
+				     (expectedLsns.get(0)),
+                                     DbLsn.NULL_LSN,
+                                     LogEntryType.LOG_TRACE);
+
+            Iterator<Long> lsnIter = expectedLsns.iterator();
+            Iterator<String> valIter = expectedVals.iterator();
+            while (reader.readNextEntry()) {
+                Tracer rec = (Tracer)reader.getLastObject();
+                assertTrue(lsnIter.hasNext());
+                assertEquals(reader.getLastLsn(),
+			     DbLsn.longToLsn(lsnIter.next()));
+                assertEquals(valIter.next(), rec.getMessage());
+            }
+            assertEquals(10, reader.getNumRead());
+            assertEquals(expectedOverflows, reader.getNRepeatIteratorReads());
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Write a log file of entries and save the entries that we expect to
+     * read into lists for later verification.
+     */
+    private void createLogFile(int numItems, int size, int sizeIncrement)
+        throws IOException, DatabaseException {
+
+        LogManager logManager = envImpl.getLogManager();
+        expectedLsns = new ArrayList<Long>();
+        expectedVals = new ArrayList<String>();
+
+        for (int i = 0; i < numItems; i++) {
+            /* Add a debug record just to be filler. */
+            int recordSize = size + (i * sizeIncrement);
+            byte[] filler = new byte[recordSize];
+            Arrays.fill(filler, (byte)i);
+            String val = new String(filler);
+
+            Tracer rec = new Tracer(val);
+            long lsn = rec.log(logManager);
+            expectedLsns.add(new Long(lsn));
+            expectedVals.add(val);
+        }
+
+        logManager.flush();
+        envImpl.getFileManager().clear();
+    }
+}
diff --git a/test/com/sleepycat/je/log/FileReaderTest.java b/test/com/sleepycat/je/log/FileReaderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..fcd793385d091140309e90020fad9d8b35525dcc
--- /dev/null
+++ b/test/com/sleepycat/je/log/FileReaderTest.java
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: FileReaderTest.java,v 1.16 2008/06/30 20:54:47 linda Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+
+/**
+ * Test edge cases for file reading.
+ */
+public class FileReaderTest extends TestCase {
+
+    private File envHome;
+
+    public FileReaderTest()
+        throws Exception {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /*
+     * Check that we can handle the case when we are reading forward
+     * with other than the LastFileReader, and the last file exists but is
+     * 0 length. This case came up when a run of MemoryStress was killed off,
+     * and we then attempted to read it with DbPrintLog.
+     */
+    public void testEmptyExtraFile()
+        throws Throwable {
+	
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+
+        try {
+            /* Make an environment. */
+            env.sync();
+
+            /* Add an extra, 0 length file */
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+            File newFile = new File(envHome, "00000001.jdb");
+            newFile.createNewFile();
+
+            INFileReader reader = new INFileReader(envImpl,
+                                                   1000,
+                                                   DbLsn.NULL_LSN,
+						   DbLsn.NULL_LSN,
+                                                   false,
+                                                   false,
+                                                   DbLsn.NULL_LSN,
+                                                   DbLsn.NULL_LSN,
+                                                   null);
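+
+            /*
+             * Reading should simply stop at the zero-length file without
+             * throwing an exception.
+             */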
+            while (reader.readNextEntry()) {
+            }
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/INFileReaderTest.java b/test/com/sleepycat/je/log/INFileReaderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..df98b662909016190b36271bef81d71dc0a339a6
--- /dev/null
+++ b/test/com/sleepycat/je/log/INFileReaderTest.java
@@ -0,0 +1,458 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INFileReaderTest.java,v 1.87.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.cleaner.RecoveryUtilizationTracker;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.INDeleteInfo;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Test the INFileReader against a log of INs, BIN deltas, and INDeleteInfo
+ * entries.
+ */
+public class INFileReaderTest extends TestCase {
+
+    static private final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+    /*
+     * Need a handle onto the true environment in order to create
+     * a reader
+     */
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private long maxNodeId;
+    private List<CheckInfo> checkList;
+
+    public INFileReaderTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        Key.DUMP_TYPE = DumpType.BINARY;
+    }
+
+    public void setUp()
+	throws IOException, DatabaseException {
+
+        /*
+         * Note that we use the official Environment class to make the
+         * environment, so that everything is set up, but we then go a
+         * backdoor route to get to the underlying EnvironmentImpl class
+         * so that we don't require that the Environment.getDbEnvironment
+         * method be unnecessarily public.
+         */
+        TestUtils.removeLogFiles("Setup", envHome, false);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam
+	    (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "75");
+        envConfig.setAllowCreate(true);
+
+        /* Disable noisy UtilizationProfile database creation. */
+        DbInternal.setCreateUP(envConfig, false);
+        /* Don't checkpoint utilization info for this test. */
+        DbInternal.setCheckpointUP(envConfig, false);
+        /* Don't run the cleaner without a UtilizationProfile. */
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+
+        env = new Environment(envHome, envConfig);
+
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        envImpl = null;
+        env.close();
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Test no log file
+     */
+    public void testNoFile()
+	throws IOException, DatabaseException {
+
+        /*
+         * No extra log file is created here; the reader should find no
+         * target entries.
+         */
+        INFileReader reader = new INFileReader
+            (envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN, false, false,
+             DbLsn.NULL_LSN, DbLsn.NULL_LSN, null);
+        reader.addTargetType(LogEntryType.LOG_IN);
+        reader.addTargetType(LogEntryType.LOG_BIN);
+        reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO);
+
+        int count = 0;
+        while (reader.readNextEntry()) {
+            count += 1;
+        }
+        assertEquals("Empty file should not have entries", 0, count);
+    }
+
+    /**
+     * Run with an empty file
+     */
+    public void testEmpty()
+	throws IOException, DatabaseException {
+
+        /* Make a log file with a valid header, but no data. */
+        FileManager fileManager = envImpl.getFileManager();
+        fileManager.bumpLsn(1000000);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 10000);
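+        /* Release cached file handles before reading the log back. */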
+        fileManager.clear();
+
+        INFileReader reader = new INFileReader
+            (envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN, false, false,
+             DbLsn.NULL_LSN, DbLsn.NULL_LSN, null);
+        reader.addTargetType(LogEntryType.LOG_IN);
+        reader.addTargetType(LogEntryType.LOG_BIN);
+        reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO);
+
+        int count = 0;
+        while (reader.readNextEntry()) {
+            count += 1;
+        }
+        assertEquals("Empty file should not have entries", 0, count);
+    }
+
+    /**
+     * Run with defaults; read the whole log.
+     */
+    public void testBasic()
+	throws IOException, DatabaseException {
+
+        DbConfigManager cm = envImpl.getConfigManager();
+        doTest(50,
+               cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE),
+               0,
+               false);
+    }
+
+    /**
+     * Run with very small buffers and track node ids
+     */
+    public void testTracking()
+	throws IOException, DatabaseException {
+
+        doTest(50, // num iterations
+               10, // tiny buffer
+               0, // start lsn index
+               true); // track node ids
+    }
+
+    /**
+     * Start in the middle of the file
+     */
+    public void testMiddleStart()
+	throws IOException, DatabaseException {
+
+        doTest(50, 100, 40, true);
+    }
+
+    private void doTest(int numIters,
+                        int bufferSize,
+                        int startLsnIndex,
+                        boolean trackNodeIds)
+        throws IOException, DatabaseException {
+
+        /* Fill up a fake log file. */
+        createLogFile(numIters);
+
+        /* Decide where to start. */
+        long startLsn = DbLsn.NULL_LSN;
+        int checkIndex = 0;
+        if (startLsnIndex >= 0) {
+            startLsn = checkList.get(startLsnIndex).lsn;
+            checkIndex = startLsnIndex;
+        }
+
+        /* Use an empty utilization map for testing tracking. */
+        RecoveryUtilizationTracker tracker = trackNodeIds ?
+            (new RecoveryUtilizationTracker(envImpl)) : null;
+
+        INFileReader reader =
+            new INFileReader(envImpl, bufferSize, startLsn, DbLsn.NULL_LSN,
+                             trackNodeIds, false, DbLsn.NULL_LSN,
+                             DbLsn.NULL_LSN, tracker);
+        reader.addTargetType(LogEntryType.LOG_IN);
+        reader.addTargetType(LogEntryType.LOG_BIN);
+        reader.addTargetType(LogEntryType.LOG_BIN_DELTA);
+        reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO);
+
+        /* Read. */
+        checkLogFile(reader, checkIndex, trackNodeIds);
+    }
+
+    /**
+     * Write a log file of INs, BIN deltas, and INDeleteInfo entries for the
+     * reader to verify.
+     */
+    private void createLogFile(int numIters)
+	throws IOException, DatabaseException {
+
+        /*
+         * Create a log file full of INs, INDeleteInfo, BINDeltas and
+         * Debug Records
+         */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+        LogManager logManager = envImpl.getLogManager();
+        maxNodeId = 0;
+
+        checkList = new ArrayList<CheckInfo>();
+
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record. */
+            Tracer rec = new Tracer("Hello there, rec " + (i + 1));
+            rec.log(logManager);
+
+            /* Create, log, and save an IN. */
+            byte[] data = new byte[i + 1];
+            Arrays.fill(data, (byte) (i + 1));
+
+            byte[] key = new byte[i + 1];
+            Arrays.fill(key, (byte) (i + 1));
+
+            IN in = new IN(DbInternal.dbGetDatabaseImpl(db), key, 5, 10);
+	    in.latch(CacheMode.UNCHANGED);
+            long lsn = in.log(logManager);
+	    in.releaseLatch();
+            checkList.add(new CheckInfo(lsn, in));
+
+            if (DEBUG) {
+                System.out.println("LSN " + i + " = " + lsn);
+                System.out.println("IN " + i + " = " + in.getNodeId());
+            }
+
+            /* Add other types of INs. */
+            BIN bin = new BIN(DbInternal.dbGetDatabaseImpl(db), key, 2, 1);
+	    bin.latch(CacheMode.UNCHANGED);
+            lsn = bin.log(logManager);
+            checkList.add(new CheckInfo(lsn, bin));
+
+            /* Add provisional entries, which should get ignored. */
+            lsn = bin.log(logManager,
+        	          false, // allowDeltas,
+        	          true,  // isProvisional,
+        	          false, // proactiveMigration,
+        	          false, // backgroundIO
+        	          in);
+
+	    bin.releaseLatch();
+
+            /* Add a LN, to stress the node tracking. */
+            LN ln = new LN(data, envImpl, false /*replicated*/);
+            lsn = ln.log(envImpl,
+                         DbInternal.dbGetDatabaseImpl(db),
+                         key, DbLsn.NULL_LSN, null, false,
+                         ReplicationContext.NO_REPLICATE);
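+            /*
+             * The LN is not one of the reader's target types, so it is
+             * deliberately left out of checkList.
+             */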
+
+            /*
+             * Add an IN delete entry; it should get picked up by the reader.
+             */
+            INDeleteInfo info =
+                new INDeleteInfo(i, key, DbInternal.
+				 dbGetDatabaseImpl(db).getId());
+            lsn = logManager.log
+                (new SingleItemEntry(LogEntryType.LOG_IN_DELETE_INFO, info),
+                 ReplicationContext.NO_REPLICATE);
+            checkList.add(new CheckInfo(lsn, info));
+
+            /*
+             * Add a BINDelta. Generate it by making the first, full version
+             * provisional so the test doesn't pick it up, and then log a
+             * delta.
+             */
+            BIN binDeltaBin =
+		new BIN(DbInternal.dbGetDatabaseImpl(db), key, 10, 1);
+            maxNodeId = binDeltaBin.getNodeId();
+            binDeltaBin.latch();
+            ChildReference newEntry =
+                new ChildReference(null, key, DbLsn.makeLsn(0, 0));
+            assertTrue(binDeltaBin.insertEntry(newEntry));
+
+            lsn = binDeltaBin.log(logManager,
+        	            	  false, // allowDeltas,
+        	          	  true,  // isProvisional,
+        	          	  false, // proactiveMigration,
+                                  false, // backgroundIO
+        	          	  in);   // parent
+
+            /* Modify the bin with one entry so there can be a delta. */
+
+            byte[] keyBuf2 = new byte[2];
+            Arrays.fill(keyBuf2, (byte) (i + 2));
+            ChildReference newEntry2 =
+                new ChildReference(null, keyBuf2,
+                                   DbLsn.makeLsn(100, 101));
+            assertTrue(binDeltaBin.insertEntry(newEntry2));
+
+            assertTrue(binDeltaBin.log(logManager,
+        	                       true, // allowDeltas
+        	                       false, // isProvisional
+        	                       false, // proactiveMigration,
+                                       false, // backgroundIO
+        	                       in) ==
+		       DbLsn.NULL_LSN);
+            lsn = binDeltaBin.getLastDeltaVersion();
+            if (DEBUG) {
+                System.out.println("delta =" + binDeltaBin.getNodeId() +
+                                   " at LSN " + lsn);
+            }
+            checkList.add(new CheckInfo(lsn, binDeltaBin));
+
+            /*
+             * Reset the generation to 0 so this version of the BIN, which gets
+             * saved for unit test comparison, will compare to the version read
+             * from the log, which is initialized to 0.
+             */
+            binDeltaBin.setGeneration(0);
+            binDeltaBin.releaseLatch();
+        }
+
+        /* Flush the log and clear the file handle cache. */
+        logManager.flush();
+        envImpl.getFileManager().clear();
+    }
+
+    private void checkLogFile(INFileReader reader,
+                              int checkIndex,
+                              boolean checkMaxNodeId)
+        throws IOException, DatabaseException {
+
+        try {
+            /* Read all the INs. */
+            int i = checkIndex;
+
+            while (reader.readNextEntry()) {
+                if (DEBUG) {
+                    System.out.println("i = "
+                                       + i
+                                       + " reader.isDeleteInfo="
+                                       + reader.isDeleteInfo()
+                                       + " LSN = "
+                                       + reader.getLastLsn());
+                }
+
+                CheckInfo check = checkList.get(i);
+
+                if (reader.isDeleteInfo()) {
+                    assertEquals(check.info.getDeletedNodeId(),
+                                 reader.getDeletedNodeId());
+                    assertTrue(Arrays.equals(check.info.getDeletedIdKey(),
+                                             reader.getDeletedIdKey()));
+                    assertTrue(check.info.getDatabaseId().equals
+                               (reader.getDatabaseId()));
+
+                } else {
+
+                    /*
+		     * When comparing the check data against the data from the
+		     * log, make the dirty bits match so that they compare
+		     * equal.
+                     */
+                    IN inFromLog = reader.getIN();
+		    inFromLog.latch(CacheMode.UNCHANGED);
+                    inFromLog.setDirty(true);
+		    inFromLog.releaseLatch();
+                    IN testIN = check.in;
+		    testIN.latch(CacheMode.UNCHANGED);
+                    testIN.setDirty(true);
+		    testIN.releaseLatch();
+
+                    /*
+                     * Only check the INs we created in the test. (The others
+                     * are from the map db.)
+                     */
+                    if (reader.getDatabaseId().
+			equals(DbInternal.dbGetDatabaseImpl(db).getId())) {
+                        // The IN should match
+                        String inFromLogString = inFromLog.toString();
+                        String testINString = testIN.toString();
+                        if (DEBUG) {
+                            System.out.println("testIN=" + testINString);
+                            System.out.println("inFromLog=" + inFromLogString);
+                        }
+
+                        assertEquals("IN "
+                                     + inFromLog.getNodeId()
+                                     + " at index "
+                                     + i
+                                     + " should match.\nTestIN=" +
+                                     testIN +
+                                     "\nLogIN=" +
+                                     inFromLog,
+                                     testINString,
+                                     inFromLogString);
+                    }
+                }
+                /* The LSN should match. */
+                assertEquals
+		    ("LSN " + i + " should match",
+		     check.lsn,
+		     reader.getLastLsn());
+
+                i++;
+            }
+            assertEquals(i, checkList.size());
+            if (checkMaxNodeId) {
+                assertEquals(maxNodeId, reader.getMaxNodeId());
+            }
+        } finally {
+            db.close();
+        }
+    }
+
+    private class CheckInfo {
+        long lsn;
+        IN in;
+        INDeleteInfo info;
+
+        CheckInfo(long lsn, IN in) {
+            this.lsn = lsn;
+            this.in = in;
+            this.info = null;
+        }
+
+        CheckInfo(long lsn, INDeleteInfo info) {
+            this.lsn = lsn;
+            this.in = null;
+            this.info = info;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/IOExceptionTest.java b/test/com/sleepycat/je/log/IOExceptionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..25e8c7a4d4c5b5e6a66c62ebe665e8d1dad27d45
--- /dev/null
+++ b/test/com/sleepycat/je/log/IOExceptionTest.java
@@ -0,0 +1,967 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IOExceptionTest.java,v 1.22.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.logging.Level;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.cleaner.UtilizationProfile;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class IOExceptionTest extends TestCase {
+
+    private Environment env;
+    private Database db;
+    private File envHome;
+
+    public IOExceptionTest()
+        throws Exception {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    @Override
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    @Override
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+        if (db != null) {
+            db.close();
+        }
+
+        if (env != null) {
+            env.close();
+        }
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testRunRecoveryExceptionOnWrite()
+	throws Exception {
+
+	try {
+	    createDatabase(200000, 0, false);
+	
+	    final int N_RECS = 25;
+
+	    CheckpointConfig chkConf = new CheckpointConfig();
+	    chkConf.setForce(true);
+	    Transaction txn = env.beginTransaction(null, null);
+	    int keyInt = 0;
+	    FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+	    for (int i = 0; i < N_RECS; i++) {
+		String keyStr = Integer.toString(keyInt);
+		DatabaseEntry key =
+		    new DatabaseEntry(keyStr.getBytes());
+		DatabaseEntry data =
+		    new DatabaseEntry(("d" + keyStr).getBytes());
+		if (i == (N_RECS - 1)) {
+		    FileManager.THROW_RRE_FOR_UNIT_TESTS = true;
+		    FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+		}
+		try {
+		    assertTrue(db.put(txn, key, data) ==
+			       OperationStatus.SUCCESS);
+		} catch (DatabaseException DE) {
+		    fail("unexpected DatabaseException");
+		    break;
+		}
+	    }
+
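+	    /*
+	     * The puts above only went to the in-memory log buffer.  With the
+	     * testing flags set, the write forced by this commit fails and
+	     * surfaces as a RunRecoveryException.
+	     */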
+	    try {
+		txn.commit();
+		fail("expected DatabaseException");
+	    } catch (RunRecoveryException DE) {
+	    }
+	    forceCloseEnvOnly();
+
+	    FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+	    FileManager.THROW_RRE_FOR_UNIT_TESTS = false;
+	    db = null;
+	    env = null;
+	} catch (Throwable E) {
+	    E.printStackTrace();
+	}
+    }
+
+    public void testIOExceptionNoRecovery()
+        throws Throwable {
+
+        doIOExceptionTest(false);
+    }
+
+    public void testIOExceptionWithRecovery()
+        throws Throwable {
+
+        doIOExceptionTest(true);
+    }
+
+    public void testEviction()
+        throws Exception {
+
+        try {
+            createDatabase(200000, 0, true);
+
+            final int N_RECS = 25;
+
+            CheckpointConfig chkConf = new CheckpointConfig();
+            chkConf.setForce(true);
+            Transaction txn = env.beginTransaction(null, null);
+            int keyInt = 0;
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+            for (int i = 0; i < N_RECS; i++) {
+                String keyStr = Integer.toString(keyInt);
+                DatabaseEntry key =
+                    new DatabaseEntry(keyStr.getBytes());
+                DatabaseEntry data =
+                    new DatabaseEntry(("d" + keyStr).getBytes());
+                try {
+                    assertTrue(db.put(txn, key, data) ==
+                               OperationStatus.SUCCESS);
+                } catch (DatabaseException DE) {
+                    fail("unexpected DatabaseException");
+                    break;
+                }
+            }
+
+            try {
+                env.checkpoint(chkConf);
+                fail("expected DatabaseException");
+            } catch (DatabaseException DE) {
+            }
+
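+            /*
+             * The checkpoint fails because of the simulated IOException, but
+             * some INs/BINs should have been flushed before the failure, as
+             * the stats check below verifies.
+             */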
+            EnvironmentStats stats = env.getStats(null);
+            assertTrue((stats.getNFullINFlush() +
+                        stats.getNFullBINFlush()) > 0);
+
+            /* Read back the data and make sure it all looks ok. */
+            for (int i = 0; i < N_RECS; i++) {
+                String keyStr = Integer.toString(keyInt);
+                DatabaseEntry key =
+                    new DatabaseEntry(keyStr.getBytes());
+                DatabaseEntry data = new DatabaseEntry();
+                try {
+                    assertTrue(db.get(txn, key, data, null) ==
+                               OperationStatus.SUCCESS);
+                    assertEquals(new String(data.getData()), "d" + keyStr);
+                } catch (DatabaseException DE) {
+                    fail("unexpected DatabaseException");
+                    break;
+                }
+            }
+
+            /*
+             * Now we have some INs in the log buffer and there have been
+             * IOExceptions that will later force rewriting that buffer.
+             */
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+            try {
+                txn.commit();
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+        } catch (Exception E) {
+            E.printStackTrace();
+        }
+    }
+
+    /*
+     * Test for SR 13898.  Write out some records with
+     * IO_EXCEPTION_TESTING_ON_WRITE true thereby forcing some commits to be
+     * rewritten as aborts.  Ensure that the checksums are correct on those
+     * rewritten records by reading them back with a file reader.
+     */
+    public void testIOExceptionReadBack()
+        throws Exception {
+
+        createDatabase(100000, 1000, true);
+
+        /*
+         * Turn off daemons so we can check the size of the log
+         * deterministically.
+         */
+        EnvironmentMutableConfig newConfig = new EnvironmentMutableConfig();
+        newConfig.setConfigParam("je.env.runCheckpointer", "false");
+        newConfig.setConfigParam("je.env.runCleaner", "false");
+        env.setMutableConfig(newConfig);
+
+        final int N_RECS = 25;
+
+        /* Intentionally corrupt the transaction commit record. */
+        CheckpointConfig chkConf = new CheckpointConfig();
+        chkConf.setForce(true);
+        Transaction txn = env.beginTransaction(null, null);
+        for (int i = 0; i < N_RECS; i++) {
+            String keyStr = Integer.toString(i);
+            DatabaseEntry key =
+                new DatabaseEntry(keyStr.getBytes());
+            DatabaseEntry data =
+                new DatabaseEntry(new byte[100]);
+            try {
+                assertTrue(db.put(txn, key, data) ==
+                           OperationStatus.SUCCESS);
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+                break;
+            }
+            try {
+                FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+                txn.commit();
+                fail("expected DatabaseException");
+            } catch (DatabaseException DE) {
+            }
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+            txn = env.beginTransaction(null, null);
+        }
+
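+        /*
+         * Each commit in the loop above hit the simulated write failure, so
+         * its commit record was rewritten as an abort; the file reader
+         * created below re-reads the log to check that those rewritten
+         * records still have valid checksums.
+         */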
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+
+        try {
+            txn.commit();
+        } catch (DatabaseException DE) {
+            fail("unexpected DatabaseException");
+        }
+
+        /* Flush the corrupted records to disk. */
+        try {
+            env.checkpoint(chkConf);
+        } catch (DatabaseException DE) {
+            DE.printStackTrace();
+            fail("unexpected DatabaseException");
+        }
+
+        EnvironmentStats stats = env.getStats(null);
+        assertTrue((stats.getNFullINFlush() +
+                    stats.getNFullBINFlush()) > 0);
+
+        /*
+         * Figure out where the log starts and ends, and make a local
+         * FileReader class to mimic reading the logs. The only action we need
+         * is to run checksums on the entries.
+         */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        long lastLsn = envImpl.getFileManager().getLastUsedLsn();
+        Long firstFile =  envImpl.getFileManager().getFirstFileNum();
+        long firstLsn = DbLsn.makeLsn(firstFile, 0);
+
+        FileReader reader = new FileReader
+            (envImpl,
+             4096,              // readBufferSize
+             true,              // forward
+             firstLsn,
+             null,              // singleFileNumber
+             lastLsn,           // end of file lsn
+             DbLsn.NULL_LSN) {  // finishLsn
+
+                protected boolean processEntry(ByteBuffer entryBuffer)
+                    throws DatabaseException {
+
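+                    /*
+                     * Just skip over the entry data; checksum validation is
+                     * done by the base FileReader as entries are read.
+                     */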
+                    entryBuffer.position(entryBuffer.position() +
+                                         currentEntryHeader.getItemSize());
+                    return true;
+                }
+            };
+
+        /* Read the logs, checking checksums. */
+        while (reader.readNextEntry()) {
+        }
+
+        /* Check that the reader reads all the way to last entry. */
+        assertEquals("last=" + DbLsn.getNoFormatString(lastLsn) +
+                     " readerlast=" +
+                     DbLsn.getNoFormatString(reader.getLastLsn()),
+                     lastLsn,
+                     reader.getLastLsn());
+    }
+
+    public void testLogBufferOverflowAbortNoDupes()
+        throws Exception {
+
+        doLogBufferOverflowTest(false, false);
+    }
+
+    public void testLogBufferOverflowCommitNoDupes()
+        throws Exception {
+
+        doLogBufferOverflowTest(true, false);
+    }
+
+    public void testLogBufferOverflowAbortDupes()
+        throws Exception {
+
+        doLogBufferOverflowTest(false, true);
+    }
+
+    public void testLogBufferOverflowCommitDupes()
+        throws Exception {
+
+        doLogBufferOverflowTest(true, true);
+    }
+
+    private void doLogBufferOverflowTest(boolean abort, boolean dupes)
+        throws Exception {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            envConfig.setCacheSize(100000);
+            envConfig.setConfigParam("java.util.logging.level", "OFF");
+            env = new Environment(envHome, envConfig);
+
+            String databaseName = "ioexceptiondb";
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setTransactional(true);
+            db = env.openDatabase(null, databaseName, dbConfig);
+
+            Transaction txn = env.beginTransaction(null, null);
+            DatabaseEntry oneKey =
+                (dupes ?
+                 new DatabaseEntry("2".getBytes()) :
+                 new DatabaseEntry("1".getBytes()));
+            DatabaseEntry oneData =
+                new DatabaseEntry(new byte[10]);
+            DatabaseEntry twoKey =
+                new DatabaseEntry("2".getBytes());
+            DatabaseEntry twoData =
+                new DatabaseEntry(new byte[100000]);
+            if (dupes) {
+                DatabaseEntry temp = oneKey;
+                oneKey = oneData;
+                oneData = temp;
+                temp = twoKey;
+                twoKey = twoData;
+                twoData = temp;
+            }
+
+            try {
+                assertTrue(db.put(txn, oneKey, oneData) ==
+                           OperationStatus.SUCCESS);
+                db.put(txn, twoKey, twoData);
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+            /* Read back the data and make sure it all looks ok. */
+            try {
+                assertTrue(db.get(txn, oneKey, oneData, null) ==
+                           OperationStatus.SUCCESS);
+                assertTrue(oneData.getData().length == (dupes ? 1 : 10));
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+            try {
+                assertTrue(db.get(txn, twoKey, twoData, null) ==
+                           OperationStatus.SUCCESS);
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+            try {
+                if (abort) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+                }
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+            /* Read back the data and make sure it all looks ok. */
+            try {
+                assertTrue(db.get(null, oneKey, oneData, null) ==
+                           (abort ?
+                            OperationStatus.NOTFOUND :
+                            OperationStatus.SUCCESS));
+                assertTrue(oneData.getData().length == (dupes ? 1 : 10));
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+            try {
+                assertTrue(db.get(null, twoKey, twoData, null) ==
+                           (abort ?
+                            OperationStatus.NOTFOUND :
+                            OperationStatus.SUCCESS));
+            } catch (DatabaseException DE) {
+                fail("unexpected DatabaseException");
+            }
+
+        } catch (Exception E) {
+            E.printStackTrace();
+        }
+    }
+
+    public void testPutTransactionalWithIOException()
+        throws Throwable {
+
+        try {
+            createDatabase(100000, 0, true);
+
+            Transaction txn = env.beginTransaction(null, null);
+            int keyInt = 0;
+            String keyStr;
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+
+            /* Fill up the buffer until we see an IOException. */
+            while (true) {
+                keyStr = Integer.toString(++keyInt);
+                DatabaseEntry key = new DatabaseEntry(keyStr.getBytes());
+                DatabaseEntry data =
+                    new DatabaseEntry(("d" + keyStr).getBytes());
+                try {
+                    assertTrue(db.put(txn, key, data) ==
+                               OperationStatus.SUCCESS);
+                } catch (DatabaseException DE) {
+                    break;
+                }
+            }
+
+            /* Buffer still hasn't been written.  This should also fail. */
+            try {
+                db.put(txn,
+                       new DatabaseEntry("shouldFail".getBytes()),
+                       new DatabaseEntry("shouldFailD".getBytes()));
+                fail("expected DatabaseException");
+            } catch (IllegalStateException ISE) {
+                /* Expected. */
+            }
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+
+            /*
+             * Even with IO exceptions turned off, the txn can no longer be
+             * used, so this put should also fail.
+             */
+            try {
+                db.put(txn,
+                       new DatabaseEntry("shouldAlsoFail".getBytes()),
+                       new DatabaseEntry("shouldAlsoFailD".getBytes()));
+                fail("expected DatabaseException");
+            } catch (IllegalStateException ISE) {
+		/* Expected. */
+            }
+            txn.abort();
+
+	    /* Txn aborted.  None of the entries should be found. */
+            DatabaseEntry data = new DatabaseEntry();
+            assertTrue(db.get(null,
+                              new DatabaseEntry("shouldAlsoFail".getBytes()),
+                              data,
+                              null) == OperationStatus.NOTFOUND);
+
+            assertTrue(db.get(null,
+                              new DatabaseEntry("shouldFail".getBytes()),
+                              data,
+                              null) == OperationStatus.NOTFOUND);
+
+            assertTrue(db.get(null,
+                              new DatabaseEntry("shouldFail".getBytes()),
+                              data,
+                              null) == OperationStatus.NOTFOUND);
+
+            assertTrue(db.get(null,
+                              new DatabaseEntry(keyStr.getBytes()),
+                              data,
+                              null) == OperationStatus.NOTFOUND);
+
+            for (int i = --keyInt; i > 0; i--) {
+                keyStr = Integer.toString(i);
+                assertTrue(db.get(null,
+                                  new DatabaseEntry(keyStr.getBytes()),
+                                  data,
+                                  null) == OperationStatus.NOTFOUND);
+            }
+
+        } catch (Throwable T) {
+            T.printStackTrace();
+        }
+    }
+
+    public void testIOExceptionDuringFileFlippingWrite()
+	throws Throwable {
+
+	doIOExceptionDuringFileFlippingWrite(8, 33, 2);
+    }
+
+    private void doIOExceptionDuringFileFlippingWrite(int numIterations,
+						      int exceptionStartWrite,
+						      int exceptionWriteCount)
+	throws Throwable {
+
+	try {
+	    EnvironmentConfig envConfig = new EnvironmentConfig();
+	    DbInternal.disableParameterValidation(envConfig);
+	    envConfig.setTransactional(true);
+	    envConfig.setAllowCreate(true);
+	    envConfig.setConfigParam("je.log.fileMax", "1000");
+	    envConfig.setConfigParam("je.log.bufferSize", "1025");
+	    envConfig.setConfigParam("je.env.runCheckpointer", "false");
+	    envConfig.setConfigParam("je.env.runCleaner", "false");
+	    env = new Environment(envHome, envConfig);
+
+	    EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setTransactional(true);
+	    dbConfig.setAllowCreate(true);
+	    db = env.openDatabase(null, "foo", dbConfig);
+
+	    /* 
+	     * Put one record into the database so it gets populated with INs and
+	     * LNs, and we can fake out the RMW commits used below.
+	     */
+	    DatabaseEntry key = new DatabaseEntry();
+	    DatabaseEntry data = new DatabaseEntry();
+	    IntegerBinding.intToEntry(5, key);
+	    IntegerBinding.intToEntry(5, data);
+	    db.put(null, key, data);
+
+	    /*
+	     * Now generate trace and commit log entries. The trace records 
+	     * aren't forced out, but the commit records are forced.
+	     */
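+	    /*
+	     * As the names suggest, STOP_ON_WRITE_COUNT (set from
+	     * exceptionStartWrite) is the physical write at which the
+	     * simulated failures start, and N_BAD_WRITES (set from
+	     * exceptionWriteCount) is how many consecutive writes then fail.
+	     */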
+	    FileManager.WRITE_COUNT = 0;
+	    FileManager.THROW_ON_WRITE = true;
+	    FileManager.STOP_ON_WRITE_COUNT = exceptionStartWrite;
+	    FileManager.N_BAD_WRITES = exceptionWriteCount;
+	    for (int i = 0; i < numIterations; i++) {
+
+		try {
+		    /* Generate a non-forced record. */
+		    if (i == (numIterations - 1)) {
+
+			/*
+			 * On the last iteration, write a record that is large
+			 * enough to force a file flip (i.e. an fsync which
+			 * succeeds) followed by the large write (which doesn't
+			 * succeed due to an IOException).  In [#15754] the
+			 * large write fails on Out Of Disk Space, rolling back
+			 * the savedLSN to the previous file, even though the
+			 * file has flipped.  The subsequent write ends up in
+			 * the flipped file, but at the offset of the older
+			 * file (leaving a hole in the new flipped file).
+			 */
+			Tracer.trace(Level.SEVERE, envImpl,
+				     i + "/" + FileManager.WRITE_COUNT +
+				     " " + new String(new byte[2000]));
+		    } else {
+			Tracer.trace(Level.SEVERE, envImpl,
+				     i + "/" + FileManager.WRITE_COUNT +
+				     " " + "xx");
+		    }
+		} catch (IllegalStateException ISE) {
+		    /* Eat exception thrown by TraceLogHandler. */
+		}
+
+		/* 
+                 * Generate a forced record by calling commit. Since RMW
+                 * transactions that didn't actually do a write won't log a
+                 * commit record, do an addLogInfo to trick the txn into
+                 * logging a commit.
+                 */
+		Transaction txn = env.beginTransaction(null, null);
+		db.get(txn, key, data, LockMode.RMW);
+                DbInternal.getTxn(txn).addLogInfo(DbLsn.makeLsn(3, 3));
+		txn.commit();
+	    }
+	    db.close();
+
+	    /*
+	     * Verify that the log files are ok and have no checksum errors.
+	     */
+	    FileReader reader =
+		new FileReader(DbInternal.envGetEnvironmentImpl(env),
+			       4096, true, 0, null, DbLsn.NULL_LSN,
+			       DbLsn.NULL_LSN) {
+		    protected boolean processEntry(ByteBuffer entryBuffer)
+			throws DatabaseException {
+
+			entryBuffer.position(entryBuffer.position() +
+					     currentEntryHeader.getItemSize());
+			return true;
+		    }
+		};
+
+	    DbInternal.envGetEnvironmentImpl(env).getLogManager().flush();
+
+	    while (reader.readNextEntry()) {
+	    }
+
+	    /* Make sure the reader really did scan the files. */
+	    assert (DbLsn.getFileNumber(reader.getLastLsn()) == 3) :
+		DbLsn.toString(reader.getLastLsn());
+
+	    env.close();
+	    env = null;
+	    db = null;
+	} catch (Throwable T) {
+	    T.printStackTrace();
+	} finally {
+	    FileManager.STOP_ON_WRITE_COUNT = Long.MAX_VALUE;
+	    FileManager.N_BAD_WRITES = Long.MAX_VALUE;
+	}
+    }
+
+    /*
+     * Test the following sequence:
+     *
+     * - write an LN and commit;
+     * - write the same LN again, getting an IOException;
+     * - write another LN and verify that it fails because the txn must abort;
+     * - then either commit (which should fail and abort automatically)
+     *   followed by abort (which should also fail), or abort alone (which
+     *   should succeed since the txn is must-abort).
+     * Finally verify the UtilizationProfile, ensuring that the LSN of the LN
+     * is not marked obsolete.
+     */
+    public void testSR15761Part1()
+        throws Throwable {
+
+	doSR15761Test(true);
+    }
+
+    public void testSR15761Part2()
+        throws Throwable {
+
+	doSR15761Test(false);
+    }
+
+    private void doSR15761Test(boolean doCommit)
+	throws Throwable {
+
+        try {
+            createDatabase(100000, 0, false);
+
+            Transaction txn = env.beginTransaction(null, null);
+            int keyInt = 0;
+            String keyStr;
+
+	    keyStr = Integer.toString(keyInt);
+	    DatabaseEntry key = new DatabaseEntry(keyStr.getBytes());
+	    DatabaseEntry data = new DatabaseEntry(new byte[2888]);
+	    try {
+		assertTrue(db.put(txn, key, data) == OperationStatus.SUCCESS);
+	    } catch (DatabaseException DE) {
+		fail("should have completed");
+	    }
+	    txn.commit();
+
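+	    /*
+	     * Record the total lock count so we can verify below that the
+	     * failed transaction does not leave any locks behind.
+	     */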
+	    LockStats stats = env.getLockStats(null);
+	    int nLocksPrePut = stats.getNTotalLocks();
+	    txn = env.beginTransaction(null, null);
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+	    try {
+		data = new DatabaseEntry(new byte[10000]);
+		assertTrue(db.put(txn, key, data) == OperationStatus.SUCCESS);
+		fail("expected IOException");
+	    } catch (DatabaseException DE) {
+		/* Expected */
+	    }
+
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+	    try {
+		data = new DatabaseEntry(new byte[10]);
+		assertTrue(db.put(txn, key, data) == OperationStatus.SUCCESS);
+		fail("expected IllegalStateException");
+	    } catch (IllegalStateException ISE) {
+		/* Expected */
+	    }
+
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+	    if (doCommit) {
+		try {
+		    txn.commit();
+		    fail("expected must-abort transaction exception");
+		} catch (DatabaseException DE) {
+		    /* Expected. */
+		}
+
+		try {
+		    txn.abort();
+		    fail("expected failure on commit/abort sequence");
+		} catch (IllegalStateException ISE) {
+		    /* Expected. */
+		}
+	    } else {
+		try {
+		    txn.abort();
+		} catch (DatabaseException DE) {
+		    fail("expected abort to succeed");
+		}
+	    }
+
+	    /* Lock should not be held. */
+	    stats = env.getLockStats(null);
+	    int nLocksPostPut = stats.getNTotalLocks();
+	    assertTrue(nLocksPrePut == nLocksPostPut);
+
+	    UtilizationProfile up =
+		DbInternal.envGetEnvironmentImpl(env).getUtilizationProfile();
+
+	    /*
+	     * Checkpoint the environment to flush all utilization tracking
+	     * information before verifying.
+	     */
+	    CheckpointConfig ckptConfig = new CheckpointConfig();
+	    ckptConfig.setForce(true);
+	    env.checkpoint(ckptConfig);
+
+	    assertTrue(up.verifyFileSummaryDatabase());
+        } catch (Throwable T) {
+            T.printStackTrace();
+        }
+    }
+
+    public void testAbortWithIOException()
+        throws Throwable {
+
+        Transaction txn = null;
+        createDatabase(0, 0, true);
+        writeAndVerify(null, false, "k1", "d1", false);
+        writeAndVerify(null, true, "k2", "d2", false);
+        writeAndVerify(null, false, "k3", "d3", false);
+
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+	LockStats stats = env.getLockStats(null);
+	int nLocksPreGet = stats.getNTotalLocks();
+
+	/* Loop doing aborts until the buffer fills up and we get an IOE. */
+	while (true) {
+	    txn = env.beginTransaction(null, null);
+
+	    DatabaseEntry key = new DatabaseEntry("k1".getBytes());
+	    DatabaseEntry returnedData = new DatabaseEntry();
+
+	    /* Use RMW to get a write lock but not put anything in the log. */
+	    OperationStatus status =
+		db.get(txn, key, returnedData, LockMode.RMW);
+	    assertTrue(status == (OperationStatus.SUCCESS));
+
+	    stats = env.getLockStats(null);
+
+	    try {
+		txn.abort();
+
+		/*
+		 * Keep going until we actually get an IOException from the
+		 * buffer filling up.
+		 */
+		continue;
+	    } catch (DatabaseException DE) {
+		break;
+	    }
+	}
+
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+
+	/* Lock should not be held. */
+	stats = env.getLockStats(null);
+	int nLocksPostAbort = stats.getNTotalLocks();
+	assertTrue(nLocksPreGet == nLocksPostAbort);
+    }
+
+    private void doIOExceptionTest(boolean doRecovery)
+        throws Throwable {
+
+        Transaction txn = null;
+        createDatabase(0, 0, true);
+        writeAndVerify(null, false, "k1", "d1", doRecovery);
+        writeAndVerify(null, true, "k2", "d2", doRecovery);
+        writeAndVerify(null, false, "k3", "d3", doRecovery);
+
+        txn = env.beginTransaction(null, null);
+        writeAndVerify(txn, false, "k4", "d4", false);
+        txn.abort();
+        verify(null, true, "k4", doRecovery);
+        verify(null, false, "k1", doRecovery);
+        verify(null, false, "k3", doRecovery);
+
+        txn = env.beginTransaction(null, null);
+        writeAndVerify(txn, false, "k4", "d4", false);
+        txn.commit();
+        verify(null, false, "k4", doRecovery);
+
+        txn = env.beginTransaction(null, null);
+        writeAndVerify(txn, true, "k5", "d5", false);
+        /* Ensure that writes after IOExceptions don't succeed. */
+        writeAndVerify(txn, false, "k5a", "d5a", false);
+        txn.abort();
+        verify(null, true, "k5", doRecovery);
+        verify(null, true, "k5a", doRecovery);
+
+        txn = env.beginTransaction(null, null);
+
+	LockStats stats = env.getLockStats(null);
+	int nLocksPrePut = stats.getNTotalLocks();
+
+        writeAndVerify(txn, false, "k6", "d6", false);
+        writeAndVerify(txn, true, "k6a", "d6a", false);
+
+    	stats = env.getLockStats(null);
+        try {
+            txn.commit();
+            fail("expected DatabaseException");
+        } catch (DatabaseException DE) {
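+            /* Expected: the write buffered for k6a is flushed at commit and fails. */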
+        }
+
+	/* Lock should not be held. */
+	stats = env.getLockStats(null);
+	int nLocksPostCommit = stats.getNTotalLocks();
+	assertTrue(nLocksPrePut == nLocksPostCommit);
+
+        verify(null, true, "k6", doRecovery);
+        verify(null, true, "k6a", doRecovery);
+
+        txn = env.beginTransaction(null, null);
+        writeAndVerify(txn, false, "k6", "d6", false);
+        writeAndVerify(txn, true, "k6a", "d6a", false);
+        writeAndVerify(txn, false, "k6b", "d6b", false);
+
+        try {
+            txn.commit();
+        } catch (DatabaseException DE) {
+            fail("expected success");
+        }
+
+        /*
+         * k6a will still exist because the writeAndVerify didn't fail -- there
+         * was no write.  The write happens at commit time.
+         */
+        verify(null, false, "k6", doRecovery);
+        verify(null, false, "k6a", doRecovery);
+        verify(null, false, "k6b", doRecovery);
+    }
+
+    private void writeAndVerify(Transaction txn,
+                                boolean throwIOException,
+                                String keyString,
+                                String dataString,
+                                boolean doRecovery)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(keyString.getBytes());
+        DatabaseEntry data = new DatabaseEntry(dataString.getBytes());
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = throwIOException;
+        try {
+            assertTrue(db.put(txn, key, data) == OperationStatus.SUCCESS);
+
+            /*
+             * We don't expect an IOException if we're in a transaction because
+             * the put() only writes to the buffer, not the disk.  The write to
+             * disk doesn't happen until the commit/abort.
+             */
+            if (throwIOException && txn == null) {
+                fail("didn't catch DatabaseException.");
+            }
+        } catch (DatabaseException DE) {
+            if (!throwIOException) {
+                fail("caught DatabaseException.");
+            }
+        }
+        verify(txn, throwIOException, keyString, doRecovery);
+    }
+
+    private void verify(Transaction txn,
+                        boolean expectFailure,
+                        String keyString,
+                        boolean doRecovery)
+        throws DatabaseException {
+
+        if (doRecovery) {
+            db.close();
+            forceCloseEnvOnly();
+            createDatabase(0, 0, true);
+        }
+        DatabaseEntry key = new DatabaseEntry(keyString.getBytes());
+        DatabaseEntry returnedData = new DatabaseEntry();
+        OperationStatus status =
+            db.get(txn,
+                   key,
+                   returnedData,
+                   LockMode.DEFAULT);
+        assertTrue(status == ((expectFailure && txn == null) ?
+                              OperationStatus.NOTFOUND :
+                              OperationStatus.SUCCESS));
+    }
+
+    private void createDatabase(long cacheSize, long maxFileSize, boolean dups)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_MEM_SIZE.getName(),
+             EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
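+        /* Two minimum-size log buffers make it easy for the tests to fill the write buffer. */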
+        if (maxFileSize != 0) {
+            DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_FILE_MAX.getName(), "" + maxFileSize);
+        }
+        if (cacheSize != 0) {
+            envConfig.setCacheSize(cacheSize);
+            envConfig.setConfigParam("java.util.logging.level", "OFF");
+        }
+        env = new Environment(envHome, envConfig);
+
+        String databaseName = "ioexceptiondb";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setTransactional(true);
+        db = env.openDatabase(null, databaseName, dbConfig);
+    }
+
+    /* Force the environment to be closed even with outstanding handles.*/
+    private void forceCloseEnvOnly()
+        throws DatabaseException {
+
+        /* Close w/out checkpointing, in order to exercise recovery better.*/
+        try {
+            DbInternal.envGetEnvironmentImpl(env).close(false);
+        } catch (DatabaseException DE) {
+            if (!FileManager.IO_EXCEPTION_TESTING_ON_WRITE) {
+                throw DE;
+            } else {
+                /* Expect an exception from flushing the log manager. */
+            }
+        }
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/log/LNFileReaderTest.java b/test/com/sleepycat/je/log/LNFileReaderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f97c556fd359355bbe3a6574afe4ae7da38811f4
--- /dev/null
+++ b/test/com/sleepycat/je/log/LNFileReaderTest.java
@@ -0,0 +1,502 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LNFileReaderTest.java,v 1.101.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Test the LNFileReader
+ */
+public class LNFileReaderTest extends TestCase {
+    static private final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private List<CheckInfo> checkList;
+
+    public LNFileReaderTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        /*
+         * Note that we use the official Environment class to make the
+         * environment, so that everything is set up properly, but we then
+         * take a backdoor route to the underlying EnvironmentImpl class so
+         * that the Environment.getDbEnvironment method does not have to be
+         * made public.
+         */
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+	envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam
+	    (EnvironmentParams.LOG_FILE_MAX.getName(), "1024");
+        envConfig.setAllowCreate(true);
+	envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        envImpl = null;
+        env.close();
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Test no log file
+     */
+    public void testNoFile()
+        throws IOException, DatabaseException {
+
+        /* Unlike testEmpty, no log file is created explicitly here. */
+        LNFileReader reader =
+            new LNFileReader(envImpl,
+                             1000,             // read buffer size
+                             DbLsn.NULL_LSN,   // start lsn
+                             true,             // redo
+                             DbLsn.NULL_LSN,   // end of file lsn
+                             DbLsn.NULL_LSN,   // finish lsn
+                             null,             // single file
+                             DbLsn.NULL_LSN);  // ckpt end lsn
+        reader.addTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+        reader.addTargetType(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        assertFalse("Empty file should not have entries",
+                    reader.readNextEntry());
+    }
+
+    /**
+     * Run with an empty file.
+     */
+    public void testEmpty()
+        throws IOException, DatabaseException {
+
+        /* Make a log file with a valid header, but no data. */
+        FileManager fileManager = envImpl.getFileManager();
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 1000);
+        fileManager.clear();
+
+        LNFileReader reader =
+            new LNFileReader(envImpl,
+                             1000,             // read buffer size
+                             DbLsn.NULL_LSN,   // start lsn
+                             true,             // redo
+                             DbLsn.NULL_LSN,   // end of file lsn
+                             DbLsn.NULL_LSN,   // finish lsn
+                             null,             // single file
+                             DbLsn.NULL_LSN);  // ckpt end lsn
+        reader.addTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+        reader.addTargetType(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        assertFalse("Empty file should not have entries",
+                    reader.readNextEntry());
+    }
+
+    /**
+     * Run with defaults, read whole log for redo, going forwards.
+     */
+    public void testBasicRedo()
+        throws Throwable {
+
+        try {
+            DbConfigManager cm =  envImpl.getConfigManager();
+            doTest(50,
+                   cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE),
+                   0,
+                   false,
+                   true);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Run with defaults, read whole log for undo, going backwards.
+     */
+    public void testBasicUndo()
+        throws Throwable {
+
+        try {
+            DbConfigManager cm =  envImpl.getConfigManager();
+            doTest(50,
+                   cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE),
+                   0,
+                   false,
+                   false);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Run with very small read buffer for redo, and track LNs.
+     */
+    public void testSmallBuffersRedo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 10, 0, true, true);
+    }
+
+    /**
+     * Run with very small read buffer for undo and track LNs.
+     */
+    public void testSmallBuffersUndo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 10, 0, true, false);
+    }
+
+    /**
+     * Run with medium buffers for redo.
+     */
+    public void testMedBuffersRedo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 100, 0, false, true);
+    }
+
+    /**
+     * Run with medium buffers for undo.
+     */
+    public void testMedBuffersUndo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 100, 0, false, false);
+    }
+
+    /**
+     * Start in the middle of the file for redo.
+     */
+    public void testMiddleStartRedo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 100, 20, true, true);
+    }
+
+    /**
+     * Start in the middle of the file for undo.
+     */
+    public void testMiddleStartUndo()
+        throws IOException, DatabaseException {
+
+        doTest(50, 100, 20, true, false);
+    }
+
+    /**
+     * Create a log file, create the reader, and read the log file.
+     * @param numIters each iteration logs a debug record, a transactional
+     *           LN, a deleted duplicate LN, a non-transactional LN, and a
+     *           MapLN
+     * @param bufferSize to pass to the reader
+     * @param checkIndex where in the test data to start
+     * @param trackLNs true if we're tracking LNs, false if we're tracking
+     *           MapLNs
+     * @param redo true to read forwards for redo, false to read backwards
+     *           for undo
+     */
+    private void doTest(int numIters,
+			int bufferSize,
+			int checkIndex,
+                        boolean trackLNs,
+			boolean redo)
+        throws IOException, DatabaseException {
+
+        checkList = new ArrayList<CheckInfo>();
+
+        /* Fill up a fake log file. */
+        long endOfFileLsn = createLogFile(numIters, trackLNs, redo);
+
+        if (DEBUG) {
+            System.out.println("eofLsn = " + endOfFileLsn);
+        }
+
+        /* Decide where to start. */
+        long startLsn = DbLsn.NULL_LSN;
+        long finishLsn = DbLsn.NULL_LSN;
+        if (redo) {
+            startLsn = checkList.get(checkIndex).lsn;
+        } else {
+            /* Going backwards. Start at last check entry. */
+            int lastEntryIdx = checkList.size() - 1;
+            startLsn = checkList.get(lastEntryIdx).lsn;
+            finishLsn = checkList.get(checkIndex).lsn;
+        }
+
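+        /*
+         * For redo the reader moves forward from startLsn; for undo it moves
+         * backward from the last entry down to finishLsn.
+         */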
+        LNFileReader reader =
+	    new LNFileReader(envImpl, bufferSize, startLsn, redo, endOfFileLsn,
+			     finishLsn, null, DbLsn.NULL_LSN);
+        if (trackLNs) {
+            reader.addTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+            reader.addTargetType(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL);
+        } else {
+            reader.addTargetType(LogEntryType.LOG_MAPLN_TRANSACTIONAL);
+        }
+
+        if (!redo) {
+            reader.addTargetType(LogEntryType.LOG_TXN_COMMIT);
+        }
+
+        /* read. */
+        checkLogFile(reader, checkIndex, redo);
+    }
+
+    /**
+     * Write a log file of entries, putting the entries that we expect to
+     * read into a list for later verification.
+     * @return end of file LSN.
+     */
+    private long createLogFile(int numIters, boolean trackLNs, boolean redo)
+        throws IOException, DatabaseException {
+
+        /*
+         * Create a log file full of LNs, DeletedDupLNs, MapLNs and Debug
+         * Records
+         */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+        LogManager logManager = envImpl.getLogManager();
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        DatabaseImpl mapDbImpl = envImpl.getDbTree().getDb(DbTree.ID_DB_ID);
+
+        long lsn;
+        Txn userTxn = Txn.createTxn(envImpl, new TransactionConfig(),
+				    ReplicationContext.NO_REPLICATE);
+        long txnId = userTxn.getId();
+
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record just to be filler. */
+            Tracer rec = new Tracer("Hello there, rec " + (i+1));
+            rec.log(logManager);
+
+            /* Make a transactional LN, we expect it to be there. */
+            byte[] data = new byte[i+1];
+            Arrays.fill(data, (byte)(i+1));
+            LN ln = new LN(data, envImpl, false /* replicated */);
+            byte[] key = new byte[i+1];
+            Arrays.fill(key, (byte)(i+10));
+
+            /*
+	     * Log an LN. If we're tracking LNs add it to the verification
+	     * list.
+	     */
+            userTxn.lock(ln.getNodeId(), LockType.WRITE, false, dbImpl);
+            lsn = ln.log(envImpl, dbImpl, key,
+                         DbLsn.NULL_LSN, // oldLSN
+                         userTxn,
+                         false,          // backgroundIO
+                         ReplicationContext.NO_REPLICATE);
+
+            if (trackLNs) {
+                checkList.add(new CheckInfo(lsn, ln, key,
+                                            ln.getData(), txnId));
+            }
+
+            /* Log a deleted duplicate LN. */
+            LN deleteLN = new LN(data,
+                                 envImpl,
+                                 false); // replicated
+
+            byte[] dupKey = new byte[i+1];
+            Arrays.fill(dupKey, (byte)(i+2));
+
+            userTxn.lock(deleteLN.getNodeId(), LockType.WRITE, false, dbImpl);
+            lsn = deleteLN.delete
+                (dbImpl, key, dupKey, DbLsn.NULL_LSN,
+                 userTxn, ReplicationContext.NO_REPLICATE);
+            if (trackLNs) {
+                checkList.add(new CheckInfo(lsn, deleteLN,
+                                            dupKey, key, txnId));
+            }
+
+            /*
+	     * Make a non-transactional LN. Shouldn't get picked up by reader.
+	     */
+            LN nonTxnalLN = new LN(data,
+                                   envImpl,
+                                   false); // replicated
+            nonTxnalLN.log(envImpl, dbImpl, key,
+                           DbLsn.NULL_LSN, // oldLsn
+                           null,           // locker
+                           false,          // backgroundIO
+                           ReplicationContext.NO_REPLICATE);
+
+            /* Add a MapLN. */
+            MapLN mapLN = new MapLN(dbImpl);
+            userTxn.lock
+                (mapLN.getNodeId(), LockType.WRITE, false,
+                 mapDbImpl);
+            lsn = mapLN.log(envImpl,
+                            mapDbImpl,
+                            key, DbLsn.NULL_LSN, userTxn,
+                            false, // backgroundIO
+                            ReplicationContext.NO_REPLICATE);
+            if (!trackLNs) {
+                checkList.add(new CheckInfo(lsn, mapLN, key,
+                                            mapLN.getData(),
+                                            txnId));
+            }
+        }
+
+        long commitLsn = userTxn.commit(TransactionConfig.SYNC);
+
+        /* The commit record is a read target only in undo (backward) passes. */
+        if (!redo) {
+            checkList.add(new CheckInfo(commitLsn, null, null, null, txnId));
+        }
+
+        /* Make a marker log entry to pose as the end of file. */
+        Tracer rec = new Tracer("Pretend this is off the file");
+        long lastLsn = rec.log(logManager);
+        db.close();
+        logManager.flush();
+        envImpl.getFileManager().clear();
+        return lastLsn;
+    }
+
+    private void checkLogFile(LNFileReader reader,
+                              int checkIndex,
+                              boolean redo)
+        throws IOException, DatabaseException {
+
+        LN lnFromLog;
+        byte[] keyFromLog;
+
+        /* Read all the LNs. */
+        int i;
+        if (redo) {
+            /* start where indicated. */
+            i = checkIndex;
+        } else {
+            /* start at the end. */
+            i = checkList.size() - 1;
+        }
+        while (reader.readNextEntry()) {
+            CheckInfo expected = checkList.get(i);
+
+            /* Check LSN. */
+            assertEquals("LSN " + i + " should match",
+                         expected.lsn,
+                         reader.getLastLsn());
+
+            if (reader.isLN()) {
+
+                /* Check the LN. */
+                lnFromLog = reader.getLN();
+                LN expectedLN = expected.ln;
+                assertEquals("Should be the same type of object",
+                             expectedLN.getClass(),
+                             lnFromLog.getClass());
+
+                if (DEBUG) {
+                    if (!expectedLN.toString().equals(lnFromLog.toString())) {
+                        System.out.println("expected = " +
+                                           expectedLN.toString() +
+                                           " lnFromLog = " +
+                                           lnFromLog.toString());
+                    }
+                }
+
+                /*
+                 * Don't expect MapLNs to be equal, since they change as
+                 * logging occurs and utilization info changes.
+                 */
+                if (!(expectedLN instanceof MapLN)) {
+                    assertEquals("LN " + i + " should match",
+                                 expectedLN.toString(),
+                                 lnFromLog.toString());
+                }
+
+                /* Check the key. */
+                keyFromLog = reader.getKey();
+                byte[] expectedKey = expected.key;
+                if (DEBUG) {
+                    if (!Arrays.equals(expectedKey, keyFromLog)) {
+                        System.out.println
+                            ("expectedKey=" + Arrays.toString(expectedKey) +
+                             " logKey=" + Arrays.toString(keyFromLog));
+                    }
+                }
+
+                assertTrue("Key " + i + " should match",
+                           Arrays.equals(expectedKey, keyFromLog));
+
+                /* Check the dup key. */
+                byte[] dupKeyFromLog = reader.getDupTreeKey();
+                byte[] expectedDupKey = expected.dupKey;
+                assertTrue(Arrays.equals(expectedDupKey, dupKeyFromLog));
+
+                assertEquals(expected.txnId,
+                             reader.getTxnId().longValue());
+
+            } else {
+                /* Should be a txn commit record. */
+                assertEquals(expected.txnId,
+                             reader.getTxnCommitId());
+            }
+
+            if (redo) {
+                i++;
+            } else {
+                i--;
+            }
+        }
+        int expectedCount = checkList.size() - checkIndex;
+        assertEquals(expectedCount, reader.getNumRead());
+    }
+
+    private class CheckInfo {
+        long lsn;
+        LN ln;
+        byte[] key;
+        byte[] dupKey;
+        long txnId;
+
+        CheckInfo(long lsn, LN ln, byte[] key, byte[] dupKey, long txnId) {
+            this.lsn = lsn;
+            this.ln = ln;
+            this.key = key;
+            this.dupKey = dupKey;
+            this.txnId = txnId;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/log/LastFileReaderTest.java b/test/com/sleepycat/je/log/LastFileReaderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c403ebec0c23224da26ba7beb6777fe6456b3be8
--- /dev/null
+++ b/test/com/sleepycat/je/log/LastFileReaderTest.java
@@ -0,0 +1,522 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LastFileReaderTest.java,v 1.76.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.txn.TxnAbort;
+import com.sleepycat.je.util.BadFileFilter;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class LastFileReaderTest extends TestCase {
+
+    private DbConfigManager configManager;
+    private FileManager fileManager;
+    private LogManager logManager;
+    private File envHome;
+    private EnvironmentImpl envImpl;
+
+    public LastFileReaderTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws DatabaseException, IOException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles(envHome, new BadFileFilter());
+    }
+
+    public void tearDown()
+        throws DatabaseException, IOException {
+
+        /*
+         * Pass false to skip checkpoint, since the file manager may hold an
+         * open file that we've trashed in the tests, so we don't want to
+         * write to it here.
+         */
+        try {
+            envImpl.close(false);
+        } catch (DatabaseException e) {
+        }
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles(envHome, new BadFileFilter());
+    }
+
+    /* Create an environment, using the default log file size. */
+    private void initEnv()
+        throws DatabaseException {
+
+        initEnv(null);
+    }
+
+    /* Create an environment, specifying the log file size. */
+    private void initEnv(String logFileSize)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+        /* Don't run daemons; we do some abrupt shutdowns. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+
+        envConfig.setConfigParam
+	    (EnvironmentParams.NODE_MAX.getName(), "6");
+	envConfig.setConfigParam
+	    (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+        if (logFileSize != null) {
+	    DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_FILE_MAX.getName(), logFileSize);
+        }
+
+        /* Disable noisy UtilizationProfile database creation. */
+        DbInternal.setCreateUP(envConfig, false);
+        /* Don't checkpoint utilization info for this test. */
+        DbInternal.setCheckpointUP(envConfig, false);
+
+	envConfig.setAllowCreate(true);
+        envImpl = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+        configManager = envImpl.getConfigManager();
+        fileManager = envImpl.getFileManager();
+        logManager = envImpl.getLogManager();
+    }
+
+    /**
+     * Run with an empty file that has a file header but no log entries.
+     */
+    public void testEmptyAtEnd()
+        throws Throwable {
+
+        initEnv();
+
+        /*
+         * Make a log file with a valid header, but no data.
+         */
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 100);
+        fileManager.clear();
+
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
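+        /* The only entry present is the file header, at offset 0. */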
+        assertTrue(reader.readNextEntry());
+        assertEquals(0, DbLsn.getFileOffset(reader.getLastLsn()));
+    }
+
+    /**
+     * Run with an empty, 0 length file at the end.  This has caused a
+     * BufferUnderflowException. [#SR 12631]
+     */
+    public void testLastFileEmpty()
+        throws Throwable {
+
+        initEnv("1000");
+        int numIters = 10;
+        List<Tracer> testObjs = new ArrayList<Tracer>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        /*
+         * Create a log with one or more files. Use only Tracer objects so we
+         * can iterate through the entire log.
+         */
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record. */
+            Tracer msg = new Tracer("Hello there, rec " + (i+1));
+            testObjs.add(msg);
+            testLsns.add(new Long(msg.log(logManager)));
+        }
+        /* Flush the log, files. */
+	logManager.flush();
+        fileManager.clear();
+
+        int lastFileNum = fileManager.getAllFileNumbers().length - 1;
+
+        /*
+         * Create an extra, totally empty file.
+         */
+        fileManager.syncLogEnd();
+        fileManager.clear();
+        String emptyLastFile = fileManager.getFullFileName(lastFileNum+1,
+                                                      FileManager.JE_SUFFIX);
+
+        RandomAccessFile file =
+            new RandomAccessFile(emptyLastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+        file.close();
+
+        assertTrue(fileManager.getAllFileNumbers().length >= 2);
+
+        /*
+         * Try a LastFileReader. It should give us an end-of-log position in
+         * the penultimate file.
+         */
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
+        while (reader.readNextEntry()) {
+        }
+
+        /*
+         * The reader should be positioned at the last, valid file, skipping
+         * this 0 length file.
+         */
+        assertEquals("lastValid=" + DbLsn.toString(reader.getLastValidLsn()),
+                     lastFileNum,
+                     DbLsn.getFileNumber(reader.getLastValidLsn()));
+        assertEquals(lastFileNum, DbLsn.getFileNumber(reader.getEndOfLog()));
+    }
+
+    /**
+     * Corrupt the file header of the one and only log file.
+     */
+    public void testBadFileHeader()
+	throws Throwable {
+
+        initEnv();
+
+        /*
+         * Handle a log file that has data and a bad header. First corrupt the
+         * existing log file. We will not be able to establish log end, but
+         * won't throw away the file because it has data.
+         */
+        long lastFileNum = fileManager.getLastFileNum().longValue();
+        String lastFile =
+            fileManager.getFullFileName(lastFileNum,
+                                        FileManager.JE_SUFFIX);
+
+        RandomAccessFile file =
+            new RandomAccessFile(lastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+
+        file.seek(15);
+        file.writeBytes("putting more junk in, mess up header");
+        file.close();
+
+        /*
+         * We should see an exception on this one, because we made a file that
+         * looks like it has a bad header and bad data.
+         */
+        try {
+            LastFileReader reader = new LastFileReader(envImpl, 1000);
+            fail("Should see exception when creating " + reader);
+        } catch (DbChecksumException e) {
+            /* Eat exception, expected. */
+        }
+
+        /*
+         * Now make a bad file header, but one that is less than the size of a
+         * file header. This file ought to get moved aside.
+         */
+        file = new RandomAccessFile(lastFile, "rw");
+        file.getChannel().truncate(0);
+        file.writeBytes("bad");
+        file.close();
+
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
+        /* Nothing comes back from reader. */
+        assertFalse(reader.readNextEntry());
+        File movedFile = new File(envHome, "00000000.bad");
+        assertTrue(movedFile.exists());
+
+        /* Try a few more times, we ought to keep moving the file. */
+        file = new RandomAccessFile(lastFile, "rw");
+        file.getChannel().truncate(0);
+        file.writeBytes("bad");
+        file.close();
+
+        reader = new LastFileReader(envImpl, 1000);
+        assertTrue(movedFile.exists());
+        File movedFile1 = new File(envHome, "00000000.bad.1");
+        assertTrue(movedFile1.exists());
+    }
+
+    /**
+     * Run with defaults.
+     */
+    public void testBasic()
+        throws Throwable {
+
+        initEnv();
+        int numIters = 50;
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        fillLogFile(numIters, testLsns, testObjs);
+        LastFileReader reader =
+            new LastFileReader(envImpl,
+                               configManager.getInt
+                               (EnvironmentParams.LOG_ITERATOR_READ_SIZE));
+
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+    }
+
+    /**
+     * Run with very small read buffer.
+     */
+    public void testSmallBuffers()
+        throws Throwable {
+
+        initEnv();
+        int numIters = 50;
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        fillLogFile(numIters, testLsns, testObjs);
+        LastFileReader reader = new LastFileReader(envImpl, 10);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+    }
+
+    /**
+     * Run with medium buffers.
+     */
+    public void testMedBuffers()
+        throws Throwable {
+
+        initEnv();
+        int numIters = 50;
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        fillLogFile(numIters, testLsns, testObjs);
+        LastFileReader reader = new LastFileReader(envImpl, 100);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+    }
+
+    /**
+     * Put junk at the end of the file.
+     */
+    public void testJunk()
+        throws Throwable {
+
+        initEnv();
+        int numIters = 50;
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        /* Write junk into the end of the file. */
+        fillLogFile(numIters, testLsns, testObjs);
+        long lastFileNum = fileManager.getLastFileNum().longValue();
+        String lastFile =
+            fileManager.getFullFileName(lastFileNum,
+                                        FileManager.JE_SUFFIX);
+
+        RandomAccessFile file =
+            new RandomAccessFile(lastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+        file.seek(file.length());
+        file.writeBytes("hello, some junk");
+        file.close();
+
+        /* Read. */
+        LastFileReader reader = new LastFileReader(envImpl, 100);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+    }
+
+    /**
+     * Make a log, then make a few extra files at the end, one empty, one with
+     * a bad file header.
+     */
+    public void testExtraEmpty()
+        throws Throwable {
+
+        initEnv();
+        int numIters = 50;
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+        int defaultBufferSize =
+            configManager.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+
+        /*
+         * Make a valid log with data, then put a couple of extra files after
+         * it. Make the file numbers non-consecutive. We should have three log
+         * files.
+         */
+        /* Create a log */
+        fillLogFile(numIters, testLsns, testObjs);
+
+        /* First empty log file -- header, no data. */
+        fileManager.bumpLsn(100000000);
+        fileManager.bumpLsn(100000000);
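+        /* The two large bumps above force the next file number to be non-consecutive. */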
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 10);
+
+        /* Second empty log file -- header, no data. */
+        fileManager.bumpLsn(100000000);
+        fileManager.bumpLsn(100000000);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 10);
+
+        assertEquals(3, fileManager.getAllFileNumbers().length);
+
+        /*
+         * Corrupt the last empty file and then search for the correct last
+         * file.
+         */
+        long lastFileNum = fileManager.getLastFileNum().longValue();
+        String lastFile =
+            fileManager.getFullFileName(lastFileNum,
+                                        FileManager.JE_SUFFIX);
+        RandomAccessFile file =
+            new RandomAccessFile(lastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+        file.getChannel().truncate(10);
+        file.close();
+        fileManager.clear();
+
+        /*
+         * Make a reader, read the log. After the reader returns, we should
+         * only have 2 log files.
+         */
+        LastFileReader reader = new LastFileReader(envImpl,
+                                                   defaultBufferSize);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+        assertEquals(2, fileManager.getAllFileNumbers().length);
+
+        /*
+         * Corrupt the now "last" empty file and try again. This is actually
+         * the first empty file we made.
+         */
+        lastFileNum = fileManager.getLastFileNum().longValue();
+        lastFile = fileManager.getFullFileName(lastFileNum,
+                                               FileManager.JE_SUFFIX);
+        file = new RandomAccessFile(lastFile, FileManager.FileMode.
+                                    READWRITE_MODE.getModeValue());
+        file.getChannel().truncate(10);
+        file.close();
+
+        /*
+         * Validate that we have the right number of log entries, and only one
+         * valid log file.
+         */
+        reader = new LastFileReader(envImpl, defaultBufferSize);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+        assertEquals(1, fileManager.getAllFileNumbers().length);
+    }
+
+    /**
+     * Write a log file of entries; callers then read the end.
+     */
+    private void fillLogFile(int numIters,
+                             List<Long> testLsns,
+                             List<Loggable> testObjs)
+        throws Throwable {
+
+        /*
+         * Create a log file full of LNs and Debug Records.
+         */
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record. */
+            Tracer msg = new Tracer("Hello there, rec " + (i+1));
+            testObjs.add(msg);
+            testLsns.add(new Long(msg.log(logManager)));
+
+            /* Add a txn abort */
+            TxnAbort abort = new TxnAbort(10L, 200L,
+                                          1234567 /* masterNodeId */);
+            SingleItemEntry entry =
+                new SingleItemEntry(LogEntryType.LOG_TXN_ABORT, abort);
+            testObjs.add(abort);
+            testLsns.add(new Long(logManager.log
+                                  (entry,
+                                   ReplicationContext.NO_REPLICATE)));
+        }
+
+        /* Flush the log, files. */
+	logManager.flush();
+        fileManager.clear();
+    }
+
+    /**
+     * Use the LastFileReader to check this file, see if the log end is set
+     * right.
+     */
+    private void checkLogEnd(LastFileReader reader,
+			     int numIters,
+                             List<Long> testLsns,
+			     List<Loggable> testObjs)
+        throws Throwable {
+
+        reader.setTargetType(LogEntryType.LOG_ROOT);
+        reader.setTargetType(LogEntryType.LOG_TXN_COMMIT);
+        reader.setTargetType(LogEntryType.LOG_TXN_ABORT);
+        reader.setTargetType(LogEntryType.LOG_TRACE);
+        reader.setTargetType(LogEntryType.LOG_IN);
+        reader.setTargetType(LogEntryType.LOG_LN_TRANSACTIONAL);
+
+        /* Now ask the LastFileReader to read it back. */
+        while (reader.readNextEntry()) {
+        }
+
+        /* Truncate the file. */
+        reader.setEndOfFile();
+
+        /*
+	 * How many entries did the iterator go over? We should see
+	 *   numIters * 2 + 7
+         * (the extra 7 are the root, debug records, checkpoint records, and
+         * the file header written by recovery).
+	 */
+        assertEquals("should have seen this many entries", (numIters * 2) + 7,
+                     reader.getNumRead());
+
+        /* Check last used LSN. */
+        int numLsns = testLsns.size();
+        long lastLsn = DbLsn.longToLsn(testLsns.get(numLsns - 1));
+        assertEquals("last LSN", lastLsn, reader.getLastLsn());
+
+        /* Check last offset. */
+        assertEquals("prev offset", DbLsn.getFileOffset(lastLsn),
+                     reader.getPrevOffset());
+
+        /* Check next available LSN. */
+        int lastSize =
+            testObjs.get(testObjs.size() - 1).getLogSize();
+        assertEquals("next available",
+                     DbLsn.makeLsn(DbLsn.getFileNumber(lastLsn),
+				   DbLsn.getFileOffset(lastLsn) +
+				   LogEntryHeader.MIN_HEADER_SIZE + lastSize),
+                     reader.getEndOfLog());
+
+        /* The log should be truncated to just the right size. */
+        FileHandle handle =  fileManager.getFileHandle(0L);
+        RandomAccessFile file = handle.getFile();
+        assertEquals(DbLsn.getFileOffset(reader.getEndOfLog()),
+                     file.getChannel().size());
+        handle.release();
+        fileManager.clear();
+
+        /* Check the last tracked LSNs. */
+        assertTrue(reader.getLastSeen(LogEntryType.LOG_ROOT) !=
+		   DbLsn.NULL_LSN);
+        assertTrue(reader.getLastSeen(LogEntryType.LOG_IN) == DbLsn.NULL_LSN);
+        assertTrue(reader.getLastSeen(LogEntryType.LOG_LN_TRANSACTIONAL) ==
+		   DbLsn.NULL_LSN);
+        assertEquals(reader.getLastSeen(LogEntryType.LOG_TRACE),
+                     DbLsn.longToLsn(testLsns.get(numLsns - 2)));
+        assertEquals(reader.getLastSeen(LogEntryType.LOG_TXN_ABORT),
+                     lastLsn);
+    }
+}
diff --git a/test/com/sleepycat/je/log/LogBufferPoolTest.java b/test/com/sleepycat/je/log/LogBufferPoolTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a83e3db406cef794a33fdf6ef14dc0a2b5edacab
--- /dev/null
+++ b/test/com/sleepycat/je/log/LogBufferPoolTest.java
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogBufferPoolTest.java,v 1.66.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.util.TestUtils;
+
+public class LogBufferPoolTest extends TestCase {
+
+    Environment env;
+    Database db;
+    EnvironmentImpl environment;
+    FileManager fileManager;
+    File envHome;
+    LogBufferPool bufPool;
+
+    public LogBufferPoolTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    protected void setUp()
+        throws Exception {
+
+        /* Remove files to start with a clean slate. */
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    protected void tearDown()
+        throws Exception {
+
+        bufPool = null;
+	if (fileManager != null) {
+	    fileManager.clear();
+	    fileManager.close();
+	}
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Make sure that we'll add more buffers as needed.
+     */
+    public void testGrowBuffers()
+        throws Throwable {
+
+        try {
+
+            setupEnv(true);
+
+            /*
+             * Each buffer can only hold 2 items.  Put enough test items in to
+             * get seven buffers.
+             */
+            List<Long> lsns = new ArrayList<Long>();
+            for (int i = 0; i < 14; i++) {
+                long lsn = insertData(bufPool, (byte)(i + 1));
+                lsns.add(new Long(lsn));
+            }
+
+            /*
+             * Check that the bufPool knows where each LSN lives and that the
+             * fetched buffer does hold this item.
+             */
+            LogBuffer logBuf;
+            ByteBuffer b;
+            for (int i = 0; i < 14; i++) {
+
+                /*
+                 * For each test LSN, ask the bufpool for the logbuffer that
+                 * houses it.
+                 */
+                long testLsn = DbLsn.longToLsn(lsns.get(i));
+                logBuf = bufPool.getReadBuffer(testLsn);
+                assertNotNull(logBuf);
+
+                /* Here's the expected data. */
+                byte[] expected = new byte[10];
+                Arrays.fill(expected, (byte)(i+1));
+
+                /* Here's the data in the log buffer. */
+                byte[] logData = new byte[10];
+                b = logBuf.getDataBuffer();
+                long firstLsnInBuf = logBuf.getFirstLsn();
+                b.position((int) (DbLsn.getFileOffset(testLsn) -
+				  DbLsn.getFileOffset(firstLsnInBuf)));
+                logBuf.getDataBuffer().get(logData);
+
+                /* They'd better be equal. */
+                assertTrue(Arrays.equals(logData, expected));
+                logBuf.release();
+            }
+
+            /*
+             * This LSN shouldn't be in the buffers; it's less than any
+             * buffered item.
+             */
+            assertNull(bufPool.getReadBuffer(DbLsn.makeLsn(0,10)));
+
+            /*
+             * This LSN is illegal to ask for; it's greater than any
+             * registered LSN.
+             */
+            assertNull("LSN too big",
+                       bufPool.getReadBuffer(DbLsn.makeLsn(10, 141)));
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper to insert fake data.
+     * @return LSN registered for this fake data
+     */
+    private long insertData(LogBufferPool bufPool,
+			    byte value)
+	throws IOException, DatabaseException {
+
+        byte[] data = new byte[10];
+        Arrays.fill(data, value);
+        boolean flippedFile = fileManager.bumpLsn(data.length);
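+        /*
+         * Reserve log space for this entry; flippedFile reports whether the
+         * reservation rolled over into a new file.
+         */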
+        LogBuffer logBuf = bufPool.getWriteBuffer(data.length, flippedFile);
+        logBuf.getDataBuffer().put(data);
+        long lsn = fileManager.getLastUsedLsn();
+        bufPool.writeCompleted(fileManager.getLastUsedLsn(), false);
+        return lsn;
+    }
+
+    /**
+     * Test buffer flushes.
+     */
+    public void testBufferFlush()
+        throws Throwable {
+
+        try {
+            setupEnv(false);
+            assertFalse("There should be no files", fileManager.filesExist());
+
+            fileManager.VERIFY_CHECKSUMS = false;
+
+            /*
+	     * Each buffer can only hold 2 items. Put enough test items in to
+	     * get five buffers.
+	     */
+            /* lsnList collects the returned LSNs but is not otherwise used here. */
+            List<Long> lsnList = new ArrayList<Long>();
+            for (int i = 0; i < 9; i++) {
+                long lsn = insertData(bufPool, (byte)(i+1));
+                lsnList.add(new Long(lsn));
+            }
+            bufPool.writeBufferToFile(0);
+            fileManager.syncLogEnd();
+
+            /* We should see two files exist. */
+            String[] fileNames =
+		fileManager.listFiles(FileManager.JE_SUFFIXES);
+            assertEquals("Should be 2 files", 2, fileNames.length);
+
+            /* Read the files. */
+            if (false) { /* Disabled: verification of the written file contents. */
+            ByteBuffer dataBuffer = ByteBuffer.allocate(100);
+            FileHandle file0 = fileManager.getFileHandle(0L);
+            RandomAccessFile file = file0.getFile();
+            FileChannel channel = file.getChannel();
+            int bytesRead = channel.read(dataBuffer,
+                                         FileManager.firstLogEntryOffset());
+            dataBuffer.flip();
+            assertEquals("Check bytes read", 50, bytesRead);
+            assertEquals("Check size of file", 50, dataBuffer.limit());
+            file.close();
+            FileHandle file1 = fileManager.getFileHandle(1L);
+            file = file1.getFile();
+            channel = file.getChannel();
+            bytesRead = channel.read(dataBuffer,
+                                     FileManager.firstLogEntryOffset());
+            dataBuffer.flip();
+            assertEquals("Check bytes read", 40, bytesRead);
+            assertEquals("Check size of file", 40, dataBuffer.limit());
+            file0.release();
+            file1.release();
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    public void testTemporaryBuffers()
+	throws Exception {
+
+	final int KEY_SIZE = 10;
+	final int DATA_SIZE = 1000000;
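+        /*
+         * A 1MB record is much larger than the minimal log buffer budget,
+         * so the pool must fall back to a temporary buffer.
+         */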
+
+	tempBufferInitEnvInternal
+	    ("0", MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING);
+	DatabaseEntry key = new DatabaseEntry(new byte[KEY_SIZE]);
+	DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]);
+	db.put(null, key, data);
+	db.close();
+	env.close();
+    }
+
+    private void tempBufferInitEnvInternal(String buffSize, String cacheSize)
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+	if (!buffSize.equals("0")) {
+	    envConfig.setConfigParam("je.log.totalBufferBytes", buffSize);
+	}
+
+	if (!cacheSize.equals("0")) {
+	    envConfig.setConfigParam("je.maxMemory", cacheSize);
+	}
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+	dbConfig.setTransactional(true);
+        db = env.openDatabase(null, "InsertAndDelete", dbConfig);
+    }
+
+    private void setupEnv(boolean inMemory)
+        throws Exception {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+                                 EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+	envConfig.setConfigParam
+	    (EnvironmentParams.LOG_FILE_MAX.getName(), "90");
+	envConfig.setConfigParam
+	    (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+	envConfig.setAllowCreate(true);
+        if (inMemory) {
+            /* Make the bufPool grow some buffers. Disable writing. */
+            envConfig.setConfigParam
+		(EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true");
+        }
+        environment = new EnvironmentImpl(envHome,
+                                          envConfig,
+                                          null /*sharedCacheEnv*/,
+                                          false /*replicationIntended*/);
+
+        /* Make a standalone file manager for this test. */
+        environment.close();
+        environment.open(); /* Just sets state to OPEN. */
+        fileManager = new FileManager(environment, envHome, false);
+        bufPool = new LogBufferPool(fileManager, environment);
+
+        /*
+         * Remove any files after the environment is created again!  We want to
+         * remove the files made by recovery, so we can test the file manager
+         * in controlled cases.
+         */
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+}
diff --git a/test/com/sleepycat/je/log/LogEntryTest.java b/test/com/sleepycat/je/log/LogEntryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..da7d9ef1382a4919814bc7bd24eacc4742245712
--- /dev/null
+++ b/test/com/sleepycat/je/log/LogEntryTest.java
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntryTest.java,v 1.20.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.log.entry.LogEntry;
+
+/**
+ * Test log entry type lookup and log entry instantiation.
+public class LogEntryTest extends TestCase {
+
+    public void testEquality()
+        throws DatabaseException {
+
+        byte testTypeNum = LogEntryType.LOG_IN.getTypeNum();
+
+        /* Look it up by type */
+        LogEntryType foundType = LogEntryType.findType(testTypeNum);
+        assertEquals(foundType, LogEntryType.LOG_IN);
+        assertTrue(foundType.getSharedLogEntry() instanceof
+                   com.sleepycat.je.log.entry.INLogEntry);
+
+        /* Look it up again; the lookup should be repeatable. */
+        foundType = LogEntryType.findType(testTypeNum);
+        assertEquals(foundType, LogEntryType.LOG_IN);
+        assertTrue(foundType.getSharedLogEntry() instanceof
+                   com.sleepycat.je.log.entry.INLogEntry);
+
+        /* Get a new entry object */
+        LogEntry sharedEntry = foundType.getSharedLogEntry();
+        LogEntry newEntry = foundType.getNewLogEntry();
+
+        assertTrue(sharedEntry != newEntry);
+    }
+}
diff --git a/test/com/sleepycat/je/log/LogManagerTest.java b/test/com/sleepycat/je/log/LogManagerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..9a8ec29269901d4e6e6356926cdb6bc068a3191d
--- /dev/null
+++ b/test/com/sleepycat/je/log/LogManagerTest.java
@@ -0,0 +1,679 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogManagerTest.java,v 1.85.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Test basic log management.
+ */
+public class LogManagerTest extends TestCase {
+
+    static private final boolean DEBUG = false;
+
+    private FileManager fileManager;
+    private LogManager logManager;
+    private File envHome;
+    private EnvironmentImpl env;
+
+    public LogManagerTest() {
+        super();
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws DatabaseException, IOException  {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Log and retrieve objects, with log in memory
+     */
+    public void testBasicInMemory()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam
+            (EnvironmentParams.LOG_FILE_MAX.getName(), "1000");
+        turnOffDaemons(envConfig);
+        envConfig.setAllowCreate(true);
+        env = new EnvironmentImpl(envHome,
+                                  envConfig,
+                                  null /*sharedCacheEnv*/,
+                                  false /*replicationIntended*/);
+        fileManager = env.getFileManager();
+        logManager = env.getLogManager();
+
+        logAndRetrieve();
+        env.close();
+    }
+
+    /**
+     * Log and retrieve objects, with log completely flushed to disk
+     */
+    public void testBasicOnDisk()
+        throws Throwable {
+
+        try {
+
+            /*
+             * Force the buffers and files to be small. The log buffer is
+             * actually too small, will have to grow dynamically. Each file
+             * only holds one test item (each test item is 50 bytes).
+             */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam(
+                            EnvironmentParams.LOG_MEM_SIZE.getName(),
+                            EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+            envConfig.setConfigParam(
+                            EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            envConfig.setConfigParam(
+                            EnvironmentParams.LOG_FILE_MAX.getName(), "79");
+            envConfig.setConfigParam(
+                            EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+
+            /* Disable noisy UtilizationProfile database creation. */
+            DbInternal.setCreateUP(envConfig, false);
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(envConfig, false);
+            /* Don't run the cleaner without a UtilizationProfile. */
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+
+            /*
+             * Don't run any daemons, those emit trace messages and other log
+             * entries and mess up our accounting.
+             */
+            turnOffDaemons(envConfig);
+            envConfig.setAllowCreate(true);
+
+            /*
+             * Recreate the file manager and log manager w/different configs.
+             */
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            fileManager = env.getFileManager();
+            logManager = env.getLogManager();
+
+            logAndRetrieve();
+
+            /*
+             * Expect 13 je files, 7 to hold logged records, 1 to hold root, 3
+             * to hold recovery messages, 2 for checkpoint records
+             */
+            String[] names = fileManager.listFiles(FileManager.JE_SUFFIXES);
+            assertEquals("Should be 13 files on disk", 13, names.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Log and retrieve objects, with some of log flushed to disk, some of log
+     * in memory.
+     */
+    public void testComboDiskMemory()
+        throws Throwable {
+
+        try {
+
+            /*
+             * Force the buffers and files to be small. The log buffer is
+             * actually too small, will have to grow dynamically. Each file
+             * only holds one test item (each test item is 50 bytes)
+             */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_MEM_SIZE.getName(),
+                 EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+            envConfig.setConfigParam
+                (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+            envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                     "64");
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "6");
+
+            /* Disable noisy UtilizationProfile database creation. */
+            DbInternal.setCreateUP(envConfig, false);
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(envConfig, false);
+            /* Don't run the cleaner without a UtilizationProfile. */
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+
+            /*
+             * Don't run the cleaner or the checkpointer daemons, those create
+             * more log entries and mess up our accounting
+             */
+            turnOffDaemons(envConfig);
+            envConfig.setAllowCreate(true);
+
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            fileManager = env.getFileManager();
+            logManager = env.getLogManager();
+
+            logAndRetrieve();
+
+            /*
+             * Expect 13 je files:
+             * trace
+             * root
+             * ckptstart
+             * trace
+             * ckptend
+             * trace
+             * trace trace
+             * trace trace
+             * trace trace
+             * trace trace
+             * ckptstart
+             * trace
+             * ckptend
+             *
+             * This is based on a manual perusal of the log files and their
+             * contents. Changes in the sizes of log entries can throw this
+             * test off and require a check and a change to the assertion
+             * value.
+             */
+            String[] names = fileManager.listFiles(FileManager.JE_SUFFIXES);
+            assertEquals("Should be 13 files on disk", 13, names.length);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Log and retrieve objects, with some of log flushed to disk, some
+     * of log in memory. Force the read buffer to be very small
+     */
+    public void testFaultingIn()
+        throws Throwable {
+
+        try {
+
+            /*
+             * Force the buffers and files to be small. The log buffer is
+             * actually too small and will have to grow dynamically. We read
+             * in 32-byte chunks, so we will have to re-read. Each file only
+             * holds one test item (each test item is 50 bytes).
+             */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_MEM_SIZE.getName(),
+                 EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+            envConfig.setConfigParam
+                (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_FILE_MAX.getName(), "200");
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_FAULT_READ_SIZE.getName(), "32");
+            envConfig.setConfigParam
+                (EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setAllowCreate(true);
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            fileManager = env.getFileManager();
+            logManager = env.getLogManager();
+
+            logAndRetrieve();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Log several objects, retrieve them.
+     */
+    private void logAndRetrieve()
+        throws DatabaseException {
+
+        /* Make test loggable objects. */
+        List<Tracer> testRecs = new ArrayList<Tracer>();
+        for (int i = 0; i < 10; i++) {
+            testRecs.add(new Tracer("Hello there, rec " + (i+1)));
+        }
+
+        /* Log three of them, remember their LSNs. */
+        List<Long> testLsns = new ArrayList<Long>();
+
+        for (int i = 0; i < 3; i++) {
+            long lsn = testRecs.get(i).log(logManager);
+            if (DEBUG) {
+                System.out.println("i = " + i + " test LSN: file = " +
+                                   DbLsn.getFileNumber(lsn) +
+                                   " offset = " +
+                                   DbLsn.getFileOffset(lsn));
+            }
+            testLsns.add(new Long(lsn));
+        }
+
+        /* Ask for them back, out of order. */
+        assertEquals(testRecs.get(2),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(2))));
+        assertEquals(testRecs.get(0),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(0))));
+        assertEquals(testRecs.get(1),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(1))));
+
+        /* Intersperse logging and getting. */
+        testLsns.add(new Long(testRecs.get(3).log(logManager)));
+        testLsns.add(new Long(testRecs.get(4).log(logManager)));
+
+        assertEquals(testRecs.get(2),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(2))));
+        assertEquals(testRecs.get(4),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(4))));
+
+        /* Intersperse logging and getting. */
+        testLsns.add(new Long(testRecs.get(5).log(logManager)));
+        testLsns.add(new Long(testRecs.get(6).log(logManager)));
+        testLsns.add(new Long(testRecs.get(7).log(logManager)));
+
+        assertEquals(testRecs.get(7),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(7))));
+        assertEquals(testRecs.get(0),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(0))));
+        assertEquals(testRecs.get(6),
+                     (Tracer) logManager.get
+                     (DbLsn.longToLsn(testLsns.get(6))));
+
+        /*
+         * Check that we can retrieve log entries as byte buffers, and get the
+         * correct object back. Used by replication.
+         */
+        long lsn = testLsns.get(7).longValue();
+        ByteBuffer buffer = logManager.getByteBufferFromLog(lsn);
+
+        LogUtils.HeaderAndEntry contents =
+            LogUtils.readHeaderAndEntry(buffer,
+                                        null,  // envImpl
+                                        false,  // anticipateChecksumError
+                                        true); // readFullItem
+
+        assertEquals(testRecs.get(7),
+                     (Tracer) contents.entry.getMainItem());
+        assertEquals(LogEntryType.LOG_TRACE.getTypeNum(),
+                     contents.header.getType());
+        assertEquals(LogEntryType.LOG_VERSION,
+                     contents.header.getVersion());
+    }
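+
+    /*
+     * Illustrative helper, not part of the original patch: an LSN packs a
+     * file number and a byte offset into a single long, which is why the
+     * assertions above can recover both halves with DbLsn.getFileNumber()
+     * and DbLsn.getFileOffset(). This hypothetical round-trip check is only
+     * a sketch of that relationship.
+     */
+    private static boolean lsnRoundTrips(long fileNum, long offset) {
+        long lsn = DbLsn.makeLsn(fileNum, offset);
+        return DbLsn.getFileNumber(lsn) == fileNum &&
+               DbLsn.getFileOffset(lsn) == offset;
+    }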
+
+    private void turnOffDaemons(EnvironmentConfig envConfig) {
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                      "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(),
+                       "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                       "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+                       "false");
+    }
+
+    /**
+     * Log a few items, then hit exceptions. Make sure LSN state is correctly
+     * maintained and that items logged after the exceptions are at the correct
+     * locations on disk.
+     */
+    public void testExceptions()
+        throws Throwable {
+
+        int logBufferSize = ((int) EnvironmentParams.LOG_MEM_SIZE_MIN) / 3;
+        int numLogBuffers = 5;
+        int logBufferMemSize = logBufferSize * numLogBuffers;
+        int logFileMax = 1000;
+        int okCounter = 0;
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            DbInternal.disableParameterValidation(envConfig);
+            envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+                                     new Integer(logBufferMemSize).toString());
+            envConfig.setConfigParam
+                (EnvironmentParams.NUM_LOG_BUFFERS.getName(),
+                 new Integer(numLogBuffers).toString());
+            envConfig.setConfigParam
+                (EnvironmentParams.LOG_FILE_MAX.getName(),
+                 new Integer(logFileMax).toString());
+            envConfig.setConfigParam(
+                            EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "SEVERE");
+
+            /* Disable noisy UtilizationProfile database creation. */
+            DbInternal.setCreateUP(envConfig, false);
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(envConfig, false);
+            /* Don't run the cleaner without a UtilizationProfile. */
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+
+            /*
+             * Don't run any daemons, those emit trace messages and other log
+             * entries and mess up our accounting.
+             */
+            turnOffDaemons(envConfig);
+            envConfig.setAllowCreate(true);
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            fileManager = env.getFileManager();
+            logManager = env.getLogManager();
+
+            /* Keep track of items logged and their LSNs. */
+            ArrayList<Tracer> testRecs = new ArrayList<Tracer>();
+            ArrayList<Long> testLsns = new ArrayList<Long>();
+
+            /*
+             * Intersperse:
+             * - log successfully
+             * - log w/failure because the item doesn't fit in the log buffer
+             * - have I/O failures writing out the log
+             * Verify that all expected items can be read. Some will come
+             * from the log buffer pool.
+             * Then close and re-open the environment, to verify that
+             * all log items are faulted from disk
+             */
+
+            /* Successful log. */
+            addOkayItem(logManager, okCounter++,
+                        testRecs, testLsns, logBufferSize);
+
+            /* Item that's too big for the log buffers. */
+            attemptTooBigItem(logManager, logBufferSize, testRecs, testLsns);
+
+            /* Successful log. */
+            addOkayItem(logManager, okCounter++,
+                        testRecs, testLsns, logBufferSize);
+
+            /*
+             * This verify reads the items from the log buffers. Note that
+             * before SR #12638 was fixed (LSN state not restored properly
+             * after an exception caused by a too-small log buffer), this
+             * verify hung.
+             */
+            verifyOkayItems(logManager, testRecs, testLsns, true);
+
+            /* More successful logs, along with a few too-big items. */
+            for (;okCounter < 23; okCounter++) {
+                addOkayItem(logManager, okCounter, testRecs,
+                            testLsns, logBufferSize);
+
+                if ((okCounter % 4) == 0) {
+                    attemptTooBigItem(logManager, logBufferSize,
+                                      testRecs, testLsns);
+                }
+                /*
+                 * If we verify in the loop, sometimes we'll read from disk and
+                 * sometimes from the log buffer pool.
+                 */
+                verifyOkayItems(logManager, testRecs, testLsns, true);
+            }
+
+            /*
+             * Test the case where we flip files and write the old write buffer
+             * out before we try getting a log buffer for the new item. We need
+             * to
+             *
+             * - hit a log-too-small exception
+             * - right after, we need to log an item that is small enough
+             *   to fit in the log buffer but big enough to require that
+             *   we flip to a new file.
+             */
+            long nextLsn = fileManager.getNextLsn();
+            long fileOffset = DbLsn.getFileOffset(nextLsn);
+
+            assertTrue((logFileMax - fileOffset ) < logBufferSize);
+            attemptTooBigItem(logManager, logBufferSize, testRecs, testLsns);
+            addOkayItem(logManager, okCounter++,
+                        testRecs, testLsns, logBufferSize,
+                        ((int)(logFileMax - fileOffset)));
+            verifyOkayItems(logManager, testRecs, testLsns, true);
+
+            /* Invoke some i/o exceptions. */
+            for (;okCounter < 50; okCounter++) {
+                attemptIOException(logManager, fileManager, testRecs,
+                                   testLsns, false);
+                addOkayItem(logManager, okCounter,
+                            testRecs, testLsns, logBufferSize);
+                verifyOkayItems(logManager, testRecs, testLsns, false);
+            }
+
+            /*
+             * Finally, close this environment and re-open, and read all
+             * expected items from disk.
+             */
+            env.close();
+            envConfig.setAllowCreate(false);
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            fileManager = env.getFileManager();
+            logManager = env.getLogManager();
+            verifyOkayItems(logManager, testRecs, testLsns, false);
+
+            /* Check that we read these items off disk. */
+            EnvironmentStats stats = new EnvironmentStats();
+            StatsConfig config = new StatsConfig();
+            logManager.loadStats(config, stats);
+	    assertTrue(stats.getEndOfLog() > 0);
+            assertTrue(stats.getNNotResident() >= testRecs.size());
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    private void addOkayItem(LogManager logManager,
+                             int tag,
+                             List<Tracer> testRecs,
+                             List<Long> testLsns,
+                             int logBufferSize,
+                             int fillerLen)
+        throws DatabaseException {
+
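+        /*
+         * Pad the trace message with fillerLen filler characters so callers
+         * can control the approximate size of the logged entry (e.g. to
+         * force a flip to a new log file).
+         */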
+        String filler = new String(new byte[fillerLen]);
+        Tracer t = new Tracer("okay" + filler + tag );
+        assertTrue(logBufferSize > t.getLogSize());
+        testRecs.add(t);
+        long lsn = t.log(logManager);
+        testLsns.add(new Long(lsn));
+    }
+
+    private void addOkayItem(LogManager logManager,
+                             int tag,
+                             List<Tracer> testRecs,
+                             List<Long> testLsns,
+                             int logBufferSize)
+        throws DatabaseException {
+
+        addOkayItem(logManager, tag, testRecs, testLsns, logBufferSize, 0);
+    }
+
+    private void attemptTooBigItem(LogManager logManager,
+                                   int logBufferSize,
+                                   Tracer big,
+                                   List<Tracer> testRecs,
+                                   List<Long> testLsns) {
+        assertTrue(big.getLogSize() > logBufferSize);
+
+        try {
+            long lsn = big.log(logManager);
+            testLsns.add(new Long(lsn));
+            testRecs.add(big);
+        } catch (DatabaseException unexpected) {
+            fail("Should not have hit exception.");
+        }
+    }
+
+    private void attemptTooBigItem(LogManager logManager,
+                                   int logBufferSize,
+                                   List<Tracer> testRecs,
+                                   List<Long> testLsns) {
+        String stuff = "12345679890123456798901234567989012345679890";
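+        /*
+         * Double the payload until it reaches LOG_MEM_SIZE_MIN, ensuring the
+         * resulting Tracer is larger than the log buffers configured by this
+         * test.
+         */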
+        while (stuff.length() < EnvironmentParams.LOG_MEM_SIZE_MIN) {
+            stuff += stuff;
+        }
+        Tracer t = new Tracer(stuff);
+        attemptTooBigItem(logManager, logBufferSize, t, testRecs, testLsns);
+    }
+
+    private void attemptIOException(LogManager logManager,
+                                    FileManager fileManager,
+                                    List<Tracer> testRecs,
+                                    List<Long> testLsns,
+                                    boolean forceFlush) {
+        Tracer t = new Tracer("ioException");
+        FileManager.IO_EXCEPTION_TESTING_ON_WRITE = true;
+        try {
+
+            /*
+             * This object might get flushed to disk -- depending on whether
+             * the IOException happened before or after the copy into the
+             * log buffer. Both are valid, but the test doesn't yet
+             * know how to differentiate the cases.
+
+               testLsns.add(new Long(fileManager.getNextLsn()));
+               testRecs.add(t);
+            */
+            logManager.logForceFlush
+                (new SingleItemEntry(LogEntryType.LOG_TRACE, t),
+                 true,  // fsyncRequired
+                 ReplicationContext.NO_REPLICATE);
+            fail("expect io exception");
+        } catch (DatabaseException expected) {
+        } finally {
+            FileManager.IO_EXCEPTION_TESTING_ON_WRITE = false;
+        }
+    }
+
+    private void verifyOkayItems(LogManager logManager,
+                                 ArrayList<Tracer> testRecs,
+                                 ArrayList<Long> testLsns,
+                                 boolean checkOrder)
+        throws DatabaseException {
+
+        /* read forwards. */
+        for (int i = 0; i < testRecs.size(); i++) {
+            assertEquals(testRecs.get(i),
+                         (Tracer) logManager.get
+                         (DbLsn.longToLsn(testLsns.get(i))));
+
+        }
+
+        /* Make sure LSNs are adjacent */
+        assertEquals(testLsns.size(), testRecs.size());
+
+        if (checkOrder) {
+
+            /*
+             * TODO: sometimes an IOException entry will make it into the
+             * write buffer, and sometimes it won't. It depends on whether the
+             * IO exception was thrown before or after the loggable item was
+             * written into the buffer. I haven't figured out yet how to tell
+             * the difference, so for now, we don't check order in the portion
+             * of the test that issues IO exceptions.
+             */
+            for (int i = 1; i < testLsns.size(); i++) {
+
+                long lsn = testLsns.get(i).longValue();
+                long lsnFile = DbLsn.getFileNumber(lsn);
+                long lsnOffset = DbLsn.getFileOffset(lsn);
+                long prevLsn = testLsns.get(i-1).longValue();
+                long prevFile = DbLsn.getFileNumber(prevLsn);
+                long prevOffset = DbLsn.getFileOffset(prevLsn);
+                if (prevFile == lsnFile) {
+                    assertEquals("item " + i + "prev = " +
+                                 DbLsn.toString(prevLsn) +
+                                 " current=" + DbLsn.toString(lsn),
+                                 (testRecs.get(i-1).getLogSize() +
+                                  LogEntryHeader.MIN_HEADER_SIZE),
+                                 lsnOffset - prevOffset);
+                } else {
+                    assertEquals(prevFile+1, lsnFile);
+                    assertEquals(FileManager.firstLogEntryOffset(),
+                                 lsnOffset);
+                }
+            }
+        }
+
+        /* read backwards. */
+        for (int i = testRecs.size() - 1; i > -1; i--) {
+            assertEquals(testRecs.get(i),
+                         (Tracer) logManager.get
+                         (DbLsn.longToLsn(testLsns.get(i))));
+
+        }
+    }
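+
+    /*
+     * Illustrative helper, not part of the original patch: the adjacency
+     * check above assumes that, within a single file, each entry starts
+     * exactly one header (LogEntryHeader.MIN_HEADER_SIZE) plus the previous
+     * entry's payload past the previous entry's offset. This hypothetical
+     * sketch just restates that arithmetic.
+     */
+    private static long expectedNextOffset(long prevOffset, int prevLogSize) {
+        return prevOffset + prevLogSize + LogEntryHeader.MIN_HEADER_SIZE;
+    }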
+}
diff --git a/test/com/sleepycat/je/log/LogUtilsTest.java b/test/com/sleepycat/je/log/LogUtilsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..35d7c702694191641e75bc4f58bac01498574313
--- /dev/null
+++ b/test/com/sleepycat/je/log/LogUtilsTest.java
@@ -0,0 +1,155 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogUtilsTest.java,v 1.24.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.Calendar;
+
+import junit.framework.TestCase;
+
+/**
+ *  Test basic marshalling utilities
+ */
+public class LogUtilsTest extends TestCase {
+
+    public void testMarshalling() {
+        ByteBuffer dest = ByteBuffer.allocate(100);
+
+        // unsigned ints
+        long unsignedData = 10;
+        dest.clear();
+        LogUtils.writeUnsignedInt(dest, unsignedData);
+        assertEquals(LogUtils.UNSIGNED_INT_BYTES, dest.position());
+        dest.flip();
+        assertEquals(unsignedData, LogUtils.readUnsignedInt(dest));
+
+        unsignedData = 49249249L;
+        dest.clear();
+        LogUtils.writeUnsignedInt(dest, unsignedData);
+        assertEquals(LogUtils.UNSIGNED_INT_BYTES, dest.position());
+        dest.flip();
+        assertEquals(unsignedData, LogUtils.readUnsignedInt(dest));
+
+        // ints
+        int intData = -1021;
+        dest.clear();
+        LogUtils.writeInt(dest, intData);
+        assertEquals(LogUtils.INT_BYTES, dest.position());
+        dest.flip();
+        assertEquals(intData, LogUtils.readInt(dest));
+
+        intData = 257;
+        dest.clear();
+        LogUtils.writeInt(dest, intData);
+        assertEquals(LogUtils.INT_BYTES, dest.position());
+        dest.flip();
+        assertEquals(intData, LogUtils.readInt(dest));
+
+        // longs
+        long longData = -1021;
+        dest.clear();
+        LogUtils.writeLong(dest, longData);
+        assertEquals(LogUtils.LONG_BYTES, dest.position());
+        dest.flip();
+        assertEquals(longData, LogUtils.readLong(dest));
+
+        // byte arrays
+        byte[] byteData = new byte[] {1,2,3,4,5,6,7,8,9,10,11,12};
+        dest.clear();
+        LogUtils.writeByteArray(dest, byteData);
+        assertEquals(LogUtils.getPackedIntLogSize(12) + 12, dest.position());
+        dest.flip();
+        assertTrue(Arrays.equals(byteData,
+                                 LogUtils.readByteArray(dest,
+                                                        false/*unpacked*/)));
+
+        // Strings
+        String stringData = "Hello world!";
+        dest.clear();
+        LogUtils.writeString(dest, stringData);
+        assertEquals(LogUtils.INT_BYTES + 9, dest.position());
+        dest.flip();
+        assertEquals(stringData, LogUtils.readString(dest, false/*unpacked*/));
+
+        // Timestamps
+        Timestamp timestampData =
+            new Timestamp(Calendar.getInstance().getTimeInMillis());
+        dest.clear();
+        LogUtils.writeTimestamp(dest, timestampData);
+        assertEquals(LogUtils.getTimestampLogSize(timestampData),
+                     dest.position());
+        dest.flip();
+        assertEquals(timestampData, LogUtils.readTimestamp(dest, false));
+
+        // Booleans
+        boolean boolData = true;
+        dest.clear();
+        LogUtils.writeBoolean(dest, boolData);
+        assertEquals(1, dest.position());
+        dest.flip();
+        assertEquals(boolData, LogUtils.readBoolean(dest));
+
+        /*
+         * Test packed values with both array and direct buffers because the
+         * implementation is different when there is an array available
+         * (ByteBuffer.hasArray) or not.
+         */
+        testPacked(dest);
+        testPacked(ByteBuffer.allocateDirect(100));
+    }
+
+    private void testPacked(ByteBuffer dest) {
+
+        // packed ints
+        int intValue = 119;
+        dest.clear();
+        LogUtils.writePackedInt(dest, intValue);
+        assertEquals(1, dest.position());
+        dest.flip();
+        assertEquals(intValue, LogUtils.readPackedInt(dest));
+
+        intValue = 0xFFFF + 119;
+        dest.clear();
+        LogUtils.writePackedInt(dest, intValue);
+        assertEquals(3, dest.position());
+        dest.flip();
+        assertEquals(intValue, LogUtils.readPackedInt(dest));
+
+        intValue = Integer.MAX_VALUE;
+        dest.clear();
+        LogUtils.writePackedInt(dest, intValue);
+        assertEquals(5, dest.position());
+        dest.flip();
+        assertEquals(intValue, LogUtils.readPackedInt(dest));
+
+        // packed longs
+        long longValue = 119;
+        dest.clear();
+        LogUtils.writePackedLong(dest, longValue);
+        assertEquals(1, dest.position());
+        dest.flip();
+        assertEquals(longValue, LogUtils.readPackedLong(dest));
+
+        longValue = 0xFFFFFFFFL + 119;
+        dest.clear();
+        LogUtils.writePackedLong(dest, longValue);
+        assertEquals(5, dest.position());
+        dest.flip();
+        assertEquals(longValue, LogUtils.readPackedLong(dest));
+
+        longValue = Long.MAX_VALUE;
+        dest.clear();
+        LogUtils.writePackedLong(dest, longValue);
+        assertEquals(9, dest.position());
+        dest.flip();
+        assertEquals(longValue, LogUtils.readPackedLong(dest));
+    }
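+
+    /*
+     * Illustrative sketch, not part of the original patch: the hard-coded
+     * byte counts above could also be derived from the sizing helper used
+     * earlier in this class, LogUtils.getPackedIntLogSize(). A hypothetical
+     * variant of one of the packed-int checks:
+     */
+    private void checkPackedIntSize(ByteBuffer dest, int value) {
+        dest.clear();
+        LogUtils.writePackedInt(dest, value);
+        assertEquals(LogUtils.getPackedIntLogSize(value), dest.position());
+        dest.flip();
+        assertEquals(value, LogUtils.readPackedInt(dest));
+    }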
+}
diff --git a/test/com/sleepycat/je/log/LoggableTest.java b/test/com/sleepycat/je/log/LoggableTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..665f76664fe87b6c6d2ee3c477e58ef1bad19b43
--- /dev/null
+++ b/test/com/sleepycat/je/log/LoggableTest.java
@@ -0,0 +1,413 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LoggableTest.java,v 1.94.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.cleaner.FileSummary;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.recovery.CheckpointEnd;
+import com.sleepycat.je.recovery.CheckpointStart;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.INDeleteInfo;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.txn.TxnAbort;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.txn.TxnPrepare;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Check that every loggable object can be read in and out of a buffer
+ */
+public class LoggableTest extends TestCase {
+
+    static private final boolean DEBUG = false;
+
+    // private DocumentBuilder builder;
+    private EnvironmentImpl env;
+    private File envHome;
+    private DatabaseImpl database;
+
+    public LoggableTest()
+        throws Exception {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+	
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        env = new EnvironmentImpl(envHome,
+                                  envConfig,
+                                  null /*sharedCacheEnv*/,
+                                  false /*replicationIntended*/);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        env.close();
+    }
+
+    public void testEntryData()
+        throws Throwable {
+
+        try {
+            ByteBuffer buffer = ByteBuffer.allocate(1000);
+            database = new DatabaseImpl("foo", new DatabaseId(1),
+                                         env, new DatabaseConfig());
+
+            /*
+             * For each loggable object, can we write the entry data out?
+             */
+
+            /*
+             * Tracer records.
+             */
+            Tracer dMsg = new Tracer("Hello there");
+            writeAndRead(buffer, LogEntryType.LOG_TRACE,  dMsg, new Tracer());
+
+            /*
+             * LNs
+             */
+            String data = "abcdef";
+            LN ln = new LN(data.getBytes(), env, false /* replicated */);
+            LN lnFromLog = new LN();
+            writeAndRead(buffer, LogEntryType.LOG_LN, ln, lnFromLog);
+            lnFromLog.verify(null);
+            assertTrue(LogEntryType.LOG_LN.marshallOutsideLatch());
+
+            FileSummaryLN fsLN = new FileSummaryLN(env, new FileSummary());
+            FileSummaryLN fsLNFromLog = new FileSummaryLN();
+            writeAndRead(buffer, LogEntryType.LOG_FILESUMMARYLN,
+                         fsLN, fsLNFromLog);
+            assertFalse(
+                   LogEntryType.LOG_FILESUMMARYLN.marshallOutsideLatch());
+
+            /*
+             * INs
+             */
+            IN in = new IN(database,
+                           new byte[] {1,0,1,0},
+                           7, 5);
+	    in.latch();
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {1,0,1,0},
+                                              DbLsn.makeLsn(12, 200)));
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {1,1,1,0},
+                                              DbLsn.makeLsn(29, 300)));
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {0,0,1,0},
+                                              DbLsn.makeLsn(35, 400)));
+
+            /* Write it. */
+            IN inFromLog = new IN();
+	    inFromLog.latch();
+            writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
+	    inFromLog.releaseLatch();
+	    in.releaseLatch();
+
+	    /*
+	     * IN - long form
+	     */
+            in = new IN(database,
+			new byte[] {1,0,1,0},
+			7, 5);
+	    in.latch();
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {1,0,1,0},
+                                              DbLsn.makeLsn(12, 200)));
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {1,1,1,0},
+                                              DbLsn.makeLsn(29, 300)));
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {0,0,1,0},
+                                              DbLsn.makeLsn(1235, 400)));
+            in.insertEntry(new ChildReference(null,
+                                              new byte[] {0,0,1,0},
+                                              DbLsn.makeLsn(0xFFFFFFF0L, 400)));
+
+            /* Write it. */
+            inFromLog = new IN();
+	    inFromLog.latch();
+            writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
+	    inFromLog.releaseLatch();
+	    in.releaseLatch();
+
+            /*
+             * BINs
+             */
+            BIN bin = new BIN(database,
+                              new byte[] {3,2,1},
+                              8, 5);
+            bin.latch();
+            bin.insertEntry(new ChildReference(null,
+                                               new byte[] {1,0,1,0},
+                                               DbLsn.makeLsn(212, 200)));
+            bin.insertEntry(new ChildReference(null,
+                                               new byte[] {1,1,1,0},
+                                               DbLsn.makeLsn(229, 300)));
+            bin.insertEntry(new ChildReference(null,
+                                               new byte[] {0,0,1,0},
+                                               DbLsn.makeLsn(235, 400)));
+            BIN binFromLog = new BIN();
+	    binFromLog.latch();
+            writeAndRead(buffer, LogEntryType.LOG_BIN, bin, binFromLog);
+            binFromLog.verify(null);
+	    binFromLog.releaseLatch();
+            bin.releaseLatch();
+
+            /*
+             * DINs
+             */
+            DIN din = new DIN(database,
+                              new byte[] {1,0,0,1},
+                              7,
+                              new byte[] {0,1,1,0},
+                              new ChildReference(null,
+                                                 new byte[] {1,0,0,1},
+                                                 DbLsn.makeLsn(10, 100)),
+                              5);
+	    din.latch();
+            din.insertEntry(new ChildReference(null,
+                                               new byte[] {1,0,1,0},
+                                               DbLsn.makeLsn(12, 200)));
+            din.insertEntry(new ChildReference(null,
+                                               new byte[] {1,1,1,0},
+                                               DbLsn.makeLsn(29, 300)));
+            din.insertEntry(new ChildReference(null,
+                                               new byte[] {0,0,1,0},
+                                               DbLsn.makeLsn(35, 400)));
+
+            /* Write it. */
+            DIN dinFromLog = new DIN();
+	    dinFromLog.latch();
+            writeAndRead(buffer, LogEntryType.LOG_DIN, din, dinFromLog);
+	    din.releaseLatch();
+	    dinFromLog.releaseLatch();
+
+            /*
+             * DBINs
+             */
+            DBIN dbin = new DBIN(database,
+                                 new byte[] {3,2,1},
+                                 8,
+                                 new byte[] {1,2,3},
+                                 5);
+            dbin.latch();
+            dbin.insertEntry(new ChildReference(null,
+                                                new byte[] {1,0,1,0},
+                                                DbLsn.makeLsn(212, 200)));
+            dbin.insertEntry(new ChildReference(null,
+                                                new byte[] {1,1,1,0},
+                                                DbLsn.makeLsn(229, 300)));
+            dbin.insertEntry(new ChildReference(null,
+                                                new byte[] {0,0,1,0},
+                                                DbLsn.makeLsn(235, 400)));
+            DBIN dbinFromLog = new DBIN();
+	    dbinFromLog.latch();
+            writeAndRead(buffer, LogEntryType.LOG_DBIN, dbin, dbinFromLog);
+            dbinFromLog.verify(null);
+            dbin.releaseLatch();
+	    dbinFromLog.releaseLatch();
+
+            /*
+             * Root
+             */
+            DbTree dbTree = new DbTree(env, false /* replicationIntended */);
+            DbTree dbTreeFromLog = new DbTree();
+            writeAndRead(buffer, LogEntryType.LOG_ROOT, dbTree, dbTreeFromLog);
+            dbTree.close();
+
+            /*
+             * MapLN
+             */
+            MapLN mapLn = new MapLN(database);
+            MapLN mapLnFromLog = new MapLN();
+            writeAndRead(buffer, LogEntryType.LOG_MAPLN, mapLn, mapLnFromLog);
+
+            /*
+             * UserTxn
+             */
+
+            /*
+	     * Disabled for now because these txns don't compare equal,
+             * because one has a name of "main" and the other has a name of
+             * null because it was read from the log.
+
+	     Txn txn = new Txn(env, new TransactionConfig());
+	     Txn txnFromLog = new Txn();
+	     writeAndRead(buffer, LogEntryType.TXN_COMMIT, txn, txnFromLog);
+	     txn.commit();
+            */
+
+
+            /*
+             * TxnCommit
+             */
+            TxnCommit commit = new TxnCommit(111, DbLsn.makeLsn(10, 10),
+            		                         179 /* masterNodeId */);
+            TxnCommit commitFromLog = new TxnCommit();
+            writeAndRead(buffer, LogEntryType.LOG_TXN_COMMIT, commit,
+                         commitFromLog);
+
+            /*
+             * TxnAbort
+             */
+            TxnAbort abort = new TxnAbort(111, DbLsn.makeLsn(11, 11),
+            		                      7654321 /* masterNodeId*/);
+            TxnAbort abortFromLog = new TxnAbort();
+            writeAndRead(buffer, LogEntryType.LOG_TXN_ABORT,
+                         abort, abortFromLog);
+
+            /*
+             * TxnPrepare
+             */
+	    byte[] gid = new byte[64];
+	    byte[] bqual = new byte[64];
+            TxnPrepare prepare =
+		new TxnPrepare(111, new LogUtils.XidImpl(1, gid, bqual));
+            TxnPrepare prepareFromLog = new TxnPrepare();
+            writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare,
+                         prepareFromLog);
+
+            prepare =
+		new TxnPrepare(111, new LogUtils.XidImpl(1, null, bqual));
+            prepareFromLog = new TxnPrepare();
+            writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE,
+                         prepare, prepareFromLog);
+
+            prepare =
+		new TxnPrepare(111, new LogUtils.XidImpl(1, gid, null));
+            prepareFromLog = new TxnPrepare();
+            writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE,
+                         prepare, prepareFromLog);
+
+            /*
+             * IN delete info
+             */
+            INDeleteInfo info = new INDeleteInfo(77, new byte[1],
+                                                 new DatabaseId(100));
+            INDeleteInfo infoFromLog = new INDeleteInfo();
+            writeAndRead(buffer, LogEntryType.LOG_IN_DELETE_INFO,
+                         info, infoFromLog);
+
+            /*
+             * Checkpoint start
+             */
+            CheckpointStart start = new CheckpointStart(177, "test");
+            CheckpointStart startFromLog = new CheckpointStart();
+            writeAndRead(buffer, LogEntryType.LOG_CKPT_START,
+                         start, startFromLog);
+
+            /*
+             * Checkpoint end
+             */
+            CheckpointEnd end =
+                new CheckpointEnd("test",
+                                  DbLsn.makeLsn(20, 55),
+                                  env.getRootLsn(),
+                                  env.getTxnManager().getFirstActiveLsn(),
+                                  env.getNodeSequence().getLastLocalNodeId(),
+                                  env.getNodeSequence()
+                                  .getLastReplicatedNodeId(),
+                                  env.getDbTree().getLastLocalDbId(),
+                                  env.getDbTree().getLastReplicatedDbId(),
+                                  env.getTxnManager().getLastLocalTxnId(),
+                                  env.getTxnManager().getLastReplicatedTxnId(),
+                                  177);
+            CheckpointEnd endFromLog = new CheckpointEnd();
+            writeAndRead(buffer, LogEntryType.LOG_CKPT_END,  end, endFromLog);
+
+            /* 
+             * Mimic what happens when the environment is closed.
+             */
+            database.releaseTreeAdminMemory();
+        
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Helper which takes a Loggable, writes it, reads it back, and
+     * checks for equality and size.
+     */
+    private void writeAndRead(ByteBuffer buffer,
+                              LogEntryType entryType,
+			      Loggable orig,
+                              Loggable fromLog)
+        throws Exception {
+
+        /* Write it. */
+        buffer.clear();
+        orig.writeToLog(buffer);
+
+        /* Check the log size. */
+        buffer.flip();
+        assertEquals(buffer.limit(), orig.getLogSize());
+
+        /*
+	 * Read it and compare sizes. Note that we assume we're testing
+	 * objects that are readable and writable to the log.
+	 */
+        fromLog.readFromLog(buffer, LogEntryType.LOG_VERSION);
+        assertEquals(orig.getLogSize(), fromLog.getLogSize());
+
+        assertEquals("We should have read the whole buffer for " +
+                     fromLog.getClass().getName(),
+                     buffer.limit(), buffer.position());
+
+        /* Compare contents. */
+        StringBuffer sb1 = new StringBuffer();
+        StringBuffer sb2 = new StringBuffer();
+        orig.dumpLog(sb1, true);
+        fromLog.dumpLog(sb2, true);
+
+        if (DEBUG) {
+            System.out.println("sb1 = " + sb1.toString());
+            System.out.println("sb2 = " + sb2.toString());
+        }
+        assertEquals("Not equals for " +
+                     fromLog.getClass().getName(),
+                     sb1.toString(), sb2.toString());
+
+        /* Validate that the dump string is valid XML. */
+        //        builder = factory.newDocumentBuilder();
+        //        builder.parse("<?xml version=\"1.0\" ?>");
+        //                      sb1.toString()+
+    }
+}
diff --git a/test/com/sleepycat/je/log/TestUtilLogReader.java b/test/com/sleepycat/je/log/TestUtilLogReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..4901710a91c4d151519eff62d6e2d7298cec64b6
--- /dev/null
+++ b/test/com/sleepycat/je/log/TestUtilLogReader.java
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestUtilLogReader.java,v 1.11.2.3 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.log;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Instantiates all log entries using the shared log entry instances.
+ */
+public class TestUtilLogReader extends FileReader {
+
+    private LogEntryType entryType;
+    private LogEntry entry;
+    private boolean readFullItem;
+
+    public TestUtilLogReader(EnvironmentImpl env, boolean readFullItem)
+        throws IOException, DatabaseException {
+
+        super(env,
+              4096,
+              true,
+              DbLsn.NULL_LSN,
+              null,
+              DbLsn.NULL_LSN,
+              DbLsn.NULL_LSN);
+        this.readFullItem = readFullItem;
+    }
+
+    public TestUtilLogReader(EnvironmentImpl env,
+                             int readBufferSize,
+                             boolean forward,
+                             long startLsn,
+                             Long singleFileNumber,
+                             long endOfFileLsn,
+                             long finishLsn)
+        throws IOException, DatabaseException {
+
+        super(env,
+              readBufferSize,
+              forward,
+              startLsn,
+              singleFileNumber,
+              endOfFileLsn,
+              finishLsn);
+    }
+
+    public LogEntryType getEntryType() {
+        return entryType;
+    }
+
+    public int getEntryVersion() {
+        return currentEntryHeader.getVersion();
+    }
+
+    public LogEntry getEntry() {
+        return entry;
+    }
+
+    protected boolean isTargetEntry() {
+        return true;
+    }
+
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        entryType = LogEntryType.findType(currentEntryHeader.getType());
+        entry = entryType.getSharedLogEntry();
+        entry.readEntry(currentEntryHeader,
+                        entryBuffer,
+                        readFullItem);
+        return true;
+    }
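+
+    /*
+     * Illustrative usage sketch, not part of the original patch: tests
+     * typically drain this reader in a loop using readNextEntry(), which is
+     * inherited from FileReader, e.g.
+     *
+     *    TestUtilLogReader reader =
+     *        new TestUtilLogReader(envImpl, true);  // readFullItem
+     *    while (reader.readNextEntry()) {
+     *        LogEntryType type = reader.getEntryType();
+     *        LogEntry entry = reader.getEntry();
+     *        // inspect type and entry ...
+     *    }
+     */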
+}
diff --git a/test/com/sleepycat/je/logversion/LogEntryVersionTest.java b/test/com/sleepycat/je/logversion/LogEntryVersionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7933db245e8164ac20675e87adbd966aa55eed19
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/LogEntryVersionTest.java
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntryVersionTest.java,v 1.24.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.logversion;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.LineNumberReader;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.TestUtilLogReader;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests that prior versions of log entries can be read.  This test is used in
+ * conjunction with MakeLogEntryVersionData, a main program that was used once
+ * to generate log files named je-x.y.z.jdb, where x.y.z is the version of JE
+ * used to create the log.  When a new test log file is created with
+ * MakeLogEntryVersionData, add a new test_x_y_z() method to this class.
+ *
+ * @see MakeLogEntryVersionData
+ */
+public class LogEntryVersionTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db1;
+    private Database db2;
+
+    public LogEntryVersionTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        db1 = null;
+        db2 = null;
+    }
+
+    private void openEnv(String jeVersion, boolean readOnly)
+        throws DatabaseException, IOException {
+
+        /* Copy log file resource to log file zero. */
+        String resName = "je-" + jeVersion + ".jdb";
+        TestUtils.loadLog(getClass(), resName, envHome);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(false);
+        envConfig.setReadOnly(readOnly);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(readOnly);
+        dbConfig.setSortedDuplicates(true);
+        db1 = env.openDatabase(null, Utils.DB1_NAME, dbConfig);
+        db2 = env.openDatabase(null, Utils.DB2_NAME, dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        db1.close();
+        db1 = null;
+        db2.close();
+        db2 = null;
+        env.close();
+        env = null;
+    }
+
+    public void test_1_5_4()
+        throws DatabaseException, IOException {
+
+        doTest("1.5.4");
+    }
+
+    public void test_1_7_0()
+        throws DatabaseException, IOException {
+
+        doTest("1.7.0");
+    }
+
+    /**
+     * JE 2.0: FileHeader version 3.
+     */
+    public void test_2_0_0()
+        throws DatabaseException, IOException {
+
+        doTest("2.0.0");
+    }
+
+    /**
+     * JE 3.0.12: FileHeader version 4.
+     */
+    public void test_3_0_12()
+        throws DatabaseException, IOException {
+
+        /*
+         * The test was not run until JE 3.1.25, but no format changes were
+         * made between 3.0.12 and 3.1.25.
+         */
+        doTest("3.1.25");
+    }
+
+    /**
+     * JE 3.2.79: FileHeader version 5. Version 5 was actually introduced in
+     * 3.2.22.
+     */
+    public void test_3_2_79()
+        throws DatabaseException, IOException {
+
+        doTest("3.2.79");
+    }
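+
+    /*
+     * Illustrative sketch, not part of the original patch: per the class
+     * comment, a test for a future log file generated by
+     * MakeLogEntryVersionData would follow the same pattern:
+     *
+     *    public void test_x_y_z()
+     *        throws DatabaseException, IOException {
+     *
+     *        doTest("x.y.z");
+     *    }
+     */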
+
+    private void doTest(String jeVersion)
+        throws DatabaseException, IOException {
+
+        openEnv(jeVersion, true /*readOnly*/);
+
+        VerifyConfig verifyConfig = new VerifyConfig();
+        verifyConfig.setAggressive(true);
+        assertTrue(env.verify(verifyConfig, System.err));
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Database 1 is empty because the txn was aborted. */
+        Cursor cursor = db1.openCursor(null, null);
+        try {
+            status = cursor.getFirst(key, data, null);
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } finally {
+            cursor.close();
+        }
+
+        /* Database 2 has one record: {3, 0} */
+        cursor = db2.openCursor(null, null);
+        try {
+            status = cursor.getFirst(key, data, null);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(3, Utils.value(key));
+            assertEquals(0, Utils.value(data));
+            status = cursor.getNext(key, data, null);
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } finally {
+            cursor.close();
+        }
+
+        /*
+         * Database 3 should have one record (99,79) that was explicitly
+         * committed. We only added this commit record and test case when
+         * implementing JE 3.3, and only went to the trouble of backporting the
+         * MakeLogEntryVersionData to file version 5. (It's just an additional
+         * test; it should be fine for earlier versions.)
+         */
+        if (!((jeVersion.startsWith("1")) ||
+              (jeVersion.startsWith("2")) ||
+              (jeVersion.startsWith("3.1")))) {
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            Database db3 = env.openDatabase(null, Utils.DB3_NAME, dbConfig);
+
+            cursor = db3.openCursor(null, null);
+            try {
+                status = cursor.getFirst(key, data, null);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(99, Utils.value(key));
+                assertEquals(79, Utils.value(data));
+                status = cursor.getNext(key, data, null);
+                assertEquals(OperationStatus.NOTFOUND, status);
+            } finally {
+                cursor.close();
+                db3.close();
+            }
+        }
+
+        /* 
+         * Verify log entry types using a log reader. Read both full and
+         * partial items. 
+         */
+        String resName = "je-" + jeVersion + ".txt";
+        LineNumberReader textReader = new LineNumberReader
+            (new InputStreamReader(getClass().getResourceAsStream(resName)));
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        TestUtilLogReader fullLogReader = 
+            new TestUtilLogReader(envImpl, true /* readFullItem */);
+        TestUtilLogReader partialLogReader = 
+            new TestUtilLogReader(envImpl, false /* readFullItem */);
+
+        String expectedType = textReader.readLine();
+        while (expectedType != null) {
+            /* Read the full item. */
+            assertTrue(fullLogReader.readNextEntry());
+            String foundType = fullLogReader.getEntryType().toString();
+            assertEquals
+                ("At line " + textReader.getLineNumber(),
+                 expectedType.substring(0, expectedType.indexOf('/')),
+                 foundType);
+            
+            /*
+             * Read a partial item to mimic recovery. So far, partial reads are
+             * only used for LNLogEntries, and must provide the node id,
+             * database id (the database this node is a part of), and the txn id.
+             */
+            assertTrue(partialLogReader.readNextEntry());
+            foundType = partialLogReader.getEntryType().toString();
+
+            LogEntry entry = partialLogReader.getEntry();
+            if (entry instanceof LNLogEntry) {
+                assertTrue(((LNLogEntry) entry).getDbId() != null);
+                assertTrue(((LNLogEntry) entry).getNodeId() >= 0);
+
+                /* 
+                 * Sometimes the txnId is null -- just make sure we don't get
+                 * a NullPointerException.
+                 */
+                @SuppressWarnings("unused")
+                Long txnId = ((LNLogEntry) entry).getTxnId();
+            }
+
+            assertEquals
+                ("At line " + textReader.getLineNumber(),
+                 expectedType.substring(0, expectedType.indexOf('/')),
+                 foundType);
+                 
+            expectedType = textReader.readLine();
+        }
+        assertTrue("Expected to read at least one line from " + resName,
+                   textReader.getLineNumber() > 0);
+        assertFalse("No more entries expected after line " +
+                    textReader.getLineNumber() + " but found: " +
+                    fullLogReader.getEntry(),
+                    fullLogReader.readNextEntry());
+
+        assertTrue(env.verify(verifyConfig, System.err));
+        closeEnv();
+
+        /*
+         * Do enough inserts to cause a split and perform some other write
+         * operations for good measure.
+         */
+        openEnv(jeVersion, false /*readOnly*/);
+        for (int i = -127; i < 127; i += 1) {
+            status = db2.put(null, Utils.entry(i), Utils.entry(0));
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        /* Do updates. */
+        for (int i = -127; i < 127; i += 1) {
+            status = db2.put(null, Utils.entry(i), Utils.entry(1));
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        /* Do deletes. */
+        for (int i = -127; i < 127; i += 1) {
+            status = db2.delete(null, Utils.entry(i));
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        /* Same for duplicates. */
+        for (int i = -127; i < 127; i += 1) {
+            status = db2.put(null, Utils.entry(0), Utils.entry(i));
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        for (int i = -127; i < 127; i += 1) {
+            status = db2.put(null, Utils.entry(0), Utils.entry(i));
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        cursor = db2.openCursor(null, null);
+        try {
+            status = cursor.getFirst(key, data, null);
+            while (status == OperationStatus.SUCCESS) {
+                status = cursor.delete();
+                assertEquals(OperationStatus.SUCCESS, status);
+                status = cursor.getNext(key, data, null);
+            }
+        } finally {
+            cursor.close();
+        }
+
+        assertTrue(env.verify(verifyConfig, System.err));
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java b/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..338c374613ad51bc93cf0deb1425b1dddc5b7b6a
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogHeaderVersionTest.java,v 1.10.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.logversion;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogException;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests log file header versioning.  This test is used in conjunction with
+ * MakeLogHeaderVersionData, a main program that was used once to generate two
+ * log files with maximum and minimum valued header version numbers.
+ *
+ * @see MakeLogHeaderVersionData
+ */
+public class LogHeaderVersionTest extends TestCase {
+
+    private File envHome;
+
+    public LogHeaderVersionTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+    }
+
+    /**
+     * Tests that an exception is thrown when a log header is read with a newer
+     * version than the current version.  The maxversion.jdb log file is loaded
+     * as a resource by this test and written as a regular log file.  When the
+     * environment is opened, we expect a LogException.
+     */
+    public void testGreaterVersionNotAllowed()
+        throws DatabaseException, IOException {
+
+        TestUtils.loadLog(getClass(), Utils.MAX_VERSION_NAME, envHome);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(false);
+        envConfig.setTransactional(true);
+
+        try {
+            Environment env = new Environment(envHome, envConfig);
+            try {
+                env.close();
+            } catch (Exception ignore) {}
+        } catch (DatabaseException e) {
+            if (e.getCause() instanceof LogException) {
+                /* Got LogException as expected. */
+                return;
+            }
+        }
+        fail("Expected LogException");
+    }
+
+    /**
+     * Tests that when a file is opened with a lesser version than the current
+     * version, a new log file is started for writing new log entries.  This is
+     * important so that the new header version is written even if no new log
+     * file is needed.  If the new version were not written, an older version
+     * of JE would not recognize that there had been a version change.
+     */
+    public void testLesserVersionNotUpdated()
+        throws DatabaseException, IOException {
+
+        TestUtils.loadLog(getClass(), Utils.MIN_VERSION_NAME, envHome);
+        File logFile = new File(envHome, TestUtils.LOG_FILE_NAME);
+        long origFileSize = logFile.length();
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(false);
+        envConfig.setTransactional(true);
+
+        Environment env = new Environment(envHome, envConfig);
+        env.sync();
+        env.close();
+
+        assertEquals(origFileSize, logFile.length());
+    }
+}
diff --git a/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java b/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java
new file mode 100644
index 0000000000000000000000000000000000000000..c3c7b7d3f4a294d3e957307f064b9d2030d409c3
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java
@@ -0,0 +1,262 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MakeLogEntryVersionData.java,v 1.19.2.4 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.logversion;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.PrintWriter;
+import java.util.Set;
+
+import javax.transaction.xa.XAResource;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.TestUtilLogReader;
+import com.sleepycat.je.log.LogUtils.XidImpl;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This standalone command line program generates log files named je-x.y.z.jdb
+ * and je-x.y.z.txt, where x.y.z is the version of JE used to run the program.
+ * This program needs to be run for the current version of JE when we release
+ * a new major version of JE.  It does not need to be run again for older
+ * versions of JE, unless it is changed to generate new types of log entries
+ * and we need to verify those log entries for all versions of JE.  In that
+ * case, LogEntryVersionTest may also need to be changed.
+ *
+ * <p>Run this program with the desired version of JE in the classpath and pass
+ * a home directory as the single command line argument.  After running this
+ * program move the je-x.y.z.* files to the directory of this source package.
+ * When adding je-x.y.z.jdb to CVS make sure to use -kb since it is a binary
+ * file.</p>
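+ *
+ * <p>For example, with je.jar and the compiled test classes on the classpath
+ * (the home directory below is illustrative only):</p>
+ * <pre>
+ *    java com.sleepycat.je.logversion.MakeLogEntryVersionData /tmp/logversion
+ * </pre>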
+ *
+ * <p>This program can be run using the logversiondata ant target.</p>
+ *
+ * @see LogEntryVersionTest
+ */
+public class MakeLogEntryVersionData {
+
+    /* Minimum child entries per BIN. */
+    private static int N_ENTRIES = 4;
+
+    private MakeLogEntryVersionData() {
+    }
+
+    public static void main(String[] args)
+        throws Exception {
+
+        if (args.length != 1) {
+            throw new Exception("Home directory arg is required.");
+        }
+
+        File homeDir = new File(args[0]);
+        File logFile = new File(homeDir, TestUtils.LOG_FILE_NAME);
+        File renamedLogFile = new File(homeDir, "je-" +
+            JEVersion.CURRENT_VERSION.getNumericVersionString() + ".jdb");
+        File summaryFile = new File(homeDir, "je-" +
+            JEVersion.CURRENT_VERSION.getNumericVersionString() + ".txt");
+
+        if (logFile.exists()) {
+            throw new Exception("Home directory must be empty of log files.");
+        }
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        /* Make as small a log as possible to save space in CVS. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        /* force trace messages at recovery. */
+        envConfig.setConfigParam
+            (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+        /* Use a 100 MB log file size to ensure only one file is written. */
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 Integer.toString(100 * (1 << 20)));
+        /* Force BINDelta. */
+        envConfig.setConfigParam
+            (EnvironmentParams.BIN_DELTA_PERCENT.getName(),
+             Integer.toString(75));
+        /* Force INDelete -- only used when the root is purged. */
+        envConfig.setConfigParam
+            (EnvironmentParams.COMPRESSOR_PURGE_ROOT.getName(), "true");
+        /* Ensure that we create two BINs with N_ENTRIES LNs. */
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(),
+             Integer.toString(N_ENTRIES));
+
+        CheckpointConfig forceCheckpoint = new CheckpointConfig();
+        forceCheckpoint.setForce(true);
+
+        XAEnvironment env = new XAEnvironment(homeDir, envConfig);
+
+        /* 
+         * Make two shadow databases: database 1 is transactional and has
+         * aborts; database 2 is not transactional.
+         */
+        for (int i = 0; i < 2; i += 1) {
+            boolean transactional = (i == 0);
+            String dbName = transactional ? Utils.DB1_NAME : Utils.DB2_NAME;
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(transactional);
+            dbConfig.setSortedDuplicates(true);
+            Database db = env.openDatabase(null, dbName, dbConfig);
+
+            Transaction txn = null;
+            if (transactional) {
+                txn = env.beginTransaction(null, null);
+            }
+
+            for (int j = 0; j < N_ENTRIES; j += 1) {
+                db.put(txn, Utils.entry(j), Utils.entry(0));
+            }
+            db.put(txn, Utils.entry(0), Utils.entry(1));
+
+            /* Must checkpoint to generate BINDeltas. */
+            env.checkpoint(forceCheckpoint);
+
+            /* Delete everything but the last LN to cause IN deletion. */
+            for (int j = 0; j < N_ENTRIES - 1; j += 1) {
+                db.delete(txn, Utils.entry(j));
+            }
+
+            if (transactional) {
+                txn.abort();
+            }
+
+            db.close();
+        }
+
+        /* Compress twice to delete DBIN, DIN, BIN, IN. */
+        env.compress();
+        env.compress();
+
+        /* DB2 was not aborted and will contain: {3, 0} */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, Utils.DB2_NAME, dbConfig);
+        Cursor cursor = db.openCursor(null, null);
+        try {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status = cursor.getFirst(key, data, null);
+            if (status != OperationStatus.SUCCESS) {
+                throw new Exception("Expected SUCCESS but got: " + status);
+            }
+            if (Utils.value(key) != 3 || Utils.value(data) != 0) {
+                throw new Exception("Expected {3,0} but got: {" +
+                                    Utils.value(key) + ',' +
+                                    Utils.value(data) + '}');
+            }
+        } finally {
+            cursor.close();
+        }
+        db.close();
+
+        /* 
+         * Make database 3, which is transactional and has an explicit
+         * transaction commit record.
+         */
+        dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Transaction txn = env.beginTransaction(null, null);
+        db = env.openDatabase(null, Utils.DB3_NAME, dbConfig);
+        OperationStatus status = db.put(txn, Utils.entry(99), Utils.entry(79));
+        assert status == OperationStatus.SUCCESS: "status=" + status;
+        db.close();
+        txn.commit();
+        
+        /*
+         * Generate an XA txn Prepare. The transaction must be non-empty in
+         * order to actually log the Prepare.
+         */
+        XidImpl xid =
+            new XidImpl(1, "MakeLogEntryVersionData".getBytes(), null);
+        env.start(xid, XAResource.TMNOFLAGS);
+        /* Re-write the existing {3,0} record. */
+        dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(false);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, Utils.DB2_NAME, dbConfig);
+        db.put(null, Utils.entry(3), Utils.entry(0));
+        db.close();
+        env.prepare(xid);
+        env.rollback(xid);
+
+        env.close();
+
+        /*
+         * Get the set of all log entry types we expect to output.  We exclude
+         * two types:
+         * - MapLN_TX, because MapLN (non-transactional) is now used instead.
+         * - INDelete, because root compression is no longer used.
+         */
+        Set<LogEntryType> expectedTypes = LogEntryType.getAllTypes();
+        expectedTypes.remove(LogEntryType.LOG_MAPLN_TRANSACTIONAL);
+        expectedTypes.remove(LogEntryType.LOG_IN_DELETE_INFO);
+
+        /* Open read-only and write all LogEntryType names to a text file. */
+        envConfig.setReadOnly(true);
+        Environment env2 = new Environment(homeDir, envConfig);
+        PrintWriter writer = new PrintWriter
+            (new BufferedOutputStream(new FileOutputStream(summaryFile)));
+        TestUtilLogReader reader = new TestUtilLogReader
+            (DbInternal.envGetEnvironmentImpl(env2), true /* readFullItem */);
+        while (reader.readNextEntry()) {
+            LogEntryType type = reader.getEntryType();
+            writer.println(type.toStringNoVersion() + '/' +
+                           reader.getEntryVersion());
+            expectedTypes.remove(type);
+        }
+        writer.close();
+        env2.close();
+
+        if (expectedTypes.size() > 0) {
+            throw new Exception("Types not output: " + expectedTypes);
+        }
+
+        if (!logFile.exists()) {
+            throw new Exception("What happened to: " + logFile);
+        }
+
+        if (!logFile.renameTo(renamedLogFile)) {
+            throw new Exception
+                ("Could not rename: " + logFile + " to " + renamedLogFile);
+        }
+
+        System.out.println("Created: " + renamedLogFile);
+        System.out.println("Created: " + summaryFile);
+    }
+}
diff --git a/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java b/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java
new file mode 100644
index 0000000000000000000000000000000000000000..453e712c383dc0bea7dd12058fc3f973a226b684
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java
@@ -0,0 +1,79 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MakeLogHeaderVersionData.java,v 1.10.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.logversion;
+
+import java.io.File;
+import java.util.logging.Level;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * This standalone command line program creates a single 00000000.jdb log file.
+ * It was used to generate maxversion.jdb and minversion.jdb, and although it
+ * may never need to be used again, below are instructions.
+ *
+ * <p>Before running this program change LogEntryType.LOG_VERSION to
+ * Integer.MAX_VALUE or zero temporarily, just for creating a file with the
+ * maximum or minimum version number.  A single command line argument is
+ * required for the home directory.  After running this program rename the
+ * 00000000.jdb file to maxversion.jdb or minversion.jdb in the directory
+ * of this source package.  When adding it to CVS make sure to use -kb since it
+ * is a binary file.  Don't forget to change LogEntryType.LOG_VERSION back to
+ * the correct value.</p>
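+ *
+ * <p>For example (the home directory below is illustrative only):</p>
+ * <pre>
+ *    java com.sleepycat.je.logversion.MakeLogHeaderVersionData /tmp/logheader
+ *    mv /tmp/logheader/00000000.jdb maxversion.jdb
+ * </pre>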
+ *
+ * @see LogHeaderVersionTest
+ */
+public class MakeLogHeaderVersionData {
+
+    private MakeLogHeaderVersionData() {
+    }
+
+    public static void main(String[] args)
+        throws Exception {
+
+        if (args.length != 1) {
+            throw new Exception("Home directory arg is required.");
+        }
+
+        File homeDir = new File(args[0]);
+        File logFile = new File(homeDir, TestUtils.LOG_FILE_NAME);
+
+        if (logFile.exists()) {
+            throw new Exception("Home directory must be empty of log files.");
+        }
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        /* Make as small a log as possible to save space in CVS. */
+        envConfig.setConfigParam
+            (EnvironmentParams.JE_LOGGING_LEVEL.getName(),
+             Level.OFF.getName());
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+
+        Environment env = new Environment(homeDir, envConfig);
+        env.close();
+
+        if (!logFile.exists()) {
+            throw new Exception("Home directory does not contain: " + logFile);
+        }
+
+        System.out.println("Successfully created: " + logFile);
+    }
+}
diff --git a/test/com/sleepycat/je/logversion/Utils.java b/test/com/sleepycat/je/logversion/Utils.java
new file mode 100644
index 0000000000000000000000000000000000000000..68de1c4220057886030dae118c7fd6a30d9b76b5
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/Utils.java
@@ -0,0 +1,35 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Utils.java,v 1.9.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.logversion;
+
+import com.sleepycat.je.DatabaseEntry;
+
+public class Utils {
+
+    static final String DB1_NAME = "database1";
+    static final String DB2_NAME = "database2";
+    static final String DB3_NAME = "database3";
+    static final String MIN_VERSION_NAME = "minversion.jdb";
+    static final String MAX_VERSION_NAME = "maxversion.jdb";
+
+    static DatabaseEntry entry(int val) {
+
+        byte[] data = new byte[] { (byte) val };
+        return new DatabaseEntry(data);
+    }
+
+    static int value(DatabaseEntry entry) {
+
+        byte[] data = entry.getData();
+        if (data.length != 1) {
+            throw new IllegalStateException("len=" + data.length);
+        }
+        return data[0];
+    }
+}
diff --git a/test/com/sleepycat/je/logversion/je-1.5.4.jdb b/test/com/sleepycat/je/logversion/je-1.5.4.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..6a332183a4e6fb2e5deee9dfdaa277a8d6610aac
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-1.5.4.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-1.5.4.txt b/test/com/sleepycat/je/logversion/je-1.5.4.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85d701872bd476f1fe3000c98d5b3296f3dd8ff0
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-1.5.4.txt
@@ -0,0 +1,130 @@
+FileHeader/0
+Trace/0
+Root/0
+CkptStart/0
+IN/0
+BIN/0
+NameLN_TX/0
+IN/0
+BIN/0
+MapLN_TX/0
+Commit/0
+Commit/0
+IN/0
+BIN/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN_TX/0
+Commit/0
+Commit/0
+IN/0
+BIN/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/0
+BIN/0
+IN/0
+DupCountLN/0
+DBIN/0
+DIN/0
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/0
+BIN/0
+BINDelta/0
+BIN/0
+IN/0
+Root/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+MapLN_TX/0
+Commit/0
+BIN/0
+IN/0
+Root/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN_TX/0
+Commit/0
+IN/0
+BIN/0
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/0
+BIN/0
+IN/0
+DupCountLN/0
+DBIN/0
+DIN/0
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/0
+DIN/0
+DIN/0
+BIN/0
+BIN/0
+BIN/0
+BINDelta/0
+BINDelta/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+MapLN_TX/0
+Commit/0
+BIN/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDelete/0
+INDelete/0
+CkptStart/0
+BIN/0
+BIN/0
+BIN/0
+BINDelta/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+MapLN_TX/0
+Commit/0
+IN/0
+Root/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/je-1.7.0.jdb b/test/com/sleepycat/je/logversion/je-1.7.0.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..a9b0361b2428306d0793c57f58180df5cc59d625
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-1.7.0.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-1.7.0.txt b/test/com/sleepycat/je/logversion/je-1.7.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a3d47d5698e4fdda89ffd6f0bc4388d650d2f661
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-1.7.0.txt
@@ -0,0 +1,135 @@
+FileHeader/0
+Trace/0
+Root/0
+CkptStart/0
+IN/1
+BIN/1
+NameLN_TX/0
+IN/1
+BIN/1
+MapLN_TX/0
+Commit/0
+Commit/0
+IN/1
+BIN/1
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN_TX/0
+Commit/0
+Commit/0
+IN/1
+BIN/1
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/1
+BIN/1
+IN/1
+DupCountLN/0
+DBIN/1
+DIN/1
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/1
+BIN/1
+BINDelta/0
+BIN/1
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+Root/0
+BIN/1
+IN/1
+Root/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN_TX/0
+Commit/0
+IN/1
+BIN/1
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/1
+BIN/1
+IN/1
+DupCountLN/0
+DBIN/1
+DIN/1
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/1
+DIN/1
+DIN/1
+BINDelta/0
+BIN/1
+BINDelta/0
+BIN/1
+BIN/1
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+MapLN_TX/0
+Commit/0
+BIN/1
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDupDelete/0
+FileSummaryLN/0
+INDupDelete/0
+FileSummaryLN/0
+INDelete/0
+FileSummaryLN/0
+CkptStart/0
+BIN/1
+BINDelta/0
+BIN/1
+BIN/1
+BIN/1
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+MapLN_TX/0
+Commit/0
+IN/1
+Root/0
+FileSummaryLN/0
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/je-2.0.0.jdb b/test/com/sleepycat/je/logversion/je-2.0.0.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..680170415b1ee729dc86381fd3d27e33c66fde22
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-2.0.0.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-2.0.0.txt b/test/com/sleepycat/je/logversion/je-2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0c1ea24efc8ab45e35e3721b97b8bbc2b86b691e
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-2.0.0.txt
@@ -0,0 +1,130 @@
+FileHeader/0
+Trace/0
+Root/1
+CkptStart/0
+IN/2
+BIN/2
+NameLN_TX/0
+IN/2
+BIN/2
+MapLN_TX/1
+Commit/0
+Commit/0
+IN/2
+BIN/2
+FileSummaryLN/1
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN_TX/1
+Commit/0
+Commit/0
+IN/2
+BIN/2
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/2
+BIN/2
+BINDelta/0
+BIN/2
+IN/2
+MapLN_TX/1
+Commit/0
+IN/2
+MapLN_TX/1
+Commit/0
+IN/2
+Root/1
+BIN/2
+IN/2
+Root/1
+FileSummaryLN/1
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN_TX/1
+Commit/0
+IN/2
+BIN/2
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/2
+DIN/2
+DIN/2
+BIN/2
+BINDelta/0
+BINDelta/0
+BIN/2
+BINDelta/0
+IN/2
+MapLN_TX/1
+Commit/0
+IN/2
+MapLN_TX/1
+Commit/0
+BINDelta/0
+FileSummaryLN/1
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDupDelete/0
+INDelete/0
+INDupDelete/0
+INDelete/0
+FileSummaryLN/1
+Prepare/0
+CkptStart/0
+BIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+IN/2
+MapLN_TX/1
+Commit/0
+IN/2
+MapLN_TX/1
+Commit/0
+BINDelta/0
+BIN/2
+BIN/2
+IN/2
+FileSummaryLN/1
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/je-3.1.25.jdb b/test/com/sleepycat/je/logversion/je-3.1.25.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..e1543d4ba74ef35c8641332acf69197d42e0e357
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.1.25.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-3.1.25.txt b/test/com/sleepycat/je/logversion/je-3.1.25.txt
new file mode 100644
index 0000000000000000000000000000000000000000..277ee8f1a2741bec6d1936e9596a5bb7750c7aab
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-3.1.25.txt
@@ -0,0 +1,131 @@
+FileHeader/0
+Trace/0
+Root/1
+BIN/2
+IN/2
+NameLN_TX/0
+BIN/2
+IN/2
+MapLN/2
+Commit/0
+CkptStart/0
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+FileSummaryLN/2
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN/2
+Commit/0
+BIN/2
+IN/2
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/2
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN/2
+BIN/2
+IN/2
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/2
+DIN/2
+DIN/2
+BINDelta/0
+BIN/2
+BIN/2
+BINDelta/0
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BINDelta/0
+IN/2
+Root/1
+FileSummaryLN/2
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDupDelete/0
+IN/2
+MapLN/2
+INDupDelete/0
+FileSummaryLN/2
+INDelete/0
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+BIN/2
+IN/2
+FileSummaryLN/2
+Prepare/0
+CkptStart/0
+BIN/2
+BIN/2
+BIN/2
+IN/2
+MapLN/2
+IN/2
+Root/1
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/2
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/je-3.2.22.jdb b/test/com/sleepycat/je/logversion/je-3.2.22.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..f0d9aa63e72ebbf8af555fb5e366327e7d348c2e
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.2.22.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-3.2.22.txt b/test/com/sleepycat/je/logversion/je-3.2.22.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7c31e284a4f5684bf3de15692bb8307a0234db4c
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-3.2.22.txt
@@ -0,0 +1,133 @@
+FileHeader/0
+Trace/0
+Root/1
+BIN/2
+IN/2
+NameLN_TX/0
+BIN/2
+IN/2
+MapLN/2
+Commit/0
+CkptStart/0
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN/2
+Commit/0
+BIN/2
+IN/2
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN/2
+BIN/2
+IN/2
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/2
+DIN/2
+DIN/2
+BIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BINDelta/0
+IN/2
+Root/1
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDupDelete/0
+IN/2
+MapLN/2
+INDupDelete/0
+FileSummaryLN/3
+INDelete/0
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+BIN/2
+IN/2
+FileSummaryLN/3
+LN_TX/0
+Prepare/0
+Abort/0
+CkptStart/0
+BIN/2
+BIN/2
+BIN/2
+IN/2
+MapLN/2
+IN/2
+Root/1
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/je-3.2.79.jdb b/test/com/sleepycat/je/logversion/je-3.2.79.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..d112eb736e80783f8c7c49242aa4d53882eb1828
Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.2.79.jdb differ
diff --git a/test/com/sleepycat/je/logversion/je-3.2.79.txt b/test/com/sleepycat/je/logversion/je-3.2.79.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3173303ad15c1f9776ce6ba498dc624dbbf914cf
--- /dev/null
+++ b/test/com/sleepycat/je/logversion/je-3.2.79.txt
@@ -0,0 +1,143 @@
+FileHeader/0
+Trace/0
+Root/1
+BIN/2
+IN/2
+NameLN_TX/0
+BIN/2
+IN/2
+MapLN/2
+Commit/0
+CkptStart/0
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+Root/1
+BIN/2
+IN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+Trace/0
+NameLN_TX/0
+MapLN/2
+Commit/0
+BIN/2
+IN/2
+LN_TX/0
+LN_TX/0
+LN_TX/0
+LN_TX/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN_TX/0
+LN_TX/0
+CkptStart/0
+DupBINDelta/0
+DIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+DelDupLN_TX/0
+DupCountLN_TX/0
+LN_TX/0
+LN_TX/0
+Abort/0
+NameLN/0
+MapLN/2
+BIN/2
+IN/2
+LN/0
+LN/0
+LN/0
+LN/0
+BIN/2
+BIN/2
+IN/2
+DupCountLN/0
+DBIN/2
+DIN/2
+DupCountLN/0
+LN/0
+CkptStart/0
+DupBINDelta/0
+DBIN/2
+DIN/2
+DIN/2
+BIN/2
+BINDelta/0
+BIN/2
+BINDelta/0
+BINDelta/0
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+BINDelta/0
+IN/2
+Root/1
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
+DelDupLN/0
+DupCountLN/0
+DelDupLN/0
+DupCountLN/0
+LN/0
+LN/0
+INDupDelete/0
+IN/2
+MapLN/2
+INDupDelete/0
+FileSummaryLN/3
+INDelete/0
+MapLN/2
+IN/2
+MapLN/2
+BIN/2
+BIN/2
+IN/2
+FileSummaryLN/3
+NameLN_TX/0
+MapLN/2
+Commit/0
+BIN/2
+IN/2
+LN_TX/0
+Commit/0
+LN_TX/0
+Prepare/0
+Abort/0
+CkptStart/0
+BIN/2
+BIN/2
+BIN/2
+BIN/2
+IN/2
+MapLN/2
+IN/2
+MapLN/2
+IN/2
+Root/1
+IN/2
+MapLN/2
+BIN/2
+FileSummaryLN/3
+Trace/0
+CkptEnd/0
diff --git a/test/com/sleepycat/je/logversion/maxversion.jdb b/test/com/sleepycat/je/logversion/maxversion.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..84a2cb559237395fe858d9534c62a462e592f2af
Binary files /dev/null and b/test/com/sleepycat/je/logversion/maxversion.jdb differ
diff --git a/test/com/sleepycat/je/logversion/minversion.jdb b/test/com/sleepycat/je/logversion/minversion.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..927188131582d8258b2c531018a63e902c28551f
Binary files /dev/null and b/test/com/sleepycat/je/logversion/minversion.jdb differ
diff --git a/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java b/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..086d872d7c8f6cb5fddd3ea1ea7f9364f54cbfc3
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java
@@ -0,0 +1,164 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckBINDeltaTest.java,v 1.18 2008/03/18 01:17:45 cwl Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.util.TestUtils;
+
+public class CheckBINDeltaTest extends CheckBase {
+
+    private static final String DB_NAME = "simpleDB";
+    private static final boolean DEBUG = false;
+
+    /**
+     * SR #11123
+     * Make sure that BINDeltas are applied only to non-deleted nodes.
+     */
+    public void testBINDelta()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(),
+                                 "75");
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        EnvironmentConfig restartConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(restartConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            addData(db);
+                        }
+                    },
+                    restartConfig,
+                    new DatabaseConfig());
+    }
+
+    /**
+     * This test checks for the bug described in SR11123.  If an IN and its
+     * child subtree are deleted, an INDeleteInfo is written to the log.  If
+     * there is a BINDelta in the log for a BIN child of the removed subtree
+     * (i.e. compressed), then recovery will apply it to the compressed IN.
+     * Since the IN has no data in it, that is not necessarily a problem.
+     * However, reinstantiating the obsolete IN may cause a parent IN to
+     * split, which is not allowed during IN recovery.
+     *
+     * Here's the case:
+     *
+     *           |
+     *          IN1
+     *      +---------------------------------+
+     *      |                                 |
+     *     IN2                               IN6
+     *   /   |                            /    |     \
+     * BIN3 BIN4                      BIN7   BIN8   BIN9
+     *
+     * IN2 and the subtree below are compressed away. During recovery
+     * replay, after the pass where INs and INDeleteINfos are
+     * processed, the in-memory tree looks like this:
+     *
+     *                         IN1
+     *                          |
+     *                         IN6
+     *                     /    |     \
+     *                  BIN7   BIN8   BIN9
+     *
+     * However, let's assume that BINDeltas were written for
+     * BIN3 and BIN4 within the recovery part of the log, before the
+     * subtree was compressed.  We'll replay those BINDeltas in the
+     * following pass, and in the faulty implementation, they cause
+     * the ghosts of BIN3, BIN4 to be resurrected and applied to
+     * IN6. Let's assume that the max node size is 4 -- we won't be
+     * able to connect BIN3, BIN4 because IN6 doesn't have the
+     * capacity, and we don't expect to have to do splits.
+     */
+    private void addData(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so there are 3 levels. */
+        for (int i = 0; i < 140; i += 10) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
+        env.checkpoint(ckptConfig);
+
+        Tree tree = DbInternal.dbGetDatabaseImpl(db).getTree();
+        com.sleepycat.je.tree.Key.DUMP_INT_BINDING = true;
+        com.sleepycat.je.tree.Key.DUMP_TYPE =
+            com.sleepycat.je.tree.Key.DumpType.BINARY;
+        if (DEBUG) {
+            tree.dump();
+        }
+
+        /*
+         * Update a key on the BIN3 and a key on BIN4, to create reason for
+         * a BINDelta. Force a BINDelta for BIN3 and BIN4 out to the log.
+         */
+        IntegerBinding.intToEntry(0, key);
+        IntegerBinding.intToEntry(100, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        IntegerBinding.intToEntry(20, key);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        BIN bin = (BIN)tree.getFirstNode(CacheMode.DEFAULT);
+        bin.log(envImpl.getLogManager(), true, false, false, false, null);
+        bin = tree.getNextBin(bin, false /* traverseWithinDupTree */,
+                              CacheMode.DEFAULT);
+        bin.log(envImpl.getLogManager(), true, false, false, false, null);
+        bin.releaseLatch();
+
+        /*
+         * Delete all of left hand side of the tree, so that the subtree root
+         * headed by IN2 is compressed.
+         */
+        for (int i = 0; i < 50; i+=10) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS, db.delete(null, key));
+        }
+
+        /* force a compression */
+        env.compress();
+        if (DEBUG) {
+            tree.dump();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckBase.java b/test/com/sleepycat/je/recovery/CheckBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..23a378235f367311c4b61873025ae19a9a9e1bfc
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckBase.java
@@ -0,0 +1,516 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckBase.java,v 1.24 2008/06/30 20:54:48 linda Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.recovery.stepwise.EntryTrackerReader;
+import com.sleepycat.je.recovery.stepwise.LogEntryInfo;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class CheckBase extends TestCase {
+
+    private static final boolean DEBUG = false;
+    private HashSet<TestData> expected;
+    private Set<TestData> found;
+
+    File envHome;
+    Environment env;
+
+    private List<LogEntryInfo> logDescription;
+    private long stepwiseStartLsn;
+
+    private boolean checkLsns = true;
+
+    public CheckBase() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, ".jdb_save");
+    }
+
+    public void tearDown() {
+        if (env != null) {
+            try {
+                env.close();
+                env = null;
+            } catch (Exception ignore) {
+            }
+        }
+
+        /*
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+            TestUtils.removeFiles("TearDown", envHome, ".jdb_save");
+        } catch (Exception ignore) {
+        }
+        //*/
+    }
+
+    /**
+     * Create an environment, generate data, record the expected values.
+     * Then close the environment and recover, and check that the expected
+     * values are there.
+     */
+    protected void testOneCase(String dbName,
+                               EnvironmentConfig startEnvConfig,
+                               DatabaseConfig startDbConfig,
+                               TestGenerator testGen,
+                               EnvironmentConfig validateEnvConfig,
+                               DatabaseConfig validateDbConfig)
+        throws Throwable {
+
+        try {
+            /* Create an environment. */
+            env = new Environment(envHome, startEnvConfig);
+            Database db = env.openDatabase(null, dbName, startDbConfig);
+
+            /* Generate test data. */
+            testGen.generateData(db);
+
+            /* Scan the database to save what values we should have. */
+            loadExpectedData(db);
+
+            /* Check for overlap between the tree and utilization profile. */
+            if (checkLsns) {
+                VerifyUtils.checkLsns(db);
+            }
+
+            /* Close w/out checkpointing. */
+            db.close();
+            DbInternal.envGetEnvironmentImpl(env).close(false);
+            env = null;
+
+            if (testGen.generateLogDescription) {
+                makeLogDescription();
+            }
+
+            tryRecovery(validateEnvConfig,
+                        validateDbConfig,
+                        dbName,
+                        expected);
+        } catch (Throwable t) {
+            /* Dump stack trace before trying to tear down. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /* Run recovery and validation twice. */
+    private void tryRecovery(EnvironmentConfig validateEnvConfig,
+                             DatabaseConfig validateDbConfig,
+                             String dbName,
+                             HashSet<TestData> useExpected)
+        throws DatabaseException {
+        /* Recover and load what's in the database post-recovery. */
+        recoverAndLoadData(validateEnvConfig,
+                           validateDbConfig,
+                           dbName);
+
+        /* Check the pre and post recovery data. */
+        if (useExpected == null) {
+            useExpected = expected;
+        }
+        validate(useExpected);
+
+        /* Repeat the recovery and validation. */
+        recoverAndLoadData(validateEnvConfig,
+                           validateDbConfig,
+                           dbName);
+
+        validate(useExpected);
+    }
+
+    void setCheckLsns(boolean checkLsns) {
+        this.checkLsns = checkLsns;
+    }
+
+    /**
+     * Call this method to set the start of the stepwise loop. The stepwise
+     * testing will begin at this point in the log.
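+     *
+     * Illustrative sketch only (names such as expectedSet are hypothetical,
+     * and this is not a test in this class): a stepwise test calls this from
+     * its generateData() implementation after writing any setup records, and
+     * later drives the truncation loop from its test method:
+     *
+     *    setStepwiseStart();
+     *    // ... write the entries whose recovery will be tested stepwise ...
+     *
+     *    // Then, after testOneCase() returns:
+     *    stepwiseLoop(DB_NAME, envConfig, dbConfig, expectedSet, 0);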
+     */
+    void setStepwiseStart() {
+
+        /*
+         * Put a tracing message both for debugging assistance, and also
+         * to force the truncation to start at this log entry, since we're
+         * getting the last used LSN.
+         */
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "StepwiseStart");
+        FileManager fileManager =
+            DbInternal.envGetEnvironmentImpl(env).getFileManager();
+        stepwiseStartLsn = fileManager.getLastUsedLsn();
+    }
+
+    void stepwiseLoop(String dbName,
+                      EnvironmentConfig validateEnvConfig,
+                      DatabaseConfig validateDbConfig,
+                      HashSet<TestData> useExpected,
+                      int startingIteration)
+        throws DatabaseException, IOException {
+
+        assertTrue(logDescription.size() > 0);
+        saveLogFiles(envHome);
+
+        /* Maps of txnId -> set of records, used to adjust the expected set. */
+        Map<Long, Set<TestData>> newUncommittedRecords =
+            new HashMap<Long, Set<TestData>>();
+        Map<Long, Set<TestData>> deletedUncommittedRecords =
+            new HashMap<Long, Set<TestData>>();
+
+        /* Now run recovery repeatedly, truncating at different locations. */
+        String status = null;
+        try {
+
+            /*
+             * Some tests are not working with starting at 0. As a workaround,
+             * start at another iteration.
+             */
+            for (int i = startingIteration; i < logDescription.size(); i++ ) {
+
+                /* Find out where to truncate. */
+                LogEntryInfo info = logDescription.get(i);
+                long lsn = info.getLsn();
+
+                if (lsn == 0) {
+                    continue;
+                }
+
+                status = "Iteration " + i + " out of " +
+                    logDescription.size() + " truncate at 0x" +
+                    DbLsn.getNoFormatString(lsn);
+
+                if (DEBUG) {
+                    System.out.println(status);
+                }
+
+                /* copy files back. */
+                resetLogFiles(envHome);
+
+                /* truncate */
+                truncateAtOffset(envHome, lsn);
+
+                /* recover */
+                tryRecovery(validateEnvConfig, validateDbConfig,
+                            dbName, useExpected);
+
+                /* Adjust the set of expected records for the next iteration.*/
+                info.updateExpectedSet(useExpected, newUncommittedRecords,
+                                       deletedUncommittedRecords);
+            }
+        } catch (Error e) {
+            System.err.println("Failure at step: " + status);
+            throw e;
+        }
+    }
+
+    protected void turnOffEnvDaemons(EnvironmentConfig envConfig) {
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CHECKPOINTER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+    }
+
+    /**
+     * Re-open the environment and load all data present, to compare to the
+     * data set of expected values.
+     */
+    protected void recoverAndLoadData(EnvironmentConfig envConfig,
+                                      DatabaseConfig dbConfig,
+                                      String dbName)
+        throws DatabaseException {
+
+        env = new Environment(envHome, envConfig);
+        Database db = env.openDatabase(null, dbName, dbConfig);
+
+        /* Check for overlap between the tree and utilization profile. */
+        if (checkLsns) {
+            VerifyUtils.checkLsns(db);
+        }
+
+        found = new HashSet<TestData>();
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                TestData t = new TestData(key, data);
+                if (DEBUG) {
+                    System.out.println("found k=" +
+                                       IntegerBinding.entryToInt(key) +
+                                       " d=" +
+                                       IntegerBinding.entryToInt(data));
+                }
+                found.add(t);
+            }
+        }
+        finally {
+            cursor.close();
+        }
+
+        db.close();
+
+        assertTrue(env.verify(new VerifyConfig(), System.out));
+        env.close();
+    }
+
+    /*
+     * The found and expected data sets need to match exactly after recovery.
+     */
+    @SuppressWarnings("unchecked") // clone() returns Object
+    void validate(HashSet<TestData> expected)
+        throws DatabaseException {
+
+        Set<TestData> useExpected = (Set<TestData>) expected.clone();
+
+        if (useExpected.size() != found.size()) {
+            System.err.println("expected---------");
+            dumpHashSet(useExpected);
+            System.err.println("actual---------");
+            dumpHashSet(found);
+            assertEquals("expected and found set sizes don't match" ,
+                         useExpected.size(), found.size());
+        }
+
+        Iterator<TestData> foundIter = found.iterator();
+        while (foundIter.hasNext()) {
+            TestData t = foundIter.next();
+            assertTrue("Missing " + t + "from the expected set",
+                       useExpected.remove(t));
+        }
+
+        assertEquals("Expected has " + useExpected.size() + " items remaining",
+                     0, useExpected.size());
+    }
+
+    protected void putTestData(Database db,
+                             DatabaseEntry key,
+                             DatabaseEntry data)
+        throws DatabaseException {
+
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+    }
+
+    private void loadExpectedData(Database db)
+        throws DatabaseException {
+
+        expected = new HashSet<TestData>();
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                if (DEBUG) {
+                    System.out.println("expect k=" +
+                                       IntegerBinding.entryToInt(key) +
+                                       " d=" +
+                                       IntegerBinding.entryToInt(data));
+                }
+                TestData t = new TestData(key, data);
+                expected.add(t);
+            }
+        }
+        finally {
+            cursor.close();
+        }
+    }
+
+    void dumpData(Database db)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        int i = 0;
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                TestData t = new TestData(key, data);
+                System.out.println(t);
+                i++;
+            }
+        }
+        finally {
+            cursor.close();
+        }
+        System.out.println("scanned=" + i);
+    }
+
+    private void dumpHashSet(Set<TestData> expected) {
+        Iterator<TestData> iter = expected.iterator();
+        System.err.println("size=" + expected.size());
+        while (iter.hasNext()) {
+            System.err.println(iter.next());
+        }
+    }
+
+    private void makeLogDescription()
+        throws DatabaseException {
+
+        EnvironmentImpl cmdEnvImpl =
+            CmdUtil.makeUtilityEnvironment(envHome, false);
+        logDescription = new ArrayList<LogEntryInfo>();
+
+        try {
+            EntryTrackerReader reader =
+                new EntryTrackerReader(cmdEnvImpl,
+                                       stepwiseStartLsn,
+                                       logDescription);
+            while (reader.readNextEntry()) {
+            }
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        } finally {
+            cmdEnvImpl.close();
+        }
+
+        if (DEBUG) {
+            Iterator<LogEntryInfo> iter = logDescription.iterator();
+            while (iter.hasNext()) {
+                LogEntryInfo entryInfo = iter.next();
+                System.out.println(entryInfo);
+            }
+        }
+    }
+
+    /**
+     * Truncate the log at the specified offset.
+     */
+    private void truncateAtOffset(File envHome, long lsn)
+        throws DatabaseException, IOException {
+
+        EnvironmentImpl cmdEnvImpl =
+            CmdUtil.makeUtilityEnvironment(envHome, false);
+
+        cmdEnvImpl.getFileManager().truncateLog(DbLsn.getFileNumber(lsn),
+                                                DbLsn.getFileOffset(lsn));
+
+        cmdEnvImpl.close();
+    }
+
+    /* Copy all .jdb files to .jdb_save for stepwise processing. */
+    private void saveLogFiles(File envHome)
+        throws IOException {
+
+        String[] suffix = new String[] {".jdb"};
+        String[] fileNames = FileManager.listFiles(envHome, suffix);
+
+        for (int i = 0; i < fileNames.length; i++) {
+            File src = new File(envHome, fileNames[i]);
+            File dst = new File(envHome, fileNames[i] + "_save");
+            copy(src, dst);
+        }
+    }
+
+    /* Copy all .jdb_save files back to .jdb. */
+    private void resetLogFiles(File envHome)
+        throws IOException {
+
+        String[] suffix = new String[] {".jdb_save"};
+        String[] fileNames = FileManager.listFiles(envHome, suffix);
+
+        for (int i = 0; i < fileNames.length; i++) {
+            String srcName = fileNames[i];
+            int end = srcName.indexOf("_save");
+            String dstName = srcName.substring(0, end);
+            copy(new File(envHome, srcName), new File(envHome, dstName));
+        }
+    }
+
+    private void copy(File src, File dst)
+        throws IOException {
+
+        InputStream in = new FileInputStream(src);
+        try {
+            OutputStream out = new FileOutputStream(dst);
+            try {
+                /* Transfer bytes from in to out. */
+                byte[] buf = new byte[1024];
+                int len;
+                while ((len = in.read(buf)) > 0) {
+                    out.write(buf, 0, len);
+                }
+            } finally {
+                out.close();
+            }
+        } finally {
+            in.close();
+        }
+    }
+
+    /*
+     * Each unit test overrides the generateData method. Don't make this
+     * abstract, because we may want different unit tests to call different
+     * flavors of generateData(), and we want a default implementation for each
+     * flavor.
+     *
+     * A unit test may also specify an implementation for truncateLog. When
+     * that happens, the truncation is done before the first recovery.
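+     *
+     * For example, the concrete tests pass anonymous TestGenerator
+     * subclasses to testOneCase() that override generateData().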
+     */
+    protected class TestGenerator {
+
+        /* If true, generate a log description to use in stepwise testing. */
+        boolean generateLogDescription;
+
+        public TestGenerator() {
+        }
+
+        public TestGenerator(boolean generateLogDescription) {
+            this.generateLogDescription = generateLogDescription;
+        }
+
+        void generateData(Database db)
+            throws Exception {
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckDupMaxFlushLevelTest.java b/test/com/sleepycat/je/recovery/CheckDupMaxFlushLevelTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4f3e9fc569baed1fae190f488e9cf0f85e628878
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckDupMaxFlushLevelTest.java
@@ -0,0 +1,224 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckDupMaxFlushLevelTest.java,v 1.1.2.1 2008/12/13 18:24:04 mark Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.tree.Tree.SearchType;
+import com.sleepycat.je.util.TestUtils;
+
+public class CheckDupMaxFlushLevelTest extends CheckBase {
+
+    private static final String DB_NAME = "foo";
+
+    /**
+     * Tests a fix for a bug in the way that the maxFlushLevel is used to
+     * determine when to log a DIN root as provisional.  [#16712]
+     *
+     * Imagine this Btree.
+     *
+     *        IN-A
+     *         |
+     *       BIN-B
+     *      /     \
+     *    DIN-C   DIN-E
+     *      |       |
+     *   DBIN-D   DIN-F
+     *              |
+     *           DBIN-G
+     *
+     * When the checkpoint starts, DIN-C and DIN-E are the highest dirty
+     * nodes, so the max flush level is 3.
+     *
+     * DIN-C is level 2 and DIN-E is level 3.  They are at the same height
+     * relative to the top of the tree, but not relative to the bottom.
+     *
+     * When we flush DIN-E, we make it non-provisional because its level (3) is
+     * equal to the max flush level (3).
+     *
+     * But when we flush DIN-C, we make it provisional
+     * (Provisional.BEFORE_CKPT_END to be exact, but provisional in effect)
+     * because its level (2) is less than the max flush level (3).
+     *
+     * When we recover, we don't replay DIN-C because it is provisional.  So
+     * any references it contains (or its children contain) that were
+     * necessary for log cleaning are lost.  If we deleted a log file based
+     * on those lost references, we'll get LogFileNotFound.
+     *
+     * The solution is to log DIN-C non-provisionally, even though its level
+     * is less than the max flush level.  It must be logged non-provisionally
+     * when the parent's level is greater than the max flush level.
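+     *
+     * As a rough sketch (not the exact checkpointer source), the corrected
+     * provisional test looks like:
+     *
+     *   boolean provisional = (node.getLevel() < maxFlushLevel) &&
+     *                         (parent.getLevel() <= maxFlushLevel);
+     *
+     * Under this rule DIN-E is non-provisional because its level equals the
+     * max flush level, and DIN-C is also non-provisional because its parent
+     * BIN-B sits above the max flush level.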
+     */
+    public void testDupMaxFlushLevel()
+        throws Throwable {
+
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setAllowCreate(true);
+
+        final DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setNodeMaxDupTreeEntries(4);
+
+        setCheckLsns(false);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(){
+                        void generateData(Database db)
+                            throws DatabaseException {
+
+                            createMultiLevelDupTree(db);
+                        }
+                    },
+                    envConfig,
+                    dbConfig);
+    }
+
+    private void insert(Database db, int key, int data)
+        throws DatabaseException {
+
+        final DatabaseEntry keyEntry = new DatabaseEntry();
+        IntegerBinding.intToEntry(key, keyEntry);
+        final DatabaseEntry dataEntry = new DatabaseEntry();
+        IntegerBinding.intToEntry(data, dataEntry);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoDupData(null, keyEntry, dataEntry));
+    }
+
+    private void remove(Database db, int key, int data)
+        throws DatabaseException {
+
+        final DatabaseEntry keyEntry = new DatabaseEntry();
+        IntegerBinding.intToEntry(key, keyEntry);
+        final DatabaseEntry dataEntry = new DatabaseEntry();
+        IntegerBinding.intToEntry(data, dataEntry);
+        final Cursor c = db.openCursor(null, null);
+        try {
+            assertSame(OperationStatus.SUCCESS,
+                       c.getSearchBoth(keyEntry, dataEntry, null));
+            assertSame(OperationStatus.SUCCESS, c.delete());
+        } finally {
+            c.close();
+        }
+    }
+
+    private void createMultiLevelDupTree(Database db)
+        throws DatabaseException {
+
+        final DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+
+        /* Create a 3 level dup tree for key 1. */
+        for (int data = 1; getDupTreeDepth(dbImpl, 1) != 3; data += 1) {
+            insert(db, 1, data);
+        }
+
+        /* Create a 2 level dup tree for key 2. */
+        for (int data = 1; getDupTreeDepth(dbImpl, 2) != 2; data += 1) {
+            insert(db, 2, data);
+        }
+
+        /* Flush all the way to the root. */
+        final CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
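+        /* MinimizeRecoveryTime makes the checkpoint flush to the root. */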
+        ckptConfig.setMinimizeRecoveryTime(true);
+        env.checkpoint(ckptConfig);
+
+        /*
+         * Remove one entry for key 2, for which the DIN will be flushed
+         * provisionally (incorrectly) when the bug occurs.
+         */
+        remove(db, 2, 1);
+
+        /* Make both DIN roots dirty. */
+        setDINRootDirty(dbImpl, 1);
+        setDINRootDirty(dbImpl, 2);
+
+        /*
+         * Perform a normal checkpoint which should flush only up to the DIN
+         * roots.  The bug causes the DIN root for key 2 to be incorrectly
+         * logged as provisional.  During recovery, the removal of record (2,1)
+         * will be lost.
+         */
+        ckptConfig.setForce(true);
+        ckptConfig.setMinimizeRecoveryTime(false);
+        env.checkpoint(ckptConfig);
+    }
+
+    private int getDupTreeDepth(DatabaseImpl dbImpl, int key)
+        throws DatabaseException {
+
+        final DIN din = getLatchedDINRoot(dbImpl, key);
+        if (din == null) {
+            return 0;
+        }
+        try {
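+            /* The root DIN's level equals the depth of the dup tree. */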
+            return din.getLevel();
+        } finally {
+            din.releaseLatch();
+        }
+    }
+
+    private void setDINRootDirty(DatabaseImpl dbImpl, int key)
+        throws DatabaseException {
+
+        final DIN din = getLatchedDINRoot(dbImpl, key);
+        assertNotNull(din);
+        try {
+            din.setDirty(true);
+        } finally {
+            din.releaseLatch();
+        }
+    }
+
+    private DIN getLatchedDINRoot(DatabaseImpl dbImpl, int key)
+        throws DatabaseException {
+
+        final DatabaseEntry keyEntry = new DatabaseEntry();
+        IntegerBinding.intToEntry(key, keyEntry);
+        final byte[] keyBytes = keyEntry.getData();
+
+        final BIN bin = (BIN) dbImpl.getTree().search
+            (keyBytes, SearchType.NORMAL, -1, null, CacheMode.DEFAULT);
+        if (bin == null) {
+            return null;
+        }
+        try {
+            final int idx = bin.findEntry(keyBytes, false, true);
+            if (idx < 0) {
+                return null;
+            }
+            final Node child = bin.fetchTarget(idx);
+            if (!(child instanceof DIN)) {
+                return null;
+            }
+            final DIN din = (DIN) child;
+            assertNotNull(din);
+            din.latch();
+            return din;
+        } finally {
+            bin.releaseLatch();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckNewRootTest.java b/test/com/sleepycat/je/recovery/CheckNewRootTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c6298e0b286089ddf2897abbe0d0a7dd9817523
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckNewRootTest.java
@@ -0,0 +1,416 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckNewRootTest.java,v 1.17 2008/06/30 20:54:48 linda Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.logging.Level;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.Tracer;
+
+/**
+ * Test situations where a new root is created
+ */
+public class CheckNewRootTest extends CheckBase {
+
+    private static final boolean DEBUG = false;
+    private static final String DB_NAME = "simpleDB";
+
+    private boolean useDups = false;
+    private static final CheckpointConfig FORCE_CONFIG =
+        new CheckpointConfig();
+    static {
+        FORCE_CONFIG.setForce(true);
+    }
+
+    /**
+     * Create a tree, make sure the root changes and is logged
+     * before any checkpointing. The bug found in [#13897] was this:
+     *
+     * 100 BIN a
+     * 110 RootIN b
+     * 120 MapLN points to root IN at 110
+     * 130 RootIN b written as part of compression
+     * 140 ckpt start
+     * 150 ckpt end
+     *
+     * Since the compression was writing a root IN w/out updating the mapLN,
+     * the obsolete root at 110 was recovered instead of the newer rootIN
+     * at 130.
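+     *
+     * The test below checks that recovery finds the newer root at 130.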
+     */
+    public void testWrittenByCompression()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = setupEnvConfig();
+        DatabaseConfig dbConfig = setupDbConfig();
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description. */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupWrittenByCompression(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each log entry.
+         * Our baseline expected set is empty -- no records expected.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    /**
+     * Create a populated tree, delete all records, then begin to insert again.
+     */
+    private void setupWrittenByCompression(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 2 levels, with 2 BINs. */
+        for (int i = 0; i < 10; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After inserts");
+        env.checkpoint(FORCE_CONFIG);
+        if (DEBUG) {
+            System.out.println(db.getStats(new StatsConfig()));
+        }
+
+        /* Now delete all of 1 BIN. */
+        for (int i = 0; i < 5; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS, db.delete(null, key));
+        }
+
+        /* Compress, removing a BIN. */
+        env.compress();
+        if (DEBUG) {
+            System.out.println("After compress");
+            System.out.println(db.getStats(new StatsConfig()));
+        }
+
+        /* Checkpoint again. */
+        env.checkpoint(FORCE_CONFIG);
+    }
+
+    /**
+     * Create a tree, make sure the root changes and is logged
+     * before any checkpointing. The bug found in [#13897] was this:
+     *
+     * 110 RootIN b
+     * 120 MapLN points to root IN at 110
+     * 130 BINb split
+     * 140 RootIN b written as part of split
+     * 150 ckpt start
+     * 160 ckpt end
+     *
+     * Since the split was writing a root IN w/out updating the mapLN, the
+     * obsolete root at 110 was recovered instead of the newer rootIN at 140.
+     */
+    public void testWrittenBySplit()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = setupEnvConfig();
+        DatabaseConfig dbConfig = setupDbConfig();
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description. */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupWrittenBySplits(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each log entry.
+         * Our baseline expected set is empty -- no records expected.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    /**
+     * Create a one-record tree and checkpoint, then insert enough records
+     * to split the BIN, which re-logs the root.
+     */
+    private void setupWrittenBySplits(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Create a tree and checkpoint. */
+        IntegerBinding.intToEntry(0, key);
+        IntegerBinding.intToEntry(0, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        env.checkpoint(FORCE_CONFIG);
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After creation");
+
+        /* Populate a tree so it splits. */
+        for (int i = 1; i < 6; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After inserts");
+        env.checkpoint(FORCE_CONFIG);
+    }
+
+    /*
+     * Scenario from [#13897]: tree is created. Log looks like this
+     *  provisional BIN
+     *  root IN
+     *  checkpoint start
+     *  LN is logged but not yet attached to BIN
+     *  checkpoint end
+     *  BIN is dirtied, but is not part of checkpoint, because dirtying wasn't
+     *  seen
+     * In this case, getParentForBIN hangs, because there is no root.
+     * This test is for debugging only, because it's not really possible to
+     * run a real checkpoint in the small window when the bin is not dirty.
+     * Attempts to run a checkpoint programmatically result in failing the
+     * assert that no latches are held when the inlist latch is taken.
+     * Instead, we do this pseudo checkpoint, to make the hang reproduce. But
+     * this test will still fail even with the fixed code because the fix
+     * now causes the rootIN to get re-logged, and the pseudo checkpoint
+     * doesn't do that logging.
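+     *
+     * The pseudo checkpoint is implemented by the CheckpointHook class
+     * below, which logs a CkptStart/CkptEnd pair directly through the
+     * LogManager.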
+     */
+    public void xxtestCreateNewTree() // This test is for debugging only.
+        throws Throwable {
+
+        EnvironmentConfig envConfig = setupEnvConfig();
+        DatabaseConfig dbConfig = setupDbConfig();
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description. */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupCreateNewTree(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each log entry.
+         * Our baseline expected set is empty -- no records expected.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    /**
+     * Create the log pattern described above: provisional BIN, IN, ckpt
+     * start, LN, using the checkpoint hook to inject the pseudo checkpoint.
+     */
+    private void setupCreateNewTree(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        TestHook ckptHook = new CheckpointHook(env);
+        DbInternal.dbGetDatabaseImpl(db).getTree().setCkptHook(ckptHook);
+
+        env.checkpoint(FORCE_CONFIG);
+
+        /*
+         * Create in the log
+         *  provisional BIN, IN, ckpt start, LN
+         */
+        IntegerBinding.intToEntry(1, key);
+        IntegerBinding.intToEntry(1, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+    }
+
+    /*
+     * Force a checkpoint into the log. Use another thread, lest the asserts
+     * about held latches take effect.
+     */
+    private static class CheckpointHook implements TestHook {
+        private Environment env;
+
+        CheckpointHook(Environment env) {
+            this.env = env;
+        }
+
+        public void doHook() {
+            try {
+                EnvironmentImpl envImpl =
+                    DbInternal.envGetEnvironmentImpl(env);
+                SingleItemEntry startEntry =
+                    new SingleItemEntry(LogEntryType.LOG_CKPT_START,
+                                        new CheckpointStart(100, "test"));
+                long checkpointStart = envImpl.getLogManager().log
+                    (startEntry,
+                     ReplicationContext.NO_REPLICATE);
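+                /* Build a matching CkptEnd that records the CkptStart LSN. */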
+                CheckpointEnd ckptEnd = new CheckpointEnd
+                    ("test",
+                     checkpointStart,
+                     envImpl.getRootLsn(),
+                     envImpl.getTxnManager().getFirstActiveLsn(),
+                     envImpl.getNodeSequence().getLastLocalNodeId(),
+                     envImpl.getNodeSequence().getLastReplicatedNodeId(),
+                     envImpl.getDbTree().getLastLocalDbId(),
+                     envImpl.getDbTree().getLastReplicatedDbId(),
+                     envImpl.getTxnManager().getLastLocalTxnId(),
+                     envImpl.getTxnManager().getLastReplicatedTxnId(),
+                     100);
+                SingleItemEntry endEntry =
+                    new SingleItemEntry(LogEntryType.LOG_CKPT_END, ckptEnd);
+                envImpl.getLogManager().logForceFlush
+                    (endEntry,
+                     true, // fsyncRequired
+                     ReplicationContext.NO_REPLICATE);
+            } catch (DatabaseException e) {
+                fail(e.getMessage());
+            }
+        }
+
+        public Object getHookValue() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void doIOHook() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        public void hookSetup() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    /**
+     * Make sure eviction doesn't evict roots. If it did, we'd need to
+     * log the mapLN to be sure that recovery is correct.
+     */
+    public void testChangeAndEvictRoot()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = setupEnvConfig();
+        DatabaseConfig dbConfig = setupDbConfig();
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description. */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupEvictedRoot(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each log entry.
+         * Our baseline expected set is empty -- no records expected.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    /**
+     * Create a populated tree and checkpoint, then insert one more record
+     * to dirty the INs, evict, and checkpoint again.
+     */
+    private void setupEvictedRoot(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 2 levels, with 2 BINs. */
+        for (int i = 0; i < 10; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After inserts");
+        env.checkpoint(FORCE_CONFIG);
+
+        /*
+         * Add another record so that the eviction below will log
+         * different versions of the IN nodes.
+         */
+        IntegerBinding.intToEntry(10, key);
+        IntegerBinding.intToEntry(10, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+
+        /* Evict.  The runnable hook forces the evictor to run. */
+        TestHook evictHook = new TestHook() {
+                public void doIOHook() throws IOException {
+                    throw new UnsupportedOperationException();
+                }
+                public void doHook() {
+                    throw new UnsupportedOperationException();
+                }
+                public Object getHookValue() {
+                    return Boolean.TRUE;
+                }
+                public void hookSetup() {
+                    throw new UnsupportedOperationException();
+                }
+            };
+        DbInternal.envGetEnvironmentImpl(env).getEvictor().
+            setRunnableHook(evictHook);
+        env.evictMemory();
+
+        /* Checkpoint again. */
+        env.checkpoint(FORCE_CONFIG);
+    }
+
+    private EnvironmentConfig setupEnvConfig() {
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+        return envConfig;
+    }
+
+    private DatabaseConfig setupDbConfig() {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(useDups);
+        dbConfig.setAllowCreate(true);
+        return dbConfig;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java b/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8e8c7eabc3cf1858dfe6926c4a94a46be2f4e16d
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java
@@ -0,0 +1,318 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckReverseSplitsTest.java,v 1.12.2.1 2009/08/02 20:04:43 mark Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import java.util.HashSet;
+import java.util.logging.Level;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.Tracer;
+
+/*
+ * Exercise reverse splits (deletes of subtrees). Add a comprehensive
+ * "stepwise" approach, where we run the test repeatedly, truncating the log
+ * at each log entry point. At recovery, we check that we have all expected
+ * values. In particular, this approach was required to reproduce SR [#13501],
+ * which only failed if the log was broken off at a given point, between
+ * the logging of an IN and the update of a MapLN.
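+ *
+ * Roughly, the stepwise loop saves the .jdb files and then, for each log
+ * entry boundary, restores the files, truncates the log at that boundary,
+ * runs recovery, and checks the recovered records against the expected set.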
+ */
+public class CheckReverseSplitsTest extends CheckBase {
+
+    private static final String DB_NAME = "simpleDB";
+
+    private int max = 12;
+    private boolean useDups;
+    private boolean purgeRoot = false;
+    private static final CheckpointConfig FORCE_CONFIG =
+        new CheckpointConfig();
+    static {
+        FORCE_CONFIG.setForce(true);
+    }
+
+    /**
+     * SR #13501
+     * Reverse splits require the same upward propagation as regular splits,
+     * to avoid logging inconsistent versions of ancestor INs.
+     */
+    public void testReverseSplit()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(useDups);
+        dbConfig.setAllowCreate(true);
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupReverseSplit(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each
+         * log entry.
+         */
+
+        /* Establish the base set of records we expect. */
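+        /*
+         * setupReverseSplit deletes the two smallest records, so records
+         * 2..max-1 remain.
+         */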
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        for (int i = 2; i < max; i++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(0, keyEntry);
+            } else {
+                IntegerBinding.intToEntry(i, keyEntry);
+            }
+            IntegerBinding.intToEntry(i, dataEntry);
+            currentExpected.add(new TestData(keyEntry, dataEntry));
+        }
+
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    public void testReverseSplitDups()
+        throws Throwable {
+
+        useDups = true;
+        testReverseSplit();
+    }
+
+    /**
+     * Create this:
+     * <p>
+     * <pre>
+
+                         INa                        level 3
+                   /           \
+                INb            INc                  level 2
+             /   |    \        /  \
+           BINs BINt  BINu   BINv  BINw             level 1
+     * </pre>
+     * <p>
+     * First provoke an IN compression which removes BINs, and then
+     * provoke a split of BINw which results in propagating the change
+     * all the way up the tree. The bug therefore created a version of INa
+     * on disk which did not include the removal of BINs.
+     */
+    private void setupReverseSplit(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 3 levels. */
+        for (int i = 0; i < max; i ++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(0, key);
+            } else {
+                IntegerBinding.intToEntry(i, key);
+            }
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        /* Empty out the leftmost BIN. */
+        Cursor c = db.openCursor(null, null);
+        try {
+            assertEquals(OperationStatus.SUCCESS, c.getFirst(key, data,
+                                                         LockMode.DEFAULT));
+            assertEquals(OperationStatus.SUCCESS, c.delete());
+            assertEquals(OperationStatus.SUCCESS,
+                         c.getFirst(key, data, LockMode.DEFAULT));
+            assertEquals(OperationStatus.SUCCESS, c.delete());
+        } finally {
+            c.close();
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After deletes");
+
+        /* Mark the starting point for the stepwise log description. */
+        setStepwiseStart();
+
+        /*
+         * Checkpoint so that the deleted LNs are not replayed, and recovery
+         * relies on the INs.
+         */
+        env.checkpoint(FORCE_CONFIG);
+
+        /* Now remove the empty BIN. */
+        env.compress();
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After compress");
+
+        /*
+         * Add enough keys to split the level 2 IN on the right hand side.
+         * This makes an INa which still references the obsolete BINs.
+         * Truncate the log before the mapLN that refers to the new INa;
+         * otherwise the case will not fail, because recovery would first
+         * apply the new INa and then apply the INDelete of BINs. We want
+         * this case to apply the INDelete of BINs, and then follow with a
+         * splicing in of the new root.
+         */
+        for (int i = max; i < max+13; i ++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(0, key);
+            } else {
+                IntegerBinding.intToEntry(i, key);
+            }
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After data setup");
+
+    }
+
+    /**
+     * Create a tree, remove it all, replace with new records.
+     */
+    public void testCompleteRemoval()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+        if (purgeRoot) {
+            envConfig.setConfigParam(
+                       EnvironmentParams.COMPRESSOR_PURGE_ROOT.getName(),
+                       "true");
+        }
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(useDups);
+        dbConfig.setAllowCreate(true);
+
+        /* Run the full test case w/out truncating the log. */
+        testOneCase(DB_NAME, envConfig, dbConfig,
+                    new TestGenerator(true /* generate log description. */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupCompleteRemoval(db);
+                        }
+                    },
+                    envConfig, dbConfig);
+
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each log entry.
+         * Our baseline expected set is empty -- no records expected.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    public void testCompleteRemovalDups()
+        throws Throwable {
+
+        useDups = true;
+        testCompleteRemoval();
+    }
+
+    public void testCompleteRemovalPurgeRoot()
+        throws Throwable {
+
+        purgeRoot = true;
+        testCompleteRemoval();
+    }
+
+    /**
+     * Create a populated tree, delete all records, then begin to insert again.
+     */
+    private void setupCompleteRemoval(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 3 levels. */
+        for (int i = 0; i < max; i ++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(0, key);
+            } else {
+                IntegerBinding.intToEntry(i, key);
+            }
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After inserts");
+
+        /* Now delete it all. */
+        Cursor c = db.openCursor(null, null);
+        try {
+            int count = 0;
+            while (c.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                assertEquals(OperationStatus.SUCCESS, c.delete());
+                count++;
+            }
+        } finally {
+            c.close();
+        }
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After deletes");
+
+        /* Mark the starting point for the stepwise log description. */
+        setStepwiseStart();
+
+        /* Checkpoint first, so we don't simply replay all the deleted LNs. */
+        env.checkpoint(FORCE_CONFIG);
+
+        /* Compress, and make sure the subtree was removed. */
+        env.compress();
+        BtreeStats stats = (BtreeStats) db.getStats(new StatsConfig());
+        if (useDups) {
+            assertEquals(0, stats.getDuplicateInternalNodeCount());
+        } else {
+            /* COMPRESSOR_PURGE_ROOT is disabled, node should always exist. */
+            assertEquals(1, stats.getBottomInternalNodeCount());
+        }
+
+        /* Insert new data. */
+        for (int i = max*2; i < ((max*2) +5); i ++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(0, key);
+            } else {
+                IntegerBinding.intToEntry(i, key);
+            }
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckSR11307Test.java b/test/com/sleepycat/je/recovery/CheckSR11307Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..6862c22687cd78610be4020eb4ea463042f02c71
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckSR11307Test.java
@@ -0,0 +1,185 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckSR11307Test.java,v 1.19 2008/03/18 01:17:45 cwl Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.DBIN;
+import com.sleepycat.je.tree.DIN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.tree.Tree.SearchType;
+import com.sleepycat.je.util.TestUtils;
+
+public class CheckSR11307Test extends CheckBase {
+
+    private static final String DB_NAME = "simpleDB";
+
+    /**
+     * SR #11307
+     */
+    public void testSR11307()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+
+        DatabaseConfig validateDbConfig = new DatabaseConfig();
+        validateDbConfig.setSortedDuplicates(true);
+
+        EnvironmentConfig restartConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(restartConfig);
+
+        setCheckLsns(false);
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(){
+                        void generateData(Database db)
+                            throws DatabaseException {
+
+                            addData(db);
+                        }
+                    },
+                    restartConfig,
+                    validateDbConfig);
+    }
+
+    private void put(Database db, String keyString, String dataString)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(keyString.getBytes());
+        DatabaseEntry data = new DatabaseEntry(dataString.getBytes());
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+    }
+
+    private void delete(Database db, String keyString, String dataString)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(keyString.getBytes());
+        DatabaseEntry data = new DatabaseEntry(dataString.getBytes());
+
+        Cursor c = db.openCursor(null, null);
+        try {
+            assertEquals(OperationStatus.SUCCESS,
+                         c.getSearchBoth(key, data, null));
+            assertEquals(OperationStatus.SUCCESS, c.delete());
+        } finally {
+            c.close();
+        }
+    }
+
+    /*
+     * We're trying to emulate the following log synopsis.  The replay of this
+     * log will end up putting a DBIN in a DBIN if the requiresExactMatch is
+     * false in the call to readINs() in pass 7 of recovery.
+
+     <entry lsn="0x3d9/0x1cd2" type="LN/0" prev="0x1ca0" size="36" cksum="2555578370"><ln><node>6809</node><data>yrhwlvlgvq</data></ln><dbId id="3"/><key val="cfhaa"/></entry>
+
+     <entry lsn="0x3d9/0x4787" type="DBIN/1" prev="0x46d2" size="75" cksum="2518487070"><dbin><node>6807</node><key val="yrhwlvlgvq"/><isRoot val="false"/><level val="1"/><entries numEntries="1" length="128"><ref knownDeleted="false"><key val="yrhwlvlgvq"/><DbLsn val="0x3d9/0x1cd2"/></ref></entries><key val="cfhaa"/></dbin><dbId id="3"/></entry>
+
+     <entry lsn="0x3dc/0x1e029" type="DIN/1" prev="0x1dfbd" size="94" cksum="419435527"><din><node>6806</node><key val="ubpgspglfn"/><isRoot val="true"/><level val="2"/><entries numEntries="1" length="128"><ref knownDeleted="false"><key val="ubpgspglfn"/><DbLsn val="0x3d9/0x4787"/></ref></entries><key val="cfhaa"/><ref knownDeleted="false"><key val="cfhaa"/><DbLsn val="0x3dc/0x1c226"/></ref></din><dbId id="3"/></entry>
+
+     <entry lsn="0x3dc/0x4b3b5" type="DBIN/1" prev="0x4b349" size="213" cksum="4263720111"><dbin><node>5904</node><key val="gutjjgrfan"/><isRoot val="false"/><level val="1"/><entries numEntries="7" length="128"><ref knownDeleted="false"><key val="gutjjgrfan"/><DbLsn val="0x3dc/0x4848a"/></ref><ref knownDeleted="false"><key val="insjaepgnc"/><DbLsn val="0x3dc/0x48458"/></ref><ref knownDeleted="false"><key val="lthfpygdej"/><DbLsn val="0x3d8/0x584bb"/></ref><ref knownDeleted="false"><key val="qugxbqwtgd"/><DbLsn val="0x3dc/0x48426"/></ref><ref knownDeleted="false"><key val="savescwzoy"/><DbLsn val="0x3da/0x38d5"/></ref><ref knownDeleted="false"><key val="srbzmnargv"/><DbLsn val="0x3dc/0x484bc"/></ref><ref knownDeleted="false"><key val="ttkwptlkxv"/><DbLsn val="0x3db/0xa2d4a"/></ref></entries><key val="cfgaa"/></dbin><dbId id="3"/></entry>
+
+     <entry lsn="0x3dc/0x4dce2" type="DIN/1" prev="0x4dc76" size="94" cksum="4266267702"><din><node>5903</node><key val="fyfmgvxwux"/><isRoot val="true"/><level val="2"/><entries numEntries="1" length="128"><ref knownDeleted="false"><key val="fyfmgvxwux"/><DbLsn val="0x3dc/0x4b3b5"/></ref></entries><key val="cfgaa"/><ref knownDeleted="false"><key val="cfgaa"/><DbLsn val="0x3d9/0xc08d1"/></ref></din><dbId id="3"/></entry>
+
+     <entry lsn="0x3dd/0xe5c6d" type="CkptStart/0" prev="0xe57cb" size="26" cksum="2882930936"><CkptStart invoker="daemon" time="2004-10-30 13:26:19.89" id="1747"/></entry>
+
+     <entry lsn="0x3dd/0xe5d4f" type="IN/1" prev="0xe5cd7" size="236" cksum="1038102495"><in><node>17</node><key val="aaaaa"/><isRoot val="true"/><level val="10002"/><entries numEntries="11" length="128"><ref knownDeleted="false"><key val="aaaaa"/><DbLsn val="0x3dd/0xda14e"/></ref><ref knownDeleted="false"><key val="bbfaa"/><DbLsn val="0x3dd/0xdf614"/></ref><ref knownDeleted="false"><key val="ceaaa"/><DbLsn val="0x3dd/0xde785"/></ref><ref knownDeleted="false"><key val="dgcaa"/><DbLsn val="0x3dd/0xe063f"/></ref><ref knownDeleted="false"><key val="ecgaa"/><DbLsn val="0x3dd/0xddbe0"/></ref><ref knownDeleted="false"><key val="ejaaa"/><DbLsn val="0x3dd/0xe4ea1"/></ref><ref knownDeleted="false"><key val="gbbaa"/><DbLsn val="0x3dd/0xe40bb"/></ref><ref knownDeleted="false"><key val="hdcaa"/><DbLsn val="0x3dd/0xe36a1"/></ref><ref knownDeleted="false"><key val="hjhaa"/><DbLsn val="0x3dd/0xe0c81"/></ref><ref knownDeleted="false"><key val="igcaa"/><DbLsn val="0x3dd/0xd18dc"/></ref><ref knownDeleted="false"><key val="jdaaa"/><DbLsn val="0x3dd/0xd863a"/></ref></entries></in><dbId id="3"/></entry>
+
+     <entry lsn="0x3dd/0xed260" type="CkptEnd/0" prev="0xed22e" size="71" cksum="4160163625"><CkptEnd invoker="daemon" time="2004-10-30 13:26:20.046" lastNodeId="36927" lastDbId="4" lastTxnId="11033" id="1747" rootExists="true"><ckptStart><DbLsn val="0x3dd/0xe5c6d"/></ckptStart><root><DbLsn val="0x3dd/0xec74d"/></root><firstActive><DbLsn val="0x3dd/0xe5c6d"/></firstActive></CkptEnd></entry>
+
+     <entry lsn="0x3de/0x390ee" type="BIN/1" prev="0x38c70" size="2180" cksum="1778961065"><bin><node>2317</node><key val="ceaaa"/><isRoot val="false"/><level val="10001"/><entries numEntries="119" length="128">...<ref knownDeleted="false"><key val="cfhaa"/><DbLsn val="0x3dc/0x1e029"/></ref>...</entries></bin><dbId id="3"/></entry>
+
+     <entry lsn="0x3de/0x3b64c" type="DelDupLN_TX/0" prev="0x3b607" size="61" cksum="1460079772"><ln><node>6809</node></ln><dbId id="3"/><key val="cfhaa"/><DbLsn val="0x3d9/0x1cd2"/><knownDeleted val="false"/><txn id="11046__Txn"><DbLsn val="0x3de/0x3b3c7"/></txn><key val="yrhwlvlgvq"/></entry>
+
+     <entry lsn="0x3de/0x3b6e2" type="DupCountLN_TX/0" prev="0x3b697" size="55" cksum="4138272827"><dupCountLN><node>6805</node><data></data><count v="0"/></dupCountLN><dbId id="3"/><key val="cfhaa"/><DbLsn val="0x3dc/0x1c226"/><knownDeleted val="false"/><txn id="11046__Txn"><DbLsn val="0x3de/0x3b64c"/></txn></entry>
+
+     <entry lsn="0x3de/0x3b838" type="DBIN/1" prev="0x3b76c" size="75" cksum="2625900628"><dbin><node>6807</node><key val="yrhwlvlgvq"/><isRoot val="false"/><level val="1"/><entries numEntries="1" length="128"><ref knownDeleted="false"><key val="yrhwlvlgvq"/><DbLsn val="0x3de/0x3b64c"/></ref></entries><key val="cfhaa"/></dbin><dbId id="3"/></entry>
+
+     <entry lsn="0x3de/0x3bec7" type="DIN/1" prev="0x3bdcd" size="94" cksum="635376858"><din><node>6806</node><key val="ubpgspglfn"/><isRoot val="true"/><level val="2"/><entries numEntries="1" length="128"><ref knownDeleted="false"><key val="ubpgspglfn"/><DbLsn val="0x3de/0x3b838"/></ref></entries><key val="cfhaa"/><ref knownDeleted="false"><key val="cfhaa"/><DbLsn val="0x3de/0x3b6e2"/></ref></din><dbId id="3"/></entry>
+
+     <entry lsn="0x3de/0x40df6" type="INDupDelete/0" prev="0x40d8a" size="35" cksum="2389575622"><INDupDeleteEntry node="6806"><key val="cfhaa"/><key val="ubpgspglfn"/><dbId id="3"/></INDupDeleteEntry></entry>
+
+     <entry lsn="0x3de/0x46660" type="BIN/1" prev="0x465f4" size="2162" cksum="3104884361"><bin><node>2317</node><key val="ceaaa"/><isRoot val="false"/><level val="10001"/><entries numEntries="118" length="128">...<ref knownDeleted="false"><key val="cfgaa"/><DbLsn val="0x3dc/0x4dce2"/></ref>...</entries></bin><dbId id="3"/></entry>
+
+     <entry lsn="0x3de/0x50fa2" type="BIN/1" prev="0x50f36" size="2162" cksum="111985926"><bin><node>2317</node><key val="ceaaa"/><isRoot val="false"/><level val="10001"/><entries numEntries="118" length="128">...<ref knownDeleted="false"><key val="cfgaa"/>...</entries></bin><dbId id="3"/></entry>
+
+     Trace list:
+     lsn=0x3dd/0xe5d4f node=17
+     lsn=0x3de/0x50fa2 node=2317
+     lsn=0x3dc/0x4dce2 node=5903
+     lsn=0x3dc/0x4b3b5 node=5904
+
+    */
+    private void addData(Database db)
+        throws DatabaseException {
+
+        DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(db);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
+
+        /*
+         * Create a one element dup tree by making a dupe and then reducing
+         * it back to one element.
+         */
+        put(db, "cfhaa", "yrhwlvlgvq");
+        put(db, "cfhaa", "blort");
+        delete(db, "cfhaa", "blort");
+        env.compress();
+        env.sync();
+
+        /* Same thing for cfgaa, plus two neighboring keys. */
+        put(db, "cfgaa", "urhwlvlgvq");
+        put(db, "cfgaa", "blort");
+        delete(db, "cfgaa", "blort");
+        put(db, "cfiaa", "yrhwlvlgvq");
+        put(db, "cffaa", "yrhwlvlgvq");
+        env.sync();
+        env.sync();
+
+        /* Write out the DelDupLN and DupCountLN. */
+        delete(db, "cfhaa", "yrhwlvlgvq");
+        BIN bin = (BIN) dbImpl.getTree().
+            search("cfhaa".getBytes(), SearchType.NORMAL, -1,
+                   null, CacheMode.DEFAULT);
+        assertNotNull(bin);
+        int idx = bin.findEntry("cfhaa".getBytes(), false, true);
+        DIN din = (DIN) bin.getTarget(idx);
+        assertNotNull(din);
+        din.latch();
+        idx = din.findEntry("yrhwlvlgvq".getBytes(), false, true);
+        DBIN dbin = (DBIN) din.getTarget(idx);
+        Key.DUMP_TYPE = DumpType.TEXT;
+        dbin.latch();
+        dbin.log(envImpl.getLogManager());
+        din.log(envImpl.getLogManager());
+        din.releaseLatch();
+        dbin.releaseLatch();
+        bin.releaseLatch();
+        env.compress();
+        bin.latch();
+        bin.log(envImpl.getLogManager());
+        bin.releaseLatch();
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java b/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f72ed54b1c7f2c49105d844fca90887342ca8a9f
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java
@@ -0,0 +1,146 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckSplitAuntTest.java,v 1.7 2008/06/30 20:54:48 linda Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import java.util.HashSet;
+import java.util.logging.Level;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.Tracer;
+
+public class CheckSplitAuntTest extends CheckBase {
+
+    private static final String DB_NAME = "simpleDB";
+
+    /**
+     * Dirty the left branch and checkpoint, then split the right branch,
+     * and make sure recovery handles the resulting log.
+     */
+    public void testSplitAunt()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+
+        EnvironmentConfig restartConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(restartConfig);
+        restartConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "4");
+        restartConfig.setTransactional(true);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(true){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupSplitData(db);
+                        }
+                    },
+                    restartConfig,
+                    new DatabaseConfig());
+
+        /*
+         * Now run the test in a stepwise loop, truncate after each
+         * log entry. We start the steps before the inserts, so the base
+         * expected set is empty.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        if (TestUtils.runLongTests()) {
+            stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+        }
+    }
+
+    private void setupSplitData(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        int max = 12;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 3 levels, then checkpoint. */
+        for (int i = 0; i < max; i ++) {
+            IntegerBinding.intToEntry(i*10, key);
+            IntegerBinding.intToEntry(i*10, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "First sync");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "Second sync");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "Third sync");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "Fourth sync");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "Fifth sync");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "Sync6");
+        env.sync();
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After sync");
+
+        /* Add a key to dirty the left hand branch. */
+        IntegerBinding.intToEntry(5, key);
+        IntegerBinding.intToEntry(5, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "After single key insert");
+
+        ckptConfig.setForce(true);
+        ckptConfig.setMinimizeRecoveryTime(true);
+        env.checkpoint(ckptConfig);
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "before split");
+
+
+        /* Add enough keys to split the right hand branch. */
+        for (int i = 51; i < 57; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "after split");
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckSplitsTest.java b/test/com/sleepycat/je/recovery/CheckSplitsTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..10182acac03c39f95cde7c2c62b94b573688f23e
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckSplitsTest.java
@@ -0,0 +1,378 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CheckSplitsTest.java,v 1.18 2008/06/30 20:54:48 linda Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import java.util.HashSet;
+import java.util.logging.Level;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.Tracer;
+
+public class CheckSplitsTest extends CheckBase {
+
+    private static final String DB_NAME = "simpleDB";
+    private boolean useDups;
+
+    /**
+     * Test basic inserts.
+     */
+    public void testBasicInsert()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(useDups);
+
+        DatabaseConfig validateDbConfig = new DatabaseConfig();
+        validateDbConfig.setSortedDuplicates(useDups);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(true /* generate log description */){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupBasicInsertData(db);
+                        }
+                    },
+                    envConfig,
+                    validateDbConfig);
+
+        /*
+         * Now run the test in a stepwise loop, truncating after each
+         * log entry. We start the steps before the inserts, so the base
+         * expected set is empty.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected,  0);
+    }
+
+    public void testBasicInsertDups()
+        throws Throwable {
+
+        useDups = true;
+        testBasicInsert();
+    }
+
+    private void setupBasicInsertData(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        /* If using dups, create several dup trees. */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < 21; i++) {
+            if (useDups) {
+                IntegerBinding.intToEntry(i%3, key);
+            } else {
+                IntegerBinding.intToEntry(i, key);
+            }
+            IntegerBinding.intToEntry(i, data);
+            db.put(null, key, data);
+        }
+    }
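+
+    /*
+     * Rough arithmetic for the insert count above (a sketch, assuming the
+     * configured NODE_MAX of 4 bounds the fanout of BINs and INs alike):
+     * one BIN holds at most 4 LNs and a two-level tree at most 4 * 4 = 16,
+     * so 21 non-duplicate records force at least three levels and several
+     * splits.  With duplicates, the same 21 records collapse into 3 keys
+     * with 7 duplicates each, exercising dup-tree growth instead.
+     */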
+
+    /**
+     * SR #10715
+     * Splits must propagate up the tree at split time to avoid logging
+     * inconsistent versions of ancestor INs.
+     */
+    public void testSplitPropagation()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "6");
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+
+        EnvironmentConfig restartConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(restartConfig);
+        restartConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "6");
+        restartConfig.setTransactional(true);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(true){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupSplitData(db);
+                        }
+                    },
+                    restartConfig,
+                    new DatabaseConfig());
+
+        /*
+         * Now run the test in a stepwise loop, truncating after each
+         * log entry. We start the steps before the inserts, so the base
+         * expected set is empty.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        if (TestUtils.runLongTests()) {
+            stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected,  0);
+        }
+    }
+
+    private void setupSplitData(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        int max = 120;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 4 levels, then checkpoint. */
+
+        for (int i = 0; i < max; i ++) {
+            IntegerBinding.intToEntry(i*10, key);
+            IntegerBinding.intToEntry(i*10, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
+        env.checkpoint(ckptConfig);
+
+        /* Add enough keys to split the left-hand branch again. */
+        for (int i = 50; i < 100; i+=2) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        /* Add enough keys to split the right-hand branch. */
+        for (int i = 630; i < 700; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Tracer.trace(Level.SEVERE, DbInternal.envGetEnvironmentImpl(env),
+                     "before split");
+
+        /* Add enough keys to split the left-hand branch again. */
+        for (int i = 58; i < 75; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+    }
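+
+    /*
+     * Rough arithmetic for the population above (a sketch, assuming the
+     * configured NODE_MAX of 6 bounds the fanout at every level): a
+     * two-level tree holds at most 6 LNs, a three-level tree at most
+     * 6 * 6 = 36, and a four-level tree at most 6 * 6 * 6 = 216.  The 120
+     * initial records therefore land in a four-level tree, leaving both
+     * branches close enough to full that the later targeted inserts can
+     * split them on demand.
+     */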
+
+    /**
+     * [#13435]  Checks that a DIN can be replayed with a full BIN parent.
+     * When a DIN is replayed, it may already be present in the parent BIN.
+     * Before fixing this bug, we searched without allowing splits and then
+     * called IN.insertEntry, which would throw InconsistentNodeException if
+     * the BIN was full.  We now search with splits allowed, which avoids the
+     * exception; however, it causes a split when one is not needed.
+     *
+     * Note that an alternate fix would be to revert to an earlier version of
+     * RecoveryManager.replaceOrInsertDuplicateRoot (differences are between
+     * version 1.184 and 1.185).  The older version searches for an existing
+     * entry, and then inserts if necessary.  This would avoid the extra split.
+     * However, we had to search with splits allowed anyway to fix another
+     * problem -- see testBINSplitDuringDeletedDINReplay.
+     */
+    public void testBINSplitDuringDINReplay()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(true){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupBINSplitDuringDINReplay(db);
+                        }
+                    },
+                    envConfig,
+                    dbConfig);
+
+        /*
+         * Now run the test in a stepwise loop, truncating after each
+         * log entry. We start the steps before the inserts, so the base
+         * expected set is empty.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        if (TestUtils.runLongTests()) {
+            stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected,  0);
+        }
+    }
+
+    /**
+     * Fill a BIN with entries, with a DIN in the first entry; then force the
+     * BIN to be flushed, as might occur via eviction or checkpointing.
+     */
+    private void setupBINSplitDuringDINReplay(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        final int max = 128;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(1, key);
+        IntegerBinding.intToEntry(0, data);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.putNoOverwrite(null, key, data));
+        IntegerBinding.intToEntry(1, data);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.putNoDupData(null, key, data));
+
+        Cursor cursor = db.openCursor(null, null);
+
+        for (int i = 2; i <= max; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(0, data);
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.putNoOverwrite(key, data));
+        }
+
+        TestUtils.logBINAndIN(env, cursor);
+
+        cursor.close();
+    }
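+
+    /*
+     * The constant max = 128 above presumably matches the default
+     * NODE_MAX, so that after the putNoOverwrite loop the BIN is exactly
+     * full: slot 0 holds the DIN for key 1 and the remaining 127 slots
+     * hold the single LNs for keys 2..128.  Replaying the DIN then finds
+     * no free slot.  (The default fanout is an assumption here; the test
+     * does not set NODE_MAX explicitly.)
+     */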
+
+    /**
+     * [#13435]  Checks that recovering a DIN causes a BIN split when needed.
+     * This occurs when a DIN has been deleted and subsequently the BIN is
+     * filled.  The DIN and the INDupDelete will be replayed; we will insert
+     * the DIN and then delete it.  In order to insert it, we may need to split
+     * the BIN.  The sequence is:
+     *
+     * LN-a
+     * (DupCountLN/) DIN (/DBIN/DupCountLN)
+     * LN-b
+     * DelDupLN-a (/DupCountLN)
+     * DelDupLN-b (/DupCountLN)
+     * INDupDelete compress
+     * LN-c/etc to fill the BIN
+     * BIN
+     *
+     * LN-a and LN-b are dups (same key).  After being compressed away, the
+     * BIN is filled completely and flushed by the evictor or checkpointer.
+     *
+     * During recovery, when we replay the DIN, we need to insert it into
+     * the full BIN and therefore we need to split.  Before the bug fix, we
+     * did not search with splits allowed, and got an
+     * InconsistentNodeException.
+     */
+    public void testBINSplitDuringDeletedDINReplay()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+
+        testOneCase(DB_NAME,
+                    envConfig,
+                    dbConfig,
+                    new TestGenerator(true){
+                        void generateData(Database db)
+                            throws DatabaseException {
+                            setupBINSplitDuringDeletedDINReplay(db);
+                        }
+                    },
+                    envConfig,
+                    dbConfig);
+
+        /*
+         * Now run the test in a stepwise loop, truncating after each
+         * log entry. We start the steps before the inserts, so the base
+         * expected set is empty.
+         */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        if (TestUtils.runLongTests()) {
+            stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected,  0);
+        }
+    }
+
+    /**
+     * Insert two dups, delete them, and compress to free the BIN entry;
+     * then fill the BIN with LNs and flush the BIN.
+     */
+    private void setupBINSplitDuringDeletedDINReplay(Database db)
+        throws DatabaseException {
+
+        setStepwiseStart();
+
+        int max = 128;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(0, key);
+        IntegerBinding.intToEntry(0, data);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.putNoOverwrite(null, key, data));
+        IntegerBinding.intToEntry(1, data);
+        assertEquals(OperationStatus.SUCCESS,
+                     db.putNoDupData(null, key, data));
+
+        assertEquals(OperationStatus.SUCCESS,
+                     db.delete(null, key));
+
+        env.compress();
+
+        Cursor cursor = db.openCursor(null, null);
+
+        for (int i = 1; i <= max; i ++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(0, data);
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.putNoOverwrite(key, data));
+        }
+
+        TestUtils.logBINAndIN(env, cursor);
+
+        cursor.close();
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckpointActivationTest.java b/test/com/sleepycat/je/recovery/CheckpointActivationTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4bed38afa7dd62170c4a3607e0b1f6b9f34465ab
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckpointActivationTest.java
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CheckpointActivationTest.java,v 1.21.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.logging.Level;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class CheckpointActivationTest extends TestCase {
+
+    private File envHome;
+
+    public CheckpointActivationTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * Write elements to the log, check that the right number of
+     * checkpoints ran.
+     */
+    public void testLogSizeBasedCheckpoints()
+        throws Exception {
+
+        final int CKPT_INTERVAL = 5000;
+        final int TRACER_OVERHEAD = 26;
+        final int N_TRACES = 100;
+        final int N_CHECKPOINTS = 10;
+        final int WAIT_FOR_CHECKPOINT_SECS = 10;
+        final int FILE_SIZE = 20000000;
+
+        /* Init trace message with hyphens. */
+        assert CKPT_INTERVAL % N_TRACES == 0;
+        int msgBytesPerTrace = (CKPT_INTERVAL / N_TRACES) - TRACER_OVERHEAD;
+        StringBuffer traceBuf = new StringBuffer();
+        for (int i = 0; i < msgBytesPerTrace; i += 1) {
+            traceBuf.append('-');
+        }
+        String traceMsg = traceBuf.toString();
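+
+        /*
+         * Worked out: msgBytesPerTrace = 5000 / 100 - 26 = 24 hyphens, so
+         * each trace record occupies roughly 24 + 26 = 50 log bytes and
+         * 100 traces write about one CKPT_INTERVAL (5000 bytes), assuming
+         * TRACER_OVERHEAD reflects the true per-record overhead.
+         */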
+
+        Environment env = null;
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam(EnvironmentParams.
+                                     CHECKPOINTER_BYTES_INTERVAL.getName(),
+                                     String.valueOf(CKPT_INTERVAL));
+
+            /*
+             * This test needs to control exactly how much goes into the log,
+             * so disable daemons.
+             */
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_EVICTOR.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_INCOMPRESSOR.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_CLEANER.getName(), "false");
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Get a first reading on number of checkpoints run. Read once
+             * to clear, then read again.
+             */
+            StatsConfig statsConfig = new StatsConfig();
+            statsConfig.setFast(true);
+            statsConfig.setClear(true);
+            EnvironmentStats stats = env.getStats(statsConfig); // clear stats
+
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(0, stats.getNCheckpoints());
+            long lastCkptEnd = stats.getLastCheckpointEnd();
+
+            /* Wait for checkpointer thread to start and go to wait state. */
+            EnvironmentImpl envImpl =
+                DbInternal.envGetEnvironmentImpl(env);
+            Thread ckptThread = envImpl.getCheckpointer().getThread();
+            while (true) {
+                Thread.State state = ckptThread.getState();
+                if (state == Thread.State.WAITING ||
+                    state == Thread.State.TIMED_WAITING) {
+                    break;
+                }
+            }
+
+            /* Run several checkpoints to ensure they occur as expected.  */
+            for (int i = 0; i < N_CHECKPOINTS; i += 1) {
+
+                /*
+                 * Write enough to prompt a checkpoint.  20% extra bytes are
+                 * written to be sure that we exceed the checkpoint interval.
+                 */
+                long lastLsn = envImpl.getFileManager().getNextLsn();
+                while (DbLsn.getNoCleaningDistance
+                        (lastLsn, envImpl.getFileManager().getNextLsn(),
+                         FILE_SIZE) < CKPT_INTERVAL + CKPT_INTERVAL/5) {
+                    Tracer.trace(Level.SEVERE, envImpl, traceMsg);
+                }
+
+                /*
+                 * Wait for a checkpoint to start (if the test succeeds it will
+                 * start right away).  We take advantage of the fact that the
+                 * NCheckpoints stat is set at the start of a checkpoint.
+                 */
+                long startTime = System.currentTimeMillis();
+                boolean started = false;
+                while (!started &&
+                       (System.currentTimeMillis() - startTime <
+                        WAIT_FOR_CHECKPOINT_SECS * 1000)) {
+                    Thread.yield();
+                    Thread.sleep(1);
+                    stats = env.getStats(statsConfig);
+                    if (stats.getNCheckpoints() > 0) {
+                        started = true;
+                    }
+                }
+                assertTrue("Checkpoint " + i + " did not start after " +
+                           WAIT_FOR_CHECKPOINT_SECS + " seconds",
+                           started);
+
+                /*
+                 * Wait for the checkpointer daemon to do its work.  We do not
+                 * want to continue writing until the checkpoint is complete,
+                 * because the amount of data we write is calculated to be the
+                 * correct amount in between checkpoints.  We know the
+                 * checkpoint is finished when the LastCheckpointEnd LSN
+                 * changes.
+                 */
+                while (true) {
+                    Thread.yield();
+                    Thread.sleep(1);
+                    stats = env.getStats(statsConfig);
+                    if (lastCkptEnd != stats.getLastCheckpointEnd()) {
+                        lastCkptEnd = stats.getLastCheckpointEnd();
+                        break;
+                    }
+                }
+            }
+        } catch (Exception e) {
+
+            /*
+             * print stack trace now, else it gets subsumed in exceptions
+             * caused by difficulty in removing log files.
+             */
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
+
+    /* Test programmatic call to checkpoint. */
+    public void testApiCalls()
+        throws Exception {
+
+        Environment env = null;
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam(EnvironmentParams.
+                                     CHECKPOINTER_BYTES_INTERVAL.getName(),
+                                     "1000");
+
+            /* Disable all daemons */
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_EVICTOR.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_INCOMPRESSOR.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_CLEANER.getName(), "false");
+            envConfig.setConfigParam(EnvironmentParams.
+                                     ENV_RUN_CHECKPOINTER.getName(), "false");
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Get a first reading on number of checkpoints run. Read once
+             * to clear, then read again.
+             */
+            StatsConfig statsConfig = new StatsConfig();
+            statsConfig.setFast(true);
+            statsConfig.setClear(true);
+            EnvironmentStats stats = env.getStats(statsConfig); // clear stats
+
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(0, stats.getNCheckpoints());
+
+            /*
+             * From the last checkpoint start LSN, there should be the
+             * checkpoint end log entry and a trace message. These take 196
+             * bytes.
+             */
+            CheckpointConfig checkpointConfig = new CheckpointConfig();
+
+            /* Should not cause a checkpoint, too little growth. */
+            checkpointConfig.setKBytes(1);
+            env.checkpoint(checkpointConfig);
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(0, stats.getNCheckpoints());
+
+            /* Fill up the log, there should be a checkpoint. */
+            String filler = "123456789012345678901245678901234567890123456789";
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            for (int i = 0; i < 20; i++) {
+                Tracer.trace(Level.SEVERE, envImpl, filler);
+            }
+            env.checkpoint(checkpointConfig);
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(1, stats.getNCheckpoints());
+
+            /* Try time based, should not checkpoint. */
+            checkpointConfig.setKBytes(0);
+            checkpointConfig.setMinutes(1);
+            env.checkpoint(checkpointConfig);
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(0, stats.getNCheckpoints());
+
+            /*
+             * Sleep; enough time has passed for a checkpoint, but nothing
+             * was written to the log.
+             */
+            Thread.sleep(1000);
+            env.checkpoint(checkpointConfig);
+            stats = env.getStats(statsConfig);  // read again
+            assertEquals(0, stats.getNCheckpoints());
+
+            /* Log something, now try a checkpoint. */
+            Tracer.trace(Level.SEVERE, envImpl, filler);
+            env.checkpoint(checkpointConfig);
+            stats = env.getStats(statsConfig);  // read again
+            // TODO: make this test more timing independent. Sometimes
+            // the assertion will fail.
+            // assertEquals(1, stats.getNCheckpoints());
+
+        } catch (Exception e) {
+            /*
+             * print stack trace now, else it gets subsumed in exceptions
+             * caused by difficulty in removing log files.
+             */
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
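+
+    /*
+     * Minimal sketch of the threshold pattern exercised above (not called
+     * by the tests; the helper name is hypothetical).  With setKBytes(1),
+     * env.checkpoint is a no-op unless at least 1KB has been logged since
+     * the last checkpoint, so the cleared NCheckpoints stat reveals
+     * whether one actually ran.
+     */
+    private static boolean checkpointIfLogGrew(Environment env)
+        throws Exception {
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+        env.getStats(statsConfig);               // clear NCheckpoints
+
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setKBytes(1);                 // require 1KB of log growth
+        env.checkpoint(ckptConfig);              // may be a no-op
+
+        return env.getStats(statsConfig).getNCheckpoints() > 0;
+    }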
+}
diff --git a/test/com/sleepycat/je/recovery/LevelRecorderTest.java b/test/com/sleepycat/je/recovery/LevelRecorderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0a858d11b423a24c1db5a8d7d45d5c4574e1e781
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/LevelRecorderTest.java
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: LevelRecorderTest.java,v 1.7 2008/06/30 20:54:48 linda Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.tree.IN;
+
+public class LevelRecorderTest extends TestCase {
+
+    public LevelRecorderTest() {
+    }
+
+    public void testRecording() {
+        LevelRecorder recorder = new LevelRecorder();
+
+        DatabaseId id1 = new DatabaseId(1);
+        DatabaseId id5 = new DatabaseId(5);
+        DatabaseId id10 = new DatabaseId(10);
+
+        int level1 = IN.BIN_LEVEL;
+        int level2 = level1 + 1;
+        int level3 = level1 + 2;
+        int level4 = level1 + 3;
+
+        /* Mimic the recording of various INs for various databases. */
+        recorder.record(id10, level1);
+        recorder.record(id5,  level3);
+        recorder.record(id5,  level2);
+        recorder.record(id10, level1);
+        recorder.record(id1,  level1);
+        recorder.record(id10, level1);
+        recorder.record(id1,  level4);
+
+        /*
+         * We should only have to redo recovery for dbs 1 and 5. Db 10 had
+         * INs all of the same level.
+         */
+        Set<DatabaseId> reprocessSet = recorder.getDbsWithDifferentLevels();
+        assertEquals(2, reprocessSet.size());
+        assertTrue(reprocessSet.contains(id5));
+        assertTrue(reprocessSet.contains(id1));
+        assertFalse(reprocessSet.contains(id10));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/MultiEnvTest.java b/test/com/sleepycat/je/recovery/MultiEnvTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..718587f7890e2f8571d500f618f6360058151e35
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/MultiEnvTest.java
@@ -0,0 +1,53 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: MultiEnvTest.java,v 1.16 2008/06/30 20:54:48 linda Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.util.TestUtils;
+
+public class MultiEnvTest extends TestCase {
+
+    private File envHome1;
+    private File envHome2;
+
+    public MultiEnvTest() {
+        envHome1 = new File(System.getProperty(TestUtils.DEST_DIR));
+        envHome2 = new File(System.getProperty(TestUtils.DEST_DIR),
+                            "propTest");
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome1, false);
+        TestUtils.removeLogFiles("Setup", envHome2, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+	TestUtils.removeLogFiles("TearDown", envHome1, false);
+	TestUtils.removeLogFiles("TearDown", envHome2, false);
+    }
+
+    public void testNodeIdsAfterRecovery()
+        throws Throwable {
+            /* 
+             * TODO: replace this test which previously checked that the node
+             * id sequence shared among environments was correct with a test
+             * that checks all sequences, including replicated ones. This
+             * change is appropriate because the node id sequence is no longer
+             * a static field.
+             */
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/Recovery2PCTest.java b/test/com/sleepycat/je/recovery/Recovery2PCTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..85d6d86abf06013403ccb3838fc84a4043cdc0a6
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/Recovery2PCTest.java
@@ -0,0 +1,520 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Recovery2PCTest.java,v 1.15.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.XAResource;
+import javax.transaction.xa.Xid;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.log.LogUtils.XidImpl;
+
+public class Recovery2PCTest extends RecoveryTestBase {
+    private boolean explicitTxn;
+    private boolean commit;
+    private boolean recover;
+
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+	for (int i = 0; i < 2; i++) {
+	    for (int j = 0; j < 2; j++) {
+		for (int k = 0; k < 2; k++) {
+		    allTests.addTest
+			(((Recovery2PCTest)
+			  (TestSuite.createTest(Recovery2PCTest.class,
+						"testBasic"))).
+			 init(i, j, k));
+		}
+	    }
+	}
+
+	/* We only need to run testXARecoverAPI for implicit and explicit. */
+	allTests.addTest
+	    (((Recovery2PCTest)
+	      (TestSuite.createTest(Recovery2PCTest.class,
+				    "testXARecoverAPI"))).
+	     init(0, 0, 0));
+	allTests.addTest
+	    (((Recovery2PCTest)
+	      (TestSuite.createTest(Recovery2PCTest.class,
+				    "testXARecoverAPI"))).
+	     init(1, 0, 0));
+	allTests.addTest
+	    (((Recovery2PCTest)
+	      (TestSuite.createTest(Recovery2PCTest.class,
+				    "testXARecoverArgCheck"))).
+	     init(0, 0, 0));
+        return allTests;
+    }
+
+    public Recovery2PCTest() {
+	super(true);
+    }
+
+    private Recovery2PCTest init(int explicitTxn,
+				 int commit,
+				 int recover) {
+	this.explicitTxn = (explicitTxn == 0);
+	this.commit = (commit == 0);
+	this.recover = (recover == 0);
+	return this;
+    }
+
+    private String opName() {
+	StringBuffer sb = new StringBuffer();
+
+	if (explicitTxn) {
+	    sb.append("Exp");
+	} else {
+	    sb.append("Imp");
+	}
+
+	sb.append("/");
+
+	if (commit) {
+	    sb.append("C");
+	} else {
+	    sb.append("A");
+	}
+
+	sb.append("/");
+
+	if (recover) {
+	    sb.append("Rec");
+	} else {
+	    sb.append("No Rec");
+	}
+
+	return sb.toString();
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(getName() + ": " + opName());
+	super.tearDown();
+    }
+
+    public void testBasic()
+	throws Throwable {
+
+	createXAEnvAndDbs(1 << 20, false, NUM_DBS);
+	XAEnvironment xaEnv = (XAEnvironment) env;
+        int numRecs = NUM_RECS * 3;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+	    XidImpl xid = new XidImpl(1, "TwoPCTest1".getBytes(), null);
+            Transaction txn = null;
+	    if (explicitTxn) {
+		txn = env.beginTransaction(null, null);
+		xaEnv.setXATransaction(xid, txn);
+	    } else {
+		xaEnv.start(xid, 0);
+	    }
+            insertData(txn, 0, numRecs - 1, expectedData, 1, commit, NUM_DBS);
+	    if (!explicitTxn) {
+		xaEnv.end(xid, 0);
+	    }
+
+	    xaEnv.prepare(xid);
+
+	    if (recover) {
+		closeEnv();
+		xaRecoverOnly(NUM_DBS);
+		xaEnv = (XAEnvironment) env;
+	    }
+
+	    if (commit) {
+		xaEnv.commit(xid, false);
+	    } else {
+		xaEnv.rollback(xid);
+	    }
+
+	    if (recover) {
+		verifyData(expectedData, commit, NUM_DBS);
+		forceCloseEnvOnly();
+	    } else {
+		closeEnv();
+	    }
+            xaRecoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
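+
+    /*
+     * For reference, the minimal XA lifecycle exercised above in the
+     * implicit-transaction style (a sketch; assumes a fresh XAEnvironment
+     * and an Xid that is not already registered):
+     *
+     *   xaEnv.start(xid, XAResource.TMNOFLAGS);  // associate this thread
+     *   // ... operations on this thread join the XA transaction ...
+     *   xaEnv.end(xid, XAResource.TMSUCCESS);    // disassociate
+     *   xaEnv.prepare(xid);                      // phase one
+     *   xaEnv.commit(xid, false);                // phase two; or rollback(xid)
+     */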
+
+    public void testXARecoverAPI()
+ 	throws Throwable {
+
+	createXAEnvAndDbs(1 << 20, false, NUM_DBS << 1);
+	final XAEnvironment xaEnv = (XAEnvironment) env;
+        final int numRecs = NUM_RECS * 3;
+
+        try {
+            /* Set up a repository of expected data. */
+            final Map<TestData, Set<TestData>> expectedData1 = 
+                new HashMap<TestData, Set<TestData>>();
+
+            final Map<TestData, Set<TestData>> expectedData2 = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            final Transaction txn1 =
+		(explicitTxn ?
+		 env.beginTransaction(null, null) :
+		 null);
+            final Transaction txn2 =
+		(explicitTxn ?
+		 env.beginTransaction(null, null) :
+		 null);
+	    final XidImpl xid1 = new XidImpl(1, "TwoPCTest1".getBytes(), null);
+	    final XidImpl xid2 = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+
+	    Thread thread1 = new Thread() {
+		    public void run() {
+			try {
+			    if (explicitTxn) {
+				xaEnv.setXATransaction(xid1, txn1);
+			    } else {
+				xaEnv.start(xid1, 0);
+			    }
+			    Thread.yield();
+			    insertData(txn1, 0, numRecs - 1, expectedData1, 1,
+				       true, 0, NUM_DBS);
+			    Thread.yield();
+			    if (!explicitTxn) {
+				xaEnv.end(xid1, 0);
+			    }
+			    Thread.yield();
+			} catch (Exception E) {
+			    fail("unexpected: " + E);
+			}
+		    }
+		};
+
+	    Thread thread2 = new Thread() {
+		    public void run() {
+			try {
+			    if (explicitTxn) {
+				xaEnv.setXATransaction(xid2, txn2);
+			    } else {
+				xaEnv.start(xid2, 0);
+			    }
+			    Thread.yield();
+			    insertData(txn2, numRecs, numRecs << 1,
+				       expectedData2, 1, false, NUM_DBS,
+				       NUM_DBS << 1);
+			    Thread.yield();
+			    if (!explicitTxn) {
+				xaEnv.end(xid2, 0);
+			    }
+			    Thread.yield();
+			} catch (Exception E) {
+			    fail("unexpected: " + E);
+			}
+		    }
+		};
+
+	    thread1.start();
+	    thread2.start();
+	    thread1.join();
+	    thread2.join();
+
+	    xaEnv.prepare(xid1);
+	    try {
+		xaEnv.prepare(xid1);
+		fail("should have thrown XID has already been registered");
+	    } catch (XAException XAE) {
+		// xid1 has already been registered.
+	    }
+	    xaEnv.prepare(xid2);
+
+	    XAEnvironment xaEnv2 = xaEnv;
+	    Xid[] unfinishedXAXids = xaEnv2.recover(0);
+	    assertTrue(unfinishedXAXids.length == 2);
+	    boolean sawXid1 = false;
+	    boolean sawXid2 = false;
+	    for (int i = 0; i < 2; i++) {
+		if (unfinishedXAXids[i].equals(xid1)) {
+		    if (sawXid1) {
+			fail("saw Xid1 twice");
+		    }
+		    sawXid1 = true;
+		}
+		if (unfinishedXAXids[i].equals(xid2)) {
+		    if (sawXid2) {
+			fail("saw Xid2 twice");
+		    }
+		    sawXid2 = true;
+		}
+	    }
+	    assertTrue(sawXid1 && sawXid2);
+
+	    for (int ii = 0; ii < 4; ii++) {
+		forceCloseEnvOnly();
+		xaEnv2 = (XAEnvironment) env;
+		xaRecoverOnly(NUM_DBS);
+		xaEnv2 = (XAEnvironment) env;
+
+		unfinishedXAXids = xaEnv2.recover(0);
+		assertTrue(unfinishedXAXids.length == 2);
+		sawXid1 = false;
+		sawXid2 = false;
+		for (int i = 0; i < 2; i++) {
+		    if (unfinishedXAXids[i].equals(xid1)) {
+			if (sawXid1) {
+			    fail("saw Xid1 twice");
+			}
+			sawXid1 = true;
+		    }
+		    if (unfinishedXAXids[i].equals(xid2)) {
+			if (sawXid2) {
+			    fail("saw Xid2 twice");
+			}
+			sawXid2 = true;
+		    }
+		}
+		assertTrue(sawXid1 && sawXid2);
+	    }
+
+	    xaEnv2 = (XAEnvironment) env;
+	    xaEnv2.getXATransaction(xid1);
+	    xaEnv2.getXATransaction(xid2);
+	    xaEnv2.commit(xid1, false);
+	    xaEnv2.rollback(xid2);
+	    verifyData(expectedData1, false, 0, NUM_DBS);
+	    verifyData(expectedData2, false, NUM_DBS, NUM_DBS << 1);
+	    forceCloseEnvOnly();
+	    xaRecoverOnly(NUM_DBS);
+	    verifyData(expectedData1, false, 0, NUM_DBS);
+	    verifyData(expectedData2, false, NUM_DBS, NUM_DBS << 1);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testXARecoverArgCheck()
+ 	throws Throwable {
+
+	createXAEnvAndDbs(1 << 20, false, NUM_DBS);
+	XAEnvironment xaEnv = (XAEnvironment) env;
+
+        try {
+	    XidImpl xid = new XidImpl(1, "TwoPCTest1".getBytes(), null);
+
+	    /* Check that only one of TMJOIN and TMRESUME can be set. */
+	    try {
+		xaEnv.start(xid, XAResource.TMJOIN | XAResource.TMRESUME);
+		fail("Expected XAException(XAException.XAER_INVAL)");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+	    }
+
+	    /*
+	     * Check that a flag value that is invalid for start()
+	     * (TMSUSPEND alone) is rejected.
+	     */
+	    try {
+		xaEnv.start(xid, XAResource.TMSUSPEND);
+		fail("Expected XAException(XAException.XAER_INVAL)");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+	    }
+
+	    xaEnv.start(xid, XAResource.TMNOFLAGS);
+	    try {
+		xaEnv.start(xid, XAResource.TMNOFLAGS);
+		fail("Expected XAER_DUPID");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_DUPID);
+	    }
+	    xaEnv.end(xid, XAResource.TMNOFLAGS);
+
+	    /*
+	     * Check that JOIN with a non-existent association throws NOTA.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+		xaEnv.start(xid, XAResource.TMJOIN);
+		fail("Expected XAER_NOTA");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_NOTA);
+	    }
+
+	    /*
+	     * Check that RESUME with a non-existent association throws NOTA.
+	     */
+	    try {
+		xaEnv.start(xid, XAResource.TMRESUME);
+		fail("Expected XAER_NOTA");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_NOTA);
+	    }
+
+	    /*
+	     * Check that start(JOIN) from a thread that is already associated
+	     * throws XAER_PROTO.
+	     */
+	    Xid xid2 = new XidImpl(1, "TwoPCTest3".getBytes(), null);
+	    xaEnv.start(xid2, XAResource.TMNOFLAGS);
+	    xaEnv.end(xid2, XAResource.TMNOFLAGS);
+	    xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+	    xaEnv.start(xid, XAResource.TMNOFLAGS);
+	    try {
+		xaEnv.start(xid2, XAResource.TMJOIN);
+		fail("Expected XAER_PROTO");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_PROTO);
+	    }
+
+	    /*
+	     * Check that start(RESUME) for an xid that is not suspended throws
+	     * XAER_PROTO.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+		xaEnv.start(xid, XAResource.TMRESUME);
+		fail("Expected XAER_PROTO");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_PROTO);
+	    }
+
+	    /*
+	     * Check that end(TMFAIL | TMSUCCESS) throws XAER_INVAL.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+		xaEnv.end(xid, XAResource.TMFAIL | XAResource.TMSUCCESS);
+		fail("Expected XAER_INVAL");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+	    }
+
+	    /*
+	     * Check that end(TMFAIL | TMSUSPEND) throws XAER_INVAL.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+		xaEnv.end(xid, XAResource.TMFAIL | XAResource.TMSUSPEND);
+		fail("Expected XAER_INVAL");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+	    }
+
+	    /*
+	     * Check that end(TMSUCCESS | TMSUSPEND) throws XAER_INVAL.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+		xaEnv.end(xid, XAResource.TMSUCCESS | XAResource.TMSUSPEND);
+		fail("Expected XAER_INVAL");
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+	    }
+
+	    /*
+	     * Check that end(TMSUSPEND) actually works.
+	     */
+	    Xid xid4 = new XidImpl(1, "TwoPCTest4".getBytes(), null);
+	    xaEnv.start(xid4, XAResource.TMNOFLAGS);
+	    Transaction txn4 = xaEnv.getThreadTransaction();
+	    assertTrue(txn4 != null);
+	    xaEnv.end(xid4, XAResource.TMSUSPEND);
+	    assertTrue(xaEnv.getThreadTransaction() == null);
+	    Xid xid5 = new XidImpl(1, "TwoPCTest5".getBytes(), null);
+	    xaEnv.start(xid5, XAResource.TMNOFLAGS);
+	    Transaction txn5 = xaEnv.getThreadTransaction();
+	    xaEnv.end(xid5, XAResource.TMSUSPEND);
+	    assertTrue(xaEnv.getThreadTransaction() == null);
+	    xaEnv.start(xid4, XAResource.TMRESUME);
+	    assertTrue(xaEnv.getThreadTransaction().equals(txn4));
+	    xaEnv.end(xid4, XAResource.TMNOFLAGS);
+	    xaEnv.start(xid5, XAResource.TMRESUME);
+	    assertTrue(xaEnv.getThreadTransaction().equals(txn5));
+	    xaEnv.end(xid5, XAResource.TMNOFLAGS);
+
+	    /*
+	     * Check TMFAIL.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest6".getBytes(), null);
+		xaEnv.start(xid, XAResource.TMNOFLAGS);
+		xaEnv.end(xid, XAResource.TMFAIL);
+		xaEnv.commit(xid, false);
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XA_RBROLLBACK);
+	    }
+	    xaEnv.rollback(xid);
+
+	    /*
+	     * Check TMSUCCESS.
+	     */
+	    xid = new XidImpl(1, "TwoPCTest6".getBytes(), null);
+	    xaEnv.start(xid, XAResource.TMNOFLAGS);
+	    xaEnv.end(xid, XAResource.TMSUCCESS);
+	    xaEnv.commit(xid, false);
+
+	    /*
+	     * Check start(); end(SUSPEND); end(SUCCESS).  This is a sequence
+	     * that JBoss is known to produce.  It should succeed.
+	     */
+	    xid = new XidImpl(1, "TwoPCTest7".getBytes(), null);
+	    xaEnv.start(xid, XAResource.TMNOFLAGS);
+	    xaEnv.end(xid, XAResource.TMSUSPEND);
+	    xaEnv.end(xid, XAResource.TMSUCCESS);
+	    xaEnv.commit(xid, false);
+
+	    /*
+	     * Check end() with no preceding start() call.  This should fail
+	     * with XAER_NOTA.
+	     */
+	    try {
+		xid = new XidImpl(1, "TwoPCTest8".getBytes(), null);
+		xaEnv.end(xid, XAResource.TMFAIL);
+		xaEnv.commit(xid, false);
+	    } catch (XAException XAE) {
+		/* Expect this. */
+		assertTrue(XAE.errorCode == XAException.XAER_NOTA);
+	    }
+	} catch (Throwable t) {
+	    t.printStackTrace();
+	    throw t;
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryAbortTest.java b/test/com/sleepycat/je/recovery/RecoveryAbortTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..03a7e147a41e6ffcf12cd11deca55dc3af5921d8
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryAbortTest.java
@@ -0,0 +1,684 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryAbortTest.java,v 1.59.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+
+public class RecoveryAbortTest extends RecoveryTestBase {
+    private static final boolean DEBUG = false;
+
+    public RecoveryAbortTest() {
+	super(true);
+    }
+
+    /**
+     * Insert data into several dbs, then abort.
+     */
+    public void testBasic()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = NUM_RECS * 3;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, false, NUM_DBS);
+            txn.abort();
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test insert/abort with no duplicates.
+     */
+    public void testInserts()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+        EnvironmentImpl realEnv = DbInternal.envGetEnvironmentImpl(env);
+
+        int N = NUM_RECS;
+
+        if (DEBUG) {
+            System.out.println("<dump>");
+        }
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert 0 - N and commit. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, N - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+            verifyData(expectedData, false, NUM_DBS);
+
+            /* Insert N - 3N and abort. */
+            txn = env.beginTransaction(null, null);
+            insertData(txn, N, (3 * N) - 1, expectedData, 1, false, NUM_DBS);
+            txn.abort();
+            verifyData(expectedData, false, NUM_DBS);
+
+            /*
+	     * Wait for the incompressor queue to be processed, so that
+	     * recovery is forced to run with IN delete replays.
+	     */
+            while (realEnv.getINCompressorQueueSize() > 0) {
+                Thread.sleep(10000);
+            }
+
+            /* Insert 2N - 4N and commit. */
+            txn = env.beginTransaction(null, null);
+            insertData(txn, (2 * N), (4 * N) - 1, expectedData, 1, true,
+		       NUM_DBS);
+            txn.commit();
+            verifyData(expectedData, false, NUM_DBS);
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (DEBUG) {
+                System.out.println("</dump>");
+            }
+        }
+    }
+
+    public void testMix()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+
+        int numRecs = NUM_RECS;
+        int numDups = 10;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data without duplicates. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+
+            /* Insert more with duplicates, commit. */
+            insertData(txn, numRecs+1, (2*numRecs), expectedData,
+                       numDups, true, NUM_DBS);
+            txn.commit();
+
+            /* Delete all and abort. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, false, NUM_DBS);
+            txn.abort();
+
+            /* Delete every other and commit. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+            /* Modify some and abort. */
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, numRecs, expectedData, 3, false, NUM_DBS);
+            txn.abort();
+
+            /* Modify some and commit. */
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, numRecs/2, expectedData, 2, true, NUM_DBS);
+            txn.commit();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+            TestUtils.validateNodeMemUsage
+                           (DbInternal.envGetEnvironmentImpl(env),
+                            false);
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR13726()
+	throws Throwable {
+
+	int numDbs = 1;
+
+	createEnvAndDbs(1 << 20, true, numDbs);
+
+        try {
+            /*
+	     * Insert a few duplicates and commit. This gets us a
+	     * DupCountLN.
+	     */
+            Transaction txn = env.beginTransaction(null, null);
+	    Cursor c = dbs[0].openCursor(txn, null);
+	    DatabaseEntry key = new DatabaseEntry();
+	    DatabaseEntry data = new DatabaseEntry();
+            byte[] keyData = TestUtils.getTestArray(0);
+            byte[] dataData = TestUtils.byteArrayCopy(keyData);
+            key.setData(keyData);
+	    data.setData(dataData);
+	    for (int i = 0; i < 3; i++) {
+		data.setData(TestUtils.getTestArray(i));
+		assertEquals("insert some dups",
+			     c.put(key, data),
+			     OperationStatus.SUCCESS);
+	    }
+	    c.close();
+	    txn.commit();
+
+	    /* This gets us a DelDupLN in the slot in the BIN. */
+	    txn = env.beginTransaction(null, null);
+	    assertEquals("delete initial dups",
+			 dbs[0].delete(txn, key),
+			 OperationStatus.SUCCESS);
+            txn.commit();
+
+	    /* This gets the dup tree cleaned up. */
+	    env.compress();
+
+	    /* Gets the BIN written out with knownDeleted=true. */
+	    closeEnv();
+	    recoverOnly(numDbs);
+	    createDbs(null, numDbs);
+
+	    /*
+	     * Tree now has a BIN referring to a DelDupLN.  Add duplicates,
+	     * and abort.
+	     */
+	    txn = env.beginTransaction(null, null);
+	    c = dbs[0].openCursor(txn, null);
+	    for (int i = 0; i < 3; i++) {
+		data.setData(TestUtils.getTestArray(i));
+		assertEquals("insert later dups",
+			     c.put(key, data),
+			     OperationStatus.SUCCESS);
+	    }
+	    c.close();
+            txn.abort();
+
+	    /*
+	     * Now add duplicates again and commit.
+	     */
+	    txn = env.beginTransaction(null, null);
+	    c = dbs[0].openCursor(txn, null);
+	    for (int i = 0; i < 3; i++) {
+		data.setData(TestUtils.getTestArray(i));
+		assertEquals("insert later dups",
+			     c.put(key, data),
+			     OperationStatus.SUCCESS);
+	    }
+	    c.close();
+            txn.commit();
+
+	    txn = env.beginTransaction(null, null);
+	    c = dbs[0].openCursor(txn, null);
+	    int count = 0;
+	    while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {
+		count++;
+	    }
+	    c.getSearchKey(key, data, null);
+	    assertEquals("scanned count == count()", count, c.count());
+	    c.close();
+	    txn.commit();
+	    closeEnv();
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * Test the sequence where we have an existing record in the
+     * database; then in a separate transaction we delete that data
+     * and reinsert it and then abort that transaction.  During the
+     * undo, the insert will be undone first (by deleting the record
+     * and setting knownDeleted true in the ChildReference); the
+     * deletion will be undone second by adding the record back into
+     * the database.  The entry needs to be present in the BIN when we
+     * add it back in.  But the compressor may be running at the same
+     * time and compress the entry out between the deletion and
+     * re-insertion making the entry disappear from the BIN.  This is
+     * prevented by a lock being taken by the compressor on the LN,
+     * even if the LN is "knownDeleted". [#9465]
+     */
+    public void testSR9465Part1()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = NUM_RECS;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data without duplicates. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* Delete all and abort. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, false, NUM_DBS);
+            insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS);
+            txn.abort();
+
+            txn = env.beginTransaction(null, null);
+	    verifyData(expectedData, NUM_DBS);
+            txn.commit();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR9465Part2()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = NUM_RECS;
+
+        try {
+            /* Set up an repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data without duplicates. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* Delete all and abort. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, false, NUM_DBS);
+            insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS);
+            deleteData(txn, expectedData, true, false, NUM_DBS);
+            txn.abort();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+
+            txn = env.beginTransaction(null, null);
+	    verifyData(expectedData, NUM_DBS);
+            txn.commit();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR9752Part1()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, false, NUM_DBS);
+        int numRecs = NUM_RECS;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data without duplicates. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+	    /*
+	     * txn1 just puts a piece of data out to a database that won't
+	     * be seen by deleteData or insertData.  The idea is to hold
+	     * the transaction open across the env.sync() so that firstActive
+	     * comes before ckptStart.
+	     */
+            Transaction txn1 = env.beginTransaction(null, null);
+	    DatabaseEntry key = new DatabaseEntry(new byte[] { 1, 2, 3, 4 });
+	    DatabaseEntry data = new DatabaseEntry(new byte[] { 4, 3, 2, 1 });
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setAllowCreate(true);
+	    dbConfig.setSortedDuplicates(false);
+	    dbConfig.setTransactional(true);
+            Database otherDb = env.openDatabase(txn1, "extradb", dbConfig);
+	    otherDb.put(txn1, key, data);
+
+            /* Delete all and abort. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, false, NUM_DBS);
+            txn.abort();
+
+            /* Delete all and commit. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+	    env.sync(); /* env.checkpoint does not seem to be sufficient. */
+            txn1.commit();
+	    otherDb.close();
+
+	    closeEnv();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR9752Part2()
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, false, NUM_DBS);
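+	/*
+	 * Unlike testSR9752Part1, the cleaner is shut down here, presumably
+	 * so it cannot process log files while the test runs.
+	 */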
+	DbInternal.envGetEnvironmentImpl(env).shutdownCleaner();
+        int numRecs = NUM_RECS;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data without duplicates. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+	    /*
+	     * txn1 just writes a single record to a database that won't be
+	     * seen by deleteData or insertData.  The idea is to hold
+	     * the transaction open across the env.sync() so that firstActive
+	     * comes before ckptStart.
+	     */
+            Transaction txn1 = env.beginTransaction(null, null);
+	    DatabaseEntry key = new DatabaseEntry(new byte[] { 1, 2, 3, 4 });
+	    DatabaseEntry data = new DatabaseEntry(new byte[] { 4, 3, 2, 1 });
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setAllowCreate(true);
+	    dbConfig.setSortedDuplicates(false);
+	    dbConfig.setTransactional(true);
+            Database otherDb = env.openDatabase(txn1, "extradb", dbConfig);
+	    otherDb.put(txn1, key, data);
+
+            /* Delete all and abort. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, false, NUM_DBS);
+            txn.abort();
+
+            /* Delete all and commit. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+	    env.sync(); /* env.checkpoint does not seem to be sufficient. */
+            txn1.commit();
+	    otherDb.close();
+
+	    closeEnv();
+
+            if (DEBUG) {
+                dumpData(NUM_DBS);
+                dumpExpected(expectedData);
+                com.sleepycat.je.tree.Key.DUMP_TYPE =
+		    com.sleepycat.je.tree.Key.DumpType.BINARY;
+                DbInternal.dbGetDatabaseImpl(dbs[0]).getTree().dump();
+            }
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert dbs, commit some, abort some; then exercise db remove and
+     * rename.
+     */
+    public void testDbCreateRemove()
+	throws Throwable {
+
+        createEnv(1 << 20, true);
+        int N1 = 10;
+        int N2 = 50;
+        int N3 = 60;
+        int N4 = 70;
+        int N5 = 100;
+
+        String dbName1 = "foo";
+        String dbName2 = "bar";
+
+        try {
+            /* Make Dbs, abort */
+            Transaction txn = env.beginTransaction(null, null);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            for (int i = 0; i < N2; i++) {
+                env.openDatabase(txn, dbName1 + i, dbConfig);
+            }
+            txn.abort();
+
+            /* None of the dbs should exist. */
+            checkForNoDb(dbName1, 0, N2);
+
+            /* Make more dbs, overlapping with some of the aborted set. */
+            txn = env.beginTransaction(null, null);
+            for (int i = N1; i < N5; i++) {
+                Database db = env.openDatabase(txn, dbName1 + i, dbConfig);
+                db.close();
+            }
+            txn.commit();
+
+            /*
+             * Dbs 0  - N1-1 shouldn't exist
+             * Dbs N1 - N5-1 should exist
+             */
+            checkForNoDb(dbName1, 0, N1);
+            checkForDb(dbName1, N1, N5);
+
+            /* Close and recover */
+            env.close();
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setConfigParam
+                (EnvironmentParams.NODE_MAX.getName(), "6");
+            envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(),
+                                     Long.toString(1 << 24));
+            envConfig.setTransactional(true);
+            envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Dbs 0  - N1-1 shouldn't exist
+             * Dbs N1 - N5-1 should exist
+             */
+            checkForNoDb(dbName1, 0, N1);
+            checkForDb(dbName1, N1, N5);
+
+            /* Remove some dbs, abort */
+            txn = env.beginTransaction(null, null);
+            for (int i = N2; i < N4; i++) {
+                env.removeDatabase(txn, dbName1+i);
+            }
+            txn.abort();
+
+            /* Remove some dbs, commit */
+            txn = env.beginTransaction(null, null);
+            for (int i = N3; i < N4; i++) {
+                env.removeDatabase(txn, dbName1+i);
+            }
+            txn.commit();
+
+            /*
+             * Dbs 0 - N1-1  should not exist
+             * Dbs N1 - N3-1 should exist
+             * Dbs N3 - N4-1 should not exist
+             * Dbs N4 - N5-1 should exist
+             */
+            checkForNoDb(dbName1, 0, N1);
+            checkForDb(dbName1, N1, N3);
+            checkForNoDb(dbName1, N3, N4);
+            checkForDb(dbName1, N4, N5);
+
+            /* Close and recover */
+            env.close();
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Dbs 0 - N1-1  should not exist
+             * Dbs N1 - N3-1 should exist
+             * Dbs N3 - N4-1 should not exist
+             * Dbs N4 - N5-1 should exist
+             */
+            checkForNoDb(dbName1, 0, N1);
+            checkForDb(dbName1, N1, N3);
+            checkForNoDb(dbName1, N3, N4);
+            checkForDb(dbName1, N4, N5);
+
+            /* Rename some dbs, abort */
+            txn = env.beginTransaction(null, null);
+            for (int i = N1; i < N3; i++) {
+                env.renameDatabase
+		    (txn, dbName1+i, dbName2+i);
+            }
+            txn.abort();
+
+            /* Rename some dbs, commit */
+            txn = env.beginTransaction(null, null);
+            for (int i = N2; i < N3; i++) {
+                env.renameDatabase
+		    (txn, dbName1+i, dbName2+i);
+            }
+            txn.commit();
+
+            /*
+             * Dbs 0 - N1-1  should not exist
+             * Dbs N1 - N2-1 should exist with old name
+             * Dbs N2 - N3-1 should exist with new name
+             * Dbs N3 - N4-1 should not exist
+             * Dbs N4 - N5-1 should exist with old name
+             */
+            checkForNoDb(dbName1, 0, N1);
+            checkForDb(dbName1, N1, N2);
+            checkForDb(dbName2, N2, N3);
+            checkForNoDb(dbName1, N3, N4);
+            checkForDb(dbName1, N4, N5);
+        } catch (Throwable t) {
+            /* print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Fail if any db from start to end-1 exists.
+     */
+    private void checkForNoDb(String dbName, int start, int end)
+        throws DatabaseException {
+
+        /* Dbs start to end-1 shouldn't exist. */
+        for (int i = start; i < end; i++) {
+            try {
+                env.openDatabase(null, dbName + i, null);
+                fail(dbName + i + " shouldn't exist");
+            } catch (DatabaseException e) {
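+                /* Expected: the db must not exist. */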
+            }
+        }
+    }
+
+    /**
+     * Fail if any db from start to end-1 doesn't exist.
+     */
+    private void checkForDb(String dbName, int start, int end)
+        throws DatabaseException {
+        /* Dbs start - end -1  should exist. */
+        for (int i = start; i < end; i++) {
+            try {
+                Database checkDb = env.openDatabase(null, dbName + i, null);
+                checkDb.close();
+            } catch (DatabaseException e) {
+                fail(e.getMessage());
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java b/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..903321e4c996021b9d12e426fda7e9e3c63ea84f
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java
@@ -0,0 +1,422 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryCheckpointTest.java,v 1.42.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+
+public class RecoveryCheckpointTest extends RecoveryTestBase {
+
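+    /*
+     * Coordinates the lock-step hand-off between the two test threads in
+     * testSR11293.
+     */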
+    volatile int sequence = 0;
+
+    public void setExtraProperties()
+	throws DatabaseException {
+
+        /*
+         * Make sure that the environments in this unit test always run with
+         * checkpointing off, so we can call it explicitly.
+         */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+	/*
+        envConfig.setConfigParam
+            (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+	*/
+    }
+
+    /**
+     * Run checkpoints on empty dbs.
+     */
+    public void testEmptyCheckpoint()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+
+        try {
+
+            /*
+             * Run a checkpoint on the empty environment. It should be the
+             * second one run; the first was run by recovery when the
+             * environment was opened.
+             */
+            env.checkpoint(forceConfig);
+            EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(2, stats.getNCheckpoints());
+            assertEquals(2, stats.getLastCheckpointId());
+
+            /* Shutdown, recover. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS); // 0 checkpoints
+
+            /* Another checkpoint. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+	    envConfig.setConfigParam
+		(EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+            env = new Environment(envHome, envConfig);
+            env.checkpoint(forceConfig);
+            stats = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(2, stats.getNCheckpoints());
+            assertEquals(4, stats.getLastCheckpointId());
+
+            /* Shutdown, recover. */
+            env.close();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Check that no checkpoint runs when an already-recovered, unchanged
+     * environment is simply re-opened. [SR 11861]
+     */
+    public void testNoCheckpointOnOpenSR11861()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+
+        try {
+
+            EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(1, stats.getNCheckpoints());
+            assertEquals(1, stats.getLastCheckpointId());
+
+            /* Shutdown, recover. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+	    insertData(txn, 0, 1, expectedData, 1, true, NUM_DBS);
+	    txn.commit();
+            closeEnv();   // closes without a checkpoint
+            recoverAndVerify(expectedData, NUM_DBS); // 1 checkpoint
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            env = new Environment(envHome, envConfig);
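+            /* Opening again must not trigger another checkpoint (SR 11861). */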
+            stats = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(0, stats.getNCheckpoints());
+            assertEquals(2, stats.getLastCheckpointId());
+	    env.close();
+            env = new Environment(envHome, envConfig);
+            stats = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(0, stats.getNCheckpoints());
+            assertEquals(2, stats.getLastCheckpointId());
+
+            /* Shutdown, recover. */
+            env.close();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test checkpoints that end up using BINDeltas -- the recovery must work.
+     */
+    public void testBinDelta()
+	throws Throwable {
+
+        doTestBinDelta(true);
+    }
+
+    /**
+     * Same as testBinDelta but disallows deltas, to ensure that the
+     * setMinimizeRecoveryTime API works.
+     */
+    public void testNoBinDelta()
+	throws Throwable {
+
+        doTestBinDelta(false);
+    }
+
+    private void doTestBinDelta(boolean useDeltas)
+	throws Throwable {
+
+	createEnvAndDbs(1 << 20, false, NUM_DBS);
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+
+        CheckpointConfig deltaConfig = new CheckpointConfig();
+        deltaConfig.setForce(true);
+        deltaConfig.setMinimizeRecoveryTime(!useDeltas);
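+        /*
+         * setMinimizeRecoveryTime(true) disallows deltas, so with useDeltas
+         * false the checkpoint must log full BINs.
+         */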
+
+        try {
+
+            /*
+             * Insert 4 records (nodeMax is 6), checkpoint, then insert 1
+             * record.  The 1 record insertion will qualify for a delta,
+             * because the threshold percentage is 25%, and 25% of 4 is 1.
+             */
+            int numRecs = 4;
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS);
+            env.checkpoint(forceConfig);
+            insertData(txn, numRecs+1, numRecs+2, expectedData,
+		       1, true, NUM_DBS);
+            txn.commit();
+
+            /*
+             * If useDeltas is true, this next checkpoint will end up using a
+             * BINDelta to log the last inserted record. It will have
+             * practically nothing but the root in the checkpoint.
+             */
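+            /*
+             * The first getStats call clears the counters (statsConfig has
+             * setClear(true)), so the call after the checkpoint reflects
+             * only that checkpoint's work.
+             */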
+            EnvironmentStats stats = env.getStats(statsConfig);
+            env.checkpoint(deltaConfig);
+            stats = env.getStats(statsConfig);
+            if (useDeltas) {
+                assertTrue(stats.getNDeltaINFlush() > 0);
+            } else {
+                assertTrue(stats.getNDeltaINFlush() == 0);
+            }
+
+            /* Shutdown, recover from a checkpoint that uses BINDeltas. */
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test the rollback of transactions that are active during a checkpoint.
+     */
+    public void testActiveWhileCheckpointing()
+        throws Throwable {
+
+	createEnvAndDbs(1 << 20, true, NUM_DBS);
+
+        try {
+            int numRecs = 1;
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS);
+
+            /* Now run a checkpoint while this operation hasn't finished. */
+            env.checkpoint(forceConfig);
+            txn.abort();
+
+            /* Shutdown, recover. */
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testSR11293()
+	throws Throwable {
+
+	createEnv(1 << 20, false);
+
+	Transaction dbTxn = env.beginTransaction(null, null);
+	EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+	final DbTree dbTree = envImpl.getDbTree();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+	final Database db = env.openDatabase(dbTxn, "foo", dbConfig);
+	dbTxn.commit();
+	final Transaction txn = env.beginTransaction(null, null);
+	sequence = 0;
+
+	/*
+	 * The sequence between the two tester threads is:
+	 *
+	 * tester2: write 1/1 into the database.  This causes the initial tree
+	 * to be created (IN/BIN/LN).  Flush that out to the disk with a full
+	 * checkpoint.  Signal tester1 and wait.
+	 *
+	 * tester1: Lock the MapLN for "foo" db.  Signal tester2 and wait.
+	 *
+	 * tester2: Add 2/2 to the tree which causes the BIN to be dirtied.
+	 * Signal tester1 to continue, perform a full checkpoint which will
+	 * cause the root IN to be dirtied and flushed.  DbTree.modifyDbRoot
+	 * will block on the MapLN lock held by tester1.
+	 *
+	 * tester1: while tester2 is blocking on the MapLN lock, this thread is
+	 * sleeping.  When it wakes up, it releases the MapLN lock by aborting
+	 * the transaction.
+	 *
+	 * tester2: modifyDbRoot finally acquires the write lock on foo-db's
+	 * MapLN, performs the update to the DbTree and returns from
+	 * the sync().
+	 */
+	JUnitThread tester1 =
+	    new JUnitThread("testSR11293DbTreeLocker") {
+		    public void testBody() {
+			try {
+			    /* Wait for tester2. */
+			    while (sequence < 1) {
+				Thread.yield();
+			    }
+
+			    /* Lock the MapLN for the database. */
+			    DatabaseId fooId =
+				DbInternal.dbGetDatabaseImpl(db).getId();
+			    DatabaseImpl fooDb = dbTree.getDb(fooId, 500000L);
+			    assert fooDb != null;
+
+			    sequence++;
+
+			    /* Wait for tester2. */
+			    while (sequence < 3) {
+				Thread.yield();
+			    }
+
+			    try {
+				Thread.sleep(3000);
+			    } catch (Exception E) {
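+				/* Ignored; the sleep is only a timing aid. */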
+			    }
+
+			    try {
+				txn.abort();
+				db.close();
+				env.close();
+			    } catch (DatabaseException DBE) {
+				DBE.printStackTrace();
+				fail("unexpected exception: " + DBE);
+			    }
+			} catch (DatabaseException DBE) {
+			    DBE.printStackTrace();
+			    fail("caught DatabaseException " + DBE);
+			}
+		    }
+		};
+
+	JUnitThread tester2 =
+	    new JUnitThread("testSR11293DbWriter") {
+		    public void testBody() {
+			try {
+			    DatabaseEntry key =
+				new DatabaseEntry(new byte[] { 1 });
+			    DatabaseEntry data =
+				new DatabaseEntry(new byte[] { 1 });
+			    assertEquals(OperationStatus.SUCCESS,
+					 db.put(null, key, data));
+			    env.sync();
+
+			    sequence++;
+			    while (sequence < 2) {
+				Thread.yield();
+			    }
+
+			    key.setData(new byte[] { 2 });
+			    data.setData(new byte[] { 2 });
+			    assertEquals(OperationStatus.SUCCESS,
+					 db.put(null, key, data));
+			    sequence++;
+			    env.sync();
+			} catch (DatabaseException DBE) {
+			    DBE.printStackTrace();
+			    fail("unexpected exception: " + DBE);
+			}
+		    }
+		};
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+
+        EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+
+        recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+	recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+	recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+
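+        /* Recover with daemons disabled and verify both records survived. */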
+        env = new Environment(envHome, recoveryConfig);
+	dbConfig.setAllowCreate(false);
+	dbConfig.setTransactional(false);
+	Database db2 = env.openDatabase(null, "foo", dbConfig);
+	Cursor c = db2.openCursor(null, null);
+	DatabaseEntry key = new DatabaseEntry();
+	DatabaseEntry data = new DatabaseEntry();
+	assertEquals(OperationStatus.SUCCESS,
+		     c.getNext(key, data, LockMode.DEFAULT));
+	assertEquals((key.getData())[0], 1);
+	assertEquals((data.getData())[0], 1);
+
+	assertEquals(OperationStatus.SUCCESS,
+		     c.getNext(key, data, LockMode.DEFAULT));
+	assertEquals((key.getData())[0], 2);
+	assertEquals((data.getData())[0], 2);
+	assertEquals(OperationStatus.NOTFOUND,
+		     c.getNext(key, data, LockMode.DEFAULT));
+
+	c.close();
+	db2.close();
+	env.close();
+    }
+
+    /*
+     * Check that calling checkpoint on a read-only environment does not
+     * throw.
+     */
+    public void testReadOnlyCheckpoint()
+        throws DatabaseException {
+
+        /* Create an environment, then close it. */
+        EnvironmentConfig c = TestUtils.initEnvConfig();
+        c.setAllowCreate(true);
+        Environment e = new Environment(envHome, c);
+        e.close();
+
+        /* Now open read only. */
+        c.setAllowCreate(false);
+        c.setReadOnly(true);
+        e = new Environment(envHome, c);
+        try {
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            e.checkpoint(ckptConfig);
+        } finally {
+            e.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryCreateDupTest.java b/test/com/sleepycat/je/recovery/RecoveryCreateDupTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c08cbce332ce128b8aa251450492e1f7a4164439
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryCreateDupTest.java
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryCreateDupTest.java,v 1.13.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.SearchFileReader;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Test the log entries that are made when a duplicate tree is
+ * created. Inspired by SR 10203.
+ */
+public class RecoveryCreateDupTest extends RecoveryTestBase {
+
+    /*
+     * These tests insert 2 records in order to create a duplicate tree.  Then
+     * they truncate the log and recover, checking that (a) the recovery was
+     * able to succeed, and (b), that the results are correct.
+     *
+     * They vary where the truncation happens and if the two records are
+     * inserted in a single or in two txns.
+     */
+
+    public void testCreateDup1()
+        throws Throwable {
+        errorHaltCase(false, true);
+    }
+
+    public void testCreateDup2()
+        throws Throwable {
+        errorHaltCase(true, true);
+    }
+
+    public void testCreateDup3()
+        throws Throwable {
+        errorHaltCase(false, false);
+    }
+
+    public void testCreateDup4()
+        throws Throwable {
+        errorHaltCase(true, false);
+    }
+
+    /**
+     * Insert 2 duplicate records, cut the log off at different places,
+     * recover.
+     *
+     * @param allValuesCreatedWithinTxn true if both records are inserted
+     * in the same txn.
+     * @param truncateBeforeDIN if true, truncate just before the DIN entry.
+     * If false, truncate before the first LN.
+     */
+    private void errorHaltCase(boolean allValuesCreatedWithinTxn,
+                               boolean truncateBeforeDIN)
+        throws Throwable {
+
+        /* Test data setup. */
+        byte[] key = new byte[] { 5 };
+        DatabaseEntry keyEntry1 = new DatabaseEntry(key);
+        DatabaseEntry keyEntry2 = new DatabaseEntry(key);
+        byte[] data1 = new byte[] { 7 };
+        byte[] data2 = new byte[] { 8 };
+        DatabaseEntry dataEntry1 = new DatabaseEntry(data1);
+        DatabaseEntry dataEntry2 = new DatabaseEntry(data2);
+
+        /* Create 1 database. */
+        createEnvAndDbs(1 << 20, true, 1);
+        try {
+            /*
+             * Set up a repository of expected data. We'll be inserting
+             * 2 records, varying whether they are in the same txn or not.
+             */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            dbs[0].put(txn, keyEntry1, dataEntry1);
+            addExpectedData(expectedData, 0, keyEntry1, dataEntry1,
+                            !allValuesCreatedWithinTxn);
+
+            if (!allValuesCreatedWithinTxn) {
+                txn.commit();
+                txn = env.beginTransaction(null, null);
+            }
+
+            dbs[0].put(txn, keyEntry2, dataEntry2);
+            addExpectedData(expectedData, 0, keyEntry2, dataEntry2, false);
+
+            txn.commit();
+            closeEnv();
+
+            /*
+             * Find the location of the DIN and of the follow-on LN.
+             */
+
+            env = new Environment(envHome, null);
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            SearchFileReader searcher =
+                new SearchFileReader(envImpl, 1000, true, DbLsn.NULL_LSN,
+				     DbLsn.NULL_LSN, LogEntryType.LOG_DIN);
+            searcher.readNextEntry();
+            long dinLsn = searcher.getLastLsn();
+
+            searcher =
+                new SearchFileReader(envImpl, 1000, true, dinLsn,
+				     DbLsn.NULL_LSN,
+                                     LogEntryType.LOG_LN_TRANSACTIONAL);
+            searcher.readNextEntry();
+            long lnLsn = searcher.getLastLsn();
+
+            env.close();
+
+            /*
+             *  Truncate the log, sometimes before the DIN, sometimes after.
+             */
+            EnvironmentImpl cmdEnvImpl =
+                CmdUtil.makeUtilityEnvironment(envHome, false);
+
+            /* Go through the file manager to get the JE file. Truncate. */
+            long truncateLsn = truncateBeforeDIN ? dinLsn : lnLsn;
+            cmdEnvImpl.getFileManager().
+                truncateLog(DbLsn.getFileNumber(truncateLsn),
+                            DbLsn.getFileOffset(truncateLsn));
+
+            cmdEnvImpl.close();
+
+            /*
+             * Recover and verify that we have the expected data.
+             */
+            recoverAndVerify(expectedData, 1);
+
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test when a duplicate tree reuses an entry previously populated by
+     * a deleted LN. [#SR12847]
+     * The sequence is this:
+     *   create database
+     *   insert k1/d1 (make BIN with a slot for k1)
+     *   abort the insert, so the slot is marked known deleted
+     *   flush the BIN to the log
+     *
+     *   insert k1/d100
+     *   insert k1/d200 (creates a new duplicate tree)
+     *
+     * Now recover from here. The root of the duplicate tree must be put
+     * into the old known deleted slot used for k1/d1. There is some
+     * finagling to make this happen; namely the BIN must not be compressed
+     * during checkpoint.
+     */
+    public void testReuseSlot()
+        throws DatabaseException {
+
+        /* Create 1 database. */
+        createEnvAndDbs(1 << 20, false, 1);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Insert a record, then abort it so it's marked knownDeleted. */
+        Transaction txn = env.beginTransaction(null, null);
+        IntegerBinding.intToEntry(100, key);
+        IntegerBinding.intToEntry(1, data);
+        dbs[0].put(txn, key, data);
+        txn.abort();
+
+        /*
+         * Put a cursor on this bin to prevent lazy compression and preserve
+         * the slot created above.
+         */
+        IntegerBinding.intToEntry(200, key);
+        IntegerBinding.intToEntry(1, data);
+        txn = env.beginTransaction(null, null);
+        Cursor c = dbs[0].openCursor(txn, null);
+        c.put(key, data);
+
+        /* Flush this bin to the log. */
+        CheckpointConfig ckptConfig = new CheckpointConfig();
+        ckptConfig.setForce(true);
+        env.checkpoint(ckptConfig);
+        c.close();
+        txn.abort();
+
+        /*
+         * Now create a duplicate tree, reusing the known deleted slot
+         * in the bin.
+         */
+        Map<TestData, Set<TestData>> expectedData = 
+            new HashMap<TestData, Set<TestData>>();
+        IntegerBinding.intToEntry(100, key);
+        IntegerBinding.intToEntry(1, data);
+        dbs[0].put(null, key, data);
+        addExpectedData(expectedData, 0, key, data, true);
+
+        IntegerBinding.intToEntry(2, data);
+        dbs[0].put(null, key, data);
+        addExpectedData(expectedData, 0, key, data, true);
+
+        /* close the environment. */
+        closeEnv();
+
+        recoverAndVerify(expectedData, 1);
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java b/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..84223f0860e5170c4cace7a84c1de2fe79cf1da6
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryDeleteTest.java,v 1.10.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+
+public class RecoveryDeleteTest extends RecoveryTestBase {
+
+    protected void setExtraProperties()
+        throws DatabaseException {
+        envConfig.setConfigParam(
+                      EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+                      "false");
+    }
+
+    /**
+     * Make sure that we can recover after the entire tree is compressed
+     * away.
+     */
+    public void testDeleteAllAndCompress()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, false, NUM_DBS);
+        int numRecs = 10;
+
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /*
+             * Do two checkpoints here so that the INs that make up this new
+             * tree are not in the redo part of the log.
+             */
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            env.checkpoint(ckptConfig);
+            env.checkpoint(ckptConfig);
+            txn = env.beginTransaction(null, null);
+            insertData(txn, numRecs, numRecs + 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* delete all */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, true, NUM_DBS);
+            txn.commit();
+
+            /* This will remove the root. */
+            env.compress();
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java b/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..5da8bdd45bafbc2bc479b741bf75e976011868fd
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java
@@ -0,0 +1,244 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryDeltaTest.java,v 1.31.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+
+/**
+ * Exercise delta BIN logging.
+ */
+public class RecoveryDeltaTest extends RecoveryTestBase {
+
+    /**
+     * The recovery delta tests set extra properties.
+     */
+    public void setExtraProperties()
+	throws DatabaseException {
+
+        /* Always run with delta logging cranked up. */
+        envConfig.setConfigParam
+            (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "75");
+
+        /*
+         * Make sure that the environments in this unit test always
+         * run with checkpointing off, so we can call it explicitly.
+         */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        /*
+         * Make sure that the environments in this unit test always
+         * run with the compressor off, so we get known results
+         */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+    }
+
+    /**
+     * Test the interaction of compression and deltas. After a compress,
+     * the next entry must be a full one.
+     */
+    public void testCompress()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 20;
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // delete every other record
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+            // Ask the compressor to run.
+            env.compress();
+
+            // Force a checkpoint; after the compress it should log full
+            // BINs rather than deltas.
+            env.checkpoint(forceConfig);
+
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test a recovery that processes deltas.
+     */
+    public void testRecoveryDelta()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 20;
+        try {
+            /* Set up a repository of expected data */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /*
+             * Force a checkpoint, to flush a full version of the BIN
+             * to disk, so the next checkpoint can cause deltas
+             */
+            env.checkpoint(forceConfig);
+
+            /* insert data */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /*
+             * This checkpoint should write deltas. Although there's just
+             * been one spate of insertions, those insertions caused some
+             * BIN splitting, so many BINs have a logged version on disk.
+             * This is what causes the deltas.
+             */
+            env.checkpoint(forceConfig);
+
+            closeEnv();
+            List<RecoveryInfo> infoList = recoverAndVerify(expectedData, NUM_DBS);
+
+            /* Check that this recovery processed deltas */
+            RecoveryInfo firstInfo = infoList.get(0);
+            assertTrue(firstInfo.numBinDeltas > 0);
+
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * This test checks that reconstituting the BIN deals properly with
+     * the knownDeleted flag:
+     * insert key1, abort, checkpoint,  -- after abort, childref KD = true;
+     * insert key1, commit,             -- after commit, childref KD = false
+     * delete key1, abort,              -- BinDelta should have KD = false
+     * checkpoint to write deltas,
+     * recover. verify key1 is present. -- reconstituteBIN should make KD=false
+     */
+    public void testKnownDeleted()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 20;
+        try {
+
+            /* Set up a repository of expected data */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert data and abort. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, false, NUM_DBS);
+
+            /*
+             * Add cursors to pin down BINs. Otherwise the checkpoint that
+             * follows will compress away all the values.
+             */
+            Cursor[][] cursors = new Cursor[NUM_DBS][numRecs];
+            addCursors(cursors);
+            txn.abort();
+
+            /*
+             * Force a checkpoint, to flush a full version of the BIN
+             * to disk, so the next checkpoint can cause deltas.
+             * These checkpointed BINS have known deleted flags set.
+             */
+            env.checkpoint(forceConfig);
+            removeCursors(cursors);
+
+            /*
+             * Insert every other data value, makes some known deleted flags
+             * false.
+             */
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1,
+                       true,  true, NUM_DBS);
+            txn.commit();
+
+            /* Delete data and abort, which keeps the known deleted flag true. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, false, NUM_DBS);
+            txn.abort();
+
+            /* This checkpoint should write deltas. */
+            cursors = new Cursor[NUM_DBS][numRecs/2];
+            addCursors(cursors);
+            env.checkpoint(forceConfig);
+            removeCursors(cursors);
+
+            closeEnv();
+            List<RecoveryInfo> infoList = recoverAndVerify(expectedData, NUM_DBS);
+
+            /* Check that this recovery processed deltas */
+            RecoveryInfo firstInfo = infoList.get(0);
+            assertTrue(firstInfo.numBinDeltas > 0);
+
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /* Add cursors on each value to prevent compression. */
+    private void addCursors(Cursor[][] cursors)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Pin each record with a cursor. */
+        for (int d = 0; d < NUM_DBS; d++) {
+            for (int i = 0; i < cursors[d].length; i++) {
+                cursors[d][i] = dbs[d].openCursor(null, null);
+
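+                /*
+                 * Advance cursor i past i records so that the open cursors
+                 * collectively pin distinct records.
+                 */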
+                for (int j = 0; j < i; j++) {
+                    OperationStatus status =
+                        cursors[d][i].getNext(key, data,
+                                              LockMode.READ_UNCOMMITTED);
+                    assertEquals(OperationStatus.SUCCESS, status);
+                }
+            }
+        }
+    }
+
+    private void removeCursors(Cursor[][] cursors)
+        throws DatabaseException {
+        for (int d = 0; d < NUM_DBS; d++) {
+            for (int i = 0; i < cursors[d].length; i++) {
+                cursors[d][i].close();
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java b/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..52f9348b375df7ade60d3de4f1abe2fd5b5e78b0
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java
@@ -0,0 +1,172 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryDuplicatesTest.java,v 1.16.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class RecoveryDuplicatesTest extends RecoveryTestBase {
+
+    public void testDuplicates()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 10;
+        int numDups = N_DUPLICATES_PER_KEY;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData,
+                       numDups, true, NUM_DBS);
+            txn.commit();
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+	} catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicatesWithDeletion()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 10;
+        int nDups = N_DUPLICATES_PER_KEY;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, nDups, true,
+                       NUM_DBS);
+
+            /* Delete all the even records. */
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+            /* Modify all the records. */
+            //    modifyData(expectedData);
+
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /*
+     * See SR11455 for details.
+     *
+     * This test is checking that the maxTxnId gets recovered properly during
+     * recovery.  The SR has to do with the INFileReader not including
+     * DupCountLN_TX and DelDupLN_TX's in its txnIdTrackingMap.  When these
+     * were not included, it was possible for a transaction to consist solely
+     * of DupCountLN_TX/DelDupLN_TX pairs.  The "deleteData" transaction below
+     * does just this.  If no checkpoint occurred following such a transaction,
+     * then the correct current txnid would not be written to the log and
+     * determining this value during recovery would be left up to the
+     * INFileReader.  However, without reading the DupCountLN_TX/DelDupLN_TX
+     * records, it would not recover the correct value.
+     *
+     * We take the poor man's way out of creating this situation by just
+     * manually asserting the txn id is correct post-recovery.  The expected
+     * id was determined by looking through logs before and after the fix.
+     */
+    public void testSR11455()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, 1);
+        int numRecs = 1;
+        int nDups = 3;
+
+        try {
+            /* Set up a repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, nDups, true, 1);
+	    txn.commit();
+
+            /* Delete all the even records. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, false, 1);
+            txn.abort();
+            closeEnv();
+
+	    /* Open it again, which will run recovery. */
+	    EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+	    recoveryConfig.setTransactional(true);
+	    recoveryConfig.setConfigParam
+		(EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+	    recoveryConfig.setConfigParam
+		(EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+	    env = new Environment(envHome, recoveryConfig);
+
+	    txn = env.beginTransaction(null, null);
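+	    /* The expected id is empirical; see the method comment above. */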
+	    assertEquals(6, txn.getId());
+	    txn.commit();
+	    env.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDuplicatesWithAllDeleted()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, true, NUM_DBS);
+        int numRecs = 10;
+        int nDups = N_DUPLICATES_PER_KEY;
+
+        try {
+            /* Set up an repository of expected data. */
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Insert all the data. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, nDups,
+		       true, NUM_DBS);
+
+            /* Delete all data. */
+            deleteData(txn, expectedData, true, true, NUM_DBS);
+            txn.commit();
+
+            /* Modify all the records. */
+	    //    modifyData(expectedData);
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java b/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..9f4be19737686d0ace8522f50c3f861e20e9436c
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java
@@ -0,0 +1,528 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryEdgeTest.java,v 1.72.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.NodeSequence;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.SearchFileReader;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class RecoveryEdgeTest extends RecoveryTestBase {
+
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        addTests(allTests, false/*keyPrefixing*/);
+        addTests(allTests, true/*keyPrefixing*/);
+        return allTests;
+    }
+
+    @SuppressWarnings("unchecked") // suite.tests returns untyped Enumeration
+    private static void addTests(TestSuite allTests,
+                                 boolean keyPrefixing) {
+
+        TestSuite suite = new TestSuite(RecoveryEdgeTest.class);
+        Enumeration e = suite.tests();
+        while (e.hasMoreElements()) {
+            RecoveryEdgeTest test = (RecoveryEdgeTest) e.nextElement();
+            test.keyPrefixing = keyPrefixing;
+            allTests.addTest(test);
+        }
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(getName() +
+                (keyPrefixing ? ":keyPrefixing" : ":noKeyPrefixing"));
+        super.tearDown();
+    }
+
+    public void testNoLogFiles()
+        throws Throwable {
+
+        /* Creating an environment runs recovery. */
+        EnvironmentImpl envImpl = null;
+        try {
+            EnvironmentConfig noFileConfig = TestUtils.initEnvConfig();
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(noFileConfig, false);
+            noFileConfig.setConfigParam
+                (EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true");
+            noFileConfig.setTransactional(true);
+            noFileConfig.setAllowCreate(true);
+            envImpl = new EnvironmentImpl(envHome,
+                                      noFileConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            List<String> dbList = envImpl.getDbTree().getDbNames();
+            assertEquals("no dbs exist", 0, dbList.size());
+
+            /* Fake a shutdown/startup. */
+            envImpl.close();
+            envImpl = new EnvironmentImpl(envHome,
+                                      noFileConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+            dbList = envImpl.getDbTree().getDbNames();
+            assertEquals("no dbs exist", 0, dbList.size());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (envImpl != null)
+                envImpl.close();
+        }
+    }
+
+    /**
+     * Test setting of the database ids in recovery.
+     */
+    public void testDbId()
+        throws Throwable {
+
+        Transaction createTxn = null;
+        try {
+
+            /*
+             * Create an environment and three databases. The first two
+             * ids are allocated to the name db and the id db.
+             */
+            EnvironmentConfig createConfig = TestUtils.initEnvConfig();
+            createConfig.setTransactional(true);
+            createConfig.setAllowCreate(true);
+            createConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                        "6");
+            env = new Environment(envHome, createConfig);
+
+            int numStartDbs = 1;
+            createTxn = env.beginTransaction(null, null);
+
+            /* Check id of each db. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            for (int i = 0; i < numStartDbs; i++) {
+                Database anotherDb = env.openDatabase(createTxn, "foo" + i,
+						      dbConfig);
+                assertEquals((i+3),
+			     DbInternal.dbGetDatabaseImpl(anotherDb).
+                             getId().getId());
+                anotherDb.close();
+            }
+            createTxn.commit();
+            env.close();
+
+            /*
+             * Go through a set of open, creates, and closes. Check id after
+             * recovery.
+             */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            createTxn = null;
+            for (int i = numStartDbs; i < numStartDbs + 3; i++) {
+                env = new Environment(envHome, envConfig);
+
+                createTxn = env.beginTransaction(null, null);
+                Database anotherDb = env.openDatabase(createTxn, "foo" + i,
+						      dbConfig);
+                assertEquals(i+3,
+                             DbInternal.dbGetDatabaseImpl(anotherDb).getId().getId());
+                anotherDb.close();
+                createTxn.commit();
+                env.close();
+            }
+        } catch (Throwable t) {
+            if (createTxn != null) {
+                createTxn.abort();
+            }
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test setting the node ids in recovery.
+     */
+    public void testNodeId()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(1024, true, NUM_DBS);
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* Find the largest node id that has been allocated. */
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            NodeSequence nodeSequence = envImpl.getNodeSequence();
+            long maxSeenNodeId = nodeSequence.getLastLocalNodeId();
+
+            /* Close the environment, then recover. */
+            closeEnv();
+            EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+            recoveryConfig.setConfigParam(
+                           EnvironmentParams.NODE_MAX.getName(), "6");
+            recoveryConfig.setConfigParam(
+                           EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                           "false");
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(recoveryConfig, false);
+            env = new Environment(envHome, recoveryConfig);
+            LN ln = new LN(new byte[0],
+                           DbInternal.envGetEnvironmentImpl(env),
+                           false); // replicated
+
+            /* Recovery should have initialized the next node id to use. */
+            assertTrue("maxSeenNodeId=" + maxSeenNodeId +
+                       " ln=" + ln.getNodeId(),
+                       maxSeenNodeId < ln.getNodeId());
+            maxSeenNodeId = nodeSequence.getLastLocalNodeId();
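+            /*
+             * This is a standalone (non-replicated) environment, so no
+             * replicated node ids should have been handed out.
+             */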
+            assertEquals(0, nodeSequence.getLastReplicatedNodeId());
+
+            /*
+             * One more time -- this recovery will get the node id off the
+             * checkpoint of the environment close. This checkpoint records
+             * the fact that the node id was bumped forward by the creation
+             * of the LN above.
+             */
+            env.close();
+            env = new Environment(envHome, recoveryConfig);
+            ln = new LN(new byte[0],
+                        DbInternal.envGetEnvironmentImpl(env),
+                        false); // replicated
+            /*
+             * The environment re-opening will increment the node id
+             * several times because of the EOF node id.
+             */
+            assertTrue(maxSeenNodeId < ln.getNodeId());
+            assertEquals(0, nodeSequence.getLastReplicatedNodeId());
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test setting the txn id.
+     */
+    public void testTxnId()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(1024, true, NUM_DBS);
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Make txns before and after a checkpoint */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+            env.checkpoint(forceConfig);
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 5, 6, expectedData, 1, false, NUM_DBS);
+
+            /* Find the largest node id that has been allocated. */
+            long maxTxnId = txn.getId();
+            txn.abort();
+
+            /* Close the environment, then recover. */
+            closeEnv();
+
+            EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+            recoveryConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+            recoveryConfig.setTransactional(true);
+            env = new Environment(envHome, recoveryConfig);
+
+            /*
+             * Check that the next txn id is larger than the last seen.
+             * A few txn ids were eaten by AutoTxns during recovery, so do
+             * a basic check that we didn't eat more than 11.
+             */
+            txn = env.beginTransaction(null, null);
+            createDbs(txn, NUM_DBS);
+            assertTrue(maxTxnId < txn.getId());
+            assertTrue((txn.getId() - maxTxnId) < 11);
+
+            /*
+             * Do something with this txn so a node with its value shows up in
+             * the log.
+             */
+            insertData(txn, 7, 8, expectedData, 1, false, NUM_DBS);
+            long secondMaxTxnId = txn.getId();
+            txn.abort();
+
+            /*
+             * One more time -- this recovery will get the txn id off the
+             * checkpoint of the second environment creation.
+             */
+            closeEnv();
+            env = new Environment(envHome, recoveryConfig);
+            txn = env.beginTransaction(null, null);
+            assertTrue(secondMaxTxnId < txn.getId());
+            assertTrue((txn.getId() - secondMaxTxnId) < 10);
+            txn.abort();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test writing a non-transactional db in a transactional environment.
+     * Make sure we can recover.
+     */
+    public void testNonTxnalDb()
+        throws Throwable {
+
+        createEnv(1024, false);
+        try {
+
+            /*
+             * Create a database and write into it non-txnally. This should
+             * be allowed.
+             */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            Database dbA = env.openDatabase(null, "NotTxnal", dbConfig);
+
+            DatabaseEntry key = new StringDbt("foo");
+            DatabaseEntry data = new StringDbt("bar");
+            dbA.put(null, key, data);
+
+            /*
+             * Close and recover -- the database should still be there
+             * because we're shutting down cleanly.
+             */
+            dbA.close();
+            env.close();
+            createEnv(1024, false);
+
+            dbA = env.openDatabase(null, "NotTxnal", null);
+            dbA.close();
+
+            /*
+             * Create a database with auto-commit, then write a record.
+             * The database should exist after recovery.
+             */
+            dbConfig.setTransactional(true);
+            Database dbB = env.openDatabase(null, "Txnal", dbConfig);
+            dbB.close();
+            dbB = env.openDatabase(null, "Txnal", null);
+            dbB.put(null, key, data);
+            dbB.close();
+            env.close();
+
+            /*
+             * Recover. We should see the database. We may or may not see
+             * the records.
+             */
+            createEnv(1024, false);
+            List<String> dbNames = env.getDatabaseNames();
+            assertEquals(2, dbNames.size());
+            assertEquals("Txnal", dbNames.get(1));
+            assertEquals("NotTxnal", dbNames.get(0));
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Test that we can recover with a bad checksum.
+     */
+    public void testBadChecksum()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(2048, false, 1);
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Make txns before and after a checkpoint */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, 1);
+            txn.commit();
+            env.checkpoint(forceConfig);
+
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 5, 6, expectedData, 1, true, 1);
+            txn.commit();
+
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 7, 8, expectedData, 1, false, 1);
+
+            /* Close the environment, then recover. */
+            closeEnv();
+
+            /* Write some 0's into the last file. */
+            writeBadStuffInLastFile();
+
+            recoverAndVerify(expectedData, 1);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Another bad checksum test. Make sure that there is no checkpoint in the
+     * last file so that this recovery will have to read backwards into the
+     * previous file. Also recover in read-only mode to make sure we don't
+     * process the bad portion of the log.
+     */
+    public void testBadChecksumReadOnlyReadPastLastFile()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(500, false, 1);
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Commit some data, checkpoint. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, 1);
+            txn.commit();
+            env.checkpoint(forceConfig);
+
+            /*
+             * Remember how many files we have, so we know where the last
+             * checkpoint is.
+             */
+            String[] suffixes = new String[] {FileManager.JE_SUFFIX};
+            String[] fileList = FileManager.listFiles(envHome, suffixes);
+            int startingNumFiles = fileList.length;
+
+            /* Now add enough non-committed data to add more files. */
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 7, 50, expectedData, 1, false, 1);
+
+            /* Close the environment, then recover. */
+            closeEnv();
+
+            /* Make sure that we added files after the checkpoint. */
+            fileList = FileManager.listFiles(envHome, suffixes);
+            assertTrue(fileList.length > startingNumFiles);
+
+            /* Write some 0's into the last file. */
+            writeBadStuffInLastFile();
+
+            recoverROAndVerify(expectedData, 1);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    private void writeBadStuffInLastFile()
+        throws IOException {
+
+        String[] files =
+            FileManager.listFiles(envHome,
+                                  new String[] {FileManager.JE_SUFFIX});
+        File lastFile = new File(envHome, files[files.length - 1]);
+        RandomAccessFile rw = new RandomAccessFile(lastFile, "rw");
+
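+        /*
+         * Overwrite six bytes near the end of the file so the checksum of
+         * the final log entry no longer matches.
+         */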
+        rw.seek(rw.length() - 10);
+        rw.writeBytes("000000");
+        rw.close();
+    }
+
+    /**
+     * Test that we can recover with no checkpoint end.
+     */
+    public void testNoCheckpointEnd() 
+        throws Exception {
+
+        /* Create a new environment. */
+        EnvironmentConfig createConfig = TestUtils.initEnvConfig();
+        createConfig.setTransactional(true);
+        createConfig.setAllowCreate(true);
+        env = new Environment(envHome, createConfig);
+
+        /* Truncate before the first ckpt end. */
+        truncateAtEntry(LogEntryType.LOG_CKPT_END);
+        env.close();
+
+        /* Check that we can recover. */
+        createConfig.setAllowCreate(false);
+        env = new Environment(envHome, createConfig);
+        env.close();
+    }
+
+    /**
+     * Truncate the log so it doesn't include the first occurrence of this
+     * log entry type.
+     */
+    private void truncateAtEntry(LogEntryType entryType) 
+        throws Exception {
+    	
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /*
+         * Find the first given log entry type and truncate the file so it
+         * doesn't include that entry.
+         */
+        SearchFileReader reader =
+            new SearchFileReader(envImpl,
+                                 1000,           // readBufferSize
+                                 true,           // forward
+                                 0,              // startLSN
+                                 DbLsn.NULL_LSN, // endLSN
+                                 entryType);
+
+        long targetLsn = 0;
+        if (reader.readNextEntry()) {
+            targetLsn = reader.getLastLsn();
+        } else {
+            fail("There should be some kind of " + entryType + " in the log.");
+        }
+        
+        assertTrue(targetLsn != 0);
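+        /*
+         * Truncate at the entry's file and offset, dropping that entry and
+         * everything logged after it.
+         */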
+        envImpl.getFileManager().truncateLog(DbLsn.getFileNumber(targetLsn),
+                                             DbLsn.getFileOffset(targetLsn));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryTest.java b/test/com/sleepycat/je/recovery/RecoveryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e9d4e11e9a364ded63f8fd2b8dbedb7093b3dbc3
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryTest.java
@@ -0,0 +1,294 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryTest.java,v 1.61.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+public class RecoveryTest extends RecoveryTestBase {
+
+    /**
+     * Basic insert, delete data.
+     */
+    public void testBasic()
+        throws Throwable {
+
+        doBasic(true);
+    }
+
+    /**
+     * Basic insert, delete data with BtreeComparator
+     */
+    public void testBasicRecoveryWithBtreeComparator()
+        throws Throwable {
+
+        btreeComparisonFunction = new BtreeComparator(true);
+        doBasic(true);
+    }
+
+    /**
+     * Test that put(OVERWRITE) works correctly with duplicates.
+     */
+    public void testDuplicateOverwrite()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 10, false, NUM_DBS);
+        try {
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            DatabaseEntry key = new DatabaseEntry("aaaaa".getBytes());
+            DatabaseEntry data1 = new DatabaseEntry("dddddddddd".getBytes());
+            DatabaseEntry data2 = new DatabaseEntry("eeeeeeeeee".getBytes());
+            DatabaseEntry data3 = new DatabaseEntry("ffffffffff".getBytes());
+            Database db = dbs[0];
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data1));
+            addExpectedData(expectedData, 0, key, data1, true);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data2));
+            addExpectedData(expectedData, 0, key, data2, true);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data3));
+            addExpectedData(expectedData, 0, key, data3, true);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data3));
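+            /*
+             * The same key and data again overwrites the duplicate in
+             * place, so nothing new is added to the expected set.
+             */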
+            txn.commit();
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Basic insert, delete data, with fewer checkpoints.
+     */
+    public void testBasicFewerCheckpoints()
+        throws Throwable {
+
+        doBasic(false);
+    }
+
+    public void testSR8984Part1()
+        throws Throwable {
+
+        doTestSR8984Work(true);
+    }
+
+    public void testSR8984Part2()
+        throws Throwable {
+
+        doTestSR8984Work(false);
+    }
+
+    private void doTestSR8984Work(boolean sameKey)
+        throws DatabaseException {
+
+        final int NUM_EXTRA_DUPS = 150;
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        /* Make an environment and open it */
+        envConfig.setTransactional(false);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(false);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, "testSR8984db", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry("k1".getBytes());
+        DatabaseEntry data = new DatabaseEntry("d1".getBytes());
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        assertEquals(OperationStatus.SUCCESS, db.delete(null, key));
+
+        if (!sameKey) {
+            data.setData("d2".getBytes());
+        }
+        /* Cause a dup tree of some depth to be created. */
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        for (int i = 3; i < NUM_EXTRA_DUPS; i++) {
+            data.setData(("d" + i).getBytes());
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        data.setData("d1".getBytes());
+
+        Cursor c = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     c.getFirst(key, data, LockMode.DEFAULT));
+
+        c.close();
+        db.close();
+
+        /* Force an abrupt close so there is no checkpoint at the end. */
+        closeEnv();
+        env = new Environment(envHome, envConfig);
+        db = env.openDatabase(null, "testSR8984db", dbConfig);
+        c = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     c.getFirst(key, data, LockMode.DEFAULT));
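+        /*
+         * Expect NUM_EXTRA_DUPS - 2 duplicates: one from the initial put
+         * plus one for each i in [3, NUM_EXTRA_DUPS).
+         */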
+        assertEquals(NUM_EXTRA_DUPS - 2, c.count());
+        c.close();
+        db.close();
+        env.close();
+    }
+
+    /**
+     * Insert data and delete data in several dbs.
+     */
+    public void doBasic(boolean runCheckpointerDaemon)
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, runCheckpointerDaemon, NUM_DBS);
+        int numRecs = NUM_RECS;
+
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // delete all the even records
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+            // modify all the records
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, NUM_RECS/2, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert data, then delete all data, in several dbs.
+     */
+    public void testBasicDeleteAll()
+        throws Throwable {
+
+        createEnvAndDbs(1024, true, NUM_DBS);
+        int numRecs = NUM_RECS;
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData = 
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // modify half the records
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, numRecs/2, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // delete all the records
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, true, NUM_DBS);
+            txn.commit();
+
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    protected static class BtreeComparator implements Comparator<byte[]> {
+        protected boolean ascendingComparison = true;
+
+        public BtreeComparator() {
+        }
+
+        protected BtreeComparator(boolean ascendingComparison) {
+            this.ascendingComparison = ascendingComparison;
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            byte[] arg1;
+            byte[] arg2;
+            if (ascendingComparison) {
+                arg1 = o1;
+                arg2 = o2;
+            } else {
+                arg1 = o2;
+                arg2 = o1;
+            }
+            int a1Len = arg1.length;
+            int a2Len = arg2.length;
+
+            int limit = Math.min(a1Len, a2Len);
+
+            for (int i = 0; i < limit; i++) {
+                byte b1 = arg1[i];
+                byte b2 = arg2[i];
+                if (b1 == b2) {
+                    continue;
+                }
+
+                /*
+                 * Bytes are signed in Java, so mask with 0xFF to compare
+                 * them as unsigned values.
+                 */
+                return ((b1 & 0xFF) - (b2 & 0xFF));
+            }
+
+            return (a1Len - a2Len);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryTestBase.java b/test/com/sleepycat/je/recovery/RecoveryTestBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..14757be29f8c685eef25173cdbfbbf2a674e19da
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryTestBase.java
@@ -0,0 +1,891 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecoveryTestBase.java,v 1.118.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.util.TestUtils;
+
+public class RecoveryTestBase extends TestCase {
+    private static final boolean DEBUG = false;
+
+    protected static final int NUM_RECS = 257;
+    protected static final int N_DUPLICATES_PER_KEY = 28;
+    protected static final int NUM_DBS = 3;
+
+    protected static final String DB_NAME = "testDb";
+
+    protected File envHome;
+    protected Environment env;
+    protected Database[] dbs;
+    protected EnvironmentConfig envConfig;
+    protected CheckpointConfig forceConfig;
+    protected Comparator<byte[]> btreeComparisonFunction = null;
+    protected boolean keyPrefixing = false;
+
+    public RecoveryTestBase() {
+	init();
+    }
+
+    public RecoveryTestBase(boolean reduceMemory) {
+	init();
+	envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(),
+				 new Long(1 << 24).toString());
+    }
+
+    private void init() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        Key.DUMP_TYPE = DumpType.BINARY;
+        envConfig = TestUtils.initEnvConfig();
+        forceConfig = new CheckpointConfig();
+        forceConfig.setForce(true);
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException E) {
+            }
+        }
+	env = null;
+	dbs = null;
+	envConfig = null;
+	forceConfig = null;
+        /*        TestUtils.removeFiles("TearDown", envHome,
+                              FileManager.JE_SUFFIX, true);
+        */
+    }
+
+    /**
+     * Make an environment and open it. Running with or without the
+     * checkpoint daemon changes how recovery is exercised.
+     */
+    protected void createEnv(int fileSize, boolean runCheckpointDaemon)
+        throws DatabaseException {
+
+	createEnvInternal(fileSize, runCheckpointDaemon, false);
+    }
+
+    protected void createXAEnv(int fileSize, boolean runCheckpointDaemon)
+        throws DatabaseException {
+
+	createEnvInternal(fileSize, runCheckpointDaemon, true);
+    }
+
+    private void createEnvInternal(int fileSize,
+				   boolean runCheckpointDaemon,
+				   boolean createXAEnv)
+        throws DatabaseException {
+
+        /* Make an environment and open it. */
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 Integer.toString(fileSize));
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+				 "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+	envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+				 "false");
+	envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+				 "false");
+
+        if (!runCheckpointDaemon) {
+            envConfig.setConfigParam
+		(EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        }
+        setExtraProperties();
+	if (createXAEnv) {
+	    env = new XAEnvironment(envHome, envConfig);
+	} else {
+	    env = new Environment(envHome, envConfig);
+	}
+    }
+
+    /*
+     * Overridden by subclasses to set additional properties.
+     */
+    protected void setExtraProperties()
+        throws DatabaseException {
+
+    }
+
+    /**
+     * Open the test databases within the given transaction.
+     */
+    protected void createDbs(Transaction txn, int numDbs)
+        throws DatabaseException {
+
+        /* Make the databases and open them. */
+        dbs = new Database[numDbs];
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+	if (btreeComparisonFunction != null) {
+	    dbConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparisonFunction.getClass());
+	}
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setKeyPrefixing(keyPrefixing);
+        for (int i = 0; i < numDbs; i++) {
+            dbs[i] = env.openDatabase(txn, DB_NAME + i, dbConfig);
+        }
+    }
+
+    /**
+     * Make an environment and databases.
+     */
+    protected void createEnvAndDbs(int fileSize,
+                                   boolean runCheckpointerDaemon,
+                                   int numDbs)
+        throws DatabaseException {
+
+	createEnvAndDbsInternal(fileSize, runCheckpointerDaemon,
+				numDbs, false);
+    }
+
+    protected void createXAEnvAndDbs(int fileSize,
+				     boolean runCheckpointerDaemon,
+				     int numDbs)
+        throws DatabaseException {
+
+	createEnvAndDbsInternal(fileSize, runCheckpointerDaemon,
+				numDbs, true);
+    }
+
+    protected void createEnvAndDbsInternal(int fileSize,
+					   boolean runCheckpointerDaemon,
+					   int numDbs,
+					   boolean createXAEnv)
+        throws DatabaseException {
+
+        createEnvInternal(fileSize, runCheckpointerDaemon, createXAEnv);
+        Transaction txn = env.beginTransaction(null, null);
+        createDbs(txn, numDbs);
+        txn.commit();
+    }
+
+    /**
+     * Throw away the environment so the next open will cause a recovery.
+     */
+    protected void closeEnv()
+        throws DatabaseException {
+
+        TestUtils.validateNodeMemUsage(DbInternal.envGetEnvironmentImpl(env),
+                                      false);
+
+        /* Close the environment. */
+        if (dbs != null) {
+            for (int i = 0; i < dbs.length; i++) {
+                if (dbs[i] != null) {
+                    dbs[i].close();
+                }
+            }
+        }
+	forceCloseEnvOnly();
+    }
+
+    /* Force the environment to be closed, even with outstanding handles. */
+    protected void forceCloseEnvOnly()
+	throws DatabaseException {
+
+	/* Close without checkpointing, in order to exercise recovery better. */
+	DbInternal.envGetEnvironmentImpl(env).close(false);
+	env = null;
+    }
+
+    /*
+     * Recover the databases and check the data. Return a list of the
+     * RecoveryInfos generated by each recovery.
+     */
+    protected List<RecoveryInfo> recoverAndVerify(Map<TestData, Set<TestData>> expectedData, int numDbs)
+        throws DatabaseException {
+
+	return recoverAndVerifyInternal(expectedData, numDbs,
+                                        false,  // XA
+                                        false); // readOnly
+    }
+
+    protected List<RecoveryInfo> recoverROAndVerify(Map<TestData, Set<TestData>> expectedData, int numDbs)
+        throws DatabaseException {
+
+	return recoverAndVerifyInternal(expectedData, numDbs,
+                                        false,  // XA
+                                        true);  // readOnly
+    }
+
+    /*
+     * Recover the databases and check the data. Return a list of the
+     * RecoveryInfos generated by each recovery.
+     */
+    protected List<RecoveryInfo> xaRecoverAndVerify(Map<TestData, Set<TestData>> expectedData, int numDbs)
+        throws DatabaseException {
+
+	return recoverAndVerifyInternal(expectedData, numDbs,
+			                true,   // XA
+					false); // readOnly
+    }
+
+    private List<RecoveryInfo> recoverAndVerifyInternal(Map<TestData, Set<TestData>> expectedData,
+					  int numDbs,
+					  boolean createXAEnv,
+                                          boolean readOnlyMode)
+        throws DatabaseException {
+
+	List<RecoveryInfo> infoList = recoverOnlyInternal(numDbs, createXAEnv, readOnlyMode);
+        verifyData(expectedData, numDbs);
+	TestUtils.validateNodeMemUsage(DbInternal.envGetEnvironmentImpl(env),
+				       false);	
+        /* Close without a checkpoint, recover once more, then re-verify. */
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        env = new Environment(envHome, getRecoveryConfig(readOnlyMode));
+        EnvironmentImpl envImpl =
+	    DbInternal.envGetEnvironmentImpl(env);
+        infoList.add(envImpl.getLastRecoveryInfo());
+        verifyData(expectedData, numDbs);
+        TestUtils.validateNodeMemUsage(envImpl, false);
+        env.close();
+	return infoList;
+    }
+
+    private EnvironmentConfig getRecoveryConfig(boolean readOnlyMode) {
+        EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+        recoveryConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "6");
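+        /* A small fanout (6) forces multi-level trees even with little data. */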
+	recoveryConfig.setConfigParam
+            (EnvironmentParams.MAX_MEMORY.getName(),
+             new Long(1 << 24).toString());
+        recoveryConfig.setReadOnly(readOnlyMode);
+
+        /*
+         * Don't run checkLeaks, because verify is running while the system is
+         * not quiescent. The other daemons are running.
+         */
+        recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false");
+	recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+	recoveryConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+
+        if (DEBUG) {
+            recoveryConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_FILE.getName(), "true");
+            recoveryConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_LEVEL.getName(), "FINE");
+        }
+
+	recoveryConfig.setTransactional(true);
+	return recoveryConfig;
+    }
+
+    protected List<RecoveryInfo> recoverOnly(int numDbs)
+	throws DatabaseException {
+
+	return recoverOnlyInternal(numDbs,
+                                   false,   // XA
+                                   false);  // read only
+    }
+
+    protected List<RecoveryInfo> xaRecoverOnly(int numDbs)
+	throws DatabaseException {
+
+	return recoverOnlyInternal(numDbs,
+                                   true,   // XA
+                                   false); // read only
+    }
+
+    private List<RecoveryInfo> recoverOnlyInternal(int numDbs,
+                                     boolean createXAEnv,
+                                     boolean readOnlyMode)
+	throws DatabaseException {
+
+        List<RecoveryInfo> infoList = new ArrayList<RecoveryInfo>();
+
+        /* Open it again, which will run recovery. */
+	if (createXAEnv) {
+	    env = new XAEnvironment(envHome, getRecoveryConfig(readOnlyMode));
+	} else {
+	    env = new Environment(envHome, getRecoveryConfig(readOnlyMode));
+	}
+        TestUtils.validateNodeMemUsage(DbInternal.envGetEnvironmentImpl(env),
+                                      false);
+
+        infoList.add
+            (DbInternal.envGetEnvironmentImpl(env).getLastRecoveryInfo());
+
+        return infoList;
+    }
+
+    /**
+     * Compare the data in the databases against the data in the expected data
+     * set.
+     */
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData, int numDbs)
+        throws DatabaseException  {
+
+        verifyData(expectedData, true, numDbs);
+    }
+
+    /**
+     * Compare the data in the databases against the data in the expected data
+     * set.
+     */
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData,
+                              boolean checkInList,
+                              int numDbs)
+        throws DatabaseException  {
+
+	verifyData(expectedData, checkInList, 0, numDbs);
+    }
+
+    @SuppressWarnings("unchecked")
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData,
+                              boolean checkInList,
+                              int startDb,
+                              int endDb)
+        throws DatabaseException  {
+
+        /* Run verify. (checkInList currently has no effect here.) */
+        assertTrue(env.verify(null, System.err));
+
+        /*
+         * Get a deep copy of expected data (cloning the data sets, not the
+         * items within dataSet), since verifyData will remove items, and we
+         * need to keep the expectedData set intact because we call verify
+         * repeatedly.
+         */
+        Map<TestData, Set<TestData>> useData = 
+            new HashMap<TestData, Set<TestData>>();
+
+        Iterator<Map.Entry<TestData, Set<TestData>>> iter = 
+            expectedData.entrySet().iterator();
+
+        while (iter.hasNext()) {
+            Map.Entry<TestData, Set<TestData>> entry = iter.next();
+            Set<TestData> clone = (Set<TestData>)((HashSet<TestData>)entry.getValue()).clone();
+            useData.put(entry.getKey(), clone);
+        }
+
+        /* Generate an expected count map. */
+        Map<TestData, Integer> countMap = generateCountMap(expectedData);
+
+        /* Check each db in turn. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+	if (btreeComparisonFunction != null) {
+	    dbConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparisonFunction.getClass());
+	}
+        dbConfig.setTransactional(env.getConfig().getTransactional());
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setReadOnly(true);
+        for (int d = startDb; d < endDb; d++) {
+            Database checkDb = env.openDatabase(null, DB_NAME + d,
+						dbConfig);
+            Cursor myCursor = checkDb.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status =
+		myCursor.getFirst(key, data, LockMode.DEFAULT);
+            DbInternal.envGetEnvironmentImpl(env).verifyCursors();
+            int numSeen = 0;
+
+            while (status == OperationStatus.SUCCESS) {
+
+                /* The key should have been in the expected data set. */
+                removeExpectedData(useData, d, key, data, true);
+
+                /* The count should be right. */
+                int count = myCursor.count();
+                assertEquals("Count not right for key " +
+                             TestUtils.dumpByteArray(key.getData()),
+                             getExpectedCount(countMap, d, key), count);
+
+                status = myCursor.getNext(key, data, LockMode.DEFAULT);
+                numSeen++;
+            }
+
+            myCursor.close();
+
+            /* Should be nothing left in the expected data map. */
+            if (DEBUG) {
+                System.out.println("Finished db" + d + " numSeen=" +numSeen);
+                dumpExpected(useData);
+            }
+            checkDb.close();
+        }
+
+        assertEquals(0, useData.size());
+    }
+
+    /**
+     * Process the expected data map to generate expected counts. For each
+     * database, make a map of key value to count.
+     */
+    private Map<TestData, Integer> 
+        generateCountMap(Map<TestData, Set<TestData>> expectedData) {
+
+        Map<TestData, Integer> countMap = new HashMap<TestData, Integer>();
+
+        Iterator<Set<TestData>> iter = expectedData.values().iterator();
+        while (iter.hasNext()) {
+            Set<TestData> dataSet = iter.next();
+            Iterator<TestData> dataIter = dataSet.iterator();
+            while (dataIter.hasNext()) {
+                TestData t = dataIter.next();
+                TestData countKey = new TestData(t.dbNum, t.key);
+                Integer count = countMap.get(countKey);
+                if (count == null) {
+                    countMap.put(countKey, new Integer(1));
+                } else {
+                    countMap.put(countKey, new Integer(count.intValue()+1));
+                }
+            }
+        }
+        return countMap;
+    }
+
+    /**
+     * @return the expected count value for a given key in a given db.
+     */
+    private int getExpectedCount(Map<TestData, Integer> countMap,
+				 int whichDb,
+				 DatabaseEntry key) {
+        return countMap.get(new TestData(whichDb, key.getData())).intValue();
+    }
+
+    /**
+     * Insert data over many databases.
+     */
+    protected void insertData(Transaction txn,
+                              int startVal,
+                              int endVal,
+                              Map<TestData, Set<TestData>> expectedData,
+                              int nDuplicatesPerKey,
+                              boolean addToExpectedData,
+                              int numDbs)
+        throws DatabaseException {
+
+        insertData(txn, startVal, endVal, expectedData,
+                   nDuplicatesPerKey, false, addToExpectedData,
+                   0, numDbs);
+    }
+
+    protected void insertData(Transaction txn,
+                              int startVal,
+                              int endVal,
+                              Map<TestData, Set<TestData>> expectedData,
+                              int nDuplicatesPerKey,
+                              boolean addToExpectedData,
+                              int startDb,
+			      int endDb)
+        throws DatabaseException {
+
+        insertData(txn, startVal, endVal, expectedData,
+                   nDuplicatesPerKey, false, addToExpectedData,
+                   startDb, endDb);
+    }
+
+    /**
+     * Insert data over many databases.
+     *
+     * @param toggle if true, insert every other value.
+     */
+    protected void insertData(Transaction txn,
+                              int startVal,
+                              int endVal,
+                              Map<TestData, Set<TestData>> expectedData,
+                              int nDuplicatesPerKey,
+                              boolean toggle,
+                              boolean addToExpectedData,
+                              int numDbs)
+        throws DatabaseException {
+
+	insertData(txn, startVal, endVal, expectedData, nDuplicatesPerKey,
+		   toggle, addToExpectedData, 0, numDbs);
+    }
+
+    /**
+     * Insert data over many databases.
+     *
+     * @param toggle if true, insert every other value.
+     */
+    protected void insertData(Transaction txn,
+                              int startVal,
+                              int endVal,
+                              Map<TestData, Set<TestData>> expectedData,
+                              int nDuplicatesPerKey,
+                              boolean toggle,
+                              boolean addToExpectedData,
+                              int startDb,
+			      int endDb)
+        throws DatabaseException {
+
+        Cursor[] cursors = getCursors(txn, startDb, endDb);
+
+        /* Make sure this test inserts something! */
+        assertTrue(endVal - startVal > -1);
+
+        /* When toggling, skip every other value. */
+        int incVal = (toggle) ? 2 : 1;
+
+        /* Insert in ascending or descending key order. */
+        if (startVal < endVal) {
+            for (int i = startVal; i <= endVal; i += incVal) {
+                insertOneRecord(cursors, i, expectedData,
+                                nDuplicatesPerKey, addToExpectedData);
+            }
+        } else {
+            for (int i = startVal; i >= endVal; i -= incVal) {
+                insertOneRecord(cursors, i, expectedData,
+                                nDuplicatesPerKey, addToExpectedData);
+            }
+        }
+
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i].close();
+        }
+    }
+
+    /**
+     * Add to the set of expected results. ExpectedData is keyed by a TestData
+     * object that wraps db number and key, and points to sets of TestData
+     * objects that wrap db number, key, and data.
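+     *
+     * For example (hypothetical values): after adding key k1 with duplicates
+     * d1 and d2 in db 0, the map contains
+     *   TestData(0, k1) -> { TestData(0, k1, d1), TestData(0, k1, d2) }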
+     */
+    protected void addExpectedData(Map<TestData, Set<TestData>> expectedData,
+				   int dbNum,
+				   DatabaseEntry key,
+				   DatabaseEntry data,
+				   boolean expectCommit) {
+        if (expectCommit) {
+            TestData keyTestData = new TestData(dbNum, key, null);
+            Set<TestData> dataSet = expectedData.get(keyTestData);
+            if (dataSet == null) {
+                dataSet = new HashSet<TestData>();
+                expectedData.put(keyTestData, dataSet);
+            }
+
+            dataSet.add(new TestData(dbNum, key, data));
+        }
+    }
+
+    /**
+     * Remove from the set of expected results.
+     */
+    private void removeExpectedData(Map<TestData, Set<TestData>> expectedData,
+				    int dbNum,
+				    DatabaseEntry key,
+                                    DatabaseEntry data,
+				    boolean expectCommit) {
+        if (expectCommit) {
+            TestData keyTestData = new TestData(dbNum, key, null);
+            Set<TestData> dataSet = expectedData.get(keyTestData);
+            assertTrue("Should be a data set for " + keyTestData,
+                       (dataSet != null));
+            assertTrue("Should be able to remove key " + key +
+                       " from expected data ",
+                       dataSet.remove(new TestData(dbNum, key, data)));
+            if (dataSet.size() == 0) {
+                expectedData.remove(keyTestData);
+            }
+        }
+    }
+
+    /**
+     * @return a set of cursors for the test databases.
+     */
+    private Cursor[] getCursors(Transaction txn, int startDb, int endDb)
+        throws DatabaseException {
+
+        Cursor[] cursors = new Cursor[endDb - startDb];
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i] = dbs[startDb + i].openCursor(txn, null);
+        }
+        return cursors;
+    }
+
+    /**
+     * Insert the given record into all databases.
+     */
+    private void insertOneRecord(Cursor[] cursors,
+                                 int val,
+                                 Map<TestData, Set<TestData>> expectedData,
+                                 int nDuplicatesPerKey,
+                                 boolean expectCommit)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int c = 0; c < cursors.length; c++) {
+
+            int testVal = val + c;
+            byte[] keyData = TestUtils.getTestArray(testVal);
+            byte[] dataData = TestUtils.byteArrayCopy(keyData);
+            key.setData(keyData);
+            for (int d = 0; d < nDuplicatesPerKey; d++) {
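+                /* Vary byte 1 on each pass so every duplicate is distinct. */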
+                dataData = TestUtils.byteArrayCopy(dataData);
+                dataData[1]++;
+                data.setData(dataData);
+
+                assertEquals("Insertion of key " +
+                             TestUtils.dumpByteArray(keyData),
+                             OperationStatus.SUCCESS,
+			     cursors[c].putNoDupData(key, data));
+
+                addExpectedData(expectedData, c, key, data, expectCommit);
+            }
+        }
+    }
+
+    /**
+     * Delete either every other or all data.
+     */
+    protected void deleteData(Transaction txn, Map<TestData, Set<TestData>> expectedData,
+                              boolean all, boolean expectCommit, int numDbs)
+        throws DatabaseException {
+
+        Cursor[] cursors = getCursors(txn, 0, numDbs);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int d = 0; d < cursors.length; d++) {
+            OperationStatus status =
+		cursors[d].getFirst(key, data, LockMode.DEFAULT);
+            boolean toggle = true;
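+            /*
+             * When "all" is false, toggle flips on each record so only every
+             * other record is deleted; when "all" is true it stays set and
+             * every record is deleted.
+             */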
+            int deleteCount = 0;
+            while (status == OperationStatus.SUCCESS) {
+                if (toggle) {
+                    removeExpectedData(expectedData, d, key, data,
+                                       expectCommit);
+                    assertEquals(OperationStatus.SUCCESS, cursors[d].delete());
+                    deleteCount++;
+                    toggle = all;
+                } else {
+                    toggle = true;
+                }
+                status = cursors[d].getNext(key, data, LockMode.DEFAULT);
+            }
+            /* Make sure the test deletes something! */
+            assertTrue(deleteCount > 0);
+        }
+
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i].close();
+        }
+    }
+
+    /**
+     * Modify data.
+     * @param txn owning txn
+     * @param endVal end point of the modification range
+     * @param expectedData store of expected values for verification at end
+     * @param increment used to modify the data.
+     * @param expectCommit if true, reflect change in expected map. Sometimes
+     *         we don't want to do this because we plan to make the txn abort.
+     */
+    protected void modifyData(Transaction txn, int endVal,
+                              Map<TestData, Set<TestData>> expectedData, int increment,
+                              boolean expectCommit, int numDbs)
+        throws DatabaseException {
+
+        Cursor[] cursors = getCursors(txn, 0, numDbs);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int d = 0; d < cursors.length; d++) {
+
+            /* Position the cursor at the first record. */
+            OperationStatus status =
+		cursors[d].getFirst(key, data, LockMode.DEFAULT);
+
+            /* For each record within the range, change the data. */
+            int modCount = 0;
+            int keyVal = TestUtils.getTestVal(key.getData());
+            while ((status == OperationStatus.SUCCESS) && (keyVal <= endVal)) {
+
+                /* Change the data. */
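+                /*
+                 * In a sorted-duplicates database the data is part of the
+                 * record's identity, so delete and re-insert rather than
+                 * updating in place.
+                 */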
+                removeExpectedData(expectedData, d, key, data, expectCommit);
+                data.setData(TestUtils.getTestArray(keyVal + increment));
+                cursors[d].delete();
+                cursors[d].put(key, data);
+                addExpectedData(expectedData, d, key, data, expectCommit);
+                modCount++;
+
+                status = cursors[d].getNext(key, data, LockMode.DEFAULT);
+
+                if (status == OperationStatus.SUCCESS) {
+                    keyVal = TestUtils.getTestVal(key.getData());
+                }
+            }
+            /* Make sure we modify something! */
+            assertTrue(modCount > 0);
+        }
+
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i].close();
+        }
+    }
+
+    /**
+     * Print the contents of the databases for debugging.
+     */
+    protected void dumpData(int numDbs)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+	if (btreeComparisonFunction != null) {
+	    dbConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparisonFunction.getClass());
+	}
+        dbConfig.setSortedDuplicates(true);
+	dbConfig.setTransactional(true);
+        for (int d = 0; d < numDbs; d++) {
+            Database checkDb = env.openDatabase(null, DB_NAME + d, dbConfig);
+            Cursor myCursor = checkDb.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            OperationStatus status =
+		myCursor.getFirst(key, data, LockMode.DEFAULT);
+            while (status == OperationStatus.SUCCESS) {
+                System.out.println("Database " + d +
+                                   " seen = " +
+                                   TestUtils.dumpByteArray(key.getData()) +
+                                   "/" +
+                                   TestUtils.dumpByteArray(data.getData()));
+                status = myCursor.getNext(key, data, LockMode.DEFAULT);
+            }
+            myCursor.close();
+        }
+    }
+
+    /**
+     * Print the contents of the expected map for debugging.
+     */
+    protected void dumpExpected(Map<TestData, Set<TestData>> expectedData)
+        throws DatabaseException {
+        System.out.println("Expected = " );
+        Iterator<Set<TestData>> iter = expectedData.values().iterator();
+        while (iter.hasNext()) {
+            Set<TestData> dataSet = iter.next();
+            Iterator<TestData> dataIter = dataSet.iterator();
+            while (dataIter.hasNext()) {
+                TestData t = dataIter.next();
+                System.out.println(t);
+            }
+        }
+    }
+
+    protected class TestData {
+        public int dbNum;
+        public byte[] key;
+        public byte[] data;
+
+        TestData(int dbNum, DatabaseEntry keyDbt, DatabaseEntry dataDbt) {
+            this.dbNum = dbNum;
+            key = keyDbt.getData();
+            if (dataDbt == null) {
+                dataDbt = new DatabaseEntry();
+                dataDbt.setData(new byte[1]);
+            }
+            data = dataDbt.getData();
+        }
+
+        TestData(int dbNum, byte[] key) {
+            this.dbNum = dbNum;
+            this.key = key;
+        }
+
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (!(o instanceof TestData)) {
+                return false;
+            }
+
+            TestData other = (TestData) o;
+            return (dbNum == other.dbNum) &&
+                Arrays.equals(key, other.key) &&
+                Arrays.equals(data, other.data);
+        }
+
+        public String toString() {
+            if (data == null) {
+                return "db=" + dbNum +
+                    " k=" + TestUtils.dumpByteArray(key);
+            } else {
+                return "db=" + dbNum +
+                    " k=" + TestUtils.dumpByteArray(key) +
+                    " d=" + TestUtils.dumpByteArray(data);
+            }
+        }
+
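+        /* toString encodes dbNum, key and data, so equal objects hash alike. */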
+        public int hashCode() {
+            return toString().hashCode();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/Rollback2PCTest.java b/test/com/sleepycat/je/recovery/Rollback2PCTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b350ce5f43fc107e33b4e34d13f2e92d2285bf60
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/Rollback2PCTest.java
@@ -0,0 +1,134 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Rollback2PCTest.java,v 1.6.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.io.File;
+
+import javax.transaction.xa.XAException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogUtils.XidImpl;
+import com.sleepycat.je.util.TestUtils;
+
+public class Rollback2PCTest extends TestCase {
+    private final File envHome;
+
+    public Rollback2PCTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    @Override
+    public void setUp() {
+        try {
+            TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+    }
+
+    @Override
+    public void tearDown() {
+        try {
+            TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+    }
+
+    /**
+     * Verifies a bug fix to a problem that occurs when aborting a prepared txn
+     * after recovery.  During recovery, we were counting the old version of an
+     * LN as obsolete when replaying the prepared txn LN.  But if that txn
+     * aborts later, the old version becomes active.  The fix is to use inexact
+     * counting.  [#17022]
+     */
+    public void testLogCleanAfterRollbackPrepared()
+        throws DatabaseException, XAException {
+
+        /* Set up the environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
+        envConfig.setConfigParam
+            (EnvironmentConfig.CLEANER_MIN_UTILIZATION, "90");
+        XAEnvironment xaEnv = new XAEnvironment(envHome, envConfig);
+
+        /* Set up the database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database db = xaEnv.openDatabase(null, "foo", dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(1, key);
+        IntegerBinding.intToEntry(99, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
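+        /*
+         * Flip the log several times so the committed version of the record
+         * is isolated in older files that the cleaner can process later in
+         * the test.
+         */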
+        DbInternal.envGetEnvironmentImpl(xaEnv).forceLogFileFlip();
+        DbInternal.envGetEnvironmentImpl(xaEnv).forceLogFileFlip();
+        DbInternal.envGetEnvironmentImpl(xaEnv).forceLogFileFlip();
+
+        /*
+         * Start an XA transaction and add a record.  Then crash the
+         * environment.
+         */
+        XidImpl xid = new XidImpl(1, "FooTxn".getBytes(), null);
+        Transaction preCrashTxn = xaEnv.beginTransaction(null, null);
+        xaEnv.setXATransaction(xid, preCrashTxn);
+        IntegerBinding.intToEntry(100, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(preCrashTxn, key, data));
+        db.close();
+        xaEnv.prepare(xid);
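+        /* Flush the log so the prepare record survives the simulated crash. */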
+        DbInternal.envGetEnvironmentImpl(xaEnv).getLogManager().flush();
+
+        /* Crash */
+        DbInternal.envGetEnvironmentImpl(xaEnv).abnormalClose();
+        xaEnv = null;
+
+        /* Recover */
+        envConfig.setAllowCreate(false);
+        xaEnv = new XAEnvironment(envHome, envConfig);
+
+        /* Rollback. */
+        xaEnv.rollback(xid);
+        
+        /* Force log cleaning. */
+        CheckpointConfig force = new CheckpointConfig();
+        force.setForce(true);
+        xaEnv.checkpoint(force);
+        xaEnv.cleanLog();
+        xaEnv.checkpoint(force);
+
+        /* Close and re-open, ensure we can read the original record. */
+        xaEnv.close();
+        xaEnv = new XAEnvironment(envHome, envConfig);
+        db = xaEnv.openDatabase(null, "foo", dbConfig);
+        /* Before the fix, the get() caused a LogFileNotFound. */
+        assertEquals(OperationStatus.SUCCESS, db.get(null, key, data, null));
+        assertEquals(99, IntegerBinding.entryToInt(data));
+        db.close();
+        xaEnv.close();
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java b/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..a273dad041cefdd6e54a90c205cd1d883eb3ad91
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CommitEntry.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/*
+ * A Commit entry signals that some records should be moved from the
+ * not-yet-committed sets to the expected set.
+ *
+ * Note that this uses key and data values rather than node ids to check
+ * existence, so a test that re-uses the same key and data values may not
+ * work correctly. This could be enhanced in the future to use
+ * node ids.
+ */
+
+public class CommitEntry extends LogEntryInfo {
+    private long txnId;
+
+    CommitEntry(long lsn, long txnId) {
+        super(lsn, 0, 0);
+        this.txnId = txnId;
+    }
+
+    @Override
+    public void updateExpectedSet
+        (Set<TestData>  useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        Long mapKey = new Long(txnId);
+
+        /* Add any new records to the expected set. */
+        Set<TestData> records = newUncommittedRecords.get(mapKey);
+        if (records != null) {
+            Iterator<TestData> iter = records.iterator();
+            while (iter.hasNext()) {
+                useExpected.add(iter.next());
+            }
+        }
+
+        /* Remove any deleted records from expected set. */
+        records = deletedUncommittedRecords.get(mapKey);
+        if (records != null) {
+            Iterator<TestData> iter = records.iterator();
+            while (iter.hasNext()) {
+                useExpected.remove(iter.next());
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java b/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java
new file mode 100644
index 0000000000000000000000000000000000000000..dff70b726da64828e1cd943d096002f0848a13b6
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java
@@ -0,0 +1,179 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EntryTrackerReader.java,v 1.11.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileReader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.entry.DeletedDupLNLogEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * EntryTrackerReader collects a list of EntryInfo describing all log entries
+ * in the truncated portion of a log.  It lets the test know where to do a log
+ * truncation and remembers whether an inserted or deleted record was seen, in
+ * order to update the test's set of expected records.
+ */
+public class EntryTrackerReader extends FileReader {
+
+    /*
+     * entryInfo holds one LogEntryInfo for each entry in the truncated
+     * area of the log.
+     */
+    private List<LogEntryInfo> entryInfo;
+    private DatabaseEntry dbt = new DatabaseEntry();
+    private LogEntry useLogEntry;
+    private LogEntryType useLogEntryType;
+    private boolean isCommit;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public EntryTrackerReader(EnvironmentImpl env,
+                              long startLsn,
+                              List<LogEntryInfo> entryInfo) // EntryInfo
+	throws IOException, DatabaseException {
+
+        super(env, 2000, true, startLsn, null,
+	      -1, DbLsn.NULL_LSN);
+
+        this.entryInfo = entryInfo;
+    }
+
+    /**
+     * @return true if this is a targeted entry that should be processed.
+     */
+    protected boolean isTargetEntry() {
+        byte logEntryTypeNumber = currentEntryHeader.getType();
+        isCommit = false;
+        boolean targeted = true;
+
+        useLogEntryType = null;
+
+        if (LogEntryType.LOG_LN.equalsType(logEntryTypeNumber)) {
+            useLogEntryType = LogEntryType.LOG_LN;
+        } else if (LogEntryType.LOG_LN_TRANSACTIONAL.equalsType(
+                                                        logEntryTypeNumber)) {
+            useLogEntryType = LogEntryType.LOG_LN_TRANSACTIONAL;
+        } else if (LogEntryType.LOG_DEL_DUPLN.equalsType(logEntryTypeNumber)) {
+            useLogEntryType = LogEntryType.LOG_DEL_DUPLN;
+        } else if (LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL.equalsType(
+                                                     logEntryTypeNumber)) {
+            useLogEntryType = LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL;
+        } else if (LogEntryType.LOG_TXN_COMMIT.equalsType(logEntryTypeNumber)) {
+            useLogEntryType = LogEntryType.LOG_TXN_COMMIT;
+            isCommit = true;
+        } else {
+            /*
+             * Just make note, no need to process the entry, nothing to record
+             * besides the LSN. Note that the offset has not been bumped by
+             * the FileReader, so use nextEntryOffset.
+             */
+            entryInfo.add(new LogEntryInfo(DbLsn.makeLsn(readBufferFileNum,
+                                                         nextEntryOffset),
+                                           0, 0));
+            targeted = false;
+        }
+
+        if (useLogEntryType != null) {
+            useLogEntry = useLogEntryType.getSharedLogEntry();
+        }
+        return targeted;
+    }
+
+    /**
+     * This log entry has data which affects the expected set of records.
+     * We need to save each lsn and determine whether the value of the
+     * log entry should affect the expected set of records. For
+     * non-transactional entries, the expected set is affected right away.
+     * For transactional entries, we defer updates of the expected set until
+     * a commit is seen.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        /*
+         * Note that the offset has been bumped, so use currentEntryOffset
+         * for the LSN.
+         */
+        long lsn = DbLsn.makeLsn(readBufferFileNum, currentEntryOffset);
+        useLogEntry.readEntry(currentEntryHeader,
+                              entryBuffer,
+                              true); // readFullItem
+
+        boolean isTxnal = useLogEntryType.isTransactional();
+        long txnId = useLogEntry.getTransactionId();
+
+        if (isCommit) {
+
+            /*
+             * The txn id in a single item log entry is embedded within
+             * the item.
+             */
+            txnId = ((TxnCommit) useLogEntry.getMainItem()).getId();
+            entryInfo.add(new CommitEntry(lsn, txnId));
+        } else if (useLogEntry instanceof DeletedDupLNLogEntry) {
+
+            /* This log entry is a deleted dup LN. */
+            DeletedDupLNLogEntry delDupLogEntry =
+                (DeletedDupLNLogEntry) useLogEntry;
+            dbt.setData(delDupLogEntry.getKey());
+            int keyValue = IntegerBinding.entryToInt(dbt);
+            dbt.setData(delDupLogEntry.getDupKey());
+            int dataValue = IntegerBinding.entryToInt(dbt);
+
+            if (isTxnal) {
+                entryInfo.add(new TxnalDeletedEntry(lsn, keyValue,
+                                                    dataValue, txnId));
+            } else {
+                entryInfo.add(new NonTxnalDeletedEntry(lsn, keyValue,
+                                                       dataValue));
+            }
+        } else {
+            LNLogEntry lnLogEntry = (LNLogEntry) useLogEntry;
+            byte[] keyArray = lnLogEntry.getKey();
+            dbt.setData(keyArray);
+            int keyValue = IntegerBinding.entryToInt(dbt);
+            byte[] dataArray = lnLogEntry.getLN().getData();
+
+            if (dataArray == null) {
+                /* This log entry is a deleted, non-dup LN. */
+                if (isTxnal) {
+                    entryInfo.add(new TxnalDeletedEntry(lsn, keyValue, -1,
+                                                        txnId));
+                } else {
+                    entryInfo.add(new NonTxnalDeletedEntry(lsn, keyValue, -1));
+                }
+            } else {
+                /* This log entry is a new LN. */
+                dbt.setData(dataArray);
+                int dataValue = IntegerBinding.entryToInt(dbt);
+                if (isTxnal) {
+                    entryInfo.add(new TxnalEntry(lsn, keyValue, dataValue,
+                                                 txnId));
+                } else {
+                    entryInfo.add(new NonTxnalEntry(lsn, keyValue, dataValue));
+                }
+            }
+        }
+
+        return true;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java b/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..468f208fa7d3b81544f50071f9f6cf02cd86bcda
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LogEntryInfo.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/*
+ * A LogEntryInfo supports stepwise recovery testing, where the log is
+ * systematically truncated and recovery is executed. At each point in the log,
+ * there is a set of records that we expect to see. The LogEntryInfo
+ * encapsulates enough information about the current log entry so we can
+ * update the expected set accordingly.
+ */
+
+public class LogEntryInfo {
+    private long lsn;
+    int key;
+    int data;
+
+    LogEntryInfo(long lsn,
+              int key,
+              int data) {
+        this.lsn = lsn;
+        this.key = key;
+        this.data = data;
+    }
+
+    /*
+     * Subclasses implement this accordingly. For example, a LogEntryInfo
+     * that represents a non-txnal LN record adds that key/data to the
+     * expected set, while a txnal deleted LN record removes the record
+     * from the expected set at commit.
+     *
+     * The default action is that the expected set is not changed.
+     */
+    public void updateExpectedSet
+        (Set<TestData> expectedSet, 
+         Map<Long, Set<TestData>> newUncommittedRecords, 
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {}
+
+    public String toString() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("type=").append(this.getClass().getName());
+        sb.append("lsn=").append(DbLsn.getNoFormatString(lsn));
+        sb.append(" key=").append(key);
+        sb.append(" data=").append(data);
+        return sb.toString();
+    }
+
+    public long getLsn() {
+        return lsn;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java b/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..4cb024bc32d357637f82c6da3a098483b6fe4c78
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java
@@ -0,0 +1,55 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NonTxnalDeletedEntry.java,v 1.7.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+
+/*
+ * A non-transactional deleted log entry removes its record from the expected
+ * set right away.
+ */
+
+class NonTxnalDeletedEntry extends LogEntryInfo {
+    NonTxnalDeletedEntry(long lsn,
+                  int key,
+                  int data) {
+        super(lsn, key, data);
+    }
+
+    /* Delete this item from the expected set. */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected, 
+         Map<Long, Set<TestData>> newUncommittedRecords, 
+         Map<Long, Set<TestData>>  deletedUncommittedRecords) {
+
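+        /*
+         * A data value of -1 means the deleted LN was not a duplicate
+         * (see EntryTrackerReader), so match on the key alone.
+         */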
+        Iterator<TestData> iter = useExpected.iterator();
+        while (iter.hasNext()) {
+            TestData setItem = iter.next();
+            int keyValInSet = IntegerBinding.entryToInt(setItem.getKey());
+            if (keyValInSet == key) {
+                if (data == -1) {
+                    /* non-dup case, remove the matching key. */
+                    iter.remove();
+                    break;
+                } else {
+                    int dataValInSet = 
+                        IntegerBinding.entryToInt(setItem.getData());
+                    if (dataValInSet == data) {
+                        iter.remove();
+                        break;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java b/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..4b615e438b72628fdc9b3ca6b0e626bbe859d2bf
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NonTxnalEntry.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A non-transactional log entry should add itself to the expected set.
+ */
+
+public class NonTxnalEntry extends LogEntryInfo {
+    NonTxnalEntry(long lsn,
+                  int key,
+                  int data) {
+        super(lsn, key, data);
+    }
+
+    /*
+     * Add this key/data pair to the expected set right away, since
+     * non-transactional changes take effect immediately.
+     */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected, 
+         Map<Long, Set<TestData>> newUncommittedRecords, 
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        useExpected.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TestData.java b/test/com/sleepycat/je/recovery/stepwise/TestData.java
new file mode 100644
index 0000000000000000000000000000000000000000..b72880d4af4225614c2381ac991d647229eaf8d9
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TestData.java
@@ -0,0 +1,58 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TestData.java,v 1.7 2008/01/07 14:29:11 cwl Exp $
+ */
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Arrays;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Wrapper class that encapsulates a record in a database used for recovery
+ * testing.
+ */
+public class TestData {
+    private DatabaseEntry key;
+    private DatabaseEntry data;
+
+    public TestData(DatabaseEntry key, DatabaseEntry data) {
+        this.key = new DatabaseEntry(key.getData());
+        this.data = new DatabaseEntry(data.getData());
+    }
+
+    public boolean equals(Object o) {
+        if (this == o)
+            return true;
+        if (!(o instanceof TestData))
+            return false;
+
+        TestData other = (TestData) o;
+        return Arrays.equals(key.getData(), other.key.getData()) &&
+               Arrays.equals(data.getData(), other.data.getData());
+    }
+
+    public String toString() {
+        return  " k=" + IntegerBinding.entryToInt(key) +
+                " d=" + IntegerBinding.entryToInt(data);
+    }
+
+    public int hashCode() {
+        return toString().hashCode();
+    }
+
+    public DatabaseEntry getKey() {
+        return key;
+    }
+
+    public DatabaseEntry getData() {
+        return data;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java b/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..852e5ba8fb165743e8a4630479a815ca59e421f8
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnalDeletedEntry.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A transactional deleted log entry puts itself into the not-yet-committed
+ * deleted set; the record is removed from the expected set only when its
+ * commit is seen.
+ */
+
+public class TxnalDeletedEntry extends LogEntryInfo {
+    private long txnId;
+
+    TxnalDeletedEntry(long lsn,
+                      int key,
+                      int data,
+                      long txnId) {
+        super(lsn, key, data);
+        this.txnId = txnId;
+    }
+
+    /*
+     * Record this key/data pair in the per-txn deleted set; CommitEntry
+     * removes it from the expected set when the commit is replayed.
+     */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        Long mapKey = new Long(txnId);
+        Set<TestData> records = deletedUncommittedRecords.get(mapKey);
+        if (records == null) {
+            records = new HashSet<TestData>();
+            deletedUncommittedRecords.put(mapKey, records);
+        }
+        records.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java b/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..cf662b6a097f821295319f49c255de2dec2b4f72
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnalEntry.java,v 1.8.2.2 2010/01/04 15:30:44 cwl Exp $
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A transactional log entry puts itself into the not-yet-committed set;
+ * it is moved to the expected set when its commit is seen.
+ */
+
+public class TxnalEntry extends LogEntryInfo {
+    private long txnId;
+
+    TxnalEntry(long lsn,
+               int key,
+               int data,
+               long txnId) {
+        super(lsn, key, data);
+        this.txnId = txnId;
+    }
+
+    /*
+     * Record this key/data pair in the per-txn uncommitted set; CommitEntry
+     * moves it to the expected set when the commit is replayed.
+     */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        Long mapKey = new Long(txnId);
+        Set<TestData> records = newUncommittedRecords.get(mapKey);
+        if (records == null) {
+            records = new HashSet<TestData>();
+            newUncommittedRecords.put(mapKey, records);
+        }
+        records.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/rep.properties b/test/com/sleepycat/je/rep.properties
new file mode 100644
index 0000000000000000000000000000000000000000..08a24690ed4d4c62a7b5c9b615ab665daac0aeff
--- /dev/null
+++ b/test/com/sleepycat/je/rep.properties
@@ -0,0 +1,7 @@
+# $Id: rep.properties,v 1.3 2007/12/19 17:27:27 linda Exp $
+je.log.totalBufferBytes=7001
+je.log.numBuffers=200
+je.rep.node.foo=address:localhost:3000,isPeer:true
+je.rep.node.bar=address:localhost:3001,isPeer:false
+je.rep.node.baz=address:localhost:3002,isPeer:false
+je.rep.local.address = 127.0.0.1:9999
diff --git a/test/com/sleepycat/je/test/AtomicPutTest.java b/test/com/sleepycat/je/test/AtomicPutTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..3b0844d1e4c4a08fbdf3e5abef2da68f2494b648
--- /dev/null
+++ b/test/com/sleepycat/je/test/AtomicPutTest.java
@@ -0,0 +1,295 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: AtomicPutTest.java,v 1.17.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.junit.JUnitMethodThread;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests put() (overwrite) and putNoOverwrite() to check that they work
+ * atomically under concurrent access.  These tests were added after put()
+ * and putNoOverwrite() were changed to work atomically.  The history of the
+ * bugs is below.
+ *
+ *  Old Algorithm
+ *  -------------
+ *  put(X, Y):
+ *      if duplicates:
+ *          return insertDup(X, Y)
+ *      else:
+ *          search(X)
+ *          if SUCCESS:
+ *              putCurrent(Y)
+ *              return SUCCESS
+ *          else:
+ *              return insert(X,Y)
+ *
+ *  putNoOverwrite(X, Y):
+ *      search(X)
+ *      if SUCCESS:
+ *          return KEYEXIST
+ *      else:
+ *          if duplicates:
+ *              insertDup(X, Y)
+ *          else:
+ *              insert(X, Y)
+ *
+ *  Bug #1: In put with duplicates: Returned KEYEXIST when trying to overwrite
+ *  a duplicate-duplicate (a key/data pair that already exists).
+ *
+ *  Bug #2: In put without duplicates: Returned KEYEXIST if another thread
+ *  inserted in between a search that returned NOTFOUND and the insert().
+ *
+ *  Bug #3: In putNoOverwrite with duplicates:  Added a duplicate if another
+ *  thread inserted in between a search that returned NOTFOUND and the
+ *  insert().
+ *
+ *  New Algorithm
+ *  -------------
+ *  put(X, Y):
+ *      if duplicates:
+ *          insertDup(X, Y)
+ *      else:
+ *          insert(X, Y)
+ *      if KEYEXIST:
+ *          putCurrent(Y)
+ *      return SUCCESS
+ *
+ *  putNoOverwrite(X, Y):
+ *      return insert(X, Y)
+ *
+ *  Potential Bug #4: In put, if the lock is not acquired: Another thread may
+ *  overwrite in between the insert and the putCurrent.  But then putCurrent
+ *  wouldn't be able to get a write lock, right?  I can't think of how a
+ *  problem could occur.
+ *
+ *  Potential Bug #5: In putNoOverwrite, if we need to lock an existing record
+ *  in order to return KEYEXIST, we may cause more deadlocks than is necessary.
+ *
+ *  Low level operations
+ *  --------------------
+ *  insert(X, Y):    insert if key is not present, else return KEYEXIST
+ *  insertDup(X, Y): insert if key and data are not present, else return
+ *  KEYEXIST
+ *
+ *  Both insert methods obtain a lock on the existing record when returning
+ *  KEYEXIST, to support overwrite.
+ */
+public class AtomicPutTest extends TxnTestCase {
+
+    private static final int MAX_KEY = 400; //50000;
+
+    public static Test suite() {
+        return txnTestSuite(AtomicPutTest.class, null,
+                            //null);
+                            new String[] {TxnTestCase.TXN_USER});
+    }
+
+    private int nextKey;
+    private Database db;
+
+    /**
+     * Closes databases, then calls the super.tearDown to close the env.
+     */
+    public void tearDown()
+        throws Exception {
+
+        if (db != null) {
+            try {
+                db.close();
+            } catch (Exception e) {}
+            db = null;
+        }
+        super.tearDown();
+    }
+
+    /**
+     * Tests that put (overwrite), with no duplicates allowed, never causes a
+     * KEYEXIST status return.
+     */
+    public void testOverwriteNoDuplicates()
+	throws Throwable {
+
+        String method = "runOverwriteNoDuplicates";
+        JUnitMethodThread tester1 = new JUnitMethodThread(method + "-t1",
+                                                          method, this);
+        JUnitMethodThread tester2 = new JUnitMethodThread(method + "-t2",
+                                                          method, this);
+	db = openDb("foo", false);
+        tester1.start();
+        tester2.start();
+        finishTests(new JUnitThread[] { tester1, tester2 });
+        db.close();
+        db = null;
+    }
+
+    /**
+     * The old put() implementation first did a search, then inserted if
+     * NOTFOUND was returned by the search.  This test tries to create the
+     * situation where one thread does a search on a key that returns NOTFOUND
+     * and another thread immediately afterwards inserts the same key, before
+     * the first thread has a chance to start the insert.  Before the fix to
+     * make put() atomic, the first thread would have returned KEYEXIST from
+     * put(), and that should never happen.
+     */
+    public void runOverwriteNoDuplicates()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (nextKey < MAX_KEY) {
+            /*
+             * Attempt to insert the same key as was just inserted by the other
+             * thread.  We need to keep incrementing the key, since the error
+             * only occurs for a non-existing key value.
+             */
+            int val = nextKey++ / 2;
+            Transaction txn = txnBegin();
+            key.setData(TestUtils.getTestArray(val));
+            data.setData(TestUtils.getTestArray(val));
+	    boolean commit = true;
+	    try {
+		OperationStatus status = db.put(txn, key, data);
+		assertEquals("Key=" + val, OperationStatus.SUCCESS, status);
+	    } catch (DeadlockException DE) {
+		commit = false;
+	    }
+	    if (commit) {
+		txnCommit(txn);
+	    } else {
+		txnAbort(txn);
+	    }
+        }
+    }
+
+    /**
+     * Tests that putNoOverwrite, with duplicates allowed, never inserts a
+     * duplicate.
+     */
+    public void testNoOverwriteWithDuplicates()
+	throws Throwable {
+
+        String method = "runNoOverwriteWithDuplicates";
+        JUnitMethodThread tester1 = new JUnitMethodThread(method + "-t1",
+                                                          method, this);
+        JUnitMethodThread tester2 = new JUnitMethodThread(method + "-t2",
+                                                          method, this);
+	db = openDb("foo", true);
+        tester1.start();
+        tester2.start();
+        finishTests(new JUnitThread[] { tester1, tester2 });
+        db.close();
+        db = null;
+    }
+
+    /**
+     * The old putNoOverwrite() inserted a duplicate after a search returned
+     * NOTFOUND, when duplicates were configured.  This test tries to create
+     * the situation where the second thread inserting with a given key inserts
+     * a duplicate, which should never happen since we're using
+     * putNoOverwrite().
+     */
+    public void runNoOverwriteWithDuplicates()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (nextKey < MAX_KEY) {
+            /*
+             * Attempt to insert a duplicate for the same key as was just
+             * inserted by the other thread.  Each thread uses a different data
+             * value (modulo 2) so to avoid a duplicate-duplicate, which would
+             * not be inserted.
+             */
+            int val = nextKey++;
+            int keyVal = val / 2;
+            int dataVal = val % 2;
+            key.setData(TestUtils.getTestArray(keyVal));
+            data.setData(TestUtils.getTestArray(dataVal));
+            while (true) {
+                Transaction txn = txnBegin();
+                boolean commit = true;
+                try {
+                    db.putNoOverwrite(txn, key, data);
+                } catch (DeadlockException DE) {
+                    commit = false;
+                }
+                if (commit) {
+                    txnCommit(txn);
+                    break;
+                } else {
+                    txnAbort(txn);
+                }
+            }
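+            /*
+             * Verify that exactly one record exists for this key; a count
+             * greater than 1 would mean putNoOverwrite inserted a duplicate.
+             */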
+            Cursor cursor = db.openCursor(null, null);
+            try {
+                OperationStatus status = cursor.getSearchKey(key, data,
+                                                             LockMode.DEFAULT);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(1, cursor.count());
+            } finally {
+                cursor.close();
+            }
+        }
+    }
+
+    /**
+     * Opens a database.
+     */
+    private Database openDb(String name, boolean dups)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    /**
+     * When one thread throws an assertion, the other threads need to be
+     * stopped, otherwise we will see side effects that mask the real problem.
+     */
+    private void finishTests(JUnitThread[] threads)
+	throws Throwable {
+
+        Throwable ex = null;
+        for (int i = 0; i < threads.length; i += 1) {
+            try {
+                threads[i].finishTest();
+            } catch (Throwable e) {
+                if (ex == null) {
+                    ex = e;
+                }
+            }
+        }
+        if (ex != null) {
+            throw ex;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/DeferredWriteTest.java b/test/com/sleepycat/je/test/DeferredWriteTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e1816e3d1b46c9313eb5d740fff185ae116b9561
--- /dev/null
+++ b/test/com/sleepycat/je/test/DeferredWriteTest.java
@@ -0,0 +1,1533 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DeferredWriteTest.java,v 1.18.2.5 2010/02/05 14:22:24 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.logging.Level;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.PreloadConfig;
+import com.sleepycat.je.PreloadStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+
+public class DeferredWriteTest extends TestCase {
+    private static boolean DEBUG = false;
+    private static String DBNAME = "foo";
+    private static String DBNAME2 = "foo2";
+    private static DatabaseEntry MAIN_KEY_FOR_DUPS =
+        new DatabaseEntry(new byte[10]);
+
+    private static final CheckpointConfig CHECKPOINT_FORCE_CONFIG =
+        new CheckpointConfig();
+
+    static {
+        CHECKPOINT_FORCE_CONFIG.setForce(true);
+    }
+
+    private static final StatsConfig STATS_CLEAR_CONFIG = new StatsConfig();
+
+    static {
+        STATS_CLEAR_CONFIG.setClear(true);
+    }
+
+    private File envHome;
+    private Environment env;
+    private boolean truncateOrRemoveDone;
+    private boolean dups;
+
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        addTests(allTests, false); // no dups
+        addTests(allTests, true); // dups
+        return allTests;
+    }
+
+    private static void addTests(TestSuite allTests, boolean dups) {
+        TestSuite suite = new TestSuite(DeferredWriteTest.class);
+        Enumeration e = suite.tests();
+        while (e.hasMoreElements()) {
+            DeferredWriteTest test = (DeferredWriteTest) e.nextElement();
+            test.dups = dups;
+            allTests.addTest(test);
+        }
+    }
+
+    public DeferredWriteTest()
+        throws Exception {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+	throws Exception {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(getName() + (dups ? ":dups" : ""));
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.err.println("TearDown: " + e);
+            }
+        }
+        env = null;
+        //*
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        //*/
+    }
+
+    private EnvironmentConfig getEnvConfig(boolean transactional) {
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(transactional);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "4");
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "4");
+        /* Force correct LN obsolete size calculation during recovery. */
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(), "true");
+        if (DEBUG) {
+            envConfig.setConfigParam("java.util.logging.ConsoleHandler.on",
+                                     "true");
+            envConfig.setConfigParam("java.util.logging.level.cleaner",
+                                     "SEVERE");
+        }
+
+        return envConfig;
+    }
+
+    private void closeEnv(boolean normalClose)
+        throws DatabaseException {
+
+        closeEnv(normalClose,
+                 true /*expectAccurateObsoleteLNCount*/,
+                 true /*expectAccurateDbUtilization*/);
+    }
+
+    /**
+     * @param expectAccurateObsoleteLNCount should be false only when an LN
+     * cannot be counted obsolete during recovery as explained in
+     * RecoveryManager.redoUtilizationInfo.
+     *
+     * @param expectAccurateDbUtilization should be false only when DB info is
+     * not accurate because INs are evicted and then recovered without a
+     * checkpoint.  The provisional INs are counted obsolete by recovery in the
+     * per-DB info because the MapLN is not flushed, but not in the per-file
+     * info because the FileSummaryLNs are flushed by eviction.
+     */
+    private void closeEnv(boolean normalClose,
+                          boolean expectAccurateObsoleteLNCount,
+                          boolean expectAccurateDbUtilization)
+        throws DatabaseException {
+
+        if (env != null) {
+
+            /* Stop daemons first to stop utilization from changing. */
+            try {
+                DbInternal.envGetEnvironmentImpl(env).stopDaemons();
+            } catch (InterruptedException e) {
+                throw new DatabaseException(e);
+            }
+
+            /*
+             * We pass expectAccurateDbUtilization as false when
+             * truncateOrRemoveDone, because the database utilization info for
+             * that database is now gone.
+             */
+            VerifyUtils.verifyUtilization
+                (DbInternal.envGetEnvironmentImpl(env),
+                 expectAccurateObsoleteLNCount,
+                 true,                   // expectAccurateObsoleteLNSize,
+                 expectAccurateDbUtilization &&
+                 !truncateOrRemoveDone); // expectAccurateDbUtilization
+
+            if (normalClose) {
+                env.close();
+            } else {
+                DbInternal.envGetEnvironmentImpl(env).abnormalClose();
+            }
+            env = null;
+        }
+    }
+
+    private Database createDb(boolean deferredWrite)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setDeferredWrite(deferredWrite);
+        dbConfig.setSortedDuplicates(dups);
+
+        Database db = env.openDatabase(null, DBNAME, dbConfig);
+
+        assertEquals
+            (deferredWrite,
+             DbInternal.dbGetDatabaseImpl(db).isDurableDeferredWrite());
+        assertEquals
+            (deferredWrite,
+             DbInternal.dbGetDatabaseImpl(db).isDeferredWriteMode());
+        assertEquals
+            (false,
+             DbInternal.dbGetDatabaseImpl(db).isTemporary());
+
+        return db;
+    }
+
+    private Database createTempDb()
+        throws DatabaseException {
+
+        return createTempDb(DBNAME);
+    }
+
+    private Database createTempDb(String dbName)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTemporary(true);
+        dbConfig.setSortedDuplicates(dups);
+
+        Database db = env.openDatabase(null, dbName, dbConfig);
+
+        assertEquals
+            (false,
+             DbInternal.dbGetDatabaseImpl(db).isDurableDeferredWrite());
+        assertEquals
+            (true,
+             DbInternal.dbGetDatabaseImpl(db).isDeferredWriteMode());
+        assertEquals
+            (true,
+             DbInternal.dbGetDatabaseImpl(db).isTemporary());
+
+        return db;
+    }
+
+    /**
+     * Check that all INs are removed from the INList for a DB that is removed
+     * before it is sync'ed (or checkpointed).  Before the bug fix, INs were
+     * not removed if the DB root IN was never logged (was still null).  This
+     * caused a DatabaseException when evicting, because the evictor expects no
+     * INs for deleted DBs on the INList.
+     */
+    public void testRemoveNonPersistentDbSR15317()
+	throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        /* Disable compressor for test predictability. */
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+        /* Insert some data to cause eviction later. */
+        insert(db,
+               null,          // txn
+               1,             // start
+               30000,         // end
+               new HashSet(), // expected
+               false);        // useRandom
+        db.close();
+        env.removeDatabase(null, DBNAME);
+        truncateOrRemoveDone = true;
+
+        envConfig = env.getConfig();
+        /* Switch to a small cache to force eviction. */
+        envConfig.setCacheSize(96 * 1024);
+        env.setMutableConfig(envConfig);
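+        /*
+         * Before the fix, eviction threw a DatabaseException here because the
+         * INs of the removed (never-synced) DB were still on the INList.
+         */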
+        for (int i = 0; i < 10; i += 1) {
+            env.evictMemory();
+        }
+        closeEnv(true /*normalClose*/);
+    }
+
+    public void testEmptyDatabaseSR14744()
+	throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+        db.sync();
+        db.close();
+        env.sync();
+        closeEnv(true /*normalClose*/);
+    }
+
+    /**
+     * Check that deferred write db re-opens at expected state.
+     */
+    public void testCloseOpen()
+        throws Throwable {
+
+        HashSet expectedSet =
+            doCloseOpen(true,   /* useDeferredWrites */
+                        true,   /* doSync */
+                        1,      /* starting value */
+                        new HashSet()); /* initial ExpectedSet */
+        expectedSet =
+            doCloseOpen(false,  /* useDeferredWrites */
+                        true,   /* doSync */
+                        100,    /* starting value */
+                        expectedSet);
+        expectedSet =
+            doCloseOpen(true,   /* useDeferredWrites */
+                        true,   /* doSync */
+                        200,    /* starting value */
+                        expectedSet);
+    }
+
+    /**
+     * Check that after crashing without a close/sync/checkpoint, a deferred
+     * write DB does not contain the unflushed data.
+     */
+    public void testCloseOpenNoSync()
+        throws Throwable {
+
+        HashSet expectedSet =
+            doCloseOpen(true,   /* useDeferredWrites */
+                        false,  /* doSync */
+                        1,      /* starting value */
+                        new HashSet()); /* initial ExpectedSet */
+        expectedSet =
+            doCloseOpen(true,   /* useDeferredWrites */
+                        false,  /* doSync */
+                        100,    /* starting value */
+                        expectedSet);
+    }
+
+    /**
+     * Check that deferred write and durable databases re-open at expected
+     * state.
+     */
+    private HashSet doCloseOpen(boolean useDeferredWrite,
+                                boolean doSync,
+                                int startingValue,
+                                HashSet initialSet)
+        throws Throwable {
+
+	EnvironmentConfig envConfig = getEnvConfig(true);
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(useDeferredWrite);
+
+        /* We'll do inserts in two batches. */
+        HashSet expectedBatch1 = new HashSet();
+        expectedBatch1.addAll(initialSet);
+        HashSet expectedBatch2 = new HashSet();
+        HashSet finalExpectedSet = null;
+
+        int batch1Size = 40;
+        int batch2Size = 50;
+
+        /*
+         * Insert non-random values in two batches. Don't use random inserts in
+         * order to be sure we have a set of non-conflicting values for the
+         * test.
+         */
+        insert(db, null, startingValue, startingValue + batch1Size,
+               expectedBatch1, false);
+        checkExactContentMatch(db, expectedBatch1);
+        if (useDeferredWrite) {
+            db.sync();
+        }
+
+        /* Insert a second batch */
+        insert(db, null,
+               startingValue + batch1Size,
+               startingValue + batch2Size,
+               expectedBatch2, false);
+        expectedBatch2.addAll(expectedBatch1);
+        checkExactContentMatch(db, expectedBatch2);
+
+        /* Close/reopen, database should hold the expectedBatch2 set. */
+        if (doSync) {
+            db.close();
+            db = createDb(useDeferredWrite);
+            checkExactContentMatch(db, expectedBatch2);
+        }
+
+        /*
+         * Recover the environment. batch2 changes should show up even if the
+         * db was deferred write, because a sync is done when the database is
+         * closed.  batch2 changes should NOT show up only when doSync is
+         * false and deferred write is used.
+         *
+         * If a flush of INs occurred followed by an abnormal close and
+         * recovery, obsolete LNs will not always be counted correctly.
+         */
+        closeEnv(false /*normalClose*/,
+                 false /*expectAccurateObsoleteLNCount*/,
+                 true  /*expectAccurateDbUtilization*/);
+        env = new Environment(envHome, envConfig);
+
+        db = createDb(useDeferredWrite);
+
+        finalExpectedSet = (useDeferredWrite && !doSync) ?
+            expectedBatch1 : expectedBatch2;
+
+        checkExactContentMatch(db, finalExpectedSet);
+        db.close();
+        env.sync();
+
+        closeEnv(true  /*normalClose*/,
+                 false /*expectAccurateObsoleteLNCount*/,
+                 true  /*expectAccurateDbUtilization*/);
+
+        return finalExpectedSet;
+    }
+
+    /**
+     * Test that a checkpoint syncs a durable deferred-write DB.
+     */
+    public void testCheckpoint()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+
+        Database db = createDb(true);
+        HashSet expected = insertAndCheck(db);
+
+        env.checkpoint(CHECKPOINT_FORCE_CONFIG);
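+        /*
+         * Simulate a crash; the forced checkpoint above should have made the
+         * deferred-write data durable.
+         */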
+        closeEnv(false /*normalClose*/);
+        env = new Environment(envHome, envConfig);
+
+        db = createDb(true);
+        checkExactContentMatch(db, expected);
+        db.close();
+
+        closeEnv(true /*normalClose*/);
+    }
+
+    /**
+     * Test that a checkpoint does not sync a temp DB.
+     */
+    public void testCheckpointTemp()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+
+        Database db = createTempDb();
+        env.sync();
+        EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG);
+
+        insertAndCheck(db);
+
+        env.sync();
+        stats = env.getStats(STATS_CLEAR_CONFIG);
+
+        /* With a non-temp DB, more than 30 BINs are flushed. */
+        assertTrue(String.valueOf(stats.getNFullBINFlush()),
+                   stats.getNFullBINFlush() <= 2);
+        assertTrue(String.valueOf(stats.getNFullINFlush()),
+                   stats.getNFullINFlush() <= 4);
+        assertTrue(String.valueOf(stats.getNDeltaINFlush()),
+                   stats.getNDeltaINFlush() <= 2);
+
+        db.close();
+        closeEnv(true /*normalClose*/);
+    }
+
+    /**
+     * Check that temp db works in deferred write mode.
+     */
+    public void testTempIsDeferredWriteMode()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+        Database db = createTempDb();
+
+        long origEndOfLog = DbInternal.envGetEnvironmentImpl(env)
+                                      .getFileManager()
+                                      .getNextLsn();
+
+        insertAndCheck(db);
+
+        long endOfLog = DbInternal.envGetEnvironmentImpl(env)
+                                  .getFileManager()
+                                  .getNextLsn();
+
+        /* Check that no writing occurred after inserts. */
+        assertEquals("origEndOfLog=" + DbLsn.getNoFormatString(origEndOfLog) +
+                     " endOfLog=" + DbLsn.getNoFormatString(endOfLog),
+                     origEndOfLog, endOfLog);
+
+        db.close();
+        closeEnv(true /*normalClose*/);
+    }
+
+    /**
+     * Check that temp db is removed on close and by recovery.
+     */
+    public void testTempRemoval()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+
+        /* Create DB and close() to remove it. */
+        Database db = createTempDb(DBNAME);
+        insertAndCheck(db);
+        assertTrue(env.getDatabaseNames().contains(DBNAME));
+        db.close();
+        assertTrue(!env.getDatabaseNames().contains(DBNAME));
+
+        /*
+         * Create multiple DBs and run recovery to remove them.  Recovery keeps
+         * a set of temp DBs, and we want to make sure it removes all of them.
+         */
+        db = createTempDb(DBNAME);
+        Database db2 = createTempDb(DBNAME2);
+        insertAndCheck(db);
+        insertAndCheck(db2);
+        assertTrue(env.getDatabaseNames().contains(DBNAME));
+        assertTrue(env.getDatabaseNames().contains(DBNAME2));
+        closeEnv(false /*normalClose*/);
+        env = new Environment(envHome, envConfig);
+        assertTrue(!env.getDatabaseNames().contains(DBNAME));
+        assertTrue(!env.getDatabaseNames().contains(DBNAME2));
+
+        /*
+         * Test that recovery deletes a temp DB after several checkpoints.
+         * This test requires that the MapLN for every open temp DB is logged
+         * during each checkpoint interval.
+         */
+        db = createTempDb(DBNAME);
+        insertAndCheck(db);
+        assertTrue(env.getDatabaseNames().contains(DBNAME));
+        env.sync();
+        env.sync();
+        env.sync();
+        closeEnv(false /*normalClose*/);
+        env = new Environment(envHome, envConfig);
+        assertTrue(!env.getDatabaseNames().contains(DBNAME));
+
+        closeEnv(true /*normalClose*/);
+    }
+
+    public void testTempEvictionAndObsoleteCounting()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "128");
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "128");
+        /* Use a small cache to cause eviction. */
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        env = new Environment(envHome, envConfig);
+
+        /* Create DB and insert until 1000 INs are evicted. */
+        Database db = createTempDb(DBNAME);
+        int start;
+        for (start = 1;; start += 1000) {
+            insert(db,
+                   null,          // txn
+                   start,         // start
+                   start + 1000,  // end
+                   new HashSet(), // expected
+                   true);         // useRandom
+            EnvironmentStats stats = env.getStats(null);
+            if (stats.getNNodesExplicitlyEvicted() > 1000) {
+                break;
+            }
+        }
+
+        /*
+         * Update all records twice, to cause eviction and log multiple
+         * versions of the INs.
+         */
+        int lastStart = start;
+        for (start = 1; start <= lastStart; start += 1000) {
+            update(db,
+                   null,          // txn
+                   start,         // start
+                   start + 1000); // end
+        }
+        for (start = 1; start < lastStart; start += 1000) {
+            update(db,
+                   null,          // txn
+                   start,         // start
+                   start + 1000); // end
+        }
+
+        assertTrue(DbInternal.envGetEnvironmentImpl(env).
+                              getUtilizationProfile().
+                              getFileSummaryMap(true).
+                              get(0L).
+                              obsoleteINCount > 1000);
+
+        db.close();
+        closeEnv(true  /*normalClose*/,
+                 true /*expectAccurateObsoleteLNCount*/,
+                 false /*expectAccurateDbUtilization*/);
+    }
+
+    private HashSet insertAndCheck(Database db)
+        throws DatabaseException {
+
+        HashSet expected = new HashSet();
+        insert(db, null, 1, 100, expected, false);
+        checkExactContentMatch(db, expected);
+        return expected;
+    }
+
+    public void testRecoverNoSync()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        doRecover(envConfig,
+                  30,     /* numRecords */
+                  false,  /* syncBeforeRecovery. */
+                  false); /* expectEviction */
+    }
+
+    public void testRecoverSync()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        doRecover(envConfig,
+                  30,     /* numRecords */
+                  true,   /* syncBeforeRecovery. */
+                  false); /* expectEviction */
+    }
+
+    public void testRecoverNoSyncEvict()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        doRecover(envConfig,
+                  3000,   /* numRecords */
+                  false,  /* syncBeforeRecovery. */
+                  true);  /* expectEviction */
+    }
+
+    public void testRecoverSyncEvict()
+        throws Throwable {
+
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        doRecover(envConfig,
+                  3000,   /* numRecords */
+                  true,   /* syncBeforeRecovery. */
+                  true);  /* expectEviction */
+    }
+
+    private void doRecover(EnvironmentConfig envConfig,
+                           int numRecords,
+                           boolean syncBeforeRecovery,
+                           boolean expectEviction)
+        throws DatabaseException {
+
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+        HashSet expected = new HashSet();
+
+        /* Insert */
+        EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG);
+        insert(db, null, 1, numRecords, expected, true);
+        checkForEvictionActivity(expectEviction, /* evict activity */
+                                 expectEviction); /* cache miss */
+        checkExactContentMatch(db, expected);
+        checkForEvictionActivity(expectEviction, /* evict activity */
+                                 expectEviction); /* cache miss */
+
+        /*
+         * Optional sync; do not checkpoint because checkpoints include a
+         * sync of non-temporary DBs.
+         */
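+        /*
+         * Save the DB config so the database can be re-opened with the same
+         * settings after recovery.
+         */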
+        DatabaseConfig saveConfig = db.getConfig();
+        if (syncBeforeRecovery) {
+            db.sync();
+        }
+
+        /* Close without sync or checkpoint to force recovery.  */
+        closeEnv(false /*normalClose*/);
+
+        /* recover and re-open. */
+        env = new Environment(envHome, envConfig);
+        db = env.openDatabase(null, DBNAME, saveConfig);
+
+        /* Check the contents. */
+        HashSet useExpected = null;
+        if (syncBeforeRecovery) {
+            useExpected = expected;
+        } else {
+            useExpected = new HashSet();
+        }
+
+        checkExactContentMatch(db, useExpected);
+        db.close();
+
+        /*
+         * When eviction precedes the abnormal close and recovery, obsolete LNs
+         * and INs will not always be counted correctly.
+         */
+        closeEnv(true  /*normalClose*/,
+                 false /*expectAccurateObsoleteLNCount*/,
+                 false /*expectAccurateDbUtilization*/);
+    }
+
+    /**
+     * Performs a basic check of deferred-write w/duplicates for verifying the
+     * fix to duplicate logging on 3.2.x. [#15365]
+     */
+    public void testDups()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setDeferredWrite(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, DBNAME, dbConfig);
+
+        /* Insert {9,0} and {9,1}. */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(9, key);
+        IntegerBinding.intToEntry(0, data);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoDupData(null, key, data));
+        IntegerBinding.intToEntry(1, data);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoDupData(null, key, data));
+
+        /* Check that both exist. */
+        Cursor c = db.openCursor(null, null);
+        try {
+            assertSame(OperationStatus.SUCCESS,
+                       c.getNext(key, data, LockMode.DEFAULT));
+            assertEquals(9, IntegerBinding.entryToInt(key));
+            assertEquals(0, IntegerBinding.entryToInt(data));
+
+            assertSame(OperationStatus.SUCCESS,
+                       c.getNext(key, data, LockMode.DEFAULT));
+            assertEquals(9, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+
+            assertSame(OperationStatus.NOTFOUND,
+                       c.getNext(key, data, LockMode.DEFAULT));
+        } finally {
+            c.close();
+        }
+
+        /* Close without a checkpoint to redo the LNs during recovery. */
+        db.sync();
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        env = null;
+
+        /* Recover and check again. */
+        env = new Environment(envHome, envConfig);
+        db = env.openDatabase(null, DBNAME, dbConfig);
+        c = db.openCursor(null, null);
+        try {
+            assertSame(OperationStatus.SUCCESS,
+                       c.getNext(key, data, LockMode.DEFAULT));
+
+            /*
+             * Before fixing the problem with deferred-write duplicate logging,
+             * the key read below was 0 instead of 9.  The bug was that the
+             * data (0) was being logged as the main tree key.
+             */
+            assertEquals(9, IntegerBinding.entryToInt(key));
+            assertEquals(0, IntegerBinding.entryToInt(data));
+
+            assertSame(OperationStatus.SUCCESS,
+                       c.getNext(key, data, LockMode.DEFAULT));
+            assertEquals(9, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+
+            assertSame(OperationStatus.NOTFOUND,
+                       c.getNext(key, data, LockMode.DEFAULT));
+        } finally {
+            c.close();
+        }
+
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Tests a fix for a bug where reusing a slot caused a non-deleted record
+     * to be compressed. [#15684]
+     */
+    public void testCompressAfterSlotReuse()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        /* Disable daemons to prevent async compression. */
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setDeferredWrite(true);
+        Database db = env.openDatabase(null, DBNAME, dbConfig);
+
+        /* Reuse slot: Insert key 0, delete 0, insert 0 */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(0, key);
+        IntegerBinding.intToEntry(0, data);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+        assertSame(OperationStatus.SUCCESS,
+                   db.delete(null, key));
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+
+        /*
+         * Because of the delete() above, a compressor entry is queued for key
+         * 0, although it was re-inserted.  And there is no LSN for the slot
+         * because it has never been logged. When we compress now, we run into
+         * the BIN.compress bug where it assumes an entry is deleted if its LSN
+         * is null.
+         */
+        env.compress();
+
+        /*
+         * Before the bug fix, the following assert would fail because the
+         * entry was compressed and NOTFOUND.
+         */
+        assertSame(OperationStatus.SUCCESS,
+                   db.get(null, key, data, null));
+
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    public void testPreloadNoSync()
+        throws DatabaseException {
+
+        doPreload(false); /* syncBeforeRecovery */
+    }
+
+    public void testPreloadSync()
+        throws DatabaseException {
+
+        doPreload(true); /* syncBeforeRecovery */
+    }
+
+    private void doPreload(boolean syncBeforeRecovery)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = getEnvConfig(false);
+        envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE);
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+        HashSet expected = new HashSet();
+
+        int numRecords = 3000;
+
+        /* Insert */
+        EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG);
+        insert(db, null, 1, numRecords, expected, true);
+        checkForEvictionActivity(true, /* evict activity */
+                                 true); /* cache miss */
+
+        /*
+         * Change the cache size to the default value so a preload will
+         * have enough cache to pull items in.
+         */
+        envConfig.setCacheSize(0);
+        env.setMutableConfig(envConfig);
+        if (DEBUG) {
+            System.out.println("after mutable " +
+                               env.getConfig().getCacheSize());
+        }
+
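+        /*
+         * Load LNs during the preload so the LN counts asserted below are
+         * populated.
+         */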
+        PreloadConfig pConfig = new PreloadConfig();
+        pConfig.setLoadLNs(true);
+        PreloadStats pStats = db.preload(pConfig);
+
+        if (DEBUG) {
+            System.out.println("first preload " + pStats);
+        }
+        if (dups) {
+            assertTrue(String.valueOf(pStats.getNDBINsLoaded()),
+                       pStats.getNDBINsLoaded() > 50);
+            assertTrue(String.valueOf(pStats.getNDINsLoaded()),
+                       pStats.getNDINsLoaded() > 50);
+        } else {
+            assertTrue(String.valueOf(pStats.getNBINsLoaded()),
+                       pStats.getNBINsLoaded() > 50);
+            assertTrue(String.valueOf(pStats.getNINsLoaded()),
+                       pStats.getNINsLoaded() > 50);
+        }
+        assertTrue(String.valueOf(pStats.getNLNsLoaded()),
+                   pStats.getNLNsLoaded() > 50);
+
+        checkExactContentMatch(db, expected);
+
+        DatabaseConfig saveConfig = db.getConfig();
+        if (syncBeforeRecovery) {
+            db.sync();
+        }
+
+        /* Close db and env without sync or checkpoint */
+        closeEnv(false /*normalClose*/);
+
+        /* recover and re-open. */
+        env = new Environment(envHome, envConfig);
+        db = env.openDatabase(null, DBNAME, saveConfig);
+        pStats = db.preload(pConfig);
+        if (DEBUG) {
+            System.out.println("second preload " + pStats);
+        }
+
+        /* Check the contents. */
+        HashSet useExpected = null;
+        if (syncBeforeRecovery) {
+            useExpected = expected;
+            if (dups) {
+                assertTrue(String.valueOf(pStats.getNDBINsLoaded()),
+                           pStats.getNDBINsLoaded() > 50);
+                assertTrue(String.valueOf(pStats.getNDINsLoaded()),
+                           pStats.getNDINsLoaded() > 50);
+            } else {
+                assertTrue(String.valueOf(pStats.getNBINsLoaded()),
+                           pStats.getNBINsLoaded() > 50);
+                assertTrue(String.valueOf(pStats.getNINsLoaded()),
+                           pStats.getNINsLoaded() > 50);
+            }
+            assertTrue(String.valueOf(pStats.getNLNsLoaded()),
+                       pStats.getNLNsLoaded() > 50);
+        } else {
+            useExpected = new HashSet();
+            assertEquals(0, pStats.getNBINsLoaded());
+            assertEquals(0, pStats.getNINsLoaded());
+            assertEquals(0, pStats.getNLNsLoaded());
+        }
+
+        checkExactContentMatch(db, useExpected);
+
+        db.close();
+    }
+
+    private void checkForEvictionActivity(boolean expectEviction,
+                                          boolean expectCacheMiss)
+        throws DatabaseException {
+
+        EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG);
+        if (DEBUG) {
+            System.out.println("EvictPasses=" + stats.getNEvictPasses());
+            System.out.println("Selected=" + stats.getNNodesSelected());
+            System.out.println("Stripped=" + stats.getNBINsStripped());
+            System.out.println("Evicted=" +
+                               stats.getNNodesExplicitlyEvicted());
+            System.out.println("CacheMiss=" +
+                               stats.getNCacheMiss());
+        }
+
+        if (expectEviction) {
+
+            assertTrue(String.valueOf(stats.getNNodesSelected()),
+                       stats.getNNodesSelected() > 50);
+            assertTrue(String.valueOf(stats.getNBINsStripped()),
+                       stats.getNBINsStripped() > 50);
+            assertTrue(String.valueOf(stats.getNNodesExplicitlyEvicted()),
+                       stats.getNNodesExplicitlyEvicted() > 50);
+        }
+
+        if (expectCacheMiss) {
+            assertTrue(String.valueOf(stats.getNCacheMiss()),
+                       stats.getNCacheMiss() > 50);
+        }
+    }
+
+    public void testBadConfigurations()
+        throws Throwable {
+
+        env = new Environment(envHome, getEnvConfig(true));
+
+        DatabaseConfig dbConfigDeferred = new DatabaseConfig();
+        dbConfigDeferred.setAllowCreate(true);
+        dbConfigDeferred.setDeferredWrite(true);
+        dbConfigDeferred.setSortedDuplicates(dups);
+
+        DatabaseConfig dbConfigNoDeferred = new DatabaseConfig();
+        dbConfigNoDeferred.setAllowCreate(true);
+        dbConfigNoDeferred.setSortedDuplicates(dups);
+
+        /* A transactional deferred-write database is not allowed. */
+        try {
+            dbConfigDeferred.setTransactional(true);
+            @SuppressWarnings("unused")
+            Database db = env.openDatabase(null, "foo", dbConfigDeferred);
+            fail("No support yet for txnal, deferred-write databases");
+        } catch (IllegalArgumentException expected) {
+        }
+
+        dbConfigDeferred.setTransactional(false);
+
+        /*
+         * Open a db first with deferred write, then open it again without
+         * deferred write; this should fail.
+         */
+        Database db1 = env.openDatabase(null, "foo", dbConfigDeferred);
+        try {
+            @SuppressWarnings("unused")
+            Database db2 = env.openDatabase(null, "foo", dbConfigNoDeferred);
+            fail("Database already opened with deferred write");
+        } catch (IllegalArgumentException expected) {
+        }
+        db1.close();
+
+        /*
+         * Open a db first without deferred write, then open it again with
+         * deferred write; this should fail.
+         */
+        db1 = env.openDatabase(null, "foo", dbConfigNoDeferred);
+        try {
+            @SuppressWarnings("unused")
+            Database db2 = env.openDatabase(null, "foo", dbConfigDeferred);
+            fail("Database already opened without deferred write");
+        } catch (IllegalArgumentException expected) {
+        }
+        db1.close();
+
+        /* Sync is only allowed for deferred-write databases. */
+        Database db = env.openDatabase(null, "foo", dbConfigNoDeferred);
+        try {
+            db.sync();
+            fail("Sync not permitted");
+        } catch (UnsupportedOperationException expected) {
+            if (DEBUG) {
+                System.out.println("expected=" + expected);
+            }
+            db.close();
+        }
+    }
+
+    public void testCleaning5000()
+        throws Throwable {
+
+        doCleaning("90", "4200"); /* log file size. */
+    }
+
+    /**
+     * This test is disabled because it is very unreliable.  It works some of
+     * the time.  Other times, it creates huge numbers (10s of thousands) of
+     * log files.  The file size is too small.  A small file size causes many
+     * FileSummaryLNs to be logged, which creates more files.  With a small
+     * cache, the FileSummaryLNs are constantly evicted, creating more files.
+     */
+    public void XXtestCleaning2000()
+        throws Throwable {
+
+        doCleaning("90", "3000"); /* log file size. */
+    }
+
+    private void doCleaning(String minUtilization, String logFileSize)
+        throws DatabaseException {
+
+        /*
+         * Run with a small cache so there's plenty of logging.  But use a
+         * slightly bigger cache than the minimum so that eviction during
+         * cleaning has enough working room on 64-bit systems [#15176].
+         */
+        long cacheSize = MemoryBudget.MIN_MAX_MEMORY_SIZE +
+                        (MemoryBudget.MIN_MAX_MEMORY_SIZE / 2);
+        EnvironmentConfig envConfig = getEnvConfig(true);
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setCacheSize(cacheSize);
+        envConfig.setConfigParam("je.cleaner.minUtilization",
+                                 minUtilization);
+        envConfig.setConfigParam("je.log.fileMax", logFileSize);
+        envConfig.setConfigParam("je.cleaner.expunge", "false");
+        /* Disable cleaner thread so batch cleaning is predictable. [#15176] */
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+
+        /* We'll do inserts in two batches. */
+        HashSet expectedBatch1 = new HashSet();
+        HashSet expectedBatch2 = new HashSet();
+
+        int batch1Size = 100;
+        int batch2Size = 110;
+
+        /*
+         * Insert non-random values in two batches. Don't use random
+         * inserts in order to be sure we have a set of non-conflicting
+         * values for the test.
+         */
+        int startingValue = 1;
+        insert(db,
+               null,
+               startingValue,
+               startingValue + batch1Size,
+               expectedBatch1,
+               false); /* random */
+        checkExactContentMatch(db, expectedBatch1);
+        db.sync();
+
+        /* Insert a second batch with no sync */
+        insertAndUpdate(db,
+                        null,
+                        startingValue + batch1Size,
+                        startingValue + batch2Size,
+                        expectedBatch2,
+                        false); /* random */
+        expectedBatch2.addAll(expectedBatch1);
+        checkExactContentMatch(db, expectedBatch2);
+        env.checkpoint(CHECKPOINT_FORCE_CONFIG);
+        Tracer.trace(Level.SEVERE,
+                     DbInternal.envGetEnvironmentImpl(env),
+                     "before clean");
+        batchClean();
+
+        Tracer.trace(Level.SEVERE,
+                     DbInternal.envGetEnvironmentImpl(env),
+                     "after clean");
+
+        checkExactContentMatch(db, expectedBatch2);
+
+        /*
+         * Recover the environment a few times. Whether the batch2 changes
+         * show up depends on whether the db was deferred write, and whether
+         * a sync was done.
+         */
+        for (int i = 0; i < 4; i++) {
+            /* Do an abnormal close; we do not want to sync the database. */
+            db = null;
+            closeEnv(false /*normalClose*/);
+            env = new Environment(envHome, envConfig);
+
+            db = createDb(true);
+            checkContents(db,
+                          expectedBatch2,
+                          false); /* exact match. */
+
+            batchClean();
+            checkContents(db,
+                          expectedBatch2,
+                          false); /* exact match. */
+        }
+
+        db.close();
+        closeEnv(true /*normalClose*/);
+    }
+
+    /**
+     * Insert a set of records and record the values in the expected set.
+     * @param useRandom If true, use random values.
+     */
+    private void insert(Database db,
+                        Transaction txn,
+                        int start,
+                        int end,
+                        Set expected,
+                        boolean useRandom)
+        throws DatabaseException {
+
+        OperationStatus status;
+        DatabaseEntry entry = new DatabaseEntry();
+        Random rand = new Random();
+        for (int i = start; i < end; i++) {
+            int value = useRandom ? rand.nextInt() : i;
+
+            IntegerBinding.intToEntry(value, entry);
+            if (dups) {
+                status = db.putNoDupData(txn, MAIN_KEY_FOR_DUPS, entry);
+            } else {
+                status = db.putNoOverwrite(txn, entry, entry);
+            }
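+            /*
+             * Random values may collide with existing keys, so the put status
+             * is only asserted for sequential inserts.
+             */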
+            if (!useRandom) {
+                assertEquals(OperationStatus.SUCCESS, status);
+            }
+            expected.add(new Integer(value));
+        }
+    }
+
+    /**
+     * Insert and modify a set of records, recording the values in the
+     * expected set.
+     * @param useRandom If true, use random values.
+     */
+    private void insertAndUpdate(Database db,
+                                 Transaction txn,
+                                 int start,
+                                 int end,
+                                 Set expected,
+                                 boolean useRandom)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Random rand = new Random();
+        for (int i = start; i < end; i++) {
+            int value = useRandom ? rand.nextInt() : i;
+
+            IntegerBinding.intToEntry(value, key);
+            if (dups) {
+                OperationStatus status =
+                    db.putNoDupData(txn, MAIN_KEY_FOR_DUPS, key);
+                if (status == OperationStatus.SUCCESS) {
+                    /* Update it */
+                    db.put(txn, MAIN_KEY_FOR_DUPS, key);
+                    expected.add(new Integer(value));
+                }
+            } else {
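+                /* Insert with a placeholder data value, then update it. */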
+                IntegerBinding.intToEntry(value - 1, data);
+                OperationStatus status = db.putNoOverwrite(txn, key, data);
+                if (status == OperationStatus.SUCCESS) {
+                    /* Update it */
+                    IntegerBinding.intToEntry(value, data);
+                    db.put(txn, key, data);
+                    expected.add(new Integer(value));
+                }
+            }
+        }
+    }
+
+    /**
+     * Update a set of records.
+     */
+    private void update(Database db,
+                        Transaction txn,
+                        int start,
+                        int end)
+        throws DatabaseException {
+
+        OperationStatus status;
+        DatabaseEntry entry = new DatabaseEntry();
+        for (int i = start; i < end; i++) {
+            IntegerBinding.intToEntry(i, entry);
+            if (dups) {
+                status = db.put(txn, MAIN_KEY_FOR_DUPS, entry);
+            } else {
+                status = db.put(txn, entry, entry);
+            }
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+    }
+
+    /**
+     * Delete a set of records and remove the values from the expected set.
+     * @param useRandom If true, delete random values within the range.
+     */
+    private void delete(Database db,
+                        Transaction txn,
+                        int start,
+                        int end,
+                        Set expected,
+                        boolean useRandom)
+        throws DatabaseException {
+
+        DatabaseEntry entry = new DatabaseEntry();
+        Random rand = new Random();
+        for (int i = start; i < end; i++) {
+            int value = useRandom ? (start + rand.nextInt(end - start)) : i;
+
+            IntegerBinding.intToEntry(value, entry);
+            if (dups) {
+                final Cursor c = db.openCursor(txn, null);
+                try {
+                    if (c.getSearchBoth(MAIN_KEY_FOR_DUPS, entry, null) ==
+                        OperationStatus.SUCCESS) {
+                        c.delete();
+                    }
+                } finally {
+                    c.close();
+                }
+            } else {
+                db.delete(txn, entry);
+            }
+            expected.remove(new Integer(value));
+        }
+    }
+
+    /**
+     * The database should hold exactly the values in the expected set.
+     */
+    private void checkExactContentMatch(Database db, HashSet expected)
+        throws DatabaseException {
+
+        checkContents(db, expected, true);
+    }
+
+    /**
+     * The database should hold only values that are in the expected set.
+     * Note that this assumes that the key and data are the same value.
+     * @param exactMatch if true, the database ought to hold all the values
+     * in the expected set.
+     */
+    private void checkContents(Database db,
+                               HashSet expected,
+                               boolean exactMatch)
+        throws DatabaseException {
+
+        Cursor c = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
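+        /* Work on a copy so matched values can be removed destructively. */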
+        Set useExpected = (Set) expected.clone();
+
+        if (DEBUG) {
+            System.err.println("Start checking");
+        }
+
+        while (c.getNext(key, data, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+            int value = IntegerBinding.entryToInt(dups ? data : key);
+
+            if (DEBUG) {
+                System.err.println("checkDatabase: found " + value);
+            }
+
+            assertTrue(value + " not in useExpected set. Expected size="
+                       + useExpected.size(),
+                       useExpected.remove(new Integer(value)));
+            assertEquals(value, IntegerBinding.entryToInt(data));
+        }
+
+        if (exactMatch) {
+            assertEquals(0, useExpected.size());
+        } else {
+            if (DEBUG) {
+                System.out.println(useExpected.size() +
+                                   " is leftover in expected set");
+            }
+        }
+        c.close();
+    }
+
+    private void batchClean()
+        throws DatabaseException {
+
+        int cleaned = 0;
+        int cleanedThisRound = 0;
+        do {
+            cleanedThisRound = env.cleanLog();
+            cleaned += cleanedThisRound;
+        } while (cleanedThisRound > 0);
+
+        if (DEBUG) {
+            System.out.println("numCleaned = " + cleaned);
+        }
+
+        assertTrue("cleaned must be > 0, was only " + cleaned +
+                   " but may vary from machine to machine", cleaned > 0);
+
+        if (cleaned > 0) {
+            CheckpointConfig force = new CheckpointConfig();
+            force.setForce(true);
+            env.checkpoint(force);
+        }
+    }
+
+    /**
+     * Tests that record deletion is durable after Database.sync, when a crash
+     * occurs after the sync and the previous version of the LN is in the
+     * recovery interval.  Before deferred-write LNs were logged provisionally,
+     * recovery would reinsert the previous version of the LN into the BIN.
+     *
+     * [#16864]
+     */
+    public void testDelete()
+        throws DatabaseException {
+
+        final EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+
+        final int NUM_RECORDS = 100;
+        final HashSet expected = new HashSet();
+        insert(db, null, 1, NUM_RECORDS, expected, false);
+        db.sync();
+        delete(db, null, 1, NUM_RECORDS, expected, false);
+        db.sync();
+        assertTrue(expected.isEmpty());
+        checkExactContentMatch(db, expected);
+
+        /* Close without a checkpoint to redo the LNs during recovery. */
+        DbInternal.envGetEnvironmentImpl(env).abnormalClose();
+        env = null;
+
+        /* Recover and check again. */
+        env = new Environment(envHome, envConfig);
+        db = createDb(true);
+        checkExactContentMatch(db, expected);
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Tests a fix for a LogFileNotFound exception in the following sequence
+     * for a deferred-write database.
+     *
+     * 100 LN-A
+     * 200 BIN-B, parent of LN-A
+     * ... LN-A is deleted, marked dirty and not logged
+     * ... BIN-B is compressed, LN-A is counted obsolete in utilization tracker
+     * 300 BIN-B flushed by eviction (this step is optional)
+     * 400 FileSummaryLN with LN-A obsolete offset is flushed as the result of
+     *     utilization tracker eviction
+     * ... Crash and recover, LN-A is mistakenly inserted into BIN-B by redo
+     *
+     * When the log file containing 100 LN-A is cleaned, it will not be
+     * migrated because it was counted obsolete.  Yet it is referenced by its
+     * BIN parent.  This caused a LogFileNotFound exception later when
+     * attempting to access the LN.
+     *
+     * [#16864]
+     */
+    public void testCleanAfterDelete()
+        throws DatabaseException {
+
+        if (dups) {
+            /* There is no variant of this test for dups. */
+            return;
+        }
+        final int CACHE_SIZE = 4 << 20;
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentConfig.MAX_MEMORY, String.valueOf(CACHE_SIZE));
+        envConfig.setConfigParam
+            (EnvironmentConfig.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE, "1");
+        envConfig.setConfigParam
+            (EnvironmentConfig.CLEANER_EXPUNGE, "false");
+        /* Disable daemons to prevent async compression. */
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_CLEANER, "false");
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");
+        env = new Environment(envHome, envConfig);
+
+        final DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setDeferredWrite(true);
+        Database db = env.openDatabase(null, DBNAME, dbConfig);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry(new byte[1000]);
+        final int N_RECORDS = 10000;
+
+        IntegerBinding.intToEntry(0, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+        IntegerBinding.intToEntry(1, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+        db.sync();
+        IntegerBinding.intToEntry(0, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.delete(null, key));
+        env.compress();
+        db.sync();
+
+        /* Cause enough eviction to flush the FileSummaryLNs. */
+        for (int i = 1; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertSame(OperationStatus.SUCCESS,
+                       db.put(null, key, data));
+        }
+        db.sync();
+        for (int i = 1; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertSame(OperationStatus.SUCCESS,
+                       db.put(null, key, data));
+        }
+        db.sync();
+        for (int i = 1; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertSame(OperationStatus.SUCCESS,
+                       db.put(null, key, data));
+        }
+        db.sync();
+
+        /* Crash and recover. */
+        DbInternal.envGetEnvironmentImpl(env).abnormalClose();
+        db = null;
+        env = null;
+        envConfig.setAllowCreate(false);
+        env = new Environment(envHome, envConfig);
+        dbConfig.setAllowCreate(false);
+        db = env.openDatabase(null, DBNAME, dbConfig);
+
+        /* Create enough waste to cause log file zero to be cleaned. */
+        for (int i = 1; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertSame(OperationStatus.SUCCESS,
+                       db.put(null, key, data));
+        }
+        db.sync();
+        for (int i = 1; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertSame(OperationStatus.SUCCESS,
+                       db.delete(null, key));
+        }
+        db.sync();
+        env.cleanLog();
+        env.checkpoint(CHECKPOINT_FORCE_CONFIG);
+        assertTrue(!(new File(envHome, TestUtils.LOG_FILE_NAME)).exists());
+
+        /* Before the fix, a LogFileNotFound exception was thrown here. */
+        IntegerBinding.intToEntry(0, key);
+        assertSame(OperationStatus.NOTFOUND,
+                   db.get(null, key, data, null));
+
+        db.close();
+        env.close();
+        env = null;
+    }
+}
diff --git a/test/com/sleepycat/je/test/ForeignKeyTest.java b/test/com/sleepycat/je/test/ForeignKeyTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..abf48c58ab695a18b0ee314d720e8b08baa359f9
--- /dev/null
+++ b/test/com/sleepycat/je/test/ForeignKeyTest.java
@@ -0,0 +1,413 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyTest.java,v 1.16.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.ForeignKeyNullifier;
+import com.sleepycat.je.ForeignMultiKeyNullifier;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.util.TestUtils;
+
+public class ForeignKeyTest extends MultiKeyTxnTestCase {
+
+    public static Test suite() {
+        return multiKeyTxnTestSuite(ForeignKeyTest.class, null, null);
+    }
+
+    public void testDupsNotAllowed()
+        throws DatabaseException {
+
+        Database priDb1 = openPrimary("pri1");
+        Database priDb2 = openPrimary("pri2", true /*duplicates*/);
+
+        try {
+            openSecondary(priDb1, "sec2", priDb2, ForeignKeyDeleteAction.ABORT);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue
+                (msg, msg.indexOf("Duplicates must not be allowed") >= 0);
+        }
+
+        priDb1.close();
+        priDb2.close();
+    }
+
+    public void testIllegalNullifier()
+        throws DatabaseException {
+
+        Database priDb1 = openPrimary("pri1");
+        Transaction txn = txnBegin();
+        MyKeyCreator myCreator = new MyKeyCreator();
+        SecondaryConfig config;
+
+        /* A nullifier is required with NULLIFY. */
+        config = new SecondaryConfig();
+        config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+        config.setKeyCreator(myCreator);
+        try {
+            env.openSecondaryDatabase(txn, "sec1", priDb1, config);
+            fail();
+        } catch (NullPointerException expected) { }
+
+        /* Specifying both nullifiers is not allowed. */
+        config = new SecondaryConfig();
+        config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+        config.setKeyCreator(myCreator);
+        config.setForeignKeyNullifier(myCreator);
+        config.setForeignMultiKeyNullifier(myCreator);
+        try {
+            env.openSecondaryDatabase(txn, "sec1", priDb1, config);
+            fail();
+        } catch (IllegalArgumentException expected) { }
+
+        /* ForeignKeyNullifier is not allowed with MultiKeyCreator. */
+        config = new SecondaryConfig();
+        config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+        config.setMultiKeyCreator(new SimpleMultiKeyCreator(myCreator));
+        config.setForeignKeyNullifier(myCreator);
+        try {
+            env.openSecondaryDatabase(txn, "sec1", priDb1, config);
+            fail();
+        } catch (IllegalArgumentException expected) { }
+
+        txnCommit(txn);
+        priDb1.close();
+    }
+
+    public void testAbort()
+        throws DatabaseException {
+
+        doTest(ForeignKeyDeleteAction.ABORT);
+    }
+
+    public void testCascade()
+        throws DatabaseException {
+
+        doTest(ForeignKeyDeleteAction.CASCADE);
+    }
+
+    public void testNullify()
+        throws DatabaseException {
+
+        doTest(ForeignKeyDeleteAction.NULLIFY);
+    }
+
+    private void doTest(ForeignKeyDeleteAction onDelete)
+        throws DatabaseException {
+
+        Database priDb1 = openPrimary("pri1");
+        Database priDb2 = openPrimary("pri2");
+
+        SecondaryDatabase secDb1 = openSecondary(priDb1, "sec1", null, null);
+        SecondaryDatabase secDb2 = openSecondary(priDb2, "sec2", priDb1,
+                                                 onDelete);
+
+        OperationStatus status;
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pkey = new DatabaseEntry();
+        Transaction txn = txnBegin();
+
+        /*
+         * pri1 has a record with primary key 1 and index key 3.
+         * pri2 has a record with primary key 2 and foreign key 1,
+         * which is the primary key of pri1.
+         * pri2 has another record with primary key 3 and foreign key 1,
+         * to enable testing cascade and nullify for secondary duplicates.
+         */
+
+        /* Add three records. */
+
+        status = priDb1.put(txn, entry(1), entry(3));
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        status = priDb2.put(txn, entry(2), entry(1));
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        status = priDb2.put(txn, entry(3), entry(1));
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Verify record data. */
+
+        status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, val(data));
+
+        status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, val(data));
+
+        status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(data));
+
+        status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(data));
+
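+        /*
+         * sec2 should contain two duplicates for foreign key 1, with primary
+         * keys 2 and 3.
+         */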
+        SecondaryCursor cursor = secDb2.openSecondaryCursor(txn, null);
+        status = cursor.getFirst(key, pkey, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(key));
+        assertEquals(2, val(pkey));
+        assertEquals(1, val(data));
+        status = cursor.getNext(key, pkey, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, val(key));
+        assertEquals(3, val(pkey));
+        assertEquals(1, val(data));
+        status = cursor.getNext(key, pkey, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+
+        txnCommit(txn);
+        txn = txnBegin();
+
+        /* Test delete action. */
+
+        if (onDelete == ForeignKeyDeleteAction.ABORT) {
+
+            /* Test that we abort trying to delete a referenced key. */
+
+            try {
+                status = priDb1.delete(txn, entry(1));
+                fail();
+            } catch (DatabaseException expected) {
+                txnAbort(txn);
+                txn = txnBegin();
+            }
+
+            /* Test that we can put a record into pri2 with a null foreign key
+             * value. */
+
+            status = priDb2.put(txn, entry(2), entry(0));
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = priDb2.put(txn, entry(3), entry(0));
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            /* The sec2 records should not be present since the key was set
+             * to null above. */
+
+            status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            /* Test that now we can delete the record in pri1, since it is no
+             * longer referenced. */
+
+            status = priDb1.delete(txn, entry(1));
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+        } else if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+
+            /* Delete the referenced key. */
+
+            status = priDb1.delete(txn, entry(1));
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            /* The pri2 records should still exist, but should have a zero/null
+             * secondary key since it was nullified. */
+
+            status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(0, val(data));
+
+            status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(0, val(data));
+
+            status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+        } else if (onDelete == ForeignKeyDeleteAction.CASCADE) {
+
+            /* Delete the referenced key. */
+
+            status = priDb1.delete(txn, entry(1));
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            /* The pri2 records should have been deleted as well. */
+
+            status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+            status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.NOTFOUND, status);
+
+        } else {
+            throw new IllegalStateException();
+        }
+
+        /*
+         * Test that a foreign key value that is not present in the foreign db
+         * may not be used.  Key 2 is not in pri1 in this case.
+         */
+        try {
+            status = priDb2.put(txn, entry(3), entry(2));
+            fail();
+        } catch (DatabaseException expected) { }
+
+        txnCommit(txn);
+        secDb1.close();
+        secDb2.close();
+        priDb1.close();
+        priDb2.close();
+    }
+
+    private Database openPrimary(String name)
+        throws DatabaseException {
+
+        return openPrimary(name, false);
+    }
+
+    private Database openPrimary(String name, boolean duplicates)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(duplicates);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private SecondaryDatabase openSecondary(Database priDb, String dbName,
+                                            Database foreignDb,
+                                            ForeignKeyDeleteAction onDelete)
+        throws DatabaseException {
+
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+
+        MyKeyCreator keyCreator = new MyKeyCreator();
+        if (useMultiKey) {
+            dbConfig.setMultiKeyCreator(new SimpleMultiKeyCreator(keyCreator));
+        } else {
+            dbConfig.setKeyCreator(keyCreator);
+        }
+
+        if (foreignDb != null) {
+
+            if (useMultiKey) {
+                dbConfig.setForeignMultiKeyNullifier(keyCreator);
+            } else {
+                dbConfig.setForeignKeyNullifier(keyCreator);
+            }
+            dbConfig.setForeignKeyDatabase(foreignDb);
+            dbConfig.setForeignKeyDeleteAction(onDelete);
+        }
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    static private DatabaseEntry entry(int val) {
+
+        return new DatabaseEntry(TestUtils.getTestArray(val));
+    }
+
+    static private int val(DatabaseEntry entry) {
+
+        return TestUtils.getTestVal(entry.getData());
+    }
+
+    private class MyKeyCreator implements SecondaryKeyCreator,
+                                          ForeignMultiKeyNullifier,
+                                          ForeignKeyNullifier {
+
+        /* SecondaryKeyCreator */
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result)
+            throws DatabaseException {
+
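+            /* A zero value represents a null foreign key; create no key. */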
+            int val = val(data);
+            if (val != 0) {
+                result.setData(TestUtils.getTestArray(val));
+                return true;
+            } else {
+                return false;
+            }
+        }
+
+        /* ForeignMultiKeyNullifier */
+        public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         DatabaseEntry secKey)
+            throws DatabaseException {
+
+            DatabaseEntry entry = new DatabaseEntry();
+            assertTrue(createSecondaryKey(secondary, null, data, entry));
+            assertEquals(entry, secKey);
+
+            return nullifyForeignKey(secondary, data);
+        }
+
+        /* ForeignKeyNullifier */
+        public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                         DatabaseEntry data)
+            throws DatabaseException {
+
+            int val = val(data);
+            if (val != 0) {
+                data.setData(TestUtils.getTestArray(0));
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/JoinTest.java b/test/com/sleepycat/je/test/JoinTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..018b70a30b73b5c2f576128fbc8ed14a7d20177b
--- /dev/null
+++ b/test/com/sleepycat/je/test/JoinTest.java
@@ -0,0 +1,444 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JoinTest.java,v 1.16.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.JoinConfig;
+import com.sleepycat.je.JoinCursor;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.util.TestUtils;
+
+public class JoinTest extends MultiKeyTxnTestCase {
+
+    /*
+     * DATA sets are pairs of arrays for each record. The first array is
+     * the record data and has three values in the 0/1/2 positions for the
+     * secondary key values with key IDs 0/1/2.  The second array contains a
+     * single value, which is the primary key.
+     *
+     * JOIN sets are also pairs of arrays.  The first array in each pair has 3
+     * values for setting the input cursors.  Entries 0/1/2 in that array are
+     * for secondary keys 0/1/2.  The second array is the set of primary keys
+     * that are expected to match in the join operation.
+     *
+     * A zero value for an index key means "don't index", so zero values are
+     * never used for join index keys since we wouldn't be able to successfully
+     * position the input cursor.
+     *
+     * These values are all stored as bytes, not ints, in the actual records,
+     * so all values must be within the range of a signed byte.
+     */
+    private static final int[][][] ALL = {
+        /* Data set #1 - single match possible per record. */
+        {
+            {1, 1, 1}, {11},
+            {2, 2, 2}, {12},
+            {3, 3, 3}, {13},
+        }, {
+            {1, 1, 1}, {11},
+            {2, 2, 2}, {12},
+            {3, 3, 3}, {13},
+            {1, 2, 3}, {},
+            {1, 1, 2}, {},
+            {3, 2, 2}, {},
+        },
+        /* Data set #2 - no match is possible when not all index keys are
+         * present (when some are zero). */
+        {
+            {1, 1, 0}, {11},
+            {2, 0, 2}, {12},
+            {0, 3, 3}, {13},
+            {3, 2, 1}, {14},
+        }, {
+            {1, 1, 1}, {},
+            {2, 2, 2}, {},
+            {3, 3, 3}, {},
+        },
+        /* Data set #3 - one match in the presence of non-matching records
+         * (with missing/zero index keys). */
+        {
+            {1, 0, 0}, {11},
+            {1, 1, 0}, {12},
+            {1, 1, 1}, {13},
+            {0, 0, 0}, {14},
+        }, {
+            {1, 1, 1}, {13},
+        },
+        /* Data set #4 - one match in the presence of non-matching records
+         * (with non-matching but non-zero values). */
+        {
+            {1, 2, 3}, {11},
+            {1, 1, 3}, {12},
+            {1, 1, 1}, {13},
+            {3, 2, 1}, {14},
+        }, {
+            {1, 1, 1}, {13},
+        },
+        /* Data set #5 - two matches in the presence of non-matching records.
+         */
+        {
+            {1, 2, 3}, {11},
+            {1, 1, 3}, {12},
+            {1, 1, 1}, {13},
+            {1, 2, 3}, {14},
+        }, {
+            {1, 2, 3}, {11, 14},
+        },
+        /* Data set #6 - three matches in the presence of non-matching records.
+         * Also used to verify that cursors are sorted by count: 2, 1, 0 */
+        {
+            {1, 2, 3}, {11},
+            {1, 1, 3}, {12},
+            {1, 1, 1}, {13},
+            {1, 2, 3}, {14},
+            {1, 1, 1}, {15},
+            {1, 0, 0}, {16},
+            {1, 1, 0}, {17},
+            {1, 1, 1}, {18},
+            {0, 0, 0}, {19},
+            {3, 2, 1}, {20},
+        }, {
+            {1, 1, 1}, {13, 15, 18},
+        },
+        /* Data set #7 - three matches by themselves. */
+        {
+            {1, 2, 3}, {11},
+            {1, 2, 3}, {12},
+            {1, 2, 3}, {13},
+        }, {
+            {1, 2, 3}, {11, 12, 13},
+        },
+    };
+
+    /* Used for testing the cursors are sorted by count. */
+    private static final int CURSOR_ORDER_SET = 6;
+    private static final int[] CURSOR_ORDER = {2, 1, 0};
+
+    private static EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+    static {
+        envConfig.setAllowCreate(true);
+    }
+
+    private static JoinConfig joinConfigNoSort = new JoinConfig();
+    static {
+        joinConfigNoSort.setNoSort(true);
+    }
+
+    public static Test suite() {
+        return multiKeyTxnTestSuite(JoinTest.class, envConfig, null);
+    }
+
+    public void testJoin()
+        throws DatabaseException {
+
+        for (int i = 0; i < ALL.length; i += 2) {
+            doJoin(ALL[i], ALL[i + 1], (i / 2) + 1);
+        }
+    }
+
+    private void doJoin(int[][] dataSet, int[][] joinSet, int setNum)
+        throws DatabaseException {
+
+        String name = "Set#" + setNum;
+        Database priDb = openPrimary("pri");
+        SecondaryDatabase secDb0 = openSecondary(priDb, "sec0", true, 0);
+        SecondaryDatabase secDb1 = openSecondary(priDb, "sec1", true, 1);
+        SecondaryDatabase secDb2 = openSecondary(priDb, "sec2", true, 2);
+
+        OperationStatus status;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Transaction txn;
+        txn = txnBegin();
+
+        for (int i = 0; i < dataSet.length; i += 2) {
+            int[] vals = dataSet[i];
+            setData(data, vals[0], vals[1], vals[2]);
+            setKey(key, dataSet[i + 1][0]);
+            status = priDb.put(txn, key, data);
+            assertEquals(name, OperationStatus.SUCCESS, status);
+        }
+
+        txnCommit(txn);
+        txn = txnBeginCursor();
+
+        SecondaryCursor c0 = secDb0.openSecondaryCursor(txn, null);
+        SecondaryCursor c1 = secDb1.openSecondaryCursor(txn, null);
+        SecondaryCursor c2 = secDb2.openSecondaryCursor(txn, null);
+        SecondaryCursor[] cursors = {c0, c1, c2};
+
+        for (int i = 0; i < joinSet.length; i += 2) {
+            int[] indexKeys = joinSet[i];
+            int[] priKeys = joinSet[i + 1];
+            String prefix = name + " row=" + i;
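+            /* Position each secondary input cursor at its index key. */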
+            for (int k = 0; k < 3; k += 1) {
+                String msg = prefix + " k=" + k + " ikey=" + indexKeys[k];
+                setKey(key, indexKeys[k]);
+                status = cursors[k].getSearchKey(key, data,
+                                                 LockMode.DEFAULT);
+                assertEquals(msg, OperationStatus.SUCCESS, status);
+            }
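+            /*
+             * Pass 0 joins with key and data using the default (sorted)
+             * config; pass 1 joins key-only with sorting disabled.
+             */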
+            for (int j = 0; j < 2; j += 1) {
+                boolean withData = (j == 0);
+                JoinConfig config = (j == 0) ? null : joinConfigNoSort;
+                JoinCursor jc = priDb.join(cursors, config);
+                assertSame(priDb, jc.getDatabase());
+                for (int k = 0; k < priKeys.length; k += 1) {
+                    String msg = prefix + " k=" + k + " pkey=" + priKeys[k];
+                    if (withData) {
+                        status = jc.getNext(key, data, LockMode.DEFAULT);
+                    } else {
+                        status = jc.getNext(key, LockMode.DEFAULT);
+                    }
+                    assertEquals(msg, OperationStatus.SUCCESS, status);
+                    assertEquals(msg, priKeys[k], (int) key.getData()[0]);
+                    if (withData) {
+                        boolean dataFound = false;
+                        for (int m = 0; m < dataSet.length; m += 2) {
+                            int[] vals = dataSet[m];
+                            int priKey = dataSet[m + 1][0];
+                            if (priKey == priKeys[k]) {
+                                for (int n = 0; n < 3; n += 1) {
+                                    assertEquals(msg, vals[n],
+                                                 (int) data.getData()[n]);
+                                    dataFound = true;
+                                }
+                            }
+                        }
+                        assertTrue(msg, dataFound);
+                    }
+                }
+                String msg = prefix + " no more expected";
+                if (withData) {
+                    status = jc.getNext(key, data, LockMode.DEFAULT);
+                } else {
+                    status = jc.getNext(key, LockMode.DEFAULT);
+                }
+                assertEquals(msg, OperationStatus.NOTFOUND, status);
+
+                Cursor[] sorted = DbInternal.getSortedCursors(jc);
+                assertEquals(CURSOR_ORDER.length, sorted.length);
+                if (config == joinConfigNoSort) {
+                    Database db0 = sorted[0].getDatabase();
+                    Database db1 = sorted[1].getDatabase();
+                    Database db2 = sorted[2].getDatabase();
+                    assertSame(db0, secDb0);
+                    assertSame(db1, secDb1);
+                    assertSame(db2, secDb2);
+                } else if (setNum == CURSOR_ORDER_SET) {
+                    Database db0 = sorted[CURSOR_ORDER[0]].getDatabase();
+                    Database db1 = sorted[CURSOR_ORDER[1]].getDatabase();
+                    Database db2 = sorted[CURSOR_ORDER[2]].getDatabase();
+                    assertSame(db0, secDb0);
+                    assertSame(db1, secDb1);
+                    assertSame(db2, secDb2);
+                }
+                jc.close();
+            }
+        }
+
+        c0.close();
+        c1.close();
+        c2.close();
+        txnCommit(txn);
+
+        secDb0.close();
+        secDb1.close();
+        secDb2.close();
+        priDb.close();
+
+        /* Remove dbs since the same db names are reused for each data set. */
+        txn = txnBegin();
+        env.removeDatabase(txn, "pri");
+        env.removeDatabase(txn, "sec0");
+        env.removeDatabase(txn, "sec1");
+        env.removeDatabase(txn, "sec2");
+        txnCommit(txn);
+    }
+
+    /**
+     * Checks that a join operation does not block writers from inserting
+     * duplicates with the same main key as the search key.  Writers were being
+     * blocked before we changed join() to use READ_UNCOMMITTED when getting
+     * the duplicate count for each cursor.  [#11833]
+     */
+    public void testWriteDuringJoin()
+        throws DatabaseException {
+
+        Database priDb = openPrimary("pri");
+        SecondaryDatabase secDb0 = openSecondary(priDb, "sec0", true, 0);
+        SecondaryDatabase secDb1 = openSecondary(priDb, "sec1", true, 1);
+        SecondaryDatabase secDb2 = openSecondary(priDb, "sec2", true, 2);
+
+        OperationStatus status;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Transaction txn;
+        txn = txnBegin();
+
+        setKey(key, 13);
+        setData(data, 1, 1, 1);
+        status = priDb.put(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+        setKey(key, 14);
+        setData(data, 1, 1, 1);
+        status = priDb.put(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        txnCommit(txn);
+        txn = txnBeginCursor();
+
+        SecondaryCursor c0 = secDb0.openSecondaryCursor(txn, null);
+        SecondaryCursor c1 = secDb1.openSecondaryCursor(txn, null);
+        SecondaryCursor c2 = secDb2.openSecondaryCursor(txn, null);
+        SecondaryCursor[] cursors = {c0, c1, c2};
+
+        for (int i = 0; i < 3; i += 1) {
+            setKey(key, 1);
+            status = cursors[i].getSearchKey(key, data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+
+        /* join() will get the cursor counts. */
+        JoinCursor jc = priDb.join(cursors, null);
+
+        /*
+         * After calling join(), try inserting dups for the same main key.
+         * Before the fix to use READ_UNCOMMITTED, this would cause a deadlock.
+         */
+        Transaction writerTxn = txnBegin();
+        setKey(key, 12);
+        setData(data, 1, 1, 1);
+        status = priDb.put(writerTxn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* The join should retrieve two records, 13 and 14. */
+        status = jc.getNext(key, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(13, (int) key.getData()[0]);
+        status = jc.getNext(key, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(14, (int) key.getData()[0]);
+        status = jc.getNext(key, data, LockMode.DEFAULT);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Try writing again after calling getNext(). */
+        setKey(key, 11);
+        setData(data, 1, 1, 1);
+        status = priDb.put(writerTxn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+        txnCommit(writerTxn);
+
+        jc.close();
+
+        c0.close();
+        c1.close();
+        c2.close();
+        txnCommit(txn);
+
+        secDb0.close();
+        secDb1.close();
+        secDb2.close();
+        priDb.close();
+    }
+
+    private Database openPrimary(String name)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private SecondaryDatabase openSecondary(Database priDb, String dbName,
+                                            boolean dups, int keyId)
+        throws DatabaseException {
+
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        if (useMultiKey) {
+            dbConfig.setMultiKeyCreator
+                (new SimpleMultiKeyCreator(new MyKeyCreator(keyId)));
+        } else {
+            dbConfig.setKeyCreator(new MyKeyCreator(keyId));
+        }
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
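+    /**
+     * Sets the entry to a single byte containing the given value.
+     */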
+    private static void setKey(DatabaseEntry key, int priKey) {
+
+        byte[] a = new byte[1];
+        a[0] = (byte) priKey;
+        key.setData(a);
+    }
+
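+    /**
+     * Sets the data entry to one byte per secondary key value; a zero byte
+     * means no key for that secondary (see MyKeyCreator).
+     */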
+    private static void setData(DatabaseEntry data,
+                                int key1, int key2, int key3) {
+
+        byte[] a = new byte[4];
+        a[0] = (byte) key1;
+        a[1] = (byte) key2;
+        a[2] = (byte) key3;
+        data.setData(a);
+    }
+
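+    /**
+     * Creates the secondary key from the byte at position keyId of the data,
+     * or returns no key when that byte is zero.
+     */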
+    private static class MyKeyCreator implements SecondaryKeyCreator {
+
+        private int keyId;
+
+        MyKeyCreator(int keyId) {
+
+            this.keyId = keyId;
+        }
+
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result)
+            throws DatabaseException {
+
+            byte val = data.getData()[keyId];
+            if (val != 0) {
+                result.setData(new byte[] { val });
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java b/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a61e4933ab0d7e3c2711be235f9020cc2267ad43
--- /dev/null
+++ b/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java
@@ -0,0 +1,96 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MultiEnvOpenCloseTest.java,v 1.13.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Test out-of-memory fix to DaemonThread [#10504].
+ */
+public class MultiEnvOpenCloseTest extends TestCase {
+
+    private File envHome;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
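+    /**
+     * Writes a batch of records once, then repeatedly opens the environment
+     * read-only, reads the records back and closes it.
+     */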
+    public void testMultiOpenClose()
+        throws Exception {
+
+        /*
+         * Before fixing the bug in DaemonThread [#10504] this test would run
+         * out of memory after 7 iterations.  The bug was that when an
+         * environment is opened read-only, certain daemon threads are created
+         * but never started; they are not GC'ed because they belong to a
+         * thread group, and they retain a reference to the Environment.  The
+         * fix was to not create the threads until we need to start them.
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        final int DATA_SIZE = 1024 * 10;
+        final int N_RECORDS = 1000;
+        final int N_ITERS = 30;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]);
+
+        Environment env = new Environment(envHome, envConfig);
+        Database db = env.openDatabase(null, "MultiEnvOpenCloseTest",
+                                       dbConfig);
+        for (int i = 0; i < N_RECORDS; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            db.put(null, key, data);
+        }
+
+        db.close();
+        env.close();
+
+        envConfig.setAllowCreate(false);
+        envConfig.setReadOnly(true);
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(true);
+
+        for (int i = 1; i <= N_ITERS; i += 1) {
+            //System.out.println("MultiEnvOpenCloseTest iteration # " + i);
+            env = new Environment(envHome, envConfig);
+            db = env.openDatabase(null, "MultiEnvOpenCloseTest", dbConfig);
+            for (int j = 0; j < N_RECORDS; j += 1) {
+                IntegerBinding.intToEntry(j, key);
+                db.get(null, key, data, null);
+            }
+            db.close();
+            env.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java b/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java
new file mode 100644
index 0000000000000000000000000000000000000000..f8eca04bf630b4b0c18ae77d58ae9ea4b0159f68
--- /dev/null
+++ b/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MultiKeyTxnTestCase.java,v 1.8.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.util.Enumeration;
+import java.util.Set;
+
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Permutes a TxnTestCase over a boolean property for using multiple secondary
+ * keys.
+ */
+public abstract class MultiKeyTxnTestCase extends TxnTestCase {
+
+    boolean useMultiKey = false;
+
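+    /**
+     * Builds the standard txn test suite twice, once with useMultiKey=false
+     * and once with useMultiKey=true, so that every test runs with both
+     * key-creator styles.
+     */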
+    static TestSuite multiKeyTxnTestSuite(Class testCaseClass,
+                                          EnvironmentConfig envConfig,
+                                          String[] txnTypes) {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < 2; i += 1) {
+            boolean multiKey = (i == 1);
+            TestSuite txnSuite = txnTestSuite(testCaseClass, envConfig,
+                                              txnTypes);
+            Enumeration e = txnSuite.tests();
+            while (e.hasMoreElements()) {
+                MultiKeyTxnTestCase test =
+                    (MultiKeyTxnTestCase) e.nextElement();
+                test.useMultiKey = multiKey;
+                suite.addTest(test);
+            }
+        }
+        return suite;
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        if (useMultiKey) {
+            setName("multi-key:" + getName());
+        }
+    }
+
+    /**
+     * Wraps a single key creator to exercise the multi-key code for tests that
+     * only create a single secondary key.
+     */
+    static class SimpleMultiKeyCreator
+        implements SecondaryMultiKeyCreator {
+
+        private SecondaryKeyCreator keyCreator;
+
+        SimpleMultiKeyCreator(SecondaryKeyCreator keyCreator) {
+            this.keyCreator = keyCreator;
+        }
+
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        Set results)
+            throws DatabaseException {
+
+            DatabaseEntry result = new DatabaseEntry();
+            if (keyCreator.createSecondaryKey(secondary, key, data, result)) {
+                results.add(result);
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/PhantomRestartTest.java b/test/com/sleepycat/je/test/PhantomRestartTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..3f24d566047a41724d93c881cafd31b78570663c
--- /dev/null
+++ b/test/com/sleepycat/je/test/PhantomRestartTest.java
@@ -0,0 +1,543 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PhantomRestartTest.java,v 1.14.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests read operation restarts that are a byproduct of phantom prevention
+ * (range locking) added in SR [#10477].
+ */
+public class PhantomRestartTest extends TestCase {
+
+    /*
+     * Spec Parameters: Oper name, InsertKey1, InsertKey2, Oper instance
+     *
+     * A- InsertKey1 is inserted in transaction T0 and committed.
+     * B- T1 starts and performs Oper passing InsertKey1; it finishes the
+     *    operation, but doesn't commit.
+     * C- T2 starts and attempts to insert InsertKey2, but is blocked by T1.
+     * D- T3 starts and performs Oper passing InsertKey2, but is restarted
+     *    because it is blocked by T2.
+     * E- T1 is committed, allowing T2 and T3 to finish also.
+     * F- T4 performs Oper a final time passing InsertKey2.
+     *
+     * For each Spec below, the lock owners and waiters are described as they
+     * stand between steps D and E above.  This is the state in which the read
+     * operation (Oper) performs restarts because it is blocked by a
+     * RANGE_INSERT.
+     *
+     * To understand how read operation restarts work, consider the "First"
+     * Spec below.  When T1 releases K2, T2 should finish, and T3 should read
+     * K1.  If restart were not implemented in the lock manager, T3 would read
+     * K2 instead of K1; K1 would then be a phantom with respect to T3.  If
+     * search restarts were not implemented, a RangeRestartException would
+     * surface at the user level.  These errors were observed when running this
+     * test before search restarts were fully implemented.
+     */
+    private static Spec[] SPECS = {
+
+        /*
+         * T1 calls getFirst -- owns RANGE_READ on K2.
+         * T2 inserts K1 -- waits for RANGE_INSERT on K2.
+         * T3 calls getFirst -- requests RANGE_READ on K2: restarts.
+         */
+        new Spec("First", 2, 1, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getFirst(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(insertedKey);
+            }
+        }),
+
+        /*
+         * T1 calls getLast -- owns RANGE_READ on EOF.
+         * T2 inserts K2 -- waits for RANGE_INSERT on EOF.
+         * T3 calls getLast -- requests RANGE_READ on EOF: restarts.
+         */
+        new Spec("Last", 1, 2, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getLast(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(insertedKey);
+            }
+        }),
+
+        /*
+         * T1 calls getSearchKey on K1 -- owns RANGE_READ on K2.
+         * T2 inserts K1 -- waits for RANGE_INSERT on K2.
+         * T3 calls getSearchKey on K1 -- requests RANGE_READ on K2: restarts.
+         */
+        new Spec("Search", 2, 1, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                setKey(1);
+                status = dups ? cursor.getSearchBoth(key, data, null)
+                              : cursor.getSearchKey(key, data, null);
+                checkStatus((insertedKey == 1) ? OperationStatus.SUCCESS
+                                               : OperationStatus.NOTFOUND);
+            }
+        }),
+
+        /*
+         * T1 calls getSearchKeyRange on K0 -- owns RANGE_READ on K2.
+         * T2 inserts K1 -- waits for RANGE_INSERT on K2.
+         * T3 calls getSearchKeyRange on K0 -- requests RANGE_READ on K2:
+         * restarts.
+         */
+        new Spec("SearchRange", 2, 1, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                setKey(0);
+                status = dups ? cursor.getSearchBothRange(key, data, null)
+                              : cursor.getSearchKeyRange(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(insertedKey);
+            }
+        }),
+
+        /*
+         * T1 calls getNext from K1 -- owns RANGE_READ on EOF.
+         * T2 inserts K2 -- waits for RANGE_INSERT on EOF.
+         * T3 calls getNext from K1 -- requests RANGE_READ on EOF: restarts.
+         */
+        new Spec("Next", 1, 2, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getFirst(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(1);
+                status = cursor.getNext(key, data, null);
+                checkStatus((insertedKey == 2) ? OperationStatus.SUCCESS
+                                               : OperationStatus.NOTFOUND);
+            }
+        }),
+
+        /*
+         * T1 calls getPrev from K2 -- owns RANGE_READ on K2.
+         * T2 inserts K1 -- waits for RANGE_INSERT on K2.
+         * T3 calls getPrev from K2 -- requests RANGE_READ on K2: restarts.
+         */
+        new Spec("Prev", 2, 1, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getLast(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(2);
+                status = cursor.getPrev(key, data, null);
+                checkStatus((insertedKey == 1) ? OperationStatus.SUCCESS
+                                               : OperationStatus.NOTFOUND);
+            }
+        }),
+
+        /*
+         * NextDup, NextNoDup, PrevDup and PrevNoDup are not tested here.
+         * Restarts for these operations are implemented together with Next and
+         * Prev operations, so testing was skipped.
+         */
+    };
+
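+    /**
+     * A cursor read operation performed in steps B, D and F of each Spec;
+     * doOper is told which inserted key, and therefore which outcome, to
+     * expect.
+     */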
+    private static abstract class Oper {
+
+        PhantomRestartTest test;
+        boolean dups;
+        Cursor cursor;
+        DatabaseEntry key;
+        DatabaseEntry data;
+        OperationStatus status;
+
+        void init(PhantomRestartTest test, Cursor cursor) {
+            this.test = test;
+            this.cursor = cursor;
+            this.dups = test.dups;
+            this.key = new DatabaseEntry();
+            this.data = new DatabaseEntry();
+            this.status = null;
+        }
+
+        void checkStatus(OperationStatus expected) {
+            TestCase.assertEquals(expected, status);
+        }
+
+        void setKey(int val) {
+            if (dups) {
+                IntegerBinding.intToEntry(100, key);
+                IntegerBinding.intToEntry(val, data);
+            } else {
+                IntegerBinding.intToEntry(val, key);
+            }
+        }
+
+        void checkKey(int expected) {
+            if (dups) {
+                TestCase.assertEquals(100, IntegerBinding.entryToInt(key));
+                TestCase.assertEquals
+                    (expected, IntegerBinding.entryToInt(data));
+            } else {
+                TestCase.assertEquals
+                    (expected, IntegerBinding.entryToInt(key));
+            }
+        }
+
+        abstract void doOper(int insertedKey)
+            throws DatabaseException;
+    }
+
+    private static class Spec {
+
+        String name;
+        int insertKey1;
+        int insertKey2;
+        Oper oper;
+
+        Spec(String name, int insertKey1, int insertKey2, Oper oper) {
+            this.name = name;
+            this.insertKey1 = insertKey1;
+            this.insertKey2 = insertKey2;
+            this.oper = oper;
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < SPECS.length; i += 1) {
+            for (int j = 0; j < 2; j += 1) {
+                boolean dups = (j != 0);
+                suite.addTest(new PhantomRestartTest(SPECS[i], dups));
+            }
+        }
+        return suite;
+    }
+
+    private static final int MAX_INSERT_MILLIS = 5000;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private JUnitThread writerThread;
+    private JUnitThread readerThread;
+    private boolean dups;
+    private Spec spec;
+
+    public PhantomRestartTest(Spec spec, boolean dups) {
+        super(spec.name + (dups ? "-Dups" : ""));
+        this.spec = spec;
+        this.dups = dups;
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        db = null;
+
+        if (writerThread != null) {
+            while (writerThread.isAlive()) {
+                writerThread.interrupt();
+                Thread.yield();
+            }
+            writerThread = null;
+        }
+
+        if (readerThread != null) {
+            while (readerThread.isAlive()) {
+                readerThread.interrupt();
+                Thread.yield();
+            }
+            readerThread = null;
+        }
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setTxnSerializableIsolation(true);
+
+        /* Disable the daemons so they don't interfere with stats. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, "PhantomRestartTest", dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    public void runTest()
+        throws DatabaseException, InterruptedException {
+
+        openEnv();
+
+        /* T0 inserts first key. */
+        if (dups) {
+
+            /*
+             * Create a dup tree and delete it to avoid deadlocking.  Note that
+             * we have the compressor disabled to make this work.  Deadlocking
+             * occurs without a dup tree because insertion locks the sibling
+             * key when creating a dup tree from a single LN.  This extra
+             * locking throws off our test.
+             */
+            insert(100, 0);
+            insert(100, 1);
+            DatabaseEntry key = new DatabaseEntry();
+            IntegerBinding.intToEntry(100, key);
+            db.delete(null, key);
+
+            /* Insert the dup key we're testing with. */
+            insert(100, spec.insertKey1);
+        } else {
+            insert(spec.insertKey1, 0);
+        }
+
+        /* T1 performs Oper. */
+        Transaction readerTxn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        spec.oper.init(this, cursor);
+        spec.oper.doOper(spec.insertKey1);
+
+        /* T2 starts to insert second key, waits on T1. */
+        if (dups) {
+            startInsert(100, spec.insertKey2);
+        } else {
+            startInsert(spec.insertKey2, 0);
+        }
+
+        /* T3 performs Oper. */
+        startReadOper(spec.insertKey2);
+
+        /* Close T1 to allow T2 and T3 to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+        waitForReadOper();
+
+        /* T4 performs Oper again in this thread as a double-check. */
+        readerTxn = env.beginTransaction(null, null);
+        cursor = db.openCursor(readerTxn, null);
+        spec.oper.init(this, cursor);
+        spec.oper.doOper(spec.insertKey2);
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    /**
+     * Inserts the given key and data in a new transaction and commits it.
+     */
+    private void insert(int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        IntegerBinding.intToEntry(dataVal, data);
+        OperationStatus status;
+        Transaction writerTxn = env.beginTransaction(null, null);
+        try {
+            if (dups) {
+                status = db.putNoDupData(writerTxn, key, data);
+            } else {
+                status = db.putNoOverwrite(writerTxn, key, data);
+            }
+        } catch (DeadlockException e) {
+            writerTxn.abort();
+            throw e;
+        }
+        assertEquals(OperationStatus.SUCCESS, status);
+        writerTxn.commitNoSync();
+    }
+
+    /**
+     * Starts writer thread and waits for it to start the insert.
+     */
+    private void startInsert(final int keyVal, final int dataVal)
+        throws DatabaseException, InterruptedException {
+
+        LockStats origStats = env.getLockStats(null);
+
+        writerThread = new JUnitThread("Writer") {
+            public void testBody()
+                throws DatabaseException {
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                IntegerBinding.intToEntry(keyVal, key);
+                IntegerBinding.intToEntry(dataVal, data);
+                Transaction writerTxn = env.beginTransaction(null, null);
+                OperationStatus status;
+                if (dups) {
+                    status = db.putNoDupData(writerTxn, key, data);
+                } else {
+                    status = db.putNoOverwrite(writerTxn, key, data);
+                }
+                assertEquals(OperationStatus.SUCCESS, status);
+                writerTxn.commitNoSync();
+            }
+        };
+
+        writerThread.start();
+        waitForBlock(origStats);
+    }
+
+    /**
+     * Waits for the writer thread to finish.
+     */
+    private void waitForInsert() {
+
+        try {
+            writerThread.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        } finally {
+            writerThread = null;
+        }
+    }
+
+    /**
+     * Starts reader thread and waits for it to start the read operation.
+     */
+    private void startReadOper(final int operKeyParam)
+        throws DatabaseException, InterruptedException {
+
+        LockStats origStats = env.getLockStats(null);
+
+        readerThread = new JUnitThread("Reader") {
+            public void testBody()
+                throws DatabaseException {
+                Transaction readerTxn = env.beginTransaction(null, null);
+                Cursor cursor = db.openCursor(readerTxn, null);
+                spec.oper.init(PhantomRestartTest.this, cursor);
+                spec.oper.doOper(operKeyParam);
+                cursor.close();
+                readerTxn.commitNoSync();
+            }
+        };
+
+        readerThread.start();
+        waitForBlock(origStats);
+    }
+
+    /**
+     * Waits for a new locker to block waiting for a lock.
+     */
+    private void waitForBlock(LockStats origStats)
+        throws DatabaseException, InterruptedException {
+
+        long startTime = System.currentTimeMillis();
+        while (true) {
+
+            /* Give some time to the thread. */
+            Thread.yield();
+            Thread.sleep(10);
+            if (System.currentTimeMillis() - startTime > MAX_INSERT_MILLIS) {
+                fail("Timeout");
+            }
+
+            /* Wait for the operation to block. */
+            LockStats stats = env.getLockStats(null);
+            if (stats.getNWaiters() > origStats.getNWaiters()) {
+                break;
+            }
+        }
+    }
+
+    /**
+     * Waits for the reader thread to finish.
+     */
+    private void waitForReadOper() {
+
+        try {
+            readerThread.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        } finally {
+            readerThread = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/PhantomTest.java b/test/com/sleepycat/je/test/PhantomTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..6672ac347c2bcc887ca6f5f12d4917a142f768e3
--- /dev/null
+++ b/test/com/sleepycat/je/test/PhantomTest.java
@@ -0,0 +1,3080 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PhantomTest.java,v 1.16.2.3 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests phantom prevention (range locking) added in SR [#10477].
+ *
+ * <p>We test that with a serializable txn, range locking will prevent phantoms
+ * from appearing.  We also test that phantoms *do* appear for non-serializable
+ * isolation levels.  These include read-uncommitted, read-committed and
+ * repeatable-read.</p>
+ *
+ * <p>Test method names have the suffix _Success or _NotFound depending on
+ * whether they're testing a read operation with a SUCCESS or NOTFOUND outcome.
+ * If they're testing duplicates, the _Dup suffix is also added.  Finally, a
+ * suffix is added for the isolation level at run time.</p>
+ *
+ * <p>All tests are for the case where the reader txn locks a range and then
+ * the writer txn tries to insert into the locked range.  The reverse (where
+ * the writer inserts first) works without range locking because the reader
+ * will block on the inserted key, so we don't test that here.</p>
+ *
+ * <p>We test all read operations with and without duplicates (with duplicates
+ * the test name has _Dup appended) except for the following cases which are
+ * meaningless without duplicates because get{Next,Prev}Dup always return
+ * NOTFOUND when duplicates are not configured:
+ * testGetNextDup_Success, testGetNextDup_NotFound,
+ * testGetPrevDup_Success, testGetPrevDup_NotFound.</p>
+ */
+public class PhantomTest extends TestCase {
+
+    private static final TransactionConfig READ_UNCOMMITTED_CONFIG
+                                           = new TransactionConfig();
+    private static final TransactionConfig READ_COMMITTED_CONFIG
+                                           = new TransactionConfig();
+    private static final TransactionConfig REPEATABLE_READ_CONFIG
+                                           = new TransactionConfig();
+    private static final TransactionConfig SERIALIZABLE_CONFIG
+                                           = new TransactionConfig();
+    static {
+        READ_UNCOMMITTED_CONFIG.setReadUncommitted(true);
+        READ_COMMITTED_CONFIG.setReadCommitted(true);
+        SERIALIZABLE_CONFIG.setSerializableIsolation(true);
+    }
+    private static final TransactionConfig[] TXN_CONFIGS = {
+        READ_UNCOMMITTED_CONFIG,
+        READ_COMMITTED_CONFIG,
+        REPEATABLE_READ_CONFIG,
+        SERIALIZABLE_CONFIG,
+    };
+
+    private static final String DB_NAME = "PhantomTest";
+
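+    /**
+     * Runs every test method once per isolation level in TXN_CONFIGS; the
+     * level is appended to the test name in tearDown.
+     */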
+    public static Test suite() {
+        TestSuite all = new TestSuite();
+        for (int i = 0; i < TXN_CONFIGS.length; i += 1) {
+            TestSuite suite = new TestSuite(PhantomTest.class);
+            Enumeration e = suite.tests();
+            while (e.hasMoreElements()) {
+                PhantomTest test = (PhantomTest) e.nextElement();
+                test.init(TXN_CONFIGS[i]);
+                all.addTest(test);
+            }
+        }
+        return all;
+    }
+
+    private static final int MAX_INSERT_MILLIS = 5000;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private TransactionConfig txnConfig;
+    private JUnitThread writerThread;
+    private boolean txnSerializable;
+    private boolean dups;
+    private boolean insertFinished;
+
+    public PhantomTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    private void init(TransactionConfig txnConfig) {
+        this.txnConfig = txnConfig;
+        txnSerializable = (txnConfig == SERIALIZABLE_CONFIG);
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        String txnType;
+        if (txnConfig == SERIALIZABLE_CONFIG) {
+            txnType = "-Serializable";
+        } else if (txnConfig == REPEATABLE_READ_CONFIG) {
+            txnType = "-RepeatableRead";
+        } else if (txnConfig == READ_COMMITTED_CONFIG) {
+            txnType = "-ReadCommitted";
+        } else if (txnConfig == READ_UNCOMMITTED_CONFIG) {
+            txnType = "-ReadUncommitted";
+        } else {
+            throw new IllegalStateException();
+        }
+        setName(getName() + txnType);
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+        db = null;
+
+        if (writerThread != null) {
+            while (writerThread.isAlive()) {
+                writerThread.interrupt();
+                Thread.yield();
+            }
+            writerThread = null;
+        }
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv(boolean dups)
+        throws DatabaseException {
+
+        openEnv(dups, null);
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv(boolean dups, EnvironmentConfig envConfig)
+        throws DatabaseException {
+
+        this.dups = dups;
+        if (envConfig == null) {
+            envConfig = TestUtils.initEnvConfig();
+            /* Control over isolation level is required by this test. */
+            TestUtils.clearIsolationLevel(envConfig);
+        }
+
+        /* Disable the daemons so they don't interfere with stats. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    public void testGetSearchKey_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getSearchKey returns key 2. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+
+        /* Insertions are never blocked. */
+        try {
+            insert(1);
+            insert(3);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        cursor.close();
+        readerTxn.commitNoSync();
+        closeEnv();
+    }
+
+    public void testGetSearchKey_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+
+        /* Insert dups. */
+        insert(1, 2);
+        insert(1, 3);
+
+        /* getSearchKey returns key {1,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 2));
+
+        /* Insertions are never blocked. */
+        try {
+            insert(1, 1);
+            insert(1, 4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        cursor.close();
+        readerTxn.commitNoSync();
+        closeEnv();
+    }
+
+    public void testGetSearchKey_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getSearchKey for key 2 returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 2));
+
+        /* Insertions before 2 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getSearchKey should return NOTFOUND again;
+         * otherwise getSearchKey should see key 2.
+         */
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 2));
+        } else {
+            assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKey returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchKey_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+
+        /* Insert dups. */
+        insert(2, 1);
+        insert(2, 2);
+
+        /* getSearchKey for {1,1} returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 1, 1));
+
+        /* Insertions after {2,2} are never blocked. */
+        try {
+            insert(2, 3);
+            insert(3, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,1} in a writer thread. */
+        startInsert(1, 1);
+
+        /*
+         * If serializable, getSearchKey should return NOTFOUND again;
+         * otherwise getSearchKey should see {1,1}.
+         */
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 1, 1));
+        } else {
+            assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKey returns {1,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchBoth_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getSearchBoth returns {2,0}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 0));
+
+        /* Insertions are never blocked. */
+        try {
+            insert(1);
+            insert(3);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        cursor.close();
+        readerTxn.commitNoSync();
+        closeEnv();
+    }
+
+    public void testGetSearchBoth_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getSearchBoth returns key {1,3}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+
+        /* Insertions are never blocked. */
+        try {
+            insert(0, 0);
+            insert(1, 0);
+            insert(1, 2);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        cursor.close();
+        readerTxn.commitNoSync();
+        closeEnv();
+    }
+
+    public void testGetSearchBoth_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getSearchBoth for key 2 returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 2));
+
+        /* Insertions before 2 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getSearchBoth should return NOTFOUND again;
+         * otherwise getSearchBoth should see key 2.
+         */
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 2));
+        } else {
+            assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchBoth returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchBoth_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getSearchBoth for {1,2} returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 1, 2));
+
+        /* Insertions before {1,2} or after {1,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,2} in a writer thread. */
+        startInsert(1, 2);
+
+        /*
+         * If serializable, getSearchBoth should return NOTFOUND again;
+         * otherwise getSearchBoth should see {1,2}.
+         */
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 1, 2));
+        } else {
+            assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchBoth returns {1,2}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchKeyRange_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1 and 3. */
+        insert(1);
+        insert(3);
+
+        /* getSearchKeyRange for key 2 returns key 3. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+
+        /* Insertions before 2 and after 3 are never blocked. */
+        try {
+            insert(0);
+            insert(4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getSearchKeyRange should return key 3 again;
+         * otherwise getSearchKeyRange should see key 2.
+         */
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKeyRange returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchKeyRange_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+        insert(3, 2);
+        insert(3, 3);
+
+        /* getSearchKeyRange for key 2 returns {3,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Insertions before 2 and after {3,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(3, 4);
+            insert(4, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {3,1} in a writer thread. */
+        startInsert(3, 1);
+
+        /*
+         * If serializable, getSearchKeyRange should return {3,2} again;
+         * otherwise getSearchKeyRange should see {3,1}.
+         */
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKeyRange returns {3,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchKeyRange_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getSearchKeyRange for key 2 returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before 2 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 3 in a writer thread. */
+        startInsert(3);
+
+        /*
+         * If serializable, getSearchKeyRange should return NOTFOUND again;
+         * otherwise getSearchKeyRange should see key 3.
+         */
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(3, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKeyRange returns key 3. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchKeyRange_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+
+        /* getSearchKeyRange for key 2 returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before 2 are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {3,1} in a writer thread. */
+        startInsert(3, 1);
+
+        /*
+         * If serializable, getSearchKeyRange should return NOTFOUND again;
+         * otherwise getSearchKeyRange should see {3,1}.
+         */
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchKeyRange returns {3,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    /*
+     * A testGetSearchBothRange_Success test case is not possible because a
+     * duplicate cannot be inserted, when only one LN for the key already
+     * exists, without locking that existing LN.  Therefore, the insert thread
+     * will deadlock with the reader thread, which has the existing LN locked.
+     * This is a testing anomaly, not a bug; the sketch below illustrates the
+     * sequence such a test would require.
+     */
+
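+    /*
+     * The following is an illustrative sketch only, not a test that is run:
+     * the method name does not begin with "test", so JUnit skips it, and the
+     * key/data values are arbitrary.  It shows roughly what such a test
+     * would have to do and where the blocking described above occurs.
+     */
+    public void xxGetSearchBothRange_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert a single LN for key 1. */
+        insert(1, 2);
+
+        /* getSearchBothRange for {1,0} returns {1,2} and locks that LN. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(1, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /*
+         * Inserting {1,1} requires locking the existing {1,2} LN, which the
+         * reader holds, so the writer thread waits on the reader in both
+         * isolation modes.  A second read here therefore could not
+         * distinguish serializable from non-serializable behavior.
+         */
+        startInsert(1, 1);
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        closeEnv();
+    }
+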
+    public void testGetSearchBothRange_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+        insert(3, 2);
+        insert(3, 3);
+
+        /* getSearchBothRange for {3,0} returns {3,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {3,2} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(3, 4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {3,1} in a writer thread. */
+        startInsert(3, 1);
+
+        /*
+         * If serializable, getSearchBothRange should return {3,2} again;
+         * otherwise getSearchBothRange should see {3,1}.
+         */
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchBothRange returns {3,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchBothRange_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getSearchBothRange for {3,0} returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before 1 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {3,1} in a writer thread. */
+        startInsert(3, 1);
+
+        /*
+         * If serializable, getSearchBothRange should return NOTFOUND again;
+         * otherwise getSearchBothRange should see key 3.
+         */
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchBothRange returns key 3. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetSearchBothRange_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(3, 0);
+        insert(3, 1);
+
+        /* getSearchBothRange for {3,2} returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(2, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before {3,0} are never blocked. */
+        try {
+            insert(3, -1);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {3,3} in a writer thread. */
+        startInsert(3, 3);
+
+        /*
+         * If serializable, getSearchBothRange should return NOTFOUND again;
+         * otherwise getSearchBothRange should see {3,3}.
+         */
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(2, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getSearchBothRange returns {3,3}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(2, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetFirst_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getFirst returns key 2. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+
+        /* Insertions after 2 are never blocked. */
+        try {
+            insert(3);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 1 in a writer thread. */
+        startInsert(1);
+
+        /*
+         * If serializable, getFirst should return key 2 again; otherwise
+         * getFirst should see key 1.
+         */
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getFirst returns key 1. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetFirst_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 2);
+        insert(1, 3);
+
+        /* getFirst returns {1,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* Insertions after {1,3} are never blocked. */
+        try {
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,1} in a writer thread. */
+        startInsert(1, 1);
+
+        /*
+         * If serializable, getFirst should return {1,2} again; otherwise
+         * getFirst should see {1,1}.
+         */
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getFirst returns {1,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetFirst_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* getFirst returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insert key 1 in a writer thread. */
+        startInsert(1);
+
+        /*
+         * If serializable, getFirst should return NOTFOUND again; otherwise
+         * getFirst should see key 1.
+         */
+        status = cursor.getFirst(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getFirst returns key 1. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetFirst_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* getFirst returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insert {1,1} in a writer thread. */
+        startInsert(1, 1);
+
+        /*
+         * If serializable, getFirst should return NOTFOUND again; otherwise
+         * getFirst should see {1,1}.
+         */
+        status = cursor.getFirst(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getFirst returns {1,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetLast_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getLast returns key 1. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+
+        /* Insertions before current position are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getLast should return key 1 again; otherwise
+         * getLast should see key 2.
+         */
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getLast returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetLast_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 0);
+        insert(1, 2);
+
+        /* getLast returns {1,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* Insertions before current position are never blocked. */
+        try {
+            insert(1, 1);
+            insert(0, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,3} in a writer thread. */
+        startInsert(1, 3);
+
+        /*
+         * If serializable, getLast should return {1,2} again; otherwise
+         * getLast should see {1,3}.
+         */
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getLast returns {1,3}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetLast_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* getLast returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insert key 1 in a writer thread. */
+        startInsert(1);
+
+        /*
+         * If serializable, getLast should return NOTFOUND again; otherwise
+         * getLast should see key 1.
+         */
+        status = cursor.getLast(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getLast returns key 1. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetLast_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* getLast returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insert {1,1} in a writer thread. */
+        startInsert(1, 1);
+
+        /*
+         * If serializable, getLast should return NOTFOUND again; otherwise
+         * getLast should see {1,1}.
+         */
+        status = cursor.getLast(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getLast returns {1,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        status = cursor.getLast(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNext_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert keys 1 and 3. */
+        insert(1);
+        insert(3);
+
+        /* getNext returns key 3. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+
+        /* Insertions before 1 and after 3 are never blocked. */
+        try {
+            insert(0);
+            insert(4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getNext should return key 3 again; otherwise
+         * getNext should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNext returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNext_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getNext returns {1,3}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {1,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,2} in a writer thread. */
+        startInsert(1, 2);
+
+        /*
+         * If serializable, getNext should return {1,3} again; otherwise
+         * getNext should see {1,2}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNext returns {1,2}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNext_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getNext returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before 1 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getNext should return NOTFOUND again; otherwise
+         * getNext should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNext returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNext_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+
+        /* getNext returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before {1,1} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,3} in a writer thread. */
+        startInsert(1, 3);
+
+        /*
+         * If serializable, getNext should return NOTFOUND again; otherwise
+         * getNext should see {1,3}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNext(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNext returns {1,3}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNext(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextDup_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getNextDup returns {1,3}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {1,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,2} in a writer thread. */
+        startInsert(1, 2);
+
+        /*
+         * If serializable, getNextDup should return {1,3} again; otherwise
+         * getNextDup should see {1,2}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextDup returns {1,2}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextDup_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+        insert(2, 1);
+        insert(2, 2);
+
+        /* getNextDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNextDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before {1,1} and after {2,2} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(2, 3);
+            insert(3, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,3} in a writer thread. */
+        startInsert(1, 3);
+
+        /*
+         * If serializable, getNextDup should return NOTFOUND again; otherwise
+         * getNextDup should see {1,3}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNextDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(3, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextDup returns {1,3}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2));
+        status = cursor.getNextDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(3, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextNoDup_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert keys 1 and 3. */
+        insert(1);
+        insert(3);
+
+        /* getNextNoDup returns key 3. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+
+        /* Insertions before 1 and after 3 are never blocked. */
+        try {
+            insert(0);
+            insert(4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getNextNoDup should return key 3 again; otherwise
+         * getNextNoDup should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextNoDup returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextNoDup_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+        insert(3, 1);
+        insert(3, 2);
+
+        /* getNextNoDup returns {3,1}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {3,2} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(3, 3);
+            insert(4, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {2,1} in a writer thread. */
+        startInsert(2, 1);
+
+        /*
+         * If serializable, getNextNoDup should return {3,1} again; otherwise
+         * getNextNoDup should see {2,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextNoDup returns {2,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextNoDup_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1. */
+        insert(1);
+
+        /* getNextNoDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before 1 are never blocked. */
+        try {
+            insert(0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getNextNoDup should return NOTFOUND again;
+         * otherwise getNextNoDup should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextNoDup returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetNextNoDup_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+
+        /* getNextNoDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions before {1,1} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {2,1} in a writer thread. */
+        startInsert(2, 1);
+
+        /*
+         * If serializable, getNextNoDup should return NOTFOUND again;
+         * otherwise getNextNoDup should see {2,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(2, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getNextNoDup returns {2,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        status = cursor.getNextNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrev_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert keys 1 and 3. */
+        insert(1);
+        insert(3);
+
+        /* getPrev returns key 1. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+
+        /* Insertions before 1 and after 3 are never blocked. */
+        try {
+            insert(0);
+            insert(4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getPrev should return key 1 again; otherwise
+         * getPrev should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrev returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrev_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getPrev returns {1,1}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {1,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,2} in a writer thread. */
+        startInsert(1, 2);
+
+        /*
+         * If serializable, getPrev should return {1,1} again; otherwise
+         * getPrev should see {1,2}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrev returns {1,2}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrev_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getPrev returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions after 2 are never blocked. */
+        try {
+            insert(3);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 1 in a writer thread. */
+        startInsert(1);
+
+        /*
+         * If serializable, getPrev should return NOTFOUND again; otherwise
+         * getPrev should see key 1.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrev(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrev returns key 1. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrev_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(2, 2);
+        insert(2, 3);
+
+        /* getPrev returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions after {2,3} are never blocked. */
+        try {
+            insert(2, 4);
+            insert(3, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {2,1} in a writer thread. */
+        startInsert(2, 1);
+
+        /*
+         * If serializable, getPrev should return NOTFOUND again; otherwise
+         * getPrev should see {2,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrev(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(2, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrev returns {2,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrev(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevDup_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 3);
+
+        /* getPrevDup returns {1,1}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrevDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {1,3} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(1, 4);
+            insert(2, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,2} in a writer thread. */
+        startInsert(1, 2);
+
+        /*
+         * If serializable, getPrevDup should return {1,1} again; otherwise
+         * getPrevDup should see {1,2}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrevDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevDup returns {1,2}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3));
+        status = cursor.getPrevDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevDup_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(2, 2);
+        insert(2, 3);
+
+        /* getPrevDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions after {2,3} are never blocked. */
+        try {
+            insert(2, 4);
+            insert(3, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {2,1} in a writer thread. */
+        startInsert(2, 1);
+
+        /*
+         * If serializable, getPrevDup should return NOTFOUND again; otherwise
+         * getPrevDup should see {2,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(2, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevDup returns {2,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevNoDup_Success()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 1 and 3. */
+        insert(1);
+        insert(3);
+
+        /* getPrevNoDup returns key 1. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+
+        /* Insertions before 1 and after 3 are never blocked. */
+        try {
+            insert(0);
+            insert(4);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 2 in a writer thread. */
+        startInsert(2);
+
+        /*
+         * If serializable, getPrevNoDup should return key 1 again; otherwise
+         * getPrevNoDup should see key 2.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevNoDup returns key 2. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevNoDup_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 0);
+        insert(1, 2);
+        insert(3, 1);
+        insert(3, 2);
+
+        /* getPrevNoDup returns {1,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,2} and after {3,2} are never blocked. */
+        try {
+            insert(1, 1);
+            insert(0, 0);
+            insert(3, 3);
+            insert(4, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {2,1} in a writer thread. */
+        startInsert(2, 1);
+
+        /*
+         * If serializable, getPrevNoDup should return {1,2} again; otherwise
+         * getPrevNoDup should see {2,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(2, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevNoDup returns {2,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevNoDup_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getPrevNoDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions after 2 are never blocked. */
+        try {
+            insert(3);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert key 1 in a writer thread. */
+        startInsert(1);
+
+        /*
+         * If serializable, getPrevNoDup should return NOTFOUND again;
+         * otherwise getPrevNoDup should see key 1.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevNoDup returns key 1. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testGetPrevNoDup_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(2, 1);
+        insert(2, 2);
+
+        /* getPrevNoDup returns NOTFOUND. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+
+        /* Insertions after {2,2} are never blocked. */
+        try {
+            insert(2, 3);
+            insert(3, 0);
+        } catch (DeadlockException e) {
+            fail();
+        }
+
+        /* Insert {1,1} in a writer thread. */
+        startInsert(1, 1);
+
+        /*
+         * If serializable, getPrevNoDup should return NOTFOUND again;
+         * otherwise getPrevNoDup should see {1,1}.
+         */
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        if (txnSerializable) {
+            assertEquals(OperationStatus.NOTFOUND, status);
+        } else {
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(1, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commitNoSync();
+        waitForInsert();
+
+        /* getPrevNoDup returns {1,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2));
+        status = cursor.getPrevNoDup(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(1, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    public void testIllegalTransactionConfig()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+        TransactionConfig config = new TransactionConfig();
+        config.setSerializableIsolation(true);
+        config.setReadUncommitted(true);
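+        /*
+         * Serializable isolation and read-uncommitted are conflicting
+         * settings, so beginTransaction must reject this config.
+         */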
+        try {
+            Transaction txn = env.beginTransaction(null, config);
+            txn.abort();
+            fail();
+        } catch (IllegalArgumentException expected) {
+        }
+        closeEnv();
+    }
+
+    /*
+     * In other tests we test TransactionConfig.setReadUncommitted and
+     * TransactionConfig.setSerializableIsolation to make sure they result in
+     * expected non-serializable or serializable behavior.  Below we check
+     * EnvironmentConfig.setSerializableIsolation,
+     * CursorConfig.setSerializableIsolation, CursorConfig.setReadUncommitted
+     * and LockMode.READ_UNCOMMITTED, although for a single test case only.
+     */
+
+    public void testEnvironmentConfig()
+        throws DatabaseException, InterruptedException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+        /* Control over isolation level is required by this test. */
+        TestUtils.clearIsolationLevel(config);
+        checkSerializable(false, config, null, null);
+
+        config.setTxnSerializableIsolation(true);
+        checkSerializable(true, config, null, null);
+    }
+
+    public void testCursorConfig()
+        throws DatabaseException, InterruptedException {
+
+        CursorConfig config = new CursorConfig();
+        checkSerializable(false, null, config, null);
+
+        config.setReadUncommitted(true);
+        checkSerializable(false, null, config, null);
+    }
+
+    public void testReadUncommittedLockMode()
+        throws DatabaseException, InterruptedException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        /* Control over isolation level is required by this test. */
+        TestUtils.clearIsolationLevel(envConfig);
+        envConfig.setTxnSerializableIsolation(true);
+
+        checkSerializable(false, envConfig, null, LockMode.READ_UNCOMMITTED);
+    }
+
+    private void checkSerializable(boolean expectSerializable,
+                                   EnvironmentConfig envConfig,
+                                   CursorConfig cursorConfig,
+                                   LockMode lockMode)
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false, envConfig);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key 2. */
+        insert(2);
+
+        /* getFirst returns key 2. */
+        Transaction readerTxn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(readerTxn, cursorConfig);
+        status = cursor.getFirst(key, data, lockMode);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+
+        /* Should deadlock iff serializable. */
+        try {
+            insert(1);
+            assertFalse(expectSerializable);
+        } catch (DeadlockException e) {
+            assertTrue(expectSerializable);
+        }
+
+        cursor.close();
+        readerTxn.commit();
+
+        /* This method is called multiple times so remove the database. */
+        db.close();
+        db = null;
+        env.removeDatabase(null, DB_NAME);
+
+        closeEnv();
+    }
+
+    /**
+     * Tests that with a single degree 3 (serializable) txn we don't obtain
+     * the extra lock during insert.
+     */
+    public void testSingleDegree3TxnOptimization()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(false);
+
+        /* Insert key 2. */
+        insert(2);
+
+        StatsConfig clearStats = new StatsConfig();
+        clearStats.setClear(true);
+
+        /* Clear before inserting. */
+        LockStats stats = env.getLockStats(clearStats);
+
+        /* Insert key 1, which would lock key 2 while inserting. */
+        insert(1);
+
+        /* Expect a single lock was requested. */
+        stats = env.getLockStats(clearStats);
+        assertEquals(1, stats.getNRequests());
+
+        closeEnv();
+    }
+
+    /**
+     * Tests a particular getSearchBothRange bug that has come up in several
+     * contexts.  This test is probably redundant with GetSearchBothTest but
+     * I've left it here for good measure.
+     */
+    public void testSingleDatumBug()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        insert(1, 1);
+        insert(2, 2);
+
+        /* getSearchBothRange for {2, 1} returns {2, 2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        IntegerBinding.intToEntry(1, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(2, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* If serializable, inserting in the locked range should deadlock. */
+        try {
+            insert(1, 2);
+            if (txnSerializable) {
+                fail();
+            }
+        } catch (DeadlockException e) {
+            if (!txnSerializable) {
+                fail();
+            }
+        }
+
+        cursor.close();
+        readerTxn.commitNoSync();
+        closeEnv();
+    }
+
+    /**
+     * Tests that searchKey returns SUCCESS when it must skip over a deleted
+     * duplicate.  This did not work at one point and was causing warnings
+     * (Cursor Not Initialized) in duplicate.conf testing.
+     */
+    public void testSearchKeySkipDeletedDup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+
+        /* Insert {1,1} and {1,2}. */
+        insert(1, 1);
+        insert(1, 2);
+
+        /* Delete {1,1}. */
+        Transaction txn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(txn, null);
+        assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1));
+        OperationStatus status = cursor.delete();
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Search for key 1 -- should not return NOTFOUND. */
+        assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 2));
+
+        cursor.close();
+        txn.commitNoSync();
+        closeEnv();
+    }
+
+    /**
+     * Performs getSearchKey on the given key, expects data to be zero.
+     */
+    private OperationStatus searchKey(Cursor cursor, int keyVal)
+        throws DatabaseException {
+
+        return searchKey(cursor, keyVal, 0);
+    }
+
+    /**
+     * Performs getSearchKey on the given key, expects given data value.
+     */
+    private OperationStatus searchKey(Cursor cursor, int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        OperationStatus status = cursor.getSearchKey(key, data, null);
+        if (status == OperationStatus.SUCCESS) {
+            assertEquals(keyVal, IntegerBinding.entryToInt(key));
+            assertEquals(dataVal, IntegerBinding.entryToInt(data));
+        }
+        return status;
+    }
+
+    /**
+     * Performs getSearchBoth on the given key and zero data.
+     */
+    private OperationStatus searchBoth(Cursor cursor, int keyVal)
+        throws DatabaseException {
+
+        return searchBoth(cursor, keyVal, 0);
+    }
+
+    /**
+     * Performs getSearchBoth on the given key and data.
+     */
+    private OperationStatus searchBoth(Cursor cursor, int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        IntegerBinding.intToEntry(dataVal, data);
+        OperationStatus status = cursor.getSearchBoth(key, data, null);
+        if (status == OperationStatus.SUCCESS) {
+            assertEquals(keyVal, IntegerBinding.entryToInt(key));
+            assertEquals(dataVal, IntegerBinding.entryToInt(data));
+        }
+        return status;
+    }
+
+    /**
+     * Inserts the given key in a new transaction and commits it.
+     */
+    private void insert(int keyVal)
+        throws DatabaseException {
+
+        insert(keyVal, 0);
+    }
+
+    /**
+     * Inserts the given key and data in a new transaction and commits it.
+     */
+    private void insert(int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        IntegerBinding.intToEntry(dataVal, data);
+        OperationStatus status;
+        Transaction writerTxn = env.beginTransaction(null, txnConfig);
+        try {
+            if (dups) {
+                status = db.putNoDupData(writerTxn, key, data);
+            } else {
+                status = db.putNoOverwrite(writerTxn, key, data);
+            }
+        } catch (DeadlockException e) {
+            writerTxn.abort();
+            throw e;
+        }
+        assertEquals(OperationStatus.SUCCESS, status);
+        writerTxn.commitNoSync();
+    }
+
+    /**
+     * Starts writer thread and waits for it to start the insert.
+     */
+    private void startInsert(final int keyVal)
+        throws DatabaseException, InterruptedException {
+
+        startInsert(keyVal, 0);
+    }
+
+    /**
+     * Starts writer thread and waits for it to start the insert.
+     */
+    private void startInsert(final int keyVal, final int dataVal)
+        throws DatabaseException, InterruptedException {
+
+        LockStats origStats = env.getLockStats(null);
+        insertFinished = false;
+
+        writerThread = new JUnitThread("Writer") {
+            public void testBody()
+                throws DatabaseException {
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                OperationStatus status;
+                IntegerBinding.intToEntry(keyVal, key);
+                IntegerBinding.intToEntry(dataVal, data);
+                Transaction writerTxn = env.beginTransaction(null, txnConfig);
+                if (dups) {
+                    status = db.putNoDupData(writerTxn, key, data);
+                } else {
+                    status = db.putNoOverwrite(writerTxn, key, data);
+                }
+                assertEquals(OperationStatus.SUCCESS, status);
+                writerTxn.commitNoSync();
+                insertFinished = true;
+            }
+        };
+
+        writerThread.start();
+
+        long startTime = System.currentTimeMillis();
+        while (true) {
+
+            /* Give some time to the writer thread. */
+            Thread.yield();
+            Thread.sleep(10);
+            if (System.currentTimeMillis() - startTime > MAX_INSERT_MILLIS) {
+                fail("Timeout doing insert");
+            }
+
+            if (txnSerializable) {
+
+                /* Wait for the insert to block. */
+                LockStats stats = env.getLockStats(null);
+                if (stats.getNWaiters() > origStats.getNWaiters()) {
+                    break;
+                }
+            } else {
+
+                /* Wait for the operation to complete. */
+                if (insertFinished) {
+                    insertFinished = false;
+                    break;
+                }
+            }
+        }
+    }
+
+    /**
+     * Waits for the writer thread to finish.
+     */
+    private void waitForInsert() {
+
+        try {
+            writerThread.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        } finally {
+            writerThread = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/SR11297Test.java b/test/com/sleepycat/je/test/SR11297Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..b307620ccb76492b0684229c99aba01024021e2a
--- /dev/null
+++ b/test/com/sleepycat/je/test/SR11297Test.java
@@ -0,0 +1,197 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SR11297Test.java,v 1.10.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Fix for SR11297.  When the first BIN in the database was empty,
+ * CursorImpl.positionFirstOrLast(true, null) was returning false, causing
+ * Cursor.getFirst to return NOTFOUND.  This test reproduces that problem by
+ * creating a database with the first BIN empty and the second BIN non-empty.
+ *
+ * <p>A specific sequence where partial compression takes place is necessary to
+ * reproduce the problem.  A duplicate is added as the first entry in the first
+ * BIN, then that BIN is filled and one entry is added to the next BIN.  Then
+ * all records in the first BIN are deleted.  compress() is called once, which
+ * deletes the duplicate tree and all entries in the first BIN, but the first
+ * BIN will not be deleted until the next compression.  At that point in time,
+ * getFirst failed to find the record in the second BIN.</p>
+ */
+public class SR11297Test extends TestCase {
+
+    /* Minimum child entries per BIN. */
+    private static int N_ENTRIES = 4;
+
+    private static CheckpointConfig forceCheckpoint = new CheckpointConfig();
+    static {
+        forceCheckpoint.setForce(true);
+    }
+
+    private File envHome;
+    private Environment env;
+
+    public SR11297Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            //*
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+            //*/
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        envHome = null;
+        env = null;
+    }
+
+    private void openEnv()
+        throws DatabaseException, IOException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setAllowCreate(true);
+        /* Make as small a log as possible to save space in CVS. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        /* Use a 100 MB log file size to ensure only one file is written. */
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 Integer.toString(100 * (1 << 20)));
+        /* Force BINDelta. */
+        envConfig.setConfigParam
+            (EnvironmentParams.BIN_DELTA_PERCENT.getName(),
+             Integer.toString(75));
+        /* Force INDelete. */
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(),
+             Integer.toString(N_ENTRIES));
+        env = new Environment(envHome, envConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        env.close();
+        env = null;
+    }
+
+    public void test11297()
+        throws DatabaseException, IOException {
+
+        openEnv();
+
+        /* Write db0 and db1. */
+        for (int i = 0; i < 2; i += 1) {
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setSortedDuplicates(true);
+            Database db = env.openDatabase(null, "db" + i, dbConfig);
+
+            /* Write: {0, 0}, {0, 1}, {1, 0}, {2, 0}, {3, 0} */
+            for (int j = 0; j < N_ENTRIES; j += 1) {
+                db.put(null, entry(j), entry(0));
+            }
+            db.put(null, entry(0), entry(1));
+
+            /* Delete everything but the last record. */
+            for (int j = 0; j < N_ENTRIES - 1; j += 1) {
+                db.delete(null, entry(j));
+            }
+
+            db.close();
+        }
+
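+        /*
+         * getFirst must succeed both before the compress, while the first BIN
+         * still holds deleted entries, and after it, when the first BIN is
+         * empty but not yet deleted (the case that used to return NOTFOUND).
+         */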
+        checkFirstRecord();
+        env.compress();
+        checkFirstRecord();
+
+        closeEnv();
+    }
+
+    /**
+     * First and only record in db1 should be {3,0}.
+     */
+    private void checkFirstRecord()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, "db1", dbConfig);
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status = cursor.getFirst(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, value(key));
+        assertEquals(0, value(data));
+        cursor.close();
+        db.close();
+    }
+
+    static DatabaseEntry entry(int val) {
+
+        byte[] data = new byte[] { (byte) val };
+        return new DatabaseEntry(data);
+    }
+
+    static int value(DatabaseEntry entry) {
+
+        byte[] data = entry.getData();
+        if (data.length != 1) {
+            throw new IllegalStateException("len=" + data.length);
+        }
+        return data[0];
+    }
+}
diff --git a/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java b/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..551d34ab7733905c2d79e6d2145033a3c37b133a
--- /dev/null
+++ b/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryDirtyReadTest.java,v 1.17.2.2 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.junit.JUnitMethodThread;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests for multithreading problems when using read-uncommitted with
+ * secondaries.  If a primary record is updated while performing a
+ * read-uncommitted (in between reading the secondary and the primary), we need
+ * to be sure that we don't return inconsistent results to the user.  For
+ * example, we should not return a primary data value that no longer contains
+ * the secondary key.  We also need to ensure that deleting a primary record in
+ * the middle of a secondary read does not appear as a corrupt secondary.  In
+ * both of these cases it should appear that the record does not exist, from
+ * the viewpoint of an application using a cursor.
+ *
+ * <p>These tests create two threads, one reading and the other deleting or
+ * updating.  The intention is for the reading thread and the delete/update
+ * thread to race in operating on the same key (nextKey).  If the reading
+ * thread reads the secondary, then the other thread deletes the primary, and
+ * then the reading thread tries to read the primary, we've accomplished our
+ * goal.  Before that case was handled in SecondaryCursor, that situation
+ * would cause a "secondary corrupt" exception.</p>
+ */
+public class SecondaryDirtyReadTest extends MultiKeyTxnTestCase {
+
+    private static final int MAX_KEY = 1000;
+
+    public static Test suite() {
+        return multiKeyTxnTestSuite(SecondaryDirtyReadTest.class, null,
+                                    null);
+    }
+
+    private int nextKey;
+    private Database priDb;
+    private SecondaryDatabase secDb;
+    private LockMode lockMode = LockMode.READ_UNCOMMITTED;
+
+    /**
+     * Closes databases, then calls the super.tearDown to close the env.
+     */
+    public void tearDown()
+        throws Exception {
+
+        if (secDb != null) {
+            try {
+                secDb.close();
+            } catch (Exception e) {}
+            secDb = null;
+        }
+        if (priDb != null) {
+            try {
+                priDb.close();
+            } catch (Exception e) {}
+            priDb = null;
+        }
+        super.tearDown();
+    }
+
+    /**
+     * Tests that deleting primary records does not cause secondary
+     * read-uncommitted to throw a "secondary corrupt" exception.
+     */
+    public void testDeleteWhileReadingByKey()
+        throws Throwable {
+
+        doTest("runReadUncommittedByKey", "runPrimaryDelete");
+    }
+
+    /**
+     * Same as testDeleteWhileReadingByKey but does a scan.  Read-uncommitted
+     * for scan and keyed reads are implemented differently, since scanning
+     * moves to the next record when a deletion is detected while a keyed read
+     * returns NOTFOUND.
+     */
+    public void testDeleteWhileScanning()
+        throws Throwable {
+
+        doTest("runReadUncommittedScan", "runPrimaryDelete");
+    }
+
+    /**
+     * Tests that updating primary records, to cause deletion of the secondary
+     * key record, does not cause secondary read-uncommitted to return
+     * inconsistent data (a primary datum without a secondary key value).
+     */
+    public void testUpdateWhileReadingByKey()
+        throws Throwable {
+
+        doTest("runReadUncommittedByKey", "runPrimaryUpdate");
+    }
+
+    /**
+     * Same as testUpdateWhileReadingByKey but does a scan.
+     */
+    public void testUpdateWhileScanning()
+        throws Throwable {
+
+        doTest("runReadUncommittedScan", "runPrimaryUpdate");
+    }
+
+    /**
+     * Runs two threads for the given method names, after populating the
+     * database.
+     */
+    public void doTest(String method1, String method2)
+        throws Throwable {
+
+        JUnitMethodThread tester1 = new JUnitMethodThread(method1 + "-t1",
+                                                          method1, this);
+        JUnitMethodThread tester2 = new JUnitMethodThread(method2 + "-t2",
+                                                          method2, this);
+        priDb = openPrimary("testDB");
+        secDb = openSecondary(priDb, "testSecDB", false);
+        addRecords();
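+        /* Race the reader thread against the delete/update thread. */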
+        tester1.start();
+        tester2.start();
+        tester1.finishTest();
+        tester2.finishTest();
+        secDb.close();
+        secDb = null;
+        priDb.close();
+        priDb = null;
+    }
+
+    /**
+     * Deletes the key that is being read by the other thread.
+     */
+    public void runPrimaryDelete()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        while (nextKey < MAX_KEY - 1) {
+            Transaction txn = txnBegin();
+            key.setData(TestUtils.getTestArray(nextKey));
+            OperationStatus status = priDb.delete(txn, key);
+            if (status != OperationStatus.SUCCESS) {
+                assertEquals(OperationStatus.NOTFOUND, status);
+            }
+            txnCommit(txn);
+        }
+    }
+
+    /**
+     * Updates the record for the key that is being read by the other thread,
+     * changing the datum to -1 so it will cause the secondary key record to
+     * be deleted.
+     */
+    public void runPrimaryUpdate()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (nextKey < MAX_KEY - 1) {
+            Transaction txn = txnBegin();
+            key.setData(TestUtils.getTestArray(nextKey));
+            data.setData(TestUtils.getTestArray(-1));
+            OperationStatus status = priDb.put(txn, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+            txnCommit(txn);
+        }
+    }
+
+    /**
+     * Does a read-uncommitted by key, retrying until it is deleted by the
+     * delete/update thread, then moves to the next key.  We shouldn't get an
+     * exception, just a NOTFOUND when it is deleted.
+     */
+    public void runReadUncommittedByKey()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pKey = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while (nextKey < MAX_KEY - 1) {
+            key.setData(TestUtils.getTestArray(nextKey));
+            OperationStatus status = secDb.get(null, key, pKey, data,
+                                               lockMode);
+            if (status != OperationStatus.SUCCESS) {
+                assertEquals(OperationStatus.NOTFOUND, status);
+                nextKey++;
+            } else {
+                assertEquals(nextKey, TestUtils.getTestVal(key.getData()));
+                assertEquals(nextKey, TestUtils.getTestVal(pKey.getData()));
+                assertEquals(nextKey, TestUtils.getTestVal(data.getData()));
+            }
+        }
+    }
+
+    /**
+     * Does a read-uncommitted scan through the whole key range, but moves
+     * forward only after the key is deleted by the delete/update thread.  We
+     * shouldn't get an exception or a NOTFOUND, but we may skip values when a
+     * key is deleted.
+     */
+    public void runReadUncommittedScan()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pKey = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        SecondaryCursor cursor = secDb.openSecondaryCursor(null, null);
+        while (nextKey < MAX_KEY - 1) {
+            OperationStatus status = cursor.getNext(key, pKey, data,
+                                                    lockMode);
+            assertEquals("nextKey=" + nextKey,
+                         OperationStatus.SUCCESS, status);
+            int keyFound = TestUtils.getTestVal(key.getData());
+            assertEquals(keyFound, TestUtils.getTestVal(pKey.getData()));
+            assertEquals(keyFound, TestUtils.getTestVal(data.getData()));
+            /* Let the delete/update thread catch up. */
+            nextKey = keyFound;
+            if (nextKey < MAX_KEY - 1) {
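+                /*
+                 * Spin on getCurrent until the delete/update thread removes
+                 * this record; the deleted record then reads as KEYEMPTY and
+                 * we can advance.
+                 */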
+                while (status != OperationStatus.KEYEMPTY) {
+                    assertEquals(OperationStatus.SUCCESS, status);
+                    status = cursor.getCurrent(key, pKey, data,
+                                               lockMode);
+                }
+                nextKey = keyFound + 1;
+            }
+        }
+        cursor.close();
+    }
+
+    /**
+     * Adds records for the entire key range.
+     */
+    private void addRecords()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Transaction txn = txnBegin();
+        for (int i = 0; i < MAX_KEY; i += 1) {
+            byte[] val = TestUtils.getTestArray(i);
+            key.setData(val);
+            data.setData(val);
+            OperationStatus status = priDb.putNoOverwrite(txn, key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+        txnCommit(txn);
+    }
+
+    /**
+     * Opens the primary database.
+     */
+    private Database openPrimary(String name)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        Transaction txn = txnBegin();
+        Database priDb;
+        try {
+            priDb = env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+        assertNotNull(priDb);
+        return priDb;
+    }
+
+    /**
+     * Opens the secondary database.
+     */
+    private SecondaryDatabase openSecondary(Database priDb, String dbName,
+                                            boolean allowDuplicates)
+        throws DatabaseException {
+
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        if (useMultiKey) {
+            dbConfig.setMultiKeyCreator
+                (new SimpleMultiKeyCreator(new MyKeyCreator()));
+        } else {
+            dbConfig.setKeyCreator(new MyKeyCreator());
+        }
+        Transaction txn = txnBegin();
+        SecondaryDatabase secDb;
+        try {
+            secDb = env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+        return secDb;
+    }
+
+    /**
+     * Creates secondary keys for a primary datum with a non-negative value.
+     */
+    private static class MyKeyCreator implements SecondaryKeyCreator {
+
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result)
+            throws DatabaseException {
+
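+            /*
+             * No secondary key is created for a negative datum; the update
+             * thread writes -1 to delete the secondary key record.
+             */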
+            int val = TestUtils.getTestVal(data.getData());
+            if (val >= 0) {
+                result.setData(TestUtils.getTestArray(val));
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/SecondarySplitTestMain.java b/test/com/sleepycat/je/test/SecondarySplitTestMain.java
new file mode 100644
index 0000000000000000000000000000000000000000..91a3cc4c2f4424ddb788ca36920ace96caaa4d72
--- /dev/null
+++ b/test/com/sleepycat/je/test/SecondarySplitTestMain.java
@@ -0,0 +1,220 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SecondarySplitTestMain.java,v 1.7 2008/01/07 14:29:13 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Tests that splits during secondary inserts don't cause a LatchException
+ * (latch already held).  This was caused by a latch that wasn't released
+ * when a split occurred during a duplicate insert.  See
+ * [#12841] in Tree.java.
+ *
+ * The record keys are random long values and the record data is the record's
+ * creation time (millis).  The secondary database is indexed on the full data
+ * value (the timestamp).  When a record is updated, its timestamp is changed
+ * to the current time, causing secondary deletes and inserts.  This scenario
+ * is what brought out the bug in SR [#12841].
+ */
+public class SecondarySplitTestMain {
+
+    private static final int WRITER_THREADS = 2;
+    private static final int INSERTS_PER_ITER = 2;
+    private static final int UPDATES_PER_ITER = 1;
+    private static final int ITERS_PER_THREAD = 20000;
+    private static final int ITERS_PER_TRACE = 1000;
+
+    private File envHome;
+    private Environment env;
+    private Database priDb;
+    private SecondaryDatabase secDb;
+    private Random rnd = new Random(123);
+
+    public static void main(String[] args) {
+        try {
+            SecondarySplitTestMain test = new SecondarySplitTestMain();
+            test.doTest();
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            System.exit(1);
+        }
+    }
+
+    public SecondarySplitTestMain() throws IOException {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    private void doTest()
+        throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        open();
+        Thread[] writers = new Thread[WRITER_THREADS];
+        for (int i = 0; i < writers.length; i += 1) {
+            writers[i] = new Writer(i);
+        }
+        for (int i = 0; i < writers.length; i += 1) {
+            writers[i].start();
+        }
+        for (int i = 0; i < writers.length; i += 1) {
+            writers[i].join();
+        }
+        close();
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        System.out.println("SUCCESS");
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig priConfig = new DatabaseConfig();
+        priConfig.setAllowCreate(true);
+
+        priDb = env.openDatabase(null, "pri", priConfig);
+
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+        secConfig.setKeyCreator(new KeyCreator());
+
+        secDb = env.openSecondaryDatabase(null, "sec", priDb, secConfig);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        secDb.close();
+        secDb = null;
+
+        priDb.close();
+        priDb = null;
+
+        env.close();
+        env = null;
+    }
+
+    static class KeyCreator implements SecondaryKeyCreator {
+
+        public boolean createSecondaryKey(SecondaryDatabase db,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result)
+            throws DatabaseException {
+
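+            /* The secondary key is the full data value (the timestamp). */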
+            result.setData(data.getData(), data.getOffset(), data.getSize());
+            return true;
+        }
+    }
+
+    private class Writer extends Thread {
+
+        Writer(int id) {
+            super("[Writer " + id + ']');
+        }
+
+        public void run() {
+
+            int inserts = 0;
+            int updates = 0;
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status;
+
+            for (int iter = 1; iter <= ITERS_PER_THREAD; iter += 1) {
+
+                Cursor cursor = null;
+
+                try {
+
+                    /* Inserts */
+                    for (int i = 0; i < INSERTS_PER_ITER; i += 1) {
+                        LongBinding.longToEntry(rnd.nextLong(), key);
+                        long time = System.currentTimeMillis();
+                        LongBinding.longToEntry(time, data);
+                        status = priDb.putNoOverwrite(null, key, data);
+                        if (status == OperationStatus.SUCCESS) {
+                            inserts += 1;
+                        } else {
+                            System.out.println
+                                (getName() + " *** INSERT " + status);
+                        }
+                    }
+
+                    /* Updates */
+                    for (int i = 0; i < UPDATES_PER_ITER; i += 1) {
+
+                        cursor = priDb.openCursor(null, null);
+
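+                        /*
+                         * Pick an arbitrary existing record by searching from
+                         * a random key (falling back to the first record),
+                         * then set its data to the current time so the
+                         * secondary key is deleted and re-inserted.
+                         */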
+                        LongBinding.longToEntry(rnd.nextLong(), key);
+                        status = cursor.getSearchKeyRange(key, data,
+                                                          LockMode.RMW);
+                        if (status == OperationStatus.NOTFOUND) {
+                            status = cursor.getFirst(key, data, LockMode.RMW);
+                        }
+
+                        if (status == OperationStatus.SUCCESS) {
+                            long time = System.currentTimeMillis();
+                            LongBinding.longToEntry(time, data);
+                            cursor.putCurrent(data);
+                            updates += 1;
+                        } else {
+                            System.out.println
+                                (getName() + " *** UPDATE " + status);
+                        }
+
+                        cursor.close();
+                        cursor = null;
+                    }
+
+                } catch (Throwable e) {
+
+                    e.printStackTrace(System.out);
+
+                    if (cursor != null) {
+                        try {
+                            cursor.close();
+                        } catch (Exception e2) {
+                            e2.printStackTrace(System.out);
+                        }
+                    }
+                }
+
+                if (iter % ITERS_PER_TRACE == 0) {
+                    System.out.println
+                        (getName() +
+                         " inserts=" + inserts +
+                         " updates=" + updates);
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/SecondaryTest.java b/test/com/sleepycat/je/test/SecondaryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..245d5f877aded01c4fae8d0dd89006ca58061423
--- /dev/null
+++ b/test/com/sleepycat/je/test/SecondaryTest.java
@@ -0,0 +1,1588 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SecondaryTest.java,v 1.43.2.3 2010/01/04 15:30:47 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+
+public class SecondaryTest extends MultiKeyTxnTestCase {
+
+    private static final int NUM_RECS = 5;
+    private static final int KEY_OFFSET = 100;
+
+    private JUnitThread junitThread;
+
+    private static EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+    static {
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "6");
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setLockTimeout(1); // to speed up intentional deadlocks
+        envConfig.setAllowCreate(true);
+    }
+
+    public static Test suite() {
+
+        return multiKeyTxnTestSuite(SecondaryTest.class, envConfig, null);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        if (junitThread != null) {
+            while (junitThread.isAlive()) {
+                junitThread.interrupt();
+                Thread.yield();
+            }
+            junitThread = null;
+        }
+    }
+
+    public void testPutAndDelete()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        OperationStatus status;
+        Transaction txn = txnBegin();
+
+        /* Database.put() */
+        status = priDb.put(txn, entry(1), entry(2));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(1), key);
+        assertDataEquals(entry(2), data);
+
+        /* Database.putNoOverwrite() */
+        status = priDb.putNoOverwrite(txn, entry(1), entry(1));
+        assertSame(OperationStatus.KEYEXIST, status);
+        status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(1), key);
+        assertDataEquals(entry(2), data);
+
+        /* Database.put() overwrite */
+        status = priDb.put(txn, entry(1), entry(3));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = secDb.get(txn, entry(103), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(1), key);
+        assertDataEquals(entry(3), data);
+
+        /* Database.delete() */
+        status = priDb.delete(txn, entry(1));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = priDb.delete(txn, entry(1));
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = secDb.get(txn, entry(103), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+
+        /* SecondaryDatabase.delete() */
+        status = priDb.put(txn, entry(1), entry(1));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = priDb.put(txn, entry(2), entry(1));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = secDb.get(txn, entry(101), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(1), key);
+        assertDataEquals(entry(1), data);
+        status = secDb.delete(txn, entry(101));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = secDb.delete(txn, entry(101));
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = secDb.get(txn, entry(101), key, data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = priDb.get(txn, entry(1), data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = priDb.get(txn, entry(2), data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+
+        /*
+         * Database.putNoDupData() cannot be called since the primary cannot be
+         * configured for duplicates.
+         */
+
+        /* Primary and secondary are empty now. */
+
+        /* Get a txn for a cursor. */
+        txnCommit(txn);
+        txn = txnBeginCursor();
+
+        Cursor priCursor = null;
+        SecondaryCursor secCursor = null;
+        try {
+            priCursor = priDb.openCursor(txn, null);
+            secCursor = secDb.openSecondaryCursor(txn, null);
+
+            /* Cursor.putNoOverwrite() */
+            status = priCursor.putNoOverwrite(entry(1), entry(2));
+            assertSame(OperationStatus.SUCCESS, status);
+            status = secCursor.getSearchKey(entry(102), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(1), key);
+            assertDataEquals(entry(2), data);
+
+            /* Cursor.putCurrent() */
+            status = priCursor.putCurrent(entry(3));
+            assertSame(OperationStatus.SUCCESS, status);
+            status = secCursor.getSearchKey(entry(102), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+            status = secCursor.getSearchKey(entry(103), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(1), key);
+            assertDataEquals(entry(3), data);
+
+            /* Cursor.delete() */
+            status = priCursor.delete();
+            assertSame(OperationStatus.SUCCESS, status);
+            status = priCursor.delete();
+            assertSame(OperationStatus.KEYEMPTY, status);
+            status = secCursor.getSearchKey(entry(103), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+            status = priCursor.getSearchKey(entry(1), data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* Cursor.put() */
+            status = priCursor.put(entry(1), entry(4));
+            assertSame(OperationStatus.SUCCESS, status);
+            status = secCursor.getSearchKey(entry(104), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(1), key);
+            assertDataEquals(entry(4), data);
+
+            /* SecondaryCursor.delete() */
+            status = secCursor.delete();
+            assertSame(OperationStatus.SUCCESS, status);
+            status = secCursor.delete();
+            assertSame(OperationStatus.KEYEMPTY, status);
+            status = secCursor.getCurrent(new DatabaseEntry(), key, data,
+                                          LockMode.DEFAULT);
+            assertSame(OperationStatus.KEYEMPTY, status);
+            status = secCursor.getSearchKey(entry(104), key, data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+            status = priCursor.getSearchKey(entry(1), data,
+                                            LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /*
+             * Cursor.putNoDupData() cannot be called since the primary cannot
+             * be configured for duplicates.
+             */
+
+            /* Primary and secondary are empty now. */
+        } finally {
+            if (secCursor != null) {
+                secCursor.close();
+            }
+            if (priCursor != null) {
+                priCursor.close();
+            }
+        }
+
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+    }
+
+    public void testGet()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry secKey = new DatabaseEntry();
+        OperationStatus status;
+        Transaction txn = txnBegin();
+
+        /*
+         * For parameters that do not require initialization with a non-null
+         * data array, we set them to null to make sure this works. [#12121]
+         */
+
+        /* Add one record for each key with one data/duplicate. */
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            status = priDb.put(txn, entry(i), entry(i));
+            assertSame(OperationStatus.SUCCESS, status);
+        }
+
+        /* SecondaryDatabase.get() */
+        for (int i = 0; i < NUM_RECS; i += 1) {
+
+            data.setData(null);
+            status = secDb.get(txn, entry(i + KEY_OFFSET), key,
+                               data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(i), key);
+            assertDataEquals(entry(i), data);
+        }
+        data.setData(null);
+        status = secDb.get(txn, entry(NUM_RECS + KEY_OFFSET), key,
+                           data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+
+        /* SecondaryDatabase.getSearchBoth() */
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            data.setData(null);
+            status = secDb.getSearchBoth(txn, entry(i + KEY_OFFSET), entry(i),
+                                         data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(i), data);
+        }
+        data.setData(null);
+        status = secDb.getSearchBoth(txn, entry(NUM_RECS + KEY_OFFSET),
+                                     entry(NUM_RECS), data, LockMode.DEFAULT);
+        assertSame(OperationStatus.NOTFOUND, status);
+
+        /* Get a cursor txn. */
+        txnCommit(txn);
+        txn = txnBeginCursor();
+
+        SecondaryCursor cursor = secDb.openSecondaryCursor(txn, null);
+        try {
+            /* SecondaryCursor.getFirst()/getNext() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getNext(secKey, key, data, LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getCurrent() (last) */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getCurrent(secKey, key, data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(NUM_RECS - 1 + KEY_OFFSET), secKey);
+            assertDataEquals(entry(NUM_RECS - 1), key);
+            assertDataEquals(entry(NUM_RECS - 1), data);
+            assertPriLocked(priDb, key);
+
+            /* SecondaryCursor.getLast()/getPrev() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getLast(secKey, key, data, LockMode.DEFAULT);
+            for (int i = NUM_RECS - 1; i >= 0; i -= 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getPrev(secKey, key, data, LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getCurrent() (first) */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getCurrent(secKey, key, data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(0 + KEY_OFFSET), secKey);
+            assertDataEquals(entry(0), key);
+            assertDataEquals(entry(0), data);
+            assertPriLocked(priDb, key);
+
+            /* SecondaryCursor.getSearchKey() */
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getSearchKey(entry(KEY_OFFSET - 1), key,
+                                         data, LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getSearchKey(entry(i + KEY_OFFSET), key,
+                                             data, LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key);
+            }
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getSearchKey(entry(NUM_RECS + KEY_OFFSET), key,
+                                         data, LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getSearchBoth() */
+            data.setData(null);
+            status = cursor.getSearchBoth(entry(KEY_OFFSET - 1), entry(0),
+                                          data, LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                data.setData(null);
+                status = cursor.getSearchBoth(entry(i + KEY_OFFSET), entry(i),
+                                              data, LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, entry(i));
+            }
+            data.setData(null);
+            status = cursor.getSearchBoth(entry(NUM_RECS + KEY_OFFSET),
+                                          entry(NUM_RECS), data,
+                                          LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getSearchKeyRange() */
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getSearchKeyRange(entry(KEY_OFFSET - 1), key,
+                                              data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(0), key);
+            assertDataEquals(entry(0), data);
+            assertPriLocked(priDb, key);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getSearchKeyRange(entry(i + KEY_OFFSET), key,
+                                                  data, LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key);
+            }
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getSearchKeyRange(entry(NUM_RECS + KEY_OFFSET),
+                                              key, data, LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getSearchBothRange() */
+            data.setData(null);
+            status = cursor.getSearchBothRange(entry(1 + KEY_OFFSET), entry(1),
+                                               data, LockMode.DEFAULT);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertDataEquals(entry(1), data);
+            assertPriLocked(priDb, entry(1));
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                data.setData(null);
+                status = cursor.getSearchBothRange(entry(i + KEY_OFFSET),
+                                                   entry(i), data,
+                                                   LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, entry(i));
+            }
+            data.setData(null);
+            status = cursor.getSearchBothRange(entry(NUM_RECS + KEY_OFFSET),
+                                               entry(NUM_RECS), data,
+                                               LockMode.DEFAULT);
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* Add one duplicate for each key. */
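+            /*
+             * Each record {i + KEY_OFFSET, i} maps to the same secondary key
+             * (i + KEY_OFFSET) as the existing record {i, i}, so every
+             * secondary key now has exactly two duplicates.
+             */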
+            Cursor priCursor = priDb.openCursor(txn, null);
+            try {
+                for (int i = 0; i < NUM_RECS; i += 1) {
+                    status = priCursor.put(entry(i + KEY_OFFSET), entry(i));
+                    assertSame(OperationStatus.SUCCESS, status);
+                }
+            } finally {
+                priCursor.close();
+            }
+
+            /* SecondaryCursor.getNextDup() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getNextDup(secKey, key, data,
+                                           LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i + KEY_OFFSET), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getNextDup(secKey, key, data,
+                                           LockMode.DEFAULT);
+                assertSame(OperationStatus.NOTFOUND, status);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getNext(secKey, key, data, LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getNextNoDup() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT);
+            for (int i = 0; i < NUM_RECS; i += 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getNextNoDup(secKey, key, data,
+                                             LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getPrevDup() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getLast(secKey, key, data, LockMode.DEFAULT);
+            for (int i = NUM_RECS - 1; i >= 0; i -= 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i + KEY_OFFSET), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getPrevDup(secKey, key, data,
+                                           LockMode.DEFAULT);
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getPrevDup(secKey, key, data,
+                                           LockMode.DEFAULT);
+                assertSame(OperationStatus.NOTFOUND, status);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getPrev(secKey, key, data, LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+
+            /* SecondaryCursor.getPrevNoDup() */
+            secKey.setData(null);
+            key.setData(null);
+            data.setData(null);
+            status = cursor.getLast(secKey, key, data, LockMode.DEFAULT);
+            for (int i = NUM_RECS - 1; i >= 0; i -= 1) {
+                assertSame(OperationStatus.SUCCESS, status);
+                assertDataEquals(entry(i + KEY_OFFSET), secKey);
+                assertDataEquals(entry(i + KEY_OFFSET), key);
+                assertDataEquals(entry(i), data);
+                assertPriLocked(priDb, key, data);
+                secKey.setData(null);
+                key.setData(null);
+                data.setData(null);
+                status = cursor.getPrevNoDup(secKey, key, data,
+                                             LockMode.DEFAULT);
+            }
+            assertSame(OperationStatus.NOTFOUND, status);
+        } finally {
+            cursor.close();
+        }
+
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+    }
+
+    public void testOpenAndClose()
+        throws DatabaseException {
+
+        Database priDb = openDatabase(false, "testDB", false);
+
+        /* Open two secondaries as regular databases and as secondaries. */
+        Database secDbDetached = openDatabase(true, "testSecDB", false);
+        SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB",
+                                                false, false);
+        Database secDb2Detached = openDatabase(true, "testSecDB2", false);
+        SecondaryDatabase secDb2 = openSecondary(priDb, true, "testSecDB2",
+                                                 false, false);
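+        /*
+         * The detached handles are plain Database views of the secondary
+         * databases: they read the same underlying records without being
+         * associated with the primary, so they can verify secondary contents
+         * even after the associated handles are closed.
+         */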
+        assertEquals(priDb.getSecondaryDatabases(),
+                     Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}));
+
+        Transaction txn = txnBegin();
+
+        /* Check that primary writes to both secondaries. */
+        checkSecondaryUpdate(txn, priDb, 1, secDbDetached, true,
+                                            secDb2Detached, true);
+
+        /* New txn before closing database. */
+        txnCommit(txn);
+        txn = txnBegin();
+
+        /* Close 2nd secondary. */
+        secDb2.close();
+        assertEquals(priDb.getSecondaryDatabases(),
+                     Arrays.asList(new SecondaryDatabase[] {secDb}));
+
+        /* Check that primary writes to 1st secondary only. */
+        checkSecondaryUpdate(txn, priDb, 2, secDbDetached, true,
+                                             secDb2Detached, false);
+
+        /* New txn before closing database. */
+        txnCommit(txn);
+        txn = txnBegin();
+
+        /* Close 1st secondary. */
+        secDb.close();
+        assertEquals(0, priDb.getSecondaryDatabases().size());
+
+        /* Check that primary writes to no secondaries. */
+        checkSecondaryUpdate(txn, priDb, 3, secDbDetached, false,
+                                            secDb2Detached, false);
+
+        /* Open the two secondaries again. */
+        secDb = openSecondary(priDb, true, "testSecDB", false, false);
+        secDb2 = openSecondary(priDb, true, "testSecDB2", false, false);
+        assertEquals(priDb.getSecondaryDatabases(),
+                     Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}));
+
+        /* Check that primary writes to both secondaries. */
+        checkSecondaryUpdate(txn, priDb, 4, secDbDetached, true,
+                                            secDb2Detached, true);
+
+        /* Close the primary first to disassociate secondaries. */
+        txnCommit(txn);
+        priDb.close();
+        assertNull(secDb.getPrimaryDatabase());
+        assertNull(secDb2.getPrimaryDatabase());
+        secDb2.close();
+        secDb.close();
+
+        secDb2Detached.close();
+        secDbDetached.close();
+    }
+
+    /**
+     * Check that primary put() writes to each secondary that is open.
+     */
+    private void checkSecondaryUpdate(Transaction txn, Database priDb, int val,
+                                      Database secDb, boolean expectSecDbVal,
+                                      Database secDb2, boolean expectSecDb2Val)
+        throws DatabaseException {
+
+        OperationStatus status;
+        DatabaseEntry data = new DatabaseEntry();
+        int secVal = KEY_OFFSET + val;
+
+        status = priDb.put(txn, entry(val), entry(val));
+        assertSame(OperationStatus.SUCCESS, status);
+
+        status = secDb.get(txn, entry(secVal), data, LockMode.DEFAULT);
+        assertSame(expectSecDbVal ? OperationStatus.SUCCESS
+                                  : OperationStatus.NOTFOUND, status);
+
+        status = secDb2.get(txn, entry(secVal), data, LockMode.DEFAULT);
+        assertSame(expectSecDb2Val ? OperationStatus.SUCCESS
+                                   : OperationStatus.NOTFOUND, status);
+
+        status = priDb.delete(txn, entry(val));
+        assertSame(OperationStatus.SUCCESS, status);
+    }
+
+    public void testReadOnly()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+        OperationStatus status;
+        Transaction txn = txnBegin();
+
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            status = priDb.put(txn, entry(i), entry(i));
+            assertSame(OperationStatus.SUCCESS, status);
+        }
+
+        /*
+         * Secondaries can be opened without a key creator if the primary is
+         * read only.  openSecondary specifies a null key creator when the
+         * readOnly param is true.
+         */
+        Database readOnlyPriDb = openDatabase(false, "testDB", true);
+        SecondaryDatabase readOnlySecDb = openSecondary(readOnlyPriDb,
+                                                        true, "testSecDB",
+                                                        false, true);
+        assertNull(readOnlySecDb.getSecondaryConfig().getKeyCreator());
+        verifyRecords(txn, readOnlySecDb, NUM_RECS, true);
+
+        txnCommit(txn);
+        readOnlySecDb.close();
+        readOnlyPriDb.close();
+        secDb.close();
+        priDb.close();
+    }
+
+    public void testPopulate()
+        throws DatabaseException {
+
+        Database priDb = openDatabase(false, "testDB", false);
+        Transaction txn = txnBegin();
+
+        /* Test population of newly created secondary database. */
+
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            assertSame(OperationStatus.SUCCESS,
+                       priDb.put(txn, entry(i), entry(i)));
+        }
+        txnCommit(txn);
+
+        SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB",
+                                                true, false);
+        txn = txnBegin();
+        verifyRecords(txn, secDb, NUM_RECS, true);
+        txnCommit(txn);
+
+        /*
+         * Clear the secondary and populate it again, to test the case where
+         * an existing database is opened, and therefore a write txn is
+         * created only in order to populate it.
+         */
+        Database secDbDetached = openDatabase(true, "testSecDB", false);
+        secDb.close();
+        txn = txnBegin();
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            assertSame(OperationStatus.SUCCESS,
+                       secDbDetached.delete(txn, entry(i + KEY_OFFSET)));
+        }
+        verifyRecords(txn, secDbDetached, 0, true);
+        txnCommit(txn);
+        secDb = openSecondary(priDb, true, "testSecDB", true, false);
+        txn = txnBegin();
+        verifyRecords(txn, secDb, NUM_RECS, true);
+        verifyRecords(txn, secDbDetached, NUM_RECS, true);
+
+        txnCommit(txn);
+        secDbDetached.close();
+        secDb.close();
+        priDb.close();
+    }
+
+    public void testTruncate()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+        Transaction txn = txnBegin();
+
+        for (int i = 0; i < NUM_RECS; i += 1) {
+            priDb.put(txn, entry(i), entry(i));
+        }
+        verifyRecords(txn, priDb, NUM_RECS, false);
+        verifyRecords(txn, secDb, NUM_RECS, true);
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+
+        txn = txnBegin();
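+        /*
+         * truncateDatabase returns the number of deleted records when its
+         * last argument is true.
+         */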
+        assertEquals(NUM_RECS, env.truncateDatabase(txn, "testDB", true));
+        assertEquals(NUM_RECS, env.truncateDatabase(txn, "testSecDB", true));
+        txnCommit(txn);
+
+        secDb = initDb();
+        priDb = secDb.getPrimaryDatabase();
+
+        txn = txnBegin();
+        verifyRecords(txn, priDb, 0, false);
+        verifyRecords(txn, secDb, 0, true);
+        txnCommit(txn);
+
+        secDb.close();
+        priDb.close();
+    }
+
+    private void verifyRecords(Transaction txn, Database db, int numRecs,
+                               boolean isSecondary)
+        throws DatabaseException {
+
+        /* We're only reading, so txn may be null. */
+        Cursor cursor = db.openCursor(txn, null);
+        try {
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry key = new DatabaseEntry();
+            OperationStatus status;
+            int count = 0;
+            status = cursor.getFirst(key, data, LockMode.DEFAULT);
+            while (status == OperationStatus.SUCCESS) {
+                assertDataEquals(entry(count), data);
+                if (isSecondary) {
+                    assertDataEquals(entry(count + KEY_OFFSET), key);
+                } else {
+                    assertDataEquals(entry(count), key);
+                }
+                count += 1;
+                status = cursor.getNext(key, data, LockMode.DEFAULT);
+            }
+            assertEquals(numRecs, count);
+        } finally {
+            cursor.close();
+        }
+    }
+
+    public void testUniqueSecondaryKey()
+        throws DatabaseException {
+
+        Database priDb = openDatabase(false, "testDB", false);
+        SecondaryDatabase secDb = openSecondary(priDb, false, "testSecDB",
+                                                false, false);
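+        /*
+         * allowDuplicates is false, so the secondary index is unique: two
+         * primary records may not produce the same secondary key.
+         */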
+        DatabaseEntry key;
+        DatabaseEntry data;
+        DatabaseEntry pkey = new DatabaseEntry();
+        Transaction txn;
+
+        /* Put {0, 0} */
+        txn = txnBegin();
+        key = entry(0);
+        data = entry(0);
+        priDb.put(txn, key, data);
+        txnCommit(txn);
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(null, entry(0 + KEY_OFFSET),
+                               pkey, data, null));
+        assertEquals(0, TestUtils.getTestVal(pkey.getData()));
+        assertEquals(0, TestUtils.getTestVal(data.getData()));
+
+        /* Put {1, 1} */
+        txn = txnBegin();
+        key = entry(1);
+        data = entry(1);
+        priDb.put(txn, key, data);
+        txnCommit(txn);
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(null, entry(1 + KEY_OFFSET),
+                               pkey, data, null));
+        assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+        assertEquals(1, TestUtils.getTestVal(data.getData()));
+
+        /* Put {2, 0} */
+        txn = txnBegin();
+        key = entry(2);
+        data = entry(0);
+        try {
+            priDb.put(txn, key, data);
+            /* Expect exception because secondary key must be unique. */
+            fail();
+        } catch (DatabaseException e) {
+            txnAbort(txn);
+            /* Ensure that primary record was not inserted. */
+            assertEquals(OperationStatus.NOTFOUND,
+                         priDb.get(null, key, data, null));
+            /* Ensure that secondary record has not changed. */
+            assertEquals(OperationStatus.SUCCESS,
+                         secDb.get(null, entry(0 + KEY_OFFSET),
+                                   pkey, data, null));
+            assertEquals(0, TestUtils.getTestVal(pkey.getData()));
+            assertEquals(0, TestUtils.getTestVal(data.getData()));
+        }
+
+        /* Overwrite {1, 1} */
+        txn = txnBegin();
+        key = entry(1);
+        data = entry(1);
+        priDb.put(txn, key, data);
+        txnCommit(txn);
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(null, entry(1 + KEY_OFFSET),
+                               pkey, data, null));
+        assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+        assertEquals(1, TestUtils.getTestVal(data.getData()));
+
+        /* Modify secondary key to {1, 3} */
+        txn = txnBegin();
+        key = entry(1);
+        data = entry(3);
+        priDb.put(txn, key, data);
+        txnCommit(txn);
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(null, entry(3 + KEY_OFFSET),
+                               pkey, data, null));
+        assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+        assertEquals(3, TestUtils.getTestVal(data.getData()));
+
+        secDb.close();
+        priDb.close();
+    }
+
+    /**
+     * Test that operations which would modify a secondary database or
+     * secondary cursor directly are not allowed, and that invalid
+     * configurations are rejected.
+     */
+    public void testOperationsNotAllowed()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+        Transaction txn = txnBegin();
+
+        /* Open secondary without a key creator. */
+        try {
+            env.openSecondaryDatabase(txn, "xxx", priDb, null);
+            fail();
+        } catch (NullPointerException expected) { }
+        try {
+            env.openSecondaryDatabase(txn, "xxx", priDb,
+                                      new SecondaryConfig());
+            fail();
+        } catch (NullPointerException expected) { }
+
+        /* Open secondary with both single and multi key creators. */
+        SecondaryConfig config = new SecondaryConfig();
+        config.setKeyCreator(new MyKeyCreator());
+        config.setMultiKeyCreator
+            (new SimpleMultiKeyCreator(new MyKeyCreator()));
+        try {
+            env.openSecondaryDatabase(txn, "xxx", priDb, config);
+            fail();
+        } catch (IllegalArgumentException expected) { }
+
+        /* Database operations. */
+
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+
+        try {
+            secDb.getSearchBoth(txn, key, data, LockMode.DEFAULT);
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+
+        try {
+            secDb.put(txn, key, data);
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+
+        try {
+            secDb.putNoOverwrite(txn, key, data);
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+
+        try {
+            secDb.putNoDupData(txn, key, data);
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+
+        try {
+            secDb.join(new Cursor[0], null);
+            fail();
+        } catch (UnsupportedOperationException expected) { }
+
+        /* Cursor operations. */
+
+        txnCommit(txn);
+        txn = txnBeginCursor();
+
+        SecondaryCursor cursor = null;
+        try {
+            cursor = secDb.openSecondaryCursor(txn, null);
+
+            try {
+                cursor.getSearchBoth(key, data, LockMode.DEFAULT);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+
+            try {
+                cursor.getSearchBothRange(key, data, LockMode.DEFAULT);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+
+            try {
+                cursor.putCurrent(data);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+
+            try {
+                cursor.put(key, data);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+
+            try {
+                cursor.putNoOverwrite(key, data);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+
+            try {
+                cursor.putNoDupData(key, data);
+                fail();
+            } catch (UnsupportedOperationException expected) { }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+
+        /* Primary with duplicates. */
+        priDb = openDatabase(true, "testDBWithDups", false);
+        try {
+            openSecondary(priDb, true, "testSecDB", false, false);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        priDb.close();
+
+        /* Single secondary with two primaries.*/
+        Database pri1 = openDatabase(false, "pri1", false);
+        Database pri2 = openDatabase(false, "pri2", false);
+        Database sec1 = openSecondary(pri1, false, "sec", false, false);
+        try {
+            openSecondary(pri2, false, "sec", false, false);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        sec1.close();
+        pri1.close();
+        pri2.close();
+    }
+
+    /**
+     * Test that null can be passed for the LockMode to all get methods.
+     */
+    public void testNullLockMode()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+        Transaction txn = txnBegin();
+
+        DatabaseEntry key = entry(0);
+        DatabaseEntry data = entry(0);
+        DatabaseEntry secKey = entry(KEY_OFFSET);
+        DatabaseEntry found = new DatabaseEntry();
+        DatabaseEntry found2 = new DatabaseEntry();
+        DatabaseEntry found3 = new DatabaseEntry();
+
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.put(txn, key, data));
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.put(txn, entry(1), data));
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.put(txn, entry(2), entry(2)));
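+        /*
+         * Records {0,0} and {1,0} share the data value 0 and therefore the
+         * same secondary key, so the duplicate-oriented secondary cursor
+         * calls below can succeed.
+         */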
+
+        /* Database operations. */
+
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.get(txn, key, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.getSearchBoth(txn, key, data, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(txn, secKey, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.get(txn, secKey, found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secDb.getSearchBoth(txn, secKey, key, found, null));
+
+        /* Cursor operations. */
+
+        txnCommit(txn);
+        txn = txnBeginCursor();
+        Cursor cursor = priDb.openCursor(txn, null);
+        SecondaryCursor secCursor = secDb.openSecondaryCursor(txn, null);
+
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(key, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchBoth(key, data, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKeyRange(key, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchBothRange(key, data, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getNext(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getPrev(found, found2, null));
+        assertEquals(OperationStatus.NOTFOUND,
+                     cursor.getNextDup(found, found2, null));
+        assertEquals(OperationStatus.NOTFOUND,
+                     cursor.getPrevDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getNextNoDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getPrevNoDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getLast(found, found2, null));
+
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchKey(secKey, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchKeyRange(secKey, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getFirst(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNext(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrev(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNextDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrevDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNextNoDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrevNoDup(found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getLast(found, found2, null));
+
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchKey(secKey, found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchBoth(secKey, data, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchKeyRange(secKey, found, found2, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchBothRange(secKey, data, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getFirst(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNext(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrev(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNextDup(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrevDup(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getNextNoDup(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getPrevNoDup(found, found2, found3, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getLast(found, found2, found3, null));
+
+        secCursor.close();
+        cursor.close();
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Test that an exception is thrown when a cursor is used in the wrong
+     * state.  No put or get is allowed in the closed state, and certain gets
+     * and puts are not allowed in the uninitialized state.
+     */
+    public void testCursorState()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+        Transaction txn = txnBegin();
+
+        DatabaseEntry key = entry(0);
+        DatabaseEntry data = entry(0);
+        DatabaseEntry secKey = entry(KEY_OFFSET);
+        DatabaseEntry found = new DatabaseEntry();
+        DatabaseEntry found2 = new DatabaseEntry();
+
+        assertEquals(OperationStatus.SUCCESS,
+                     priDb.put(txn, key, data));
+
+        txnCommit(txn);
+        txn = txnBeginCursor();
+        Cursor cursor = priDb.openCursor(txn, null);
+        SecondaryCursor secCursor = secDb.openSecondaryCursor(txn, null);
+
+        /* Check the uninitialized state for certain operations. */
+
+        try {
+            cursor.count();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.delete();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.putCurrent(data);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getCurrent(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getNextDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getPrevDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        try {
+            secCursor.count();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.delete();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getCurrent(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getNextDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getPrevDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        /* Cursor.dup works whether initialized or not. */
+        {
+            Cursor c2 = secCursor.dup(false);
+            c2.close();
+            c2 = secCursor.dup(true);
+            c2.close();
+            c2 = secCursor.dup(false);
+            c2.close();
+            c2 = secCursor.dup(true);
+            c2.close();
+        }
+
+        /* Initialize, then close, then check all operations. */
+
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(key, found, null));
+        assertEquals(OperationStatus.SUCCESS,
+                     secCursor.getSearchKey(secKey, found, null));
+
+        /* Cursor.dup works whether initialized or not. */
+        {
+            Cursor c2 = cursor.dup(false);
+            c2.close();
+            c2 = cursor.dup(true);
+            c2.close();
+            c2 = secCursor.dup(false);
+            c2.close();
+            c2 = secCursor.dup(true);
+            c2.close();
+        }
+
+        /* Close, then check all operations. */
+
+        secCursor.close();
+        cursor.close();
+
+        try {
+            cursor.close();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.count();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.delete();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.put(key, data);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.putNoOverwrite(key, data);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.putNoDupData(key, data);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.putCurrent(data);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getCurrent(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getSearchKey(key, found, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getSearchBoth(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getSearchKeyRange(key, found, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getSearchBothRange(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getFirst(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getNext(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getPrev(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getNextDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getPrevDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getNextNoDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getPrevNoDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            cursor.getLast(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        try {
+            secCursor.close();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.count();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.delete();
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getCurrent(key, data, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getSearchKey(secKey, found, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getSearchKeyRange(secKey, found, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getFirst(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getNext(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getPrev(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getNextDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getPrevDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getNextNoDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getPrevNoDup(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+        try {
+            secCursor.getLast(found, found2, null);
+            fail();
+        } catch (DatabaseException expected) {}
+
+        txnCommit(txn);
+        secDb.close();
+        priDb.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Test that a partial get with READ_UNCOMMITTED returns the requested
+     * portion of the data. [#14966]
+     */
+    public void testDirtyReadPartialGet()
+        throws DatabaseException {
+
+        SecondaryDatabase secDb = initDb();
+        Database priDb = secDb.getPrimaryDatabase();
+
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry secKey = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Put a record */
+        Transaction txn = txnBegin();
+        status = priDb.put(txn, entry(0), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+        txnCommit(txn);
+
+        /* Regular get */
+        status = secDb.get(null, entry(0 + KEY_OFFSET), key,
+                           data, LockMode.DEFAULT);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(0), key);
+        assertDataEquals(entry(0), data);
+
+        /* Dirty read returning no data */
+        data.setPartial(0, 0, true);
+        status = secDb.get(null, entry(0 + KEY_OFFSET), key,
+                           data, LockMode.READ_UNCOMMITTED);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(0), key);
+        assertEquals(0, data.getData().length);
+        assertEquals(0, data.getSize());
+
+        /* Dirty read returning partial data */
+        data.setPartial(0, 1, true);
+        status = secDb.get(null, entry(0 + KEY_OFFSET), key,
+                           data, LockMode.READ_UNCOMMITTED);
+        assertSame(OperationStatus.SUCCESS, status);
+        assertDataEquals(entry(0), key);
+        assertEquals(1, data.getData().length);
+        assertEquals(1, data.getSize());
+
+        secDb.close();
+        priDb.close();
+    }
+
+    /**
+     * Opens the primary and secondary databases.
+     */
+    private SecondaryDatabase initDb()
+        throws DatabaseException {
+
+        Database priDb = openDatabase(false, "testDB", false);
+        SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB",
+                                                false, false);
+        return secDb;
+    }
+
+    private Database openDatabase(boolean allowDuplicates, String name,
+                                  boolean readOnly)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        dbConfig.setReadOnly(readOnly);
+        Transaction txn = txnBegin();
+        Database priDb;
+        try {
+            priDb = env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+        assertNotNull(priDb);
+        return priDb;
+    }
+
+    private SecondaryDatabase openSecondary(Database priDb,
+                                            boolean allowDuplicates,
+                                            String dbName,
+                                            boolean allowPopulate,
+                                            boolean readOnly)
+        throws DatabaseException {
+
+        List secListBefore = priDb.getSecondaryDatabases();
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        dbConfig.setReadOnly(readOnly);
+        dbConfig.setAllowPopulate(allowPopulate);
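+        /* A read-only secondary may be opened without a key creator. */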
+        if (!readOnly) {
+            if (useMultiKey) {
+                dbConfig.setMultiKeyCreator
+                    (new SimpleMultiKeyCreator(new MyKeyCreator()));
+            } else {
+                dbConfig.setKeyCreator(new MyKeyCreator());
+            }
+        }
+        Transaction txn = txnBegin();
+        SecondaryDatabase secDb;
+        try {
+            secDb = env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+        assertNotNull(secDb);
+
+        /* Check configuration. */
+        assertSame(priDb, secDb.getPrimaryDatabase());
+        SecondaryConfig config2 = secDb.getSecondaryConfig();
+        assertEquals(allowPopulate, config2.getAllowPopulate());
+        assertEquals(dbConfig.getKeyCreator(), config2.getKeyCreator());
+
+        /* Make sure the new secondary is added to the primary's list. */
+        List secListAfter = priDb.getSecondaryDatabases();
+        assertTrue(secListAfter.remove(secDb));
+        assertEquals(secListBefore, secListAfter);
+
+        return secDb;
+    }
+
+    private DatabaseEntry entry(int val) {
+
+        return new DatabaseEntry(TestUtils.getTestArray(val));
+    }
+
+    private void assertDataEquals(DatabaseEntry e1, DatabaseEntry e2) {
+        assertEquals(e1, e2);
+    }
+
+    private void assertPriLocked(Database priDb, DatabaseEntry key) {
+        assertPriLocked(priDb, key, null);
+    }
+
+    /**
+     * Checks that the given key (or both key and data if data is non-null) is
+     * locked in the primary database.  The primary record should be locked
+     * whenever a secondary cursor is positioned to point to that primary
+     * record. [#15573]
+     */
+    private void assertPriLocked(final Database priDb,
+                                 final DatabaseEntry key,
+                                 final DatabaseEntry data) {
+
+        /*
+         * Whether the record is locked transactionally or not in the current
+         * thread, we should not be able to write lock the record
+         * non-transactionally in another thread.
+         */
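+        /*
+         * The short lock timeout set in envConfig makes the conflicting lock
+         * request below fail quickly with DeadlockException.
+         */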
+        final StringBuffer error = new StringBuffer();
+        junitThread = new JUnitThread("primary-locker") {
+            public void testBody()
+                throws DatabaseException {
+                try {
+                    if (data != null) {
+                        priDb.getSearchBoth(null, key, data, LockMode.RMW);
+                    } else {
+                        DatabaseEntry myData = new DatabaseEntry();
+                        priDb.get(null, key, myData, LockMode.RMW);
+                    }
+                    error.append("Expected DeadlockException");
+                } catch (DeadlockException expected) {
+                }
+            }
+        };
+
+        junitThread.start();
+        Throwable t = null;
+        try {
+            junitThread.finishTest();
+        } catch (Throwable e) {
+            t = e;
+        } finally {
+            junitThread = null;
+        }
+
+        if (t != null) {
+            t.printStackTrace();
+            fail(t.toString());
+        }
+        if (error.length() > 0) {
+            fail(error.toString());
+        }
+    }
+
+    private static class MyKeyCreator implements SecondaryKeyCreator {
+
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result)
+            throws DatabaseException {
+
+            result.setData(
+                TestUtils.getTestArray(
+                    TestUtils.getTestVal(data.getData()) + KEY_OFFSET));
+            return true;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/SequenceTest.java b/test/com/sleepycat/je/test/SequenceTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d486600bc92ac6224fdfdbc960553f52d5b61ee1
--- /dev/null
+++ b/test/com/sleepycat/je/test/SequenceTest.java
@@ -0,0 +1,493 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SequenceTest.java,v 1.10.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Sequence;
+import com.sleepycat.je.SequenceConfig;
+import com.sleepycat.je.SequenceStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.test.TxnTestCase;
+
+public class SequenceTest extends TxnTestCase {
+
+    public static Test suite() {
+        return txnTestSuite(SequenceTest.class, null, null);
+    }
+
+    public void testIllegal()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        SequenceConfig config = new SequenceConfig();
+        config.setAllowCreate(true);
+
+        /* Duplicates not allowed. */
+
+        Database db = openDb("dups", true);
+        Transaction txn = txnBegin();
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("duplicates") >= 0);
+        }
+        txnCommit(txn);
+        db.close();
+
+        db = openDb("foo");
+        txn = txnBegin();
+
+        /* Range min must be less than max. */
+
+        config.setRange(0, 0);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("less than the maximum") >= 0);
+        }
+
+        /* Initial value must be within range. */
+
+        config.setRange(-10, 10);
+        config.setInitialValue(-11);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("out of range") >= 0);
+        }
+        config.setInitialValue(11);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("out of range") >= 0);
+        }
+
+        /* Cache size must be within range. */
+
+        config.setRange(-10, 10);
+        config.setCacheSize(21);
+        config.setInitialValue(0);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("cache size is larger") >= 0);
+        }
+
+        /* Create with legal range values. */
+
+        config.setRange(1, 2);
+        config.setInitialValue(1);
+        config.setCacheSize(0);
+        Sequence seq = db.openSequence(txn, key, config);
+
+        /* Key must not exist if ExclusiveCreate=true. */
+
+        config.setExclusiveCreate(true);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (DatabaseException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("already exists") >= 0);
+        }
+        config.setExclusiveCreate(false);
+        seq.close();
+
+        /* Key must exist if AllowCreate=false. */
+
+        db.removeSequence(txn, key);
+        config.setAllowCreate(false);
+        try {
+            db.openSequence(txn, key, config);
+            fail();
+        } catch (DatabaseException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("does not exist") >= 0);
+        }
+
+        /* Check wrapping not allowed. */
+
+        db.removeSequence(txn, key);
+        config.setAllowCreate(true);
+        config.setRange(-5, 5);
+        config.setInitialValue(-5);
+        seq = db.openSequence(txn, key, config);
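+        /* Exhaust the range; the next get() must report overflow. */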
+        for (long i = config.getRangeMin(); i <= config.getRangeMax(); i++) {
+            assertEquals(i, seq.get(txn, 1));
+        }
+        try {
+            seq.get(txn, 1);
+            fail();
+        } catch (DatabaseException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("overflow") >= 0);
+        }
+
+        /* Check wrapping not allowed, decrement. */
+
+        db.removeSequence(txn, key);
+        config.setAllowCreate(true);
+        config.setRange(-5, 5);
+        config.setInitialValue(5);
+        config.setDecrement(true);
+        seq = db.openSequence(txn, key, config);
+        for (long i = config.getRangeMax(); i >= config.getRangeMin(); i--) {
+            assertEquals(i, seq.get(txn, 1));
+        }
+        try {
+            seq.get(txn, 1);
+            fail();
+        } catch (DatabaseException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("overflow") >= 0);
+        }
+
+        /* Check delta less than one. */
+        try {
+            seq.get(txn, 0);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("greater than zero") >= 0);
+        }
+
+        /* Check delta greater than range. */
+        try {
+            seq.get(txn, 11);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            String msg = expected.getMessage();
+            assertTrue(msg, msg.indexOf("larger than the range") >= 0);
+        }
+
+        seq.close();
+        txnCommit(txn);
+        db.close();
+    }
+
+    public void testBasic()
+        throws DatabaseException {
+
+        Database db = openDb("foo");
+        DatabaseEntry key = new DatabaseEntry(new byte[0]);
+        DatabaseEntry data = new DatabaseEntry();
+
+        SequenceConfig config = new SequenceConfig();
+        config.setAllowCreate(true);
+
+        Transaction txn = txnBegin();
+        Sequence seq = db.openSequence(txn, key, config);
+        txnCommit(txn);
+
+        txn = txnBegin();
+
+        /* Check default values before calling get(). */
+
+        SequenceStats stats = seq.getStats(null);
+        assertEquals(0, stats.getCurrent());
+        assertEquals(0, stats.getCacheSize());
+        assertEquals(0, stats.getNGets());
+        assertEquals(Long.MIN_VALUE, stats.getMin());
+        assertEquals(Long.MAX_VALUE, stats.getMax());
+
+        /* Get the first value. */
+
+        long val = seq.get(txn, 1);
+        assertEquals(0, val);
+        stats = seq.getStats(null);
+        assertEquals(1, stats.getCurrent());
+        assertEquals(1, stats.getValue());
+        assertEquals(0, stats.getLastValue());
+        assertEquals(1, stats.getNGets());
+
+        /* Use deltas greater than one. */
+
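+        /* get(delta) returns the current value, then advances by delta. */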
+        assertEquals(1, seq.get(txn, 2));
+        assertEquals(3, seq.get(txn, 3));
+        assertEquals(6, seq.get(txn, 1));
+        assertEquals(7, seq.get(txn, 1));
+
+        /* Remove a sequence and expect the key to be deleted. */
+
+        seq.close();
+        db.removeSequence(txn, key);
+        assertEquals(OperationStatus.NOTFOUND, db.get(txn, key, data, null));
+        txnCommit(txn);
+        assertEquals(OperationStatus.NOTFOUND, db.get(null, key, data, null));
+
+        db.close();
+    }
+
+    public void testMultipleHandles()
+        throws DatabaseException {
+
+        Database db = openDb("foo");
+        DatabaseEntry key = new DatabaseEntry(new byte[0]);
+
+        /* Create a sequence. */
+
+        SequenceConfig config = new SequenceConfig();
+        config.setAllowCreate(true);
+        config.setDecrement(true);
+        config.setRange(1, 3);
+        config.setInitialValue(3);
+
+        Transaction txn = txnBegin();
+        Sequence seq = db.openSequence(txn, key, config);
+        assertEquals(3, seq.get(txn, 1));
+        txnCommit(txn);
+
+        /* Open another handle on the same sequence -- config should match. */
+
+        txn = txnBegin();
+        Sequence seq2 = db.openSequence(txn, key, config);
+        assertEquals(2, seq2.get(txn, 1));
+        txnCommit(txn);
+
+        SequenceStats stats = seq2.getStats(null);
+        assertEquals(1, stats.getCurrent());
+        assertEquals(1, stats.getMin());
+        assertEquals(3, stats.getMax());
+
+        /* Values are assigned from a single sequence for both handles. */
+
+        assertEquals(1, seq.get(null, 1));
+
+        seq.close();
+        seq2.close();
+        db.close();
+    }
+
+    public void testRanges()
+        throws DatabaseException {
+
+        Database db = openDb("foo");
+
+        /* Positive and negative ranges. */
+
+        doRange(db, 1, 10, 1, 0);
+        doRange(db, -10, -1, 1, 0);
+        doRange(db, -10, 10, 1, 0);
+
+        /* Extreme min/max values. */
+
+        doRange(db, Integer.MIN_VALUE, Integer.MIN_VALUE + 10, 1, 0);
+        doRange(db, Integer.MAX_VALUE - 10, Integer.MAX_VALUE, 1, 0);
+
+        doRange(db, Long.MIN_VALUE, Long.MIN_VALUE + 10, 1, 0);
+        doRange(db, Long.MAX_VALUE - 10, Long.MAX_VALUE, 1, 0);
+
+        /* Deltas greater than one. */
+
+        doRange(db, -10, 10, 2, 0);
+        doRange(db, -10, 10, 3, 0);
+        doRange(db, -10, 10, 5, 0);
+        doRange(db, -10, 10, 10, 0);
+        doRange(db, -10, 10, 20, 0);
+
+        /*
+         * Cache sizes.  We cheat a little by making the cache size an even
+         * multiple of the delta whenever the cache size is greater than the
+         * delta; otherwise, it is too difficult to predict caching.
+         */
+
+        doRange(db, -10, 10, 1, 1);
+        doRange(db, -10, 10, 1, 2);
+        doRange(db, -10, 10, 1, 3);
+        doRange(db, -10, 10, 1, 7);
+        doRange(db, -10, 10, 1, 20);
+        doRange(db, -10, 10, 3, 1);
+        doRange(db, -10, 10, 3, 2);
+        doRange(db, -10, 10, 3, 3);
+        doRange(db, -10, 10, 3, 9);
+        doRange(db, -10, 10, 3, 18);
+
+        db.close();
+    }
+
+    private void doRange(Database db, long min, long max, int delta, int cache)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        boolean incr;
+        boolean wrap;
+
+        for (int option = 0; option < 4; option += 1) {
+            switch (option) {
+            case 0:
+                incr = true;
+                wrap = false;
+                break;
+            case 1:
+                incr = true;
+                wrap = true;
+                break;
+            case 2:
+                incr = false;
+                wrap = false;
+                break;
+            case 3:
+                incr = false;
+                wrap = true;
+                break;
+            default:
+                throw new IllegalStateException();
+            }
+
+            SequenceConfig config = new SequenceConfig();
+            config.setAllowCreate(true);
+            config.setInitialValue(incr ? min : max);
+            config.setWrap(wrap);
+            config.setDecrement(!incr);
+            config.setRange(min, max);
+            config.setCacheSize(cache);
+
+            String msg =
+                "incr=" + incr +
+                " wrap=" + wrap +
+                " min=" + min +
+                " max=" + max +
+                " delta=" + delta +
+                " cache=" + cache;
+
+            Transaction txn = txnBegin();
+            db.removeSequence(txn, key);
+            Sequence seq = db.openSequence(txn, key, config);
+            txnCommit(txn);
+
+            txn = txnBegin();
+
+            if (incr) {
+                for (long i = min;; i += delta) {
+
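+                    /*
+                     * A get() is served from the cache only when the delta
+                     * is smaller than the cache size, and not when it starts
+                     * a new cache batch ((i - min) is a multiple of the
+                     * cache size) or returns the range maximum; those cases
+                     * must write to the database.
+                     */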
+                    boolean expectCached = false;
+                    if (cache != 0) {
+                        expectCached = delta < cache && i != max &&
+                            (((i - min) % cache) != 0);
+                    }
+
+                    doOne(msg, seq, txn, delta, i, expectCached);
+
+                    /* Test for end without causing long overflow. */
+                    if (i > max - delta) {
+                        if (delta == 1) {
+                            assertEquals(msg, i, max);
+                        }
+                        break;
+                    }
+                }
+                if (wrap) {
+                    assertEquals(msg, min, seq.get(txn, delta));
+                    assertEquals(msg, min + delta, seq.get(txn, delta));
+                }
+            } else {
+                for (long i = max;; i -= delta) {
+
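+                    /* Mirror of the caching rule in the increment case. */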
+                    boolean expectCached = false;
+                    if (cache != 0) {
+                        expectCached = delta < cache && i != min &&
+                            (((max - i) % cache) != 0);
+                    }
+
+                    doOne(msg, seq, txn, delta, i, expectCached);
+
+                    /* Test for end without causing long overflow. */
+                    if (i < min + delta) {
+                        if (delta == 1) {
+                            assertEquals(msg, i, min);
+                        }
+                        break;
+                    }
+                }
+                if (wrap) {
+                    assertEquals(msg, max, seq.get(txn, delta));
+                    assertEquals(msg, max - delta, seq.get(txn, delta));
+                }
+            }
+
+            if (!wrap) {
+                try {
+                    seq.get(txn, delta);
+                    fail(msg);
+                } catch (DatabaseException expected) {
+                    String emsg = expected.getMessage();
+                    assertTrue(emsg, emsg.indexOf("overflow") >= 0);
+                }
+            }
+
+            txnCommit(txn);
+            seq.close();
+        }
+    }
+
+    private void doOne(String msg,
+                       Sequence seq,
+                       Transaction txn,
+                       int delta,
+                       long expectValue,
+                       boolean expectCached)
+        throws DatabaseException {
+
+        msg += " value=" + expectValue;
+
+        try {
+            assertEquals(msg, expectValue, seq.get(txn, delta));
+        } catch (DatabaseException e) {
+            fail(msg + ' ' + e);
+        }
+
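+        /*
+         * Clear the stats on each read so the counters below reflect only
+         * the single get() above.
+         */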
+        StatsConfig clearConfig = new StatsConfig();
+        clearConfig.setFast(true);
+        clearConfig.setClear(true);
+        SequenceStats stats = seq.getStats(clearConfig);
+
+        assertEquals(msg, 1, stats.getNGets());
+        assertEquals(msg, expectCached ? 1 : 0, stats.getNCachedGets());
+    }
+
+    private Database openDb(String name)
+        throws DatabaseException {
+
+        return openDb(name, false);
+    }
+
+    private Database openDb(String name, boolean duplicates)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(duplicates);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/ToManyTest.java b/test/com/sleepycat/je/test/ToManyTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..560b9d885f37ce4a319a3be2cc731cdbd22b7303
--- /dev/null
+++ b/test/com/sleepycat/je/test/ToManyTest.java
@@ -0,0 +1,346 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ToManyTest.java,v 1.10.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.test;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests multi-key secondary operations.  Exhaustive API testing of multi-key
+ * secondaries is part of SecondaryTest and ForeignKeyTest, which test the use
+ * of a single key with SecondaryMultiKeyCreator.  This class adds tests for
+ * multiple keys per record.
+ */
+public class ToManyTest extends TxnTestCase {
+
+    /*
+     * The primary database has a single byte key and byte[] array data.  Each
+     * byte of the data array is a secondary key in the to-many index.
+     *
+     * The primary map mirrors the primary database and contains Byte keys and
+     * a set of Byte objects for each map entry value.  The secondary map
+     * mirrors the secondary database, and for every secondary key (Byte)
+     * contains a set of primary keys (set of Byte).
+     */
+    private Map<Byte, Set<Byte>> priMap0 = new HashMap<Byte, Set<Byte>>();
+    private Map<Byte, Set<Byte>> secMap0 = new HashMap<Byte, Set<Byte>>();
+    private Database priDb;
+    private SecondaryDatabase secDb;
+
+    public static Test suite() {
+
+        /*
+         * This test does not work with TXN_NULL because without transactions
+         * we cannot abort the update in a one-to-many test when the
+         * secondary key already exists in another primary record.
+         */
+        return txnTestSuite(ToManyTest.class, null,
+                            new String[] {TxnTestCase.TXN_USER,
+                                          TxnTestCase.TXN_AUTO});
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        priMap0 = null;
+        secMap0 = null;
+        priDb = null;
+        secDb = null;
+    }
+
+    public void testManyToMany()
+        throws DatabaseException {
+
+        priDb = openPrimary("pri");
+        secDb = openSecondary(priDb, "sec", true /*dups*/);
+
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, new byte[] {0});
+        writeAndVerify((byte) 0, new byte[] {0, 1});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 0, new byte[] {1, 2});
+        writeAndVerify((byte) 0, new byte[] {2});
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, null);
+
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 1, new byte[] {1, 2, 3});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, null);
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 1, new byte[] {1, 2, 3});
+        writeAndVerify((byte) 0, new byte[] {0});
+        writeAndVerify((byte) 1, new byte[] {3});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, null);
+
+        secDb.close();
+        priDb.close();
+    }
+
+    public void testOneToMany()
+        throws DatabaseException {
+
+        priDb = openPrimary("pri");
+        secDb = openSecondary(priDb, "sec", false /*dups*/);
+
+        writeAndVerify((byte) 0, new byte[] {1, 5});
+        writeAndVerify((byte) 1, new byte[] {2, 4});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 5, 6});
+        writeAndVerify((byte) 1, new byte[] {2, 3, 4});
+        write((byte) 0, new byte[] {3}, true /*expectException*/);
+        writeAndVerify((byte) 1, new byte[] {});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2, 3, 4, 5, 6});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, new byte[] {0, 1, 2, 3, 4, 5, 6});
+        writeAndVerify((byte) 1, null);
+
+        secDb.close();
+        priDb.close();
+    }
+
+    /**
+     * Puts or deletes a single primary record, updates the maps, and verifies
+     * that the maps match the databases.
+     */
+    private void writeAndVerify(byte priKey, byte[] priData)
+        throws DatabaseException {
+
+        write(priKey, priData, false /*expectException*/);
+        updateMaps(new Byte(priKey), bytesToSet(priData));
+        verify();
+    }
+
+    /**
+     * Puts or deletes a single primary record.
+     */
+    private void write(byte priKey, byte[] priData, boolean expectException)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry(new byte[] { priKey });
+        DatabaseEntry dataEntry = new DatabaseEntry(priData);
+
+        Transaction txn = txnBegin();
+        try {
+            OperationStatus status;
+            if (priData != null) {
+                status = priDb.put(txn, keyEntry, dataEntry);
+            } else {
+                status = priDb.delete(txn, keyEntry);
+            }
+            assertSame(OperationStatus.SUCCESS, status);
+            txnCommit(txn);
+            assertTrue(!expectException);
+        } catch (Exception e) {
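+            /*
+             * Expected only in the one-to-many case, when the put would
+             * reuse a secondary key that belongs to another primary record.
+             */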
+            txnAbort(txn);
+            assertTrue(e.toString(), expectException);
+        }
+    }
+
+    /**
+     * Updates map 0 to reflect a record added to the primary database.
+     */
+    private void updateMaps(Byte priKey, Set<Byte> newPriData) {
+
+        /* Remove old secondary keys. */
+        Set<Byte> oldPriData = priMap0.get(priKey);
+        if (oldPriData != null) {
+            for (Iterator<Byte> i = oldPriData.iterator(); i.hasNext();) {
+                Byte secKey = (Byte) i.next();
+                Set<Byte> priKeySet = secMap0.get(secKey);
+                assertNotNull(priKeySet);
+                assertTrue(priKeySet.remove(priKey));
+                if (priKeySet.isEmpty()) {
+                    secMap0.remove(secKey);
+                }
+            }
+        }
+
+        if (newPriData != null) {
+            /* Put primary entry. */
+            priMap0.put(priKey, newPriData);
+            /* Add new secondary keys. */
+            for (Iterator<Byte> i = newPriData.iterator(); i.hasNext();) {
+                Byte secKey = i.next();
+                Set<Byte> priKeySet = secMap0.get(secKey);
+                if (priKeySet == null) {
+                    priKeySet = new HashSet<Byte>();
+                    secMap0.put(secKey, priKeySet);
+                }
+                assertTrue(priKeySet.add(priKey));
+            }
+        } else {
+            /* Remove primary entry. */
+            priMap0.remove(priKey);
+        }
+    }
+
+    /**
+     * Verifies that the maps match the databases.
+     */
+    private void verify()
+        throws DatabaseException {
+
+        Transaction txn = txnBeginCursor();
+        DatabaseEntry priKeyEntry = new DatabaseEntry();
+        DatabaseEntry secKeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+        Map<Byte, Set<Byte>> priMap1 = new HashMap<Byte, Set<Byte>>();
+        Map<Byte, Set<Byte>> priMap2 = new HashMap<Byte, Set<Byte>>();
+        Map<Byte, Set<Byte>> secMap1 = new HashMap<Byte, Set<Byte>>();
+        Map<Byte, Set<Byte>> secMap2 = new HashMap<Byte, Set<Byte>>();
+
+        /* Build map 1 from the primary database. */
+        Cursor priCursor = priDb.openCursor(txn, null);
+        while (priCursor.getNext(priKeyEntry, dataEntry, null) ==
+               OperationStatus.SUCCESS) {
+            Byte priKey = new Byte(priKeyEntry.getData()[0]);
+            Set<Byte> priData = bytesToSet(dataEntry.getData());
+
+            /* Update primary map. */
+            priMap1.put(priKey, priData);
+
+            /* Update secondary map. */
+            for (Iterator<Byte> i = priData.iterator(); i.hasNext();) {
+                Byte secKey = i.next();
+                Set<Byte> priKeySet = secMap1.get(secKey);
+                if (priKeySet == null) {
+                    priKeySet = new HashSet<Byte>();
+                    secMap1.put(secKey, priKeySet);
+                }
+                assertTrue(priKeySet.add(priKey));
+            }
+
+            /*
+             * Add empty primary records to priMap2 while we're here, since
+             * they cannot be built from the secondary database.
+             */
+            if (priData.isEmpty()) {
+                priMap2.put(priKey, priData);
+            }
+        }
+        priCursor.close();
+
+        /* Build map 2 from the secondary database. */
+        SecondaryCursor secCursor = secDb.openSecondaryCursor(txn, null);
+        while (secCursor.getNext(secKeyEntry, priKeyEntry, dataEntry, null) ==
+               OperationStatus.SUCCESS) {
+            Byte priKey = new Byte(priKeyEntry.getData()[0]);
+            Byte secKey = new Byte(secKeyEntry.getData()[0]);
+
+            /* Update primary map. */
+            Set<Byte> priData = priMap2.get(priKey);
+            if (priData == null) {
+                priData = new HashSet<Byte>();
+                priMap2.put(priKey, priData);
+            }
+            priData.add(secKey);
+
+            /* Update secondary map. */
+            Set<Byte> secData = secMap2.get(secKey);
+            if (secData == null) {
+                secData = new HashSet<Byte>();
+                secMap2.put(secKey, secData);
+            }
+            secData.add(priKey);
+        }
+        secCursor.close();
+
+        /* Compare. */
+        assertEquals(priMap0, priMap1);
+        assertEquals(priMap1, priMap2);
+        assertEquals(secMap0, secMap1);
+        assertEquals(secMap1, secMap2);
+
+        txnCommit(txn);
+    }
+
+    private Set<Byte> bytesToSet(byte[] bytes) {
+        Set<Byte> set = null;
+        if (bytes != null) {
+            set = new HashSet<Byte>();
+            for (int i = 0; i < bytes.length; i += 1) {
+                set.add(new Byte(bytes[i]));
+            }
+        }
+        return set;
+    }
+
+    private Database openPrimary(String name)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private SecondaryDatabase openSecondary(Database priDb,
+                                            String dbName,
+                                            boolean dups)
+        throws DatabaseException {
+
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setMultiKeyCreator(new MyKeyCreator());
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private static class MyKeyCreator implements SecondaryMultiKeyCreator {
+
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        Set<DatabaseEntry> results)
+            throws DatabaseException {
+
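+            /* Each byte of the primary data is one secondary key. */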
+            for (int i = 0; i < data.getSize(); i += 1) {
+                results.add(new DatabaseEntry(data.getData(), i, 1));
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/BinDeltaTest.java b/test/com/sleepycat/je/tree/BinDeltaTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..756d2fb750af2af1ad845b1b20969491389fa978
--- /dev/null
+++ b/test/com/sleepycat/je/tree/BinDeltaTest.java
@@ -0,0 +1,296 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BinDeltaTest.java,v 1.50.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Exercise the delta based BIN logging.
+ */
+public class BinDeltaTest extends TestCase {
+    private static final String DB_NAME = "test";
+    private static final boolean DEBUG = false;
+    private Environment env;
+    private File envHome;
+    private Database db;
+    private LogManager logManager;
+
+    public BinDeltaTest() throws DatabaseException {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+
+        /* Print keys as numbers. */
+        Key.DUMP_TYPE = DumpType.BINARY;
+    }
+
+    public void setUp() throws IOException, DatabaseException {
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        /*
+         * Properties for creating an environment.  Disable the evictor for
+         * this test, use larger BINS.
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.NODE_MAX.getName(), "50");
+        envConfig.setConfigParam
+	    (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "50");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+        logManager = DbInternal.envGetEnvironmentImpl(env).getLogManager();
+    }
+
+    public void tearDown() throws IOException, DatabaseException {
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException E) {
+            }
+        }
+        TestUtils.removeFiles("TearDown", envHome,
+                              FileManager.JE_SUFFIX, true);
+    }
+
+    /**
+     * Create a db, fill it with records from start to end, and return the
+     * first BIN.
+     */
+    private BIN initDb(int start, int end)
+        throws DatabaseException {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+
+        addRecords(start, end);
+
+        /* Now reach into the tree and get the first BIN */
+        Locker txn = BasicLocker.
+	    createBasicLocker(DbInternal.envGetEnvironmentImpl(env));
+        CursorImpl internalCursor =
+	    new CursorImpl(DbInternal.dbGetDatabaseImpl(db), txn);
+        assertTrue(internalCursor.positionFirstOrLast(true, null));
+        BIN firstBIN = internalCursor.getBIN();
+        firstBIN.releaseLatch();
+        internalCursor.close();
+        txn.operationEnd();
+        return firstBIN;
+    }
+
+    /**
+     * Modify the data, just to dirty the BIN.
+     */
+    private void modifyRecords(int start, int end, int increment)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        DatabaseEntry searchKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+        DatabaseEntry newData = new DatabaseEntry();
+
+        for (int i = start; i <= end; i++) {
+            searchKey.setData(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getSearchKey(searchKey, foundData,
+                                             LockMode.DEFAULT));
+            newData.setData(TestUtils.getTestArray(i+increment));
+            cursor.putCurrent(newData);
+        }
+	cursor.close();
+        txn.commit();
+    }
+
+    /*
+     * Add the specified records.
+     */
+    private void addRecords(int start, int end)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = start; i < end; i++) {
+            byte[] keyData = TestUtils.getTestArray(i);
+            byte[] dataData = TestUtils.byteArrayCopy(keyData);
+            key.setData(keyData);
+            data.setData(dataData);
+            db.put(null, key, data);
+        }
+    }
+
+    /**
+     * Simple test, delta a BIN several times, reconstruct.
+     */
+    public void testSimple()
+    	throws Throwable {
+
+        try {
+            /* Create a db, insert records 10-29, and get the first BIN. */
+            BIN bin = initDb(10, 30);
+
+            /* Log a full version. */
+	    bin.latch();
+            long fullLsn = bin.log
+                (logManager, true, false, false, false, null);
+	    bin.releaseLatch();
+            assertTrue(fullLsn != DbLsn.NULL_LSN);
+
+            if (DEBUG) {
+                System.out.println("Start");
+                System.out.println(bin.dumpString(0, true));
+            }
+
+            /* Modify some of the data, add data so the BIN is changed. */
+            modifyRecords(11,13,10);
+            addRecords(1,3);
+            logAndCheck(bin);
+
+            /* Modify more of the data, so the BIN is changed. */
+            modifyRecords(14,15,10);
+            logAndCheck(bin);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            db.close();
+	}
+    }
+
+    /**
+     * Test that a delta is correctly generated when there are entries
+     * that have been aborted and rolled back.
+     *
+     * The case we're trying to test (which was handled incorrectly before):
+     *  - a record is deleted
+     *  - a full version of BIN x is written to the log, reflecting that
+     *    deletion.
+     *  - the deleting txn is aborted, so the record is restored. Now the
+     *    BIN has an entry where the child LSN is less than the last full
+     *    BIN version LSN.
+     *  - generate a delta, make sure that the restoration of the record is
+     *    present.
+     */
+    public void testUndo()
+        throws Throwable {
+
+        try {
+            /* Create a db, insert records 10-29, and get the first BIN. */
+            BIN bin = initDb(10, 30);
+
+            /* Delete the first record, then abort the delete. */
+            Transaction txn = env.beginTransaction(null, null);
+            Cursor cursor = db.openCursor(txn, null);
+            DatabaseEntry firstKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+            OperationStatus status = cursor.getFirst(firstKey, foundData,
+						     LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            status = cursor.delete();
+            assertEquals(OperationStatus.SUCCESS, status);
+	    cursor.close();
+
+            /* Log a full version. This will reflect the delete. */
+	    bin.latch();
+            long fullLsn = bin.log
+                (logManager, true, false, false, false, null);
+	    bin.releaseLatch();
+            assertTrue(fullLsn != DbLsn.NULL_LSN);
+
+            /*
+             * Roll back the deletion. Now the full version of the LSN is out
+             * of date.
+             */
+            txn.abort();
+
+            /*
+             * Make sure a delta reflects the abort, even though the abort
+             * returns an older LSN back into the BIN.
+             */
+            logAndCheck(bin);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            db.close();
+        }
+    }
+
+    /* Check that a full version is logged when the delta percent exceeds
+       the max. */
+    /* Check that the max-deltas limit works. */
+    /* Check knownDelete. */
+
+    /**
+     * Log the targetBIN, then read it back from the log and make sure
+     * the recreated BIN matches the in memory BIN.
+     */
+    private void logAndCheck(BIN targetBIN)
+        throws DatabaseException {
+
+        /*
+         * Log it as a delta.  If the logging was done as a delta, this
+         * method returns NULL_LSN, so we expect NULL_LSN.
+         */
+        assertTrue(targetBIN.log
+                    (logManager, true, false, false, false, null) ==
+		   DbLsn.NULL_LSN);
+
+        /* Read the delta back. */
+        LogEntry partial =
+            logManager.getLogEntry(targetBIN.getLastDeltaVersion());
+
+        /* Make sure that this was a delta entry. */
+        assertTrue(partial.getMainItem() instanceof BINDelta);
+        BINDelta delta = (BINDelta) partial.getMainItem();
+
+        /* Compare to the current version. */
+        BIN createdBIN =
+            delta.reconstituteBIN(DbInternal.envGetEnvironmentImpl(env));
+        if (DEBUG) {
+            System.out.println("created");
+            System.out.println(createdBIN.dumpString(0, true));
+        }
+
+        assertEquals(targetBIN.getClass().getName(),
+                     createdBIN.getClass().getName());
+        assertEquals(targetBIN.getNEntries(), createdBIN.getNEntries());
+
+        for (int i = 0; i < createdBIN.getNEntries(); i++) {
+            assertEquals("LSN " + i, targetBIN.getLsn(i),
+                         createdBIN.getLsn(i));
+        }
+        assertEquals(true, createdBIN.getDirty());
+        assertEquals(true, targetBIN.getDirty());
+    }
+}
diff --git a/test/com/sleepycat/je/tree/GetParentNodeTest.java b/test/com/sleepycat/je/tree/GetParentNodeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..45fd5079ea1d91aca30278912ef550b3414d6cc6
--- /dev/null
+++ b/test/com/sleepycat/je/tree/GetParentNodeTest.java
@@ -0,0 +1,481 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: GetParentNodeTest.java,v 1.47.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+
+public class GetParentNodeTest extends TestCase {
+    static private final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private IN rootIN;
+    private IN firstLevel2IN;
+    private BIN firstBIN;
+    private DBIN firstDBIN;
+
+    public GetParentNodeTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        initEnv();
+    }
+
+    public void tearDown()
+        throws Exception {
+        try {
+            db.close();
+            env.close();
+        } catch (DatabaseException E) {
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, true);
+    }
+
+    private void initEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4");
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        String databaseName = "testDb";
+        Transaction txn = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(txn, databaseName, dbConfig);
+        txn.commit();
+    }
+
+    /**
+     * Test getParentINForChildIN and getParentBINForChildLN painstakingly on
+     * a hand-constructed tree.
+     */
+    public void testBasic()
+        throws Exception {
+
+        try {
+            /*
+             * Make a tree w/3 levels in the main tree and a single dup
+             * tree. The dupTree has two levels. The tree looks like this:
+	     *
+             *            root(key=a)
+             *             |
+             *      +---------------------------+
+             *    IN(key=a)                   IN(key=e)
+             *     |                            |
+             *  +------------------+       +--------+--------+
+             * BIN(key=a)       BIN(c)    BIN(e)   BIN(g)  BIN(i)
+             *   |   |            | |      | |       | |     | | |
+             *  LNa DINb        LNc,d    LNe,f     LNg,h   LNi,j,k
+             *       |
+             *       +----------+-------------+
+             *       |          |             |
+             *   DBIN(data1) DBIN(data3)  DBIN(data5)
+             *    LN LN         LN LN      LN LN LN
+             */
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("a"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("c"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("d"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("e"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("f"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("g"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("h"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("i"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("j"),
+                                new StringDbt("data1")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("k"),
+                                new StringDbt("data1")));
+
+            /* Add one dup tree. */
+            byte[] dupKey = "b".getBytes();
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data2")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data3")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data4")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data5")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+                                new StringDbt("data6")));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new StringDbt("b"),
+				new StringDbt("data7")));
+
+            /*
+             * Test exact matches.
+             */
+            checkTreeUsingExistingNodes(dupKey, true);
+            checkTreeUsingExistingNodes(dupKey, false);
+
+            /* Test potential matches. */
+            checkTreeUsingPotentialNodes();
+
+            /* Test deletes. */
+	    checkTreeWithDeletedBins(true);
+	    checkTreeWithDeletedBins(false);
+
+            /* Should be no latches held. */
+            assertEquals(0, LatchSupport.countLatchesHeld());
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    private void checkTreeUsingExistingNodes(byte[] dupKey,
+                                             boolean requireExactMatch)
+        throws DatabaseException {
+
+        /* Start at the root. */
+        DatabaseImpl database = DbInternal.dbGetDatabaseImpl(db);
+        Tree tree = database.getTree();
+
+        if (DEBUG) {
+            tree.dump();
+        }
+
+        rootIN = tree.withRootLatchedShared
+	    (new GetRoot(DbInternal.dbGetDatabaseImpl(db)));
+        rootIN.latch();
+        SearchResult result =
+            tree.getParentINForChildIN(rootIN, true, CacheMode.DEFAULT);
+        assertFalse(result.exactParentFound);
+        assertEquals(rootIN.getNEntries(), 2);
+
+        /* Second and third level. */
+        BIN dupTreeBin = null;
+        DIN dupTreeDINRoot = null;
+        firstBIN = null;
+        int dupIndex = -1;
+        for (int i = 0; i < rootIN.getNEntries(); i++) {
+            /* Each level 2 IN. */
+            IN in = (IN) rootIN.fetchTarget(i);
+            if (i == 0) {
+                firstLevel2IN = in;
+            }
+            checkMatch(tree, in, rootIN, i, requireExactMatch);
+
+            /* For each BIN, find its parent, and then find its LNs. */
+            for (int j = 0; j < in.getNEntries(); j++) {
+                BIN bin = (BIN) in.fetchTarget(j);
+                if (firstBIN == null) {
+                    firstBIN = bin;
+                }
+                checkMatch(tree, bin, in, j, requireExactMatch);
+
+                for (int k = 0; k < bin.getNEntries(); k++) {
+                    Node n = bin.fetchTarget(k);
+                    if (n instanceof LN) {
+                        checkMatch(tree, (LN) n, bin, bin.getKey(k),
+                                   null, k, bin.getLsn(k));
+                    }
+                }
+
+                int findIndex = bin.findEntry(dupKey, false, true);
+                if (findIndex > 0) {
+                    dupIndex = findIndex;
+                    dupTreeDINRoot =
+                        (DIN) bin.fetchTarget(dupIndex);
+                    dupTreeBin = bin;
+                }
+            }
+        }
+
+        /* Check dup tree, assumes a 2 level tree. */
+        assertTrue(dupTreeBin != null);
+        assertTrue(dupTreeDINRoot != null);
+        checkMatch(tree, dupTreeDINRoot, dupTreeBin, dupIndex,
+                   requireExactMatch);
+        assertTrue(dupTreeDINRoot.getNEntries() > 0);
+
+        for (int i = 0; i < dupTreeDINRoot.getNEntries(); i++) {
+            IN in = (IN) dupTreeDINRoot.fetchTarget(i);
+            checkMatch(tree, in, dupTreeDINRoot, i, requireExactMatch);
+            if (firstDBIN == null) {
+                firstDBIN = (DBIN)in;
+            }
+
+            for (int k = 0; k < in.getNEntries(); k++) {
+                Node n = in.fetchTarget(k);
+                LN ln = (LN) n;
+                checkMatch(tree, ln, (BIN)in, dupKey,
+                           ln.getData(),
+                           k, in.getLsn(k));
+            }
+        }
+    }
+
+    /*
+     * Do a parent search, expect to find the parent, check that we do.
+     */
+    private void checkMatch(Tree tree,
+                            IN target,
+                            IN parent,
+                            int index,
+                            boolean requireExactMatch)
+        throws DatabaseException {
+
+        target.latch();
+        SearchResult result = tree.getParentINForChildIN
+            (target, requireExactMatch, CacheMode.DEFAULT);
+        assertTrue(result.exactParentFound);
+        assertEquals("Target=" + target + " parent=" + parent,
+                     index, result.index);
+        assertEquals(parent, result.parent);
+        parent.releaseLatch();
+    }
+
+    /*
+     * Search for the BIN for this LN.
+     */
+    private void checkMatch(Tree tree,
+			    LN target,
+			    BIN parent,
+			    byte[] mainKey,
+                            byte[] dupKey,
+			    int index,
+			    long expectedLsn)
+        throws DatabaseException {
+        TreeLocation location = new TreeLocation();
+
+
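+        /*
+         * Search with several combinations of the boolean flags; every
+         * combination should locate the same parent BIN, index, and LSN.
+         */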
+        assertTrue
+	    (tree.getParentBINForChildLN(location, mainKey, dupKey, target,
+					 false, true, false,
+                                         CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+
+        assertTrue
+	    (tree.getParentBINForChildLN(location, mainKey, dupKey, target,
+					 true, false, true,
+                                         CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+
+        assertTrue
+	    (tree.getParentBINForChildLN(location, mainKey, dupKey, target,
+					 true, true, false,
+                                         CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+    }
+
+    private class GetRoot implements WithRootLatched {
+
+        private DatabaseImpl db;
+
+        GetRoot(DatabaseImpl db) {
+	    this.db = db;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            return (IN) root.fetchTarget(db, null);
+        }
+    }
+
+    /**
+     * Make up non-existent nodes and see where they would fit in.  This
+     * exercises recovery-type processing and cleaning.
+     */
+    private void checkTreeUsingPotentialNodes()
+        throws DatabaseException {
+
+        DatabaseImpl database = DbInternal.dbGetDatabaseImpl(db);
+        Tree tree = database.getTree();
+
+        /*
+         * Make an IN with the key "ab". Its potential parent should be the
+         * first level 2 IN.
+         */
+        IN inAB = new IN(database, "ab".getBytes(), 4, 2);
+        checkPotential(tree, inAB, firstLevel2IN);
+
+        /*
+         * Make a BIN with the key "ab". Its potential parent should be the
+         * first level 2 IN.
+         */
+        BIN binAB =
+	    new BIN(database, "ab".getBytes(), 4, 1);
+        checkPotential(tree, binAB, firstLevel2IN);
+
+        /*
+         * Make a DIN with the key "a". Its potential parent should be the
+         * first BIN, BIN(key=a).
+         */
+        DIN dinA = new DIN(database,
+                           "data1".getBytes(),
+                           4,
+                           "a".getBytes(),
+                           null, 3);
+        checkPotential(tree, dinA, firstBIN);
+
+        /*
+         * Make an LN with the key "ab". Its potential parent should be the
+         * first BIN, BIN(key=a).
+         */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        LN LNab = new LN("foo".getBytes(), envImpl, false /* replicated */);
+        byte[] mainKey = "ab".getBytes();
+        checkPotential(tree, LNab, firstBIN, mainKey,
+                       LNab.getData(), mainKey);
+
+        /*
+         * Make a dup LN with the key "b". Its potential parent should be the
+         * first DBIN.
+         */
+        LN LNdata = new LN("data".getBytes(), envImpl, false /* replicated */);
+        mainKey = "b".getBytes();
+        byte[] dupKey = LNdata.getData();
+        checkPotential(tree, LNdata, firstDBIN, mainKey, dupKey, dupKey);
+    }
+
+    private void checkPotential(Tree tree, IN potential, IN expectedParent)
+        throws DatabaseException {
+
+        /* Try an exact match, expect a failure, then try an inexact match. */
+        potential.latch();
+        SearchResult result = tree.getParentINForChildIN
+            (potential, true, CacheMode.DEFAULT);
+        assertFalse(result.exactParentFound);
+        assertTrue(result.parent == null);
+
+        potential.latch();
+        result =
+            tree.getParentINForChildIN(potential, false, CacheMode.DEFAULT);
+        assertFalse(result.exactParentFound);
+        assertEquals("expected = " + expectedParent.getNodeId() +
+                     " got" + result.parent.getNodeId(),
+                     expectedParent, result.parent);
+        result.parent.releaseLatch();
+    }
+
+    private void checkPotential(Tree tree, LN potential, BIN expectedParent,
+                                byte[] mainKey, byte[] dupKey, byte[] expectedKey)
+        throws DatabaseException {
+
+        /* Try an exact match, expect a failure, then try an inexact match. */
+        TreeLocation location = new TreeLocation();
+        assertFalse(tree.getParentBINForChildLN
+                    (location, mainKey, dupKey, potential, false,
+                     false, true, CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(location.bin, expectedParent);
+        assertEquals(expectedKey, location.lnKey);
+    }
+
+    private void checkTreeWithDeletedBins(boolean requireExactMatch)
+        throws DatabaseException {
+
+        /*
+         * Mark all refs from the INs to the BINs as "known deleted".  Start
+         * at the root.
+         */
+        DatabaseImpl database = DbInternal.dbGetDatabaseImpl(db);
+        Tree tree = database.getTree();
+
+        rootIN = tree.withRootLatchedShared
+            (new GetRoot(DbInternal.dbGetDatabaseImpl(db)));
+
+        /* Second and third level. */
+        for (int i = 0; i < rootIN.getNEntries(); i++) {
+            /* Each level 2 IN. */
+            IN in = (IN) rootIN.fetchTarget(i);
+            for (int j = 0; j < in.getNEntries(); j++) {
+                BIN bin = (BIN) in.getTarget(j);
+                in.setKnownDeleted(j);
+                checkDeletedMatch(tree, bin, in, j, requireExactMatch);
+            }
+        }
+    }
+
+    /*
+     * Do a parent search, expect to find the parent, check that we do.
+     */
+    private void checkDeletedMatch(Tree tree,
+                                   IN target,
+                                   IN parent,
+                                   int index,
+                                   boolean requireExactMatch)
+        throws DatabaseException {
+
+        target.latch();
+        SearchResult result = tree.getParentINForChildIN
+            (target, requireExactMatch, CacheMode.DEFAULT);
+        assertFalse(result.exactParentFound);
+        assertEquals("Target=" + target + " parent=" + parent,
+                     index, result.index);
+        if (requireExactMatch) {
+            assertEquals(null, result.parent);
+        } else {
+            assertEquals(parent, result.parent);
+            parent.releaseLatch();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/INTest.java b/test/com/sleepycat/je/tree/INTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..6355caa56f6fd54a543dd6749b4d36c58321158a
--- /dev/null
+++ b/test/com/sleepycat/je/tree/INTest.java
@@ -0,0 +1,395 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: INTest.java,v 1.67.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class INTest extends TestCase {
+    static private final int N_BYTES_IN_KEY = 3;
+    private int initialINCapacity;
+    private DatabaseImpl db = null;
+    static private long FAKE_LSN = DbLsn.makeLsn(0, 0);
+    private EnvironmentImpl noLogEnv;
+    private File envHome;
+
+    public INTest()
+        throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        noLogEnv = new EnvironmentImpl(envHome,
+                                       envConfig,
+                                       null /*sharedCacheEnv*/,
+                                       false /*replicationIntended*/);
+        initialINCapacity =
+            noLogEnv.getConfigManager().getInt(EnvironmentParams.NODE_MAX);
+        db = new DatabaseImpl("foo", new DatabaseId(11), noLogEnv,
+                              new DatabaseConfig());
+    }
+
+    public void tearDown()
+        throws DatabaseException {
+
+        db.releaseTreeAdminMemory();
+        noLogEnv.close();
+    }
+
+    public void testFindEntry()
+        throws DatabaseException {
+
+        IN in = new IN(db, new byte[0], initialINCapacity, 7);
+        in.latch();
+
+        byte[] zeroBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+            zeroBytes[i] = 0x00;
+        }
+
+        byte[] maxBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+
+            /* 
+             * Use FF since that sets the sign bit negative on a byte.  This
+             * checks the Key.compareTo routine for proper unsigned
+             * comparisons.
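+             * For example, (byte) 0xFF is -1 as a signed Java byte, yet it
+             * must compare greater than 0x00 when treated as unsigned (255).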
+             */
+            maxBytes[i] = (byte) 0xFF;
+        }
+
+        assertTrue(in.findEntry(zeroBytes, false, false) == -1);
+        assertTrue(in.findEntry(maxBytes, false, false) == -1);
+        assertTrue(in.findEntry(zeroBytes, false, true) == -1);
+        assertTrue(in.findEntry(maxBytes, false, true) == -1);
+        assertTrue(in.findEntry(zeroBytes, true, false) == -1);
+        assertTrue(in.findEntry(maxBytes, true, false) == -1);
+        assertTrue(in.findEntry(zeroBytes, true, true) == -1);
+        assertTrue(in.findEntry(maxBytes, true, true) == -1);
+        for (int i = 0; i < initialINCapacity; i++) {
+
+            /* 
+             * Insert a key and check that we get the same index in return from
+             * the binary search.  Check the next highest and next lowest keys
+             * also.
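+             * Keys are built as {0x01, (byte) i, 0x10}, so placing each key
+             * at index i via setEntry keeps the entries in ascending order.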
+             */
+            byte[] keyBytes = new byte[N_BYTES_IN_KEY];
+            byte[] nextKeyBytes = new byte[N_BYTES_IN_KEY];
+            byte[] prevKeyBytes = new byte[N_BYTES_IN_KEY];
+            nextKeyBytes[0] = prevKeyBytes[0] = keyBytes[0] = 0x01;
+            nextKeyBytes[1] = prevKeyBytes[1] = keyBytes[1] = (byte) i;
+            nextKeyBytes[2] = prevKeyBytes[2] = keyBytes[2] = 0x10;
+            nextKeyBytes[2]++;
+            prevKeyBytes[2]--;
+            in.setEntry(i, null, keyBytes, FAKE_LSN, (byte) 0);
+            assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+            assertTrue(in.findEntry(maxBytes, false, false) == i);
+            assertTrue(in.findEntry(zeroBytes, false, true) == -1);
+            assertTrue(in.findEntry(maxBytes, false, true) == -1);
+            assertTrue(in.findEntry(zeroBytes, true, false) == -1);
+            assertTrue(in.findEntry(maxBytes, true, false) == i);
+            assertTrue(in.findEntry(zeroBytes, true, true) == -1);
+            assertTrue(in.findEntry(maxBytes, true, true) == -1);
+            for (int j = 1; j < in.getNEntries(); j++) { // 0th key is virtual
+                assertTrue(in.findEntry(in.getKey(j), false, false)
+                           == j);
+                assertTrue(in.findEntry(in.getKey(j), false, true)
+                           == j);
+                assertTrue(in.findEntry(in.getKey(j), true, false) ==
+                           (j | IN.EXACT_MATCH));
+                assertTrue(in.findEntry(in.getKey(j), true, true) ==
+                           (j | IN.EXACT_MATCH));
+                assertTrue(in.findEntry(nextKeyBytes, false, false) == i);
+                assertTrue(in.findEntry(prevKeyBytes, false, false) == i - 1);
+                assertTrue(in.findEntry(nextKeyBytes, false, true) == -1);
+                assertTrue(in.findEntry(prevKeyBytes, false, true) == -1);
+            }
+        }
+        in.releaseLatch();
+    }
+
+    public void testInsertEntry()
+        throws DatabaseException {
+
+        for (int i = 0; i < 10; i++) {          // cwl: consider upping this
+            doInsertEntry(false);
+            doInsertEntry(true);
+        }
+    }
+
+    private void doInsertEntry(boolean withMinMax)
+        throws DatabaseException {
+
+        IN in = new IN(db, new byte[0], initialINCapacity, 7);
+        in.latch();
+
+        byte[] zeroBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+            zeroBytes[i] = 0x00;
+        }
+
+        byte[] maxBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+            maxBytes[i] = (byte) 0xFF;
+        }
+
+        if (withMinMax) {
+            try {
+                in.insertEntry(new ChildReference(null, zeroBytes, FAKE_LSN));
+                in.verify(null);
+                in.insertEntry(new ChildReference(null, maxBytes, FAKE_LSN));
+                in.verify(null);
+            } catch (InconsistentNodeException INE) {
+                fail("caught InconsistentNodeException");
+            }
+
+            assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+            assertTrue(in.findEntry(maxBytes, false, false) == 1);
+            /* Shadowed by the virtual 0'th key. */
+            assertTrue(in.findEntry(zeroBytes, false, true) == 0);
+            assertTrue(in.findEntry(maxBytes, false, true) == 1);
+
+            assertTrue(in.findEntry(zeroBytes, true, false) == IN.EXACT_MATCH);
+            assertTrue(in.findEntry(maxBytes, true, false) ==
+                       (1 | IN.EXACT_MATCH));
+            /* Shadowed by the virtual 0'th key. */
+            assertTrue(in.findEntry(zeroBytes, true, true) == IN.EXACT_MATCH);
+            assertTrue(in.findEntry(maxBytes, true, true) ==
+                       (1 | IN.EXACT_MATCH));
+        }
+
+        Random rnd = new Random();
+
+        try {
+            for (int i = 0;
+                 i < initialINCapacity - (withMinMax ? 2 : 0);
+                 i++) {
+
+                /* 
+                 * Insert a key and check that we get the same index in return
+                 * from the binary search.  Check the next highest and next
+                 * lowest keys also.
+                 */
+                byte[] keyBytes = new byte[N_BYTES_IN_KEY];
+
+                /* 
+                 * There's a small chance that we may generate a sequence of
+                 * bytes that is already present; retry until the key is new.
+                 */
+                while (true) {
+                    rnd.nextBytes(keyBytes);
+                    int index = in.findEntry(keyBytes, true, false);
+                    if ((index & IN.EXACT_MATCH) != 0 &&
+                        index >= 0) {
+                        continue;
+                    }
+                    break;
+                }
+
+                in.insertEntry(new ChildReference(null, keyBytes, FAKE_LSN));
+                try {
+                    in.verify(null);
+                } catch (InconsistentNodeException INE) {
+                    Key.DUMP_TYPE = DumpType.BINARY;
+                    in.dump(0);
+                }
+
+                if (withMinMax) {
+                    assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+                    assertTrue(in.findEntry(maxBytes, false, false) ==
+                               in.getNEntries() - 1);
+                    /* Shadowed by the virtual 0'th key. */
+                    assertTrue(in.findEntry(zeroBytes, false, true) == 0);
+                    assertTrue(in.findEntry(maxBytes, false, true) ==
+                               in.getNEntries() - 1);
+
+                    assertTrue(in.findEntry(zeroBytes, true, false) ==
+                               IN.EXACT_MATCH);
+                    assertTrue(in.findEntry(maxBytes, true, false) ==
+                               ((in.getNEntries() - 1) | IN.EXACT_MATCH));
+                    /* Shadowed by the virtual 0'th key. */
+                    assertTrue(in.findEntry(zeroBytes, true, true) ==
+                               IN.EXACT_MATCH);
+                    assertTrue(in.findEntry(maxBytes, true, true) ==
+                               ((in.getNEntries() - 1) | IN.EXACT_MATCH));
+                } else {
+                    assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+                    assertTrue(in.findEntry(maxBytes, false, false) ==
+                               in.getNEntries() - 1);
+                    assertTrue(in.findEntry(zeroBytes, false, true) == -1);
+                    assertTrue(in.findEntry(maxBytes, false, true) == -1);
+
+                    assertTrue(in.findEntry(zeroBytes, true, false) == -1);
+                    assertTrue(in.findEntry(maxBytes, true, false) ==
+                               in.getNEntries() - 1);
+                }
+
+                for (int j = 1; j < in.getNEntries(); j++) {
+                    assertTrue(in.findEntry(in.getKey(j), false, false) == j);
+                    assertTrue(in.findEntry(in.getKey(j), false, true) == j);
+
+                    assertTrue(in.findEntry(in.getKey(j), false, true) == j);
+                    assertTrue(in.findEntry(in.getKey(j), true, false) ==
+                               (j | IN.EXACT_MATCH));
+                }
+            }
+        } catch (InconsistentNodeException INE) {
+            fail("caught InconsistentNodeException");
+        }
+
+        /*
+         * Should be full, so this last insertEntry should throw
+         * InconsistentNodeException.
+         */
+        byte[] keyBytes = new byte[N_BYTES_IN_KEY];
+        rnd.nextBytes(keyBytes);
+
+        try {
+            in.insertEntry(new ChildReference(null, keyBytes, FAKE_LSN));
+            fail("should have thrown InconsistentNodeException, but didn't");
+        } catch (InconsistentNodeException INE) {
+        }
+        in.releaseLatch();
+    }
+
+    public void testDeleteEntry()
+        throws DatabaseException {
+
+        for (int i = 0; i < 10; i++) {           // cwl: consider upping this
+            doDeleteEntry(true);
+            doDeleteEntry(false);
+        }
+    }
+
+    private void doDeleteEntry(boolean withMinMax)
+        throws DatabaseException {
+
+        IN in = new IN(db, new byte[0], initialINCapacity, 7);
+        in.latch();
+
+        byte[] zeroBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+            zeroBytes[i] = 0x00;
+        }
+
+        byte[] maxBytes = new byte[N_BYTES_IN_KEY];
+        for (int i = 0; i < N_BYTES_IN_KEY; i++) {
+            maxBytes[i] = (byte) 0xFF;
+        }
+
+        if (withMinMax) {
+            try {
+                in.insertEntry(new ChildReference(null, zeroBytes, FAKE_LSN));
+                in.verify(null);
+                in.insertEntry(new ChildReference(null, maxBytes, FAKE_LSN));
+                in.verify(null);
+            } catch (InconsistentNodeException INE) {
+                fail("caught InconsistentNodeException");
+            }
+
+            assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+            assertTrue(in.findEntry(maxBytes, false, false) == 1);
+            /* Shadowed by the virtual 0'th key. */
+            assertTrue(in.findEntry(zeroBytes, false, true) == 0);
+            assertTrue(in.findEntry(maxBytes, false, true) == 1);
+
+            assertTrue(in.findEntry(zeroBytes, true, false) == IN.EXACT_MATCH);
+            assertTrue(in.findEntry(maxBytes, true, false) ==
+                       (1 | IN.EXACT_MATCH));
+            /* Shadowed by the virtual 0'th key. */
+            assertTrue(in.findEntry(zeroBytes, true, true) == IN.EXACT_MATCH);
+            assertTrue(in.findEntry(maxBytes, true, true) ==
+                       (1 | IN.EXACT_MATCH));
+        }
+
+        Random rnd = new Random();
+
+        try {
+            /* Fill up the IN with random entries. */
+            for (int i = 0;
+                 i < initialINCapacity - (withMinMax ? 2 : 0);
+                 i++) {
+
+                /* 
+                 * Insert a key and check that we get the same index in return
+                 * from the binary search.  Check the next highest and next
+                 * lowest keys also.
+                 */
+                byte[] keyBytes = new byte[N_BYTES_IN_KEY];
+
+                /*
+                 * There's a small chance that we may generate a sequence of
+                 * bytes that is already present; retry until the key is new.
+                 */
+                while (true) {
+                    rnd.nextBytes(keyBytes);
+                    int index = in.findEntry(keyBytes, true, false);
+                    if ((index & IN.EXACT_MATCH) != 0 &&
+                        index >= 0) {
+                        continue;
+                    }
+                    break;
+                }
+
+                in.insertEntry(new ChildReference(null, keyBytes, FAKE_LSN));
+            }
+
+            if (withMinMax) {
+                assertTrue(in.findEntry(zeroBytes, false, false) == 0);
+                assertTrue(in.findEntry(maxBytes, false, false) ==
+                           in.getNEntries() - 1);
+                /* 
+                 * zeroBytes is in the 0th entry, but that's the virtual key so
+                 * it's not an exact match.
+                 */
+                assertTrue(in.findEntry(zeroBytes, false, true) == 0);
+                assertTrue(in.findEntry(maxBytes, false, true) ==
+                           in.getNEntries() - 1);
+
+                assertTrue(in.findEntry(zeroBytes, false, true) == 0);
+                assertTrue(in.findEntry(maxBytes, false, true) ==
+                           in.getNEntries() - 1);
+                assertTrue(in.findEntry(zeroBytes, true, false) ==
+                           IN.EXACT_MATCH);
+                assertTrue(in.findEntry(maxBytes, true, false) ==
+                           ((in.getNEntries() - 1) | IN.EXACT_MATCH));
+            }
+
+            while (in.getNEntries() > 1) {
+                int i = rnd.nextInt(in.getNEntries() - 1) + 1;
+                assertTrue(in.deleteEntry(in.getKey(i), false));
+            }
+
+            /*
+             * We should only be able to delete the zero Key if it was inserted
+             * in the first place.
+             */
+            assertEquals(withMinMax, in.deleteEntry(zeroBytes, false));
+        } catch (InconsistentNodeException INE) {
+            fail("caught InconsistentNodeException");
+        }
+        in.releaseLatch();
+    }
+}
diff --git a/test/com/sleepycat/je/tree/KeyPrefixTest.java b/test/com/sleepycat/je/tree/KeyPrefixTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d904dd74335659ba1e9d279eb1be4fb03c80e24c
--- /dev/null
+++ b/test/com/sleepycat/je/tree/KeyPrefixTest.java
@@ -0,0 +1,317 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyPrefixTest.java,v 1.2.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.util.TestUtils;
+
+public class KeyPrefixTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public KeyPrefixTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            db.close();
+            env.close();
+        } catch (DatabaseException E) {
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, true);
+    }
+
+    private void initEnv(int nodeMax)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        if (nodeMax > 0) {
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     Integer.toString(nodeMax));
+        }
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        String databaseName = "testDb";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setKeyPrefixing(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, databaseName, dbConfig);
+    }
+
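+    /*
+     * The trailing // BINn comments reflect how these keys are expected to
+     * group into BINs once sorted and inserted with NODE_MAX=5 (see
+     * initEnv(5) in testPrefixBasic).
+     */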
+    private static final String[] keys = {
+        "aaa", "aab", "aac", "aae",                // BIN1
+        "aaf", "aag", "aah", "aaj",                // BIN2
+        "aak", "aala", "aalb", "aam",              // BIN3
+        "aan", "aao", "aap", "aas",                // BIN4
+        "aat", "aau", "aav", "aaz",                // BIN5
+        "baa", "bab", "bac", "bam",                // BIN6
+        "ban", "bax", "bay", "baz",                // BIN7
+        "caa", "cab", "cay", "caz",                // BIN8
+        "daa", "eaa", "faa", "fzz",                // BIN10
+        "Aaza", "Aazb", "aal", "aama"
+    };
+
+    public void testPrefixBasic()
+        throws Exception {
+
+        initEnv(5);
+        Key.DUMP_TYPE = DumpType.TEXT;
+        try {
+
+            /* Build up a tree. */
+            for (int i = 0; i < keys.length; i++) {
+                assertEquals(OperationStatus.SUCCESS,
+                             db.put(null,
+                                    new DatabaseEntry(keys[i].getBytes()),
+                                    new DatabaseEntry(new byte[] { 1 })));
+            }
+
+            String[] sortedKeys = new String[keys.length];
+            System.arraycopy(keys, 0, sortedKeys, 0, keys.length);
+            Arrays.sort(sortedKeys);
+
+            Cursor cursor = null;
+            int i = 0;
+            try {
+                cursor = db.openCursor(null, null);
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+
+                boolean somePrefixSeen = false;
+                while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                       OperationStatus.SUCCESS) {
+                    assertEquals(new String(key.getData()), sortedKeys[i++]);
+                    byte[] prefix =
+                        DbInternal.getCursorImpl(cursor).getBIN().
+                        getKeyPrefix();
+                    if (prefix != null) {
+                        somePrefixSeen = true;
+                    }
+                }
+                assertTrue(somePrefixSeen);
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+            }
+
+            if (false) {
+                System.out.println("<dump>");
+                DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+            }
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    public void testPrefixManyRandom()
+        throws Exception {
+
+        doTestPrefixMany(true);
+    }
+
+    public void testPrefixManySequential()
+        throws Exception {
+
+        doTestPrefixMany(false);
+    }
+
+    private void doTestPrefixMany(boolean random)
+        throws Exception {
+
+        initEnv(0);
+        final int N_EXTRA_ENTRIES = 1000;
+        Key.DUMP_TYPE = DumpType.BINARY;
+        try {
+
+            /* 2008-02-28 11:06:50.009 */
+            long start = 1204214810009L;
+
+            /* 3 years after start. Prefixes will be 3 and 4 bytes long. */
+            long end = start + (long) (3L * 365L * 24L * 60L * 60L * 1000L);
+
+            /* This will yield 94,608 entries. */
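+            /* (3 * 365 * 24 * 3600 * 1000 ms) / 1,000,000 ms per key = 94,608. */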
+            long inc = 1000000L;
+            int nEntries = insertTimestamps(start, end, inc, random);
+
+            /*
+             * This will force some splits on the left side of the tree which
+             * will force recalculating the suffix on the leg after the initial
+             * prefix/suffix calculation.
+             */
+            insertExtraTimestamps(0, N_EXTRA_ENTRIES);
+
+            /* Do the same on the right side of the tree. */
+            insertExtraTimestamps(end, N_EXTRA_ENTRIES);
+            assertEquals((nEntries + 2 * N_EXTRA_ENTRIES), db.count());
+
+            Cursor cursor = null;
+            try {
+                cursor = db.openCursor(null, null);
+
+                verifyEntries(0, N_EXTRA_ENTRIES, cursor, 1);
+                verifyEntries(start, nEntries, cursor, inc);
+                verifyEntries(end, N_EXTRA_ENTRIES, cursor, 1);
+
+                deleteEntries(start, nEntries);
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                cursor.close();
+                cursor = db.openCursor(null, null);
+                verifyEntries(0, N_EXTRA_ENTRIES, cursor, 1);
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.getNext(key, data, LockMode.DEFAULT));
+                assertEquals(end, LongBinding.entryToLong(key));
+            } finally {
+                if (cursor != null) {
+                    cursor.close();
+                }
+            }
+
+            if (false) {
+                System.out.println("<dump>");
+                DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    private int insertTimestamps(long start,
+                                 long end,
+                                 long inc,
+                                 boolean random)
+        throws DatabaseException {
+
+        int nEntries = (int) ((end - start) / inc);
+        List<Long> keyList = new ArrayList<Long>(nEntries);
+        long[] keys = null;
+        if (random) {
+            for (long i = start; i < end; i += inc) {
+                keyList.add(i);
+            }
+            keys = new long[keyList.size()];
+            Random rnd = new Random(10); // fixed seed
+            int nextKeyIdx = 0;
+            while (keyList.size() > 0) {
+                int idx = rnd.nextInt(keyList.size());
+                keys[nextKeyIdx++] = keyList.get(idx);
+                keyList.remove(idx);
+            }
+        }
+
+        /* Build up a tree. */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        data.setData(new byte[1]);
+        int j = 0;
+        for (long i = start; i < end; i += inc) {
+            if (random) {
+                LongBinding.longToEntry(keys[j], key);
+            } else {
+                LongBinding.longToEntry(i, key);
+            }
+            j++;
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data));
+        }
+        return j;
+    }
+
+    private void insertExtraTimestamps(long start, int nExtraEntries)
+        throws DatabaseException {
+
+        /* Add more than one node's worth of entries starting at "start". */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[] { 0 });
+        long next = start;
+        for (int i = 0; i < nExtraEntries; i++) {
+            LongBinding.longToEntry((long) next, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data));
+            next++;
+        }
+    }
+
+    private void deleteEntries(long start, int nEntries)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(null, null);
+        try {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            LongBinding.longToEntry(start, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getSearchKey(key, data, LockMode.DEFAULT));
+            for (int i = 0; i < nEntries; i++) {
+                assertEquals(OperationStatus.SUCCESS, cursor.delete());
+                assertEquals(OperationStatus.SUCCESS,
+                             cursor.getNext(key, data, LockMode.DEFAULT));
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    private void verifyEntries(long start,
+                               int nEntries,
+                               Cursor cursor,
+                               long inc)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        long check = start;
+        for (int i = 0; i < nEntries; i++) {
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getNext(key, data, LockMode.DEFAULT));
+            long keyInfo = LongBinding.entryToLong(key);
+            assertTrue(keyInfo == check);
+            check += inc;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/KeyTest.java b/test/com/sleepycat/je/tree/KeyTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..6973a5d25730532e67c77a01080b87197bde1e43
--- /dev/null
+++ b/test/com/sleepycat/je/tree/KeyTest.java
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: KeyTest.java,v 1.18.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+
+public class KeyTest extends TestCase {
+    private File envHome;
+    private Environment env;
+
+    public void setUp() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void tearDown() {
+    }
+
+    public void testKeyPrefixer() {
+        assertEquals("aaa", makePrefix("aaaa", "aaab"));
+        assertEquals("a", makePrefix("abaa", "aaab"));
+        assertNull(makePrefix("baaa", "aaab"));
+        assertEquals("aaa", makePrefix("aaa", "aaa"));
+        assertEquals("aaa", makePrefix("aaa", "aaab"));
+    }
+
+    private String makePrefix(String k1, String k2) {
+        byte[] ret = Key.createKeyPrefix(k1.getBytes(), k2.getBytes());
+        if (ret == null) {
+            return null;
+        } else {
+            return new String(ret);
+        }
+    }
+
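+    /*
+     * compareToKeyPrefix(newKey) is expected to return true only when the
+     * IN's current key prefix is non-empty and is a leading prefix of
+     * newKey, as the cases below illustrate.
+     */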
+    public void testKeyPrefixSubsetting() {
+        keyPrefixSubsetTest("aaa", "aaa", true);
+        keyPrefixSubsetTest("aa", "aaa", true);
+        keyPrefixSubsetTest("aaa", "aa", false);
+        keyPrefixSubsetTest("", "aa", false);
+        keyPrefixSubsetTest(null, "aa", false);
+        keyPrefixSubsetTest("baa", "aa", false);
+    }
+
+    private void keyPrefixSubsetTest(String keyPrefix,
+                                     String newKey,
+                                     boolean expect) {
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+            byte[] keyPrefixBytes =
+                (keyPrefix == null ? null : keyPrefix.getBytes());
+            byte[] newKeyBytes = newKey.getBytes();
+            DatabaseConfig dbConf = new DatabaseConfig();
+            dbConf.setKeyPrefixing(true);
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            DatabaseImpl databaseImpl =
+                new DatabaseImpl("dummy", new DatabaseId(10), envImpl, dbConf);
+            IN in = new IN(databaseImpl, null, 10, 10);
+            in.setKeyPrefix(keyPrefixBytes);
+            boolean result = in.compareToKeyPrefix(newKeyBytes);
+            assertTrue(result == expect);
+        } catch (Exception E) {
+            E.printStackTrace();
+            fail("caught " + E);
+        }
+    }
+
+    public void testKeyComparisonPerformance() {
+	byte[] key1 = "abcdefghijabcdefghij".getBytes();
+	byte[] key2 = "abcdefghijabcdefghij".getBytes();
+
+	for (int i = 0; i < 1000000; i++) {
+	    assertTrue(Key.compareKeys(key1, key2, null) == 0);
+	}
+    }
+
+    public void testKeyComparison() {
+	byte[] key1 = "aaa".getBytes();
+	byte[] key2 = "aab".getBytes();
+	assertTrue(Key.compareKeys(key1, key2, null) < 0);
+	assertTrue(Key.compareKeys(key2, key1, null) > 0);
+	assertTrue(Key.compareKeys(key1, key1, null) == 0);
+
+	key1 = "aa".getBytes();
+	key2 = "aab".getBytes();
+	assertTrue(Key.compareKeys(key1, key2, null) < 0);
+	assertTrue(Key.compareKeys(key2, key1, null) > 0);
+
+	key1 = "".getBytes();
+	key2 = "aab".getBytes();
+	assertTrue(Key.compareKeys(key1, key2, null) < 0);
+	assertTrue(Key.compareKeys(key2, key1, null) > 0);
+	assertTrue(Key.compareKeys(key1, key1, null) == 0);
+
+	key1 = "".getBytes();
+	key2 = "".getBytes();
+	assertTrue(Key.compareKeys(key1, key2, null) == 0);
+
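+	/*
+	 * { -1, -1, -1 } is 0xFF 0xFF 0xFF; since key comparison is unsigned,
+	 * it must compare greater than the 0x7f bytes even though the signed
+	 * byte values are negative.
+	 */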
+	byte[] ba1 = { -1, -1, -1 };
+	byte[] ba2 = { 0x7f, 0x7f, 0x7f };
+	assertTrue(Key.compareKeys(ba1, ba2, null) > 0);
+
+	try {
+	    Key.compareKeys(key1, null, null);
+	    fail("NullPointerException not caught");
+	} catch (NullPointerException NPE) {
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/tree/LSNArrayTest.java b/test/com/sleepycat/je/tree/LSNArrayTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f8dcbb70f966975e27f976816bfb8852923d1a6f
--- /dev/null
+++ b/test/com/sleepycat/je/tree/LSNArrayTest.java
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LSNArrayTest.java,v 1.7.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+public class LSNArrayTest extends TestCase {
+    private static final int N_ELTS = 128;
+
+    private IN theIN;
+
+    public void setUp() {
+	theIN = new IN();
+    }
+
+    public void tearDown() {
+    }
+
+    public void testPutGetElement() {
+	doTest(N_ELTS);
+    }
+
+    public void testOverflow() {
+	doTest(N_ELTS << 2);
+    }
+
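+    /*
+     * As exercised below, entry LSNs appear to be stored in a compact byte
+     * array while every file offset fits that form; storing a larger offset
+     * (0xffffff and above here) switches the IN to the long[] representation.
+     */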
+    public void testFileOffsetGreaterThan3Bytes() {
+	theIN.initEntryLsn(10);
+	theIN.setLsnElement(0, 0xfffffe);
+	assertTrue(theIN.getLsn(0) == 0xfffffe);
+	assertTrue(theIN.getEntryLsnByteArray() != null);
+	assertTrue(theIN.getEntryLsnLongArray() == null);
+	theIN.setLsnElement(1, 0xffffff);
+	assertTrue(theIN.getLsn(1) == 0xffffff);
+	assertTrue(theIN.getEntryLsnLongArray() != null);
+	assertTrue(theIN.getEntryLsnByteArray() == null);
+
+	theIN.initEntryLsn(10);
+	theIN.setLsnElement(0, 0xfffffe);
+	assertTrue(theIN.getLsn(0) == 0xfffffe);
+	assertTrue(theIN.getEntryLsnByteArray() != null);
+	assertTrue(theIN.getEntryLsnLongArray() == null);
+	theIN.setLsnElement(1, 0xffffff + 1);
+	assertTrue(theIN.getLsn(1) == 0xffffff + 1);
+	assertTrue(theIN.getEntryLsnLongArray() != null);
+	assertTrue(theIN.getEntryLsnByteArray() == null);
+    }
+
+    private void doTest(int nElts) {
+	theIN.initEntryLsn(nElts);
+	for (int i = nElts - 1; i >= 0; i--) {
+	    long thisLsn = DbLsn.makeLsn(i, i);
+	    theIN.setLsnElement(i, thisLsn);
+	    if (theIN.getLsn(i) != thisLsn) {
+		System.out.println(i + " found: " +
+				   DbLsn.toString(theIN.getLsn(i)) +
+				   " expected: " +
+				   DbLsn.toString(thisLsn));
+	    }
+	    assertTrue(theIN.getLsn(i) == thisLsn);
+	}
+
+	for (int i = 0; i < nElts; i++) {
+	    long thisLsn = DbLsn.makeLsn(i, i);
+	    theIN.setLsnElement(i, thisLsn);
+	    assertTrue(theIN.getLsn(i) == thisLsn);
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/tree/MemorySizeTest.java b/test/com/sleepycat/je/tree/MemorySizeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..fcc2fc8d9446fc466b3e727a411875198f3d29b9
--- /dev/null
+++ b/test/com/sleepycat/je/tree/MemorySizeTest.java
@@ -0,0 +1,440 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MemorySizeTest.java,v 1.31.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Check maintenance of the memory size count within nodes.
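+ * Each mutation in these tests is followed by TestUtils.validateNodeMemUsage,
+ * which (presumably) recomputes in-memory tree sizes and checks them against
+ * the counts maintained incrementally in the nodes.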
+ */
+public class MemorySizeTest extends TestCase {
+    private Environment env;
+    private File envHome;
+    private Database db;
+
+    public MemorySizeTest()
+	throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+
+       	/* Print keys as numbers */
+       	Key.DUMP_TYPE = DumpType.BINARY;
+    }
+
+    public void setUp()
+	throws IOException, DatabaseException {
+
+	IN.ACCUMULATED_LIMIT = 0;
+	Txn.ACCUMULATED_LIMIT = 0;
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        /*
+         * Properties for creating an environment.
+         * Disable the daemons for this test and use small BINs (NODE_MAX=4).
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+                       "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(),
+                       "false");
+        envConfig.setConfigParam(
+                       EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                       "false");
+
+        /* Don't checkpoint utilization info for this test. */
+        DbInternal.setCheckpointUP(envConfig, false);
+
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4");
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.setTransactional(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException E) {
+            }
+        }
+        TestUtils.removeFiles("TearDown", envHome,
+                              FileManager.JE_SUFFIX, true);
+    }
+
+    /*
+     * Do a series of these actions and make sure that the stored memory
+     * sizes match the calculated memory size.
+     * - create db
+     * - insert records, no split
+     * - cause IN split
+     * - modify
+     * - delete, compress
+     * - checkpoint
+     * - evict
+     * - insert duplicates
+     * - cause duplicate IN split
+     * - do an abort
+     */
+    public void testMemSizeMaintenance()
+        throws Throwable {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        try {
+            initDb();
+
+            /* Insert one record. Adds two INs and an LN to our cost.*/
+            insert((byte) 1, 10, (byte) 1, 100, true);
+            long newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > 0);
+
+            /* Fill out the node. */
+            insert((byte) 2, 10, (byte) 2, 100, true);
+            insert((byte) 3, 10, (byte) 3, 100, true);
+            insert((byte) 4, 10, (byte) 4, 100, true);
+            long oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* Cause a split */
+            insert((byte) 5, 10, (byte) 5, 100, true);
+            insert((byte) 6, 10, (byte) 6, 100, true);
+            insert((byte) 7, 10, (byte) 7, 100, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* Modify data */
+            modify((byte) 1, 10, (byte) 1, 1010, true);
+            modify((byte) 7, 10, (byte) 7, 1010, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* Delete data */
+            delete((byte) 2, 10, true);
+            delete((byte) 6, 10, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize < oldSize);
+
+            /* Compress. */
+            compress();
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize < oldSize);
+
+            /* Checkpoint */
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            env.checkpoint(ckptConfig);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertEquals(oldSize, newSize);
+
+            /* Evict by doing LN stripping. */
+            evict();
+            TestUtils.validateNodeMemUsage(envImpl, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize < oldSize);
+
+            /* insert duplicates */
+            insert((byte) 3, 10, (byte) 30, 200, true);
+            insert((byte) 3, 10, (byte) 31, 200, true);
+            insert((byte) 3, 10, (byte) 32, 200, true);
+            insert((byte) 3, 10, (byte) 33, 200, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* create duplicate split. */
+            insert((byte) 3, 10, (byte) 34, 200, true);
+            insert((byte) 3, 10, (byte) 35, 200, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* There should be 11 records. */
+            checkCount(11);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+
+            /* modify and abort */
+            modify((byte) 5, 10, (byte) 30, 1000, false);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize == oldSize);
+
+            /* delete and abort */
+            delete((byte) 1, 10, false);
+            delete((byte) 7, 10, false);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+
+            /* Delete dup */
+            delete((byte) 3, 10, (byte)34, 200, false);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+
+            /* insert and abort */
+            insert((byte) 2, 10, (byte) 5, 100, false);
+            insert((byte) 6, 10, (byte) 6, 100, false);
+            insert((byte) 8, 10, (byte) 7, 100, false);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+
+            if (env != null) {
+                env.close();
+            }
+
+
+        }
+    }
+
+    /*
+     * Do a series of these actions and make sure that the stored memory
+     * sizes match the calculated memory size.
+     * - create db
+     * - insert records, cause split
+     * - delete
+     * - insert and re-use slots.
+     */
+    public void testSlotReuseMaintenance()
+        throws Exception {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        try {
+
+            initDb();
+
+            /* Insert enough records to create one node. */
+            insert((byte) 1, 10, (byte) 1, 100, true);
+            insert((byte) 2, 10, (byte) 2, 100, true);
+            insert((byte) 3, 10, (byte) 3, 100, true);
+            long newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+
+            /* Delete  */
+            delete((byte) 3, 10, true);
+            long oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize < oldSize);
+
+            /* Insert again, reuse those slots */
+            insert((byte) 3, 10, (byte) 2, 400, true);
+            oldSize = newSize;
+            newSize = TestUtils.validateNodeMemUsage(envImpl, true);
+            assertTrue(newSize > oldSize);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
+
+
+    private void initDb()
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setTransactional(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void insert(byte keyVal, int keySize,
+                        byte dataVal, int dataSize,
+                        boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = null;
+        if (!commit) {
+            txn = env.beginTransaction(null, null);
+        }
+        assertEquals(OperationStatus.SUCCESS,
+                     db.put(txn, getEntry(keyVal, keySize),
+                            getEntry(dataVal, dataSize)));
+        if (!commit) {
+            txn.abort();
+        }
+    }
+
+    private void modify(byte keyVal, int keySize,
+                        byte dataVal, int dataSize,
+                        boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchKey(getEntry(keyVal, keySize),
+                                     new DatabaseEntry(),
+                                     LockMode.DEFAULT));
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.delete());
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.put(getEntry(keyVal, keySize),
+				getEntry(dataVal, dataSize)));
+        cursor.close();
+
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+    }
+
+    private void delete(byte keyVal, int keySize, boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = null;
+        if (!commit) {
+            txn = env.beginTransaction(null, null);
+        }
+        assertEquals(OperationStatus.SUCCESS,
+                     db.delete(txn, getEntry(keyVal, keySize)));
+        if (!commit) {
+            txn.abort();
+        }
+    }
+
+    private void delete(byte keyVal, int keySize,
+                        byte dataVal, int dataSize, boolean commit)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getSearchBoth(getEntry(keyVal, keySize),
+                                          getEntry(dataVal, dataSize),
+                                          LockMode.DEFAULT));
+        assertEquals(OperationStatus.SUCCESS,  cursor.delete());
+        cursor.close();
+
+        if (commit) {
+            txn.commit();
+        } else {
+            txn.abort();
+        }
+    }
+
+    /*
+     * Fake the compressor daemon by calling BIN.compress explicitly on all
+     * BINs on the IN list.
+     */
+    private void compress()
+        throws DatabaseException {
+
+        INList inList = DbInternal.envGetEnvironmentImpl(env).getInMemoryINs();
+        for (IN in : inList) {
+            in.latch();
+            if (in instanceof BIN) {
+                in.compress(null, true, null);
+            }
+            in.releaseLatch();
+        }
+    }
+
+    /*
+     * Fake the evictor daemon by calling BIN.evictLNs explicitly on all
+     * BINs on the IN list.
+     */
+    private void evict()
+        throws DatabaseException {
+
+        INList inList = DbInternal.envGetEnvironmentImpl(env).getInMemoryINs();
+        for (IN in : inList) {
+            if (in instanceof BIN &&
+                !in.getDatabase().getId().equals(DbTree.ID_DB_ID)) {
+                BIN bin = (BIN) in;
+                bin.latch();
+                assertTrue(bin.evictLNs() > 0);
+                bin.releaseLatch();
+            }
+        }
+    }
+
+
+    private DatabaseEntry getEntry(byte val, int size) {
+        byte[] bArray = new byte[size];
+        bArray[0] = val;
+        return new DatabaseEntry(bArray);
+    }
+
+    private void checkCount(int expectedCount)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(null, null);
+        int count = 0;
+        while (cursor.getNext(new DatabaseEntry(), new DatabaseEntry(),
+                              LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+            count++;
+        }
+        cursor.close();
+        assertEquals(expectedCount, count);
+    }
+
+    private void dumpINList()
+        throws DatabaseException {
+
+        INList inList = DbInternal.envGetEnvironmentImpl(env).getInMemoryINs();
+        for (IN in : inList) {
+            System.out.println("in nodeId=" + in.getNodeId());
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/ReleaseLatchesTest.java b/test/com/sleepycat/je/tree/ReleaseLatchesTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1ac4c8b78f7600b995b00026920b6ef2b28d3f0f
--- /dev/null
+++ b/test/com/sleepycat/je/tree/ReleaseLatchesTest.java
@@ -0,0 +1,529 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ReleaseLatchesTest.java,v 1.21.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+/**
+ * Check that latches are released properly even if we run into read errors.
+ */
+public class ReleaseLatchesTest extends TestCase {
+    private static final boolean DEBUG = false;
+
+    private Environment env;
+    private File envHome;
+    private Database db;
+    private TestDescriptor testActivity;
+
+    /*
+     * The OPERATIONS declared here define the test cases for this test.  Each
+     * TestDescriptor describes a particular JE activity. The
+     * testCheckLatchLeaks method generates read i/o exceptions during the test
+     * descriptor's action, and will check that we come up clean.
+     */
+    public static TestDescriptor[] OPERATIONS = {
+
+        /*
+         * TestDescriptor params:
+         *  - operation name: for debugging
+         *  - number of times to generate an exception. For example, if N,
+         *    the test action is executed in a loop N times, with a read
+         *    I/O exception forced on read 1, read 2, read 3 ... read N-1.
+         *  - number of records in the database.
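+         *
+         * For example, the "database put" descriptor below retries
+         * populate() against a 30-record database with a read exception
+         * forced at a different point on each of up to 6 runs.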
+         */
+        new TestDescriptor("database put", 6, 30, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.populate(false);
+            }
+
+            void reinit(ReleaseLatchesTest test)
+                throws DatabaseException {
+
+                test.closeDb();
+                test.getEnv().truncateDatabase(null, "foo", false);
+            }
+        },
+        new TestDescriptor("cursor scan", 31, 20, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+		throws DatabaseException {
+
+                test.scan();
+            }
+        },
+        new TestDescriptor("cursor scan duplicates", 23, 3, true) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+		throws DatabaseException {
+
+                test.scan();
+            }
+        },
+        new TestDescriptor("database get", 31, 20, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.get();
+            }
+        },
+        new TestDescriptor("database delete", 40, 30, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.delete();
+            }
+
+            void reinit(ReleaseLatchesTest test)
+                throws DatabaseException {
+
+                test.populate(false);
+            }
+        },
+        new TestDescriptor("checkpoint", 40, 10, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.modify(exceptionCount);
+                CheckpointConfig config = new CheckpointConfig();
+                config.setForce(true);
+                if (DEBUG) {
+                    System.out.println("Got to checkpoint");
+                }
+                test.getEnv().checkpoint(config);
+            }
+        },
+        new TestDescriptor("clean", 100, 5, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.modify(exceptionCount);
+                CheckpointConfig config = new CheckpointConfig();
+                config.setForce(true);
+                if (DEBUG) {
+                    System.out.println("Got to cleaning");
+                }
+                test.getEnv().cleanLog();
+            }
+        },
+        new TestDescriptor("compress", 20, 10, false) {
+            void doAction(ReleaseLatchesTest test, int exceptionCount)
+                throws DatabaseException {
+
+                test.delete();
+                if (DEBUG) {
+                    System.out.println("Got to compress");
+                }
+                test.getEnv().compress();
+            }
+
+            void reinit(ReleaseLatchesTest test)
+                throws DatabaseException {
+
+                test.populate(false);
+            }
+        }
+    };
+
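+    /*
+     * JUnit 3 style parameterization: suite() builds one copy of the test
+     * suite per TestDescriptor above and binds each test instance to that
+     * descriptor via initTest, so every test method runs once per operation.
+     */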
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        for (int i = 0; i < OPERATIONS.length; i++) {
+            TestSuite suite = new TestSuite(ReleaseLatchesTest.class);
+            Enumeration e = suite.tests();
+            while (e.hasMoreElements()) {
+                ReleaseLatchesTest t = (ReleaseLatchesTest) e.nextElement();
+                t.initTest(OPERATIONS[i]);
+                allTests.addTest(t);
+            }
+        }
+        return allTests;
+    }
+
+    public ReleaseLatchesTest() {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
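+        /* Rename the test so reports show which operation ran. */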
+        setName(getName() + ":" + testActivity.getName());
+        TestUtils.removeFiles("TearDown", envHome,
+                              FileManager.JE_SUFFIX, true);
+    }
+
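+    /*
+     * Create, populate, and checkpoint the database, then close cleanly so
+     * that later re-opens must fault nodes in from the log.
+     */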
+    private void init(boolean duplicates)
+        throws DatabaseException {
+
+        openEnvAndDb();
+
+        populate(duplicates);
+        env.checkpoint(null);
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+
+    private void openEnvAndDb()
+        throws DatabaseException {
+
+        /*
+         * Make an environment with small nodes and no daemons.
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4");
+        envConfig.setConfigParam("je.env.runEvictor", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "90");
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                  Integer.toString(20000));
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    /* Calling close under -ea will check for leaked latches. */
+    private void doCloseAndCheckLeaks()
+        throws Throwable {
+
+        try {
+            if (db != null) {
+                db.close();
+                db = null;
+            }
+
+            if (env != null) {
+                env.close();
+                env = null;
+            }
+        } catch (Throwable t) {
+            System.out.println("operation = " + testActivity.name);
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    private void closeDb()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+    }
+
+    private Environment getEnv() {
+        return env;
+    }
+
+    private void initTest(TestDescriptor action) {
+        this.testActivity = action;
+    }
+
+    /*
+     * This is the heart of the unit test. Given a TestDescriptor, run the
+     * operation's activity in a loop, generating read i/o exceptions at
+     * different points. Check for latch leaks after the i/o exception
+     * happens.
+     */
+    public void testCheckLatchLeaks()
+        throws Throwable {
+
+        int maxExceptionCount = testActivity.getNumExceptions();
+        if (DEBUG) {
+            System.out.println("Starting test: " + testActivity.getName());
+        }
+
+        try {
+            init(testActivity.getDuplicates());
+
+            /*
+             * Run the action repeatedly, generating exceptions at different
+             * points.
+             */
+            for (int i = 1; i <= maxExceptionCount; i++) {
+
+                /*
+                 * Open the env and database anew each time, so that we need to
+                 * fault in objects and will trigger read i/o exceptions.
+                 */
+                openEnvAndDb();
+                EnvironmentImpl envImpl =
+                    DbInternal.envGetEnvironmentImpl(env);
+                boolean exceptionOccurred = false;
+
+                try {
+                    ReadIOExceptionHook readHook = new ReadIOExceptionHook(i);
+                    envImpl.getLogManager().setReadHook(readHook);
+                    testActivity.doAction(this, i);
+                } catch (Throwable e) {
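+                    /*
+                     * The generated IOException is typically wrapped; unwrap
+                     * causes until we reach a DatabaseException.
+                     */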
+                    while (e.getCause() != null &&
+                           !(e instanceof DatabaseException)) {
+                        e = e.getCause();
+                    }
+                    if (e instanceof RunRecoveryException) {
+
+                        /*
+                         * It's possible for a read error to induce a
+                         * RunRecoveryException if the read error happens when
+                         * we are opening a new write file channel. (We read
+                         * and validate the file header). In that case, check
+                         * for latches, and re-open the database.
+                         */
+                        checkLatchCount((DatabaseException) e, i);
+                        env.close();
+                        openEnvAndDb();
+                        exceptionOccurred = true;
+                    } else if (e instanceof DatabaseException) {
+                        checkLatchCount((DatabaseException) e, i);
+                        exceptionOccurred = true;
+                    } else {
+                        throw e;
+                    }
+                }
+
+                if (DEBUG && !exceptionOccurred) {
+                    System.out.println("Don't need ex count " + i +
+                                       " for test activity " +
+                                       testActivity.getName());
+                }
+
+                envImpl.getLogManager().setReadHook(null);
+                testActivity.reinit(this);
+                doCloseAndCheckLeaks();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    private void checkLatchCount(DatabaseException e,
+                                 int exceptionCount)
+        throws DatabaseException {
+
+	/* Only rethrow the exception if we didn't clean up latches. */
+        if (LatchSupport.countLatchesHeld() > 0) {
+            LatchSupport.dumpLatchesHeld();
+            System.out.println("Operation = " + testActivity.getName() +
+                               " exception count=" + exceptionCount +
+                               " Held latches = " +
+                               LatchSupport.countLatchesHeld());
+            /* Show stacktrace where the latch was lost. */
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /* Insert records into a database. */
+    private void populate(boolean duplicates)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry data1 = new DatabaseEntry();
+        DatabaseEntry data2 = new DatabaseEntry();
+        DatabaseEntry data3 = new DatabaseEntry();
+        DatabaseEntry data4 = new DatabaseEntry();
+        IntegerBinding.intToEntry(0, data);
+        IntegerBinding.intToEntry(1, data1);
+        IntegerBinding.intToEntry(2, data2);
+        IntegerBinding.intToEntry(3, data3);
+        IntegerBinding.intToEntry(4, data4);
+
+        for (int i = 0; i < testActivity.getNumRecords(); i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS,  db.put(null, key, data));
+	    if (duplicates) {
+		assertEquals(OperationStatus.SUCCESS,
+			     db.put(null, key, data1));
+		assertEquals(OperationStatus.SUCCESS,
+			     db.put(null, key, data2));
+		assertEquals(OperationStatus.SUCCESS,
+			     db.put(null, key, data3));
+		assertEquals(OperationStatus.SUCCESS,
+			     db.put(null, key, data4));
+	    }
+        }
+    }
+
+    /* Modify the database. */
+    private void modify(int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(dataVal, data);
+
+        for (int i = 0; i < testActivity.getNumRecords(); i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS,  db.put(null, key, data));
+        }
+    }
+
+    /* Cursor scan the data. */
+    private void scan()
+        throws DatabaseException {
+
+        Cursor cursor = null;
+        try {
+            cursor = db.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    /* Database.get() for all records. */
+    private void get()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < testActivity.getNumRecords(); i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, key, data, LockMode.DEFAULT));
+        }
+    }
+
+    /* Delete all records. */
+    private void delete()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        for (int i = 0; i < testActivity.getNumRecords(); i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals("key = " + IntegerBinding.entryToInt(key),
+                         OperationStatus.SUCCESS, db.delete(null, key));
+        }
+    }
+
+    /*
+     * This TestHook implementation generates I/O exceptions during reads.
+     */
+    static class ReadIOExceptionHook implements TestHook {
+        private int counter = 0;
+        private int throwCount;
+
+        ReadIOExceptionHook(int throwCount) {
+            this.throwCount = throwCount;
+        }
+
+        public void doIOHook()
+            throws IOException {
+
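+            /* Fail the read whose zero-based index equals throwCount. */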
+            if (throwCount == counter) {
+                counter++;
+                throw new IOException("Generated exception: " +
+                                      this.getClass().getName());
+            } else {
+                counter++;
+            }
+        }
+
+        public Object getHookValue() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void doHook() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void hookSetup() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    static abstract class TestDescriptor {
+        private String name;
+        private int numExceptions;
+        private int numRecords;
+	private boolean duplicates;
+
+        TestDescriptor(String name,
+		       int numExceptions,
+		       int numRecords,
+		       boolean duplicates) {
+            this.name = name;
+            this.numExceptions = numExceptions;
+            this.numRecords = numRecords;
+	    this.duplicates = duplicates;
+        }
+
+        int getNumRecords() {
+            return numRecords;
+        }
+
+        int getNumExceptions() {
+            return numExceptions;
+        }
+
+        String getName() {
+            return name;
+        }
+
+	boolean getDuplicates() {
+	    return duplicates;
+	}
+
+        /* Do a series of operations. */
+        abstract void doAction(ReleaseLatchesTest test,
+                               int exceptionCount)
+            throws DatabaseException;
+
+        /* Reinitialize the database if doAction modified it. */
+        void reinit(ReleaseLatchesTest test)
+	    throws DatabaseException {
+
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/tree/SR13034Test.java b/test/com/sleepycat/je/tree/SR13034Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..321670f833091996bcaa58ff1048b76e07d7e06e
--- /dev/null
+++ b/test/com/sleepycat/je/tree/SR13034Test.java
@@ -0,0 +1,174 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SR13034Test.java,v 1.8 2008/01/07 14:29:13 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Reproduce a bug where fetchEntry rather than fetchEntryIgnoreKnownDeleted
+ * was being called when searching the duplicate tree by LN node ID during
+ * recovery.
+ *
+ * The trick is to create a DBIN with a KnownDeleted flag set on an entry.  And
+ * to cause recovery to search that DBIN by node ID during redo of a deleted
+ * LN.  This deleted LN log entry must not have any data -- it must have been
+ * deleted before creation of the dup tree as in SR 8984.
+ *
+ * In addition, the deleted LN must appear after the entries with KnownDeleted
+ * set in the BIN, otherwise the search by node ID will find the LN before
+ * it encounters a KnownDeleted entry.
+ *
+ * The sequence in the test is as follows.  I'm not positive this was the same
+ * sequence as seen by the user, since the user did not send their logs, but
+ * I believe the bug fix is general enough to cover similar cases.
+ *
+ * 1) Insert {A, C} (LN with key A, data C) in T1.
+ * 2) Delete {A, C} in T1.  The LN log entry will not have any data.
+ * 3) Commit T1 so these log entries will be replayed during recovery redo.
+ * 4) Insert {A, A} and {A, B} in T2.
+ * 5) Abort T2 so that the KnownDeleted flag will be set on these DBIN entries
+ * during recovery.
+ * 6) Close without a checkpoint and recover.  When replaying the deleted LN
+ * {A, C}, we don't have a dup key because it was deleted before the dup tree
+ * was created.  So we search the dup tree by LN node ID.  Calling fetchEntry
+ * on {A, A} (or {A, B}) throws an exception because KnownDeleted is set.  We
+ * neglected to check KnownDeleted.
+ */
+public class SR13034Test extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SR13034Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+		env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("During tearDown: " + e);
+        }
+
+        env = null;
+        db = null;
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    private void open()
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        /* Do not run the daemons to avoid timing considerations. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void close()
+	throws DatabaseException {
+
+        db.close();
+        db = null;
+
+        env.close();
+        env = null;
+    }
+
+    public void testSR13034()
+	throws DatabaseException {
+
+        open();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+        Transaction txn;
+
+        /*
+         * Insert {A, C}, then delete it.  No dup tree has been created, so
+         * this logs a deleted LN with no data.
+         */
+        txn = env.beginTransaction(null, null);
+        StringBinding.stringToEntry("A", key);
+        StringBinding.stringToEntry("C", data);
+        status = db.putNoOverwrite(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = db.delete(txn, key);
+        assertEquals(OperationStatus.SUCCESS, status);
+        txn.commit();
+
+        /*
+         * Insert {A, A}, {A, B}, which creates a dup tree.  Then abort to set
+         * KnownDeleted on these entries.
+         */
+        txn = env.beginTransaction(null, null);
+        StringBinding.stringToEntry("A", key);
+        StringBinding.stringToEntry("A", data);
+        status = db.putNoDupData(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+        StringBinding.stringToEntry("A", key);
+        StringBinding.stringToEntry("B", data);
+        status = db.putNoDupData(txn, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+        txn.abort();
+
+        /*
+         * Close without a checkpoint and recover.  Before the bug fix, the
+         * recovery would throw DatabaseException "attempt to fetch a deleted
+         * entry".
+         */
+        db.close();
+        DbInternal.envGetEnvironmentImpl(env).close(false);
+        open();
+
+        close();
+    }
+}
diff --git a/test/com/sleepycat/je/tree/SR13126Test.java b/test/com/sleepycat/je/tree/SR13126Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..f246f76966eeb347f53e426ee8eae0aabaddbe56
--- /dev/null
+++ b/test/com/sleepycat/je/tree/SR13126Test.java
@@ -0,0 +1,213 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SR13126Test.java,v 1.12 2008/01/07 14:29:13 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.util.TestUtils;
+
+/**
+ * Reproduce a bug (SR 13126) where an OutOfMemoryError during a put left the
+ * Btree in an inconsistent state.  The expected behavior, verified here, is
+ * that the environment is invalidated with RunRecoveryException, the
+ * transaction is not left attached to the thread, and the data can be read
+ * back after re-opening the environment.
+ */
+public class SR13126Test extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private long maxMem;
+
+    public SR13126Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (env != null) {
+		env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("During tearDown: " + e);
+        }
+
+        env = null;
+        db = null;
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    private boolean open()
+	throws DatabaseException {
+
+        maxMem = MemoryBudget.getRuntimeMaxMemory();
+        if (maxMem == -1) {
+            System.out.println
+                ("*** Warning: not able to run this test because the JVM " +
+                 "heap size is not available");
+            return false;
+        }
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        /* Do not run the daemons to avoid timing considerations. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+	    (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+
+        return true;
+    }
+
+    private void close()
+	throws DatabaseException {
+
+        db.close();
+        db = null;
+
+        env.close();
+        env = null;
+    }
+
+    public void testSR13126()
+	throws DatabaseException {
+
+        if (!open()) {
+            return;
+        }
+
+        Transaction txn = env.beginTransaction(null, null);
+
+        try {
+            insertUntilOutOfMemory(txn);
+            fail("Expected OutOfMemoryError");
+        } catch (RunRecoveryException expected) {}
+
+        verifyDataAndClose();
+    }
+
+    public void testTransactionRunner()
+	throws Exception {
+
+        if (!open()) {
+            return;
+        }
+
+        final CurrentTransaction currentTxn =
+            CurrentTransaction.getInstance(env);
+
+        TransactionRunner runner = new TransactionRunner(env);
+	/* Don't print exception stack traces during test runs. */
+	DbCompat.TRANSACTION_RUNNER_PRINT_STACK_TRACES = false;
+        try {
+            runner.run(new TransactionWorker() {
+                public void doWork()
+                    throws Exception {
+
+                    insertUntilOutOfMemory(currentTxn.getTransaction());
+                }
+            });
+            fail("Expected OutOfMemoryError");
+        } catch (RunRecoveryException expected) { }
+
+        /*
+         * If TransactionRunner does not abort the transaction, this thread
+         * will be left with a transaction attached.
+         */
+        assertNull(currentTxn.getTransaction());
+
+        verifyDataAndClose();
+    }
+
+    private void insertUntilOutOfMemory(Transaction txn)
+	throws DatabaseException, OutOfMemoryError {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry();
+
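+        /*
+         * Size the first record at a third of the heap, then grow in five
+         * steps spanning the remaining two thirds.
+         */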
+        int startMem = (int) (maxMem / 3);
+        int bumpMem = (int) ((maxMem - maxMem / 3) / 5);
+
+        /* Insert larger and larger LNs until an OutOfMemoryError occurs. */
+        for (int memSize = startMem;; memSize += bumpMem) {
+
+            /*
+             * If the memory error occurs when we do "new byte[]" below, this
+             * is not a test of the bug in question, so the test fails.
+             */
+            data.setData(new byte[memSize]);
+            try {
+                db.put(null, key, data);
+            } catch (OutOfMemoryError e) {
+                //System.err.println("Error during write " + memSize);
+                throw e;
+            }
+        }
+    }
+
+    private void verifyDataAndClose()
+	throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /*
+         * If a NULL_LSN is present in a BIN entry because of an incomplete
+         * insert, an assertion will fire during the checkpoint when writing
+         * the BIN.
+         */
+        env.close();
+        env = null;
+
+        /*
+         * If the NULL_LSN was written above because assertions are disabled,
+         * check that we don't get an exception when fetching it.
+         */
+        open();
+        Cursor c = db.openCursor(null, null);
+        while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {}
+        c.close();
+        close();
+    }
+}
diff --git a/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java b/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..de112669cf55b2ef6f90f12c52da7354202617a7
--- /dev/null
+++ b/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java
@@ -0,0 +1,314 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: SplitRace_SR11144Test.java,v 1.13 2008/01/07 14:29:13 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+/*********************************************************************
+  Exercise a race condition in split processing. The case requires at
+  least a 3-level btree where the root has maxEntries-1 children,
+  i.e. suppose node max = 4. Our test case will start with data like this:
+
+                        RootIN
+                 +--------+----------+
+                 /        |           \
+              INa        INb           INc
+                      /   |   \      /   |   \
+                     BIN BIN BINx   BIN BIN BINy
+                             /||\           /||\
+
+  Note that it takes some finagling to make the data look this way. An insert
+  of sequentially ascending values won't look like this, because opportunistic
+  splitting prevents all but the rightmost BIN from being completely full.
+
+  At this point, suppose that thread1 wants to insert into BINx and thread2
+  wants to insert into BINy. Our split code looks like this:
+
+  Body of Tree.searchSplitsAllowed()
+
+     rootLatch.acquire()
+     fetch rootIN
+     rootIN.latch
+     opportunistically split root (dropping and re-acquiring rootIN latches)
+      splitting the root requires updating the dbmapping tree
+     rootLatch.release()
+
+     // leave this block of code owning the rootIN latch.
+     call searchSubTreeSplitsAllowed()
+
+  Body of Tree.searchSubTreeSplitsAllowed()
+     while (true) {
+       try {
+          // throws if finds a node that needs splitting
+          return searchSubTreeUntilSplit()
+       } catch (SplitRequiredException e) {
+          // acquire latches down the depth of the tree
+          forceSplit();
+       }
+     }
+
+  If code is executed in this order:
+
+  thread 1 executes searchSplitsAllowed(), root doesn't need splitting
+  thread 1 executes searchSubTreeUntilSplit(), throws out because of BINx
+  thread 1 holds no latches before executing forceSplit()
+  thread 2 executes searchSplitsAllowed(), root doesn't need splitting
+  thread 2 executes searchSubTreeUntilSplit(), throws out because of BINy
+  thread 2 holds no latches before executing forceSplit()
+  thread 1 executes forceSplit, splits BINx, which ripples upward,
+               adding a new level 2 IN. The root is full
+  thread 2 executes forceSplit, splits BINy, which ripples upward,
+               adding a new level 2 IN. The root can't hold the new child!
+
+ The root split is done this way, outside forceSplit, because it is a
+ special case: you must hold the rootLatch.
+
+ This case does not exist for duplicates because:
+   a. in one case, the owning BIN (the equivalent of the root) stays latched
+   b. in the other case, the caller is recovery, which is single threaded.
+
+ The solution was to check for root fullness in forceSplit(), before
+ latching down the whole depth of the tree. In that case, we throw out
+ and re-execute the rootLatch latching.
+
+********************************************************************/
+
+public class SplitRace_SR11144Test extends TestCase {
+    private static final boolean DEBUG = false;
+    private File envHome;
+    private Environment env = null;
+    private Database db = null;
+
+    public SplitRace_SR11144Test() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            /* Close in case we hit an exception and didn't close */
+            if (env != null) {
+		env.close();
+            }
+        } catch (DatabaseException e) {
+            /* Ok if already closed */
+        }
+        env = null; // for JUnit, to reduce memory usage when run in a suite.
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testSplitRootRace()
+        throws Throwable {
+
+        /* Create tree topology described in header comments. */
+        initData();
+
+        /*
+         * Create two threads, and hold them in a barrier at the
+         * designated point in Tree.java. They'll insert keys which
+         * will split BINx and BINy.
+         */
+
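+        /* 92 falls in BINx (which already holds 91), 202 in BINy (201). */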
+        InsertThread a = new InsertThread(92, db);
+        InsertThread b = new InsertThread(202, db);
+        setWaiterHook();
+        b.start();
+        a.start();
+
+        a.join();
+        b.join();
+
+        close();
+    }
+
+    /**
+     * Create this:
+     *                   RootIN
+     *            +--------+----------+
+     *            /        |           \
+     *         INa        INb           INc
+     *                 /   |   \      /   |   \
+     *                BIN BIN BINx   BIN BIN BINy
+     *                        /||\           /||\
+     *
+     */
+    private void initData() {
+	try {
+	    initEnvInternal(true);
+
+            /*
+             * Opportunistic splitting will cause the following inserts to
+             * add three child entries per parent.
+             */
+            int value = 0;
+            for (int i = 0; i < 23; i++) {
+                put(db, value);
+                value += 10;
+            }
+
+            /* Add a fourth child to BINx and BINy */
+            put(db, 91);
+            put(db, 201);
+
+            if (DEBUG) {
+                dump();
+            }
+        } catch (DatabaseException DBE) {
+	    throw new RuntimeException(DBE);
+	}
+    }
+
+    private static void put(Database db, int value)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        /* put the value in the key. */
+        IntegerBinding.intToEntry(11, data);
+        IntegerBinding.intToEntry(value, key);
+
+        OperationStatus status = db.putNoOverwrite(null, key, data);
+        if (status != OperationStatus.SUCCESS) {
+            throw new RuntimeException("status=" + status);
+        }
+    }
+
+    private void close() {
+        try {
+            db.close();
+            env.close();
+	} catch (DatabaseException DBE) {
+	    throw new RuntimeException(DBE);
+	}
+    }
+
+    private void dump() {
+        try {
+            Cursor cursor = db.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                System.out.println("<rec key=\"" +
+                                   IntegerBinding.entryToInt(key) +
+                                   "\" data=\"" +
+                                   IntegerBinding.entryToInt(data) +
+                                   "\"/>");
+            }
+            DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+            cursor.close();
+        } catch (DatabaseException DBE) {
+            throw new RuntimeException(DBE);
+        }
+    }
+
+    private void initEnvInternal(boolean create)
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(create);
+        envConfig.setConfigParam("je.nodeMaxEntries", "4");
+        envConfig.setConfigParam("je.nodeDupTreeMaxEntries", "4");
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(create);
+        dbConfig.setTransactional(true);
+        dbConfig.setExclusiveCreate(create);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    private void setWaiterHook() {
+        TestHook hook = new WaiterHook();
+        DbInternal.dbGetDatabaseImpl(db).getTree().setWaitHook(hook);
+    }
+
+    /*
+     * This hook merely acts as a barrier. 2 threads enter and cannot
+     * proceed until both have arrived at that point.
+     */
+    static class WaiterHook implements TestHook {
+        private int numArrived;
+        private Object block;
+
+        WaiterHook() {
+            numArrived = 0;
+            block = new Object();
+        }
+        public void doHook() {
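+            /* First thread to arrive waits; the second wakes it up. */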
+            synchronized (block) {
+                if (numArrived == 0) {
+                    numArrived = 1;
+                    try {
+                        block.wait();
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+                } else if (numArrived == 1) {
+                    numArrived = 2;
+                    block.notify();
+                }
+            }
+        }
+        public Object getHookValue() {
+            throw new UnsupportedOperationException();
+        }
+        public void doIOHook() throws IOException {
+            throw new UnsupportedOperationException();
+        }
+        public void hookSetup() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    /* This thread merely inserts the specified value. */
+    static class InsertThread extends Thread {
+        private int value;
+        private Database db;
+
+        InsertThread(int value, Database db) {
+            this.value = value;
+            this.db = db;
+        }
+
+        public void run() {
+            try {
+                put(db, value);
+            } catch (Exception e) {
+                e.printStackTrace();
+                fail(e.getMessage());
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/SplitTest.java b/test/com/sleepycat/je/tree/SplitTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..cdd6cdb64474da4c1b74ee54dd139252a5e118aa
--- /dev/null
+++ b/test/com/sleepycat/je/tree/SplitTest.java
@@ -0,0 +1,244 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SplitTest.java,v 1.29.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.util.TestUtils;
+
+public class SplitTest extends TestCase {
+    private static final boolean DEBUG = false;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public SplitTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws Exception {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        initEnv();
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        try {
+            db.close();
+            env.close();
+        } catch (DatabaseException E) {
+        }
+
+        TestUtils.removeLogFiles("TearDown", envHome, true);
+    }
+
+    private void initEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4");
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        String databaseName = "testDb";
+        Transaction txn = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(txn, databaseName, dbConfig);
+        txn.commit();
+    }
+
+    /**
+     * Test splits on a case where the 0th entry gets promoted.
+     */
+    public void test0Split()
+        throws Exception {
+
+        Key.DUMP_TYPE = DumpType.BINARY;
+        try {
+            /* Build up a tree. */
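+            /* Descending keys make each insert the new 0th entry. */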
+            for (int i = 160; i > 0; i-= 10) {
+                assertEquals(OperationStatus.SUCCESS,
+                             db.put(null, new DatabaseEntry
+				 (new byte[] { (byte) i }),
+                                    new DatabaseEntry(new byte[] {1})));
+            }
+
+            if (DEBUG) {
+                System.out.println("<dump>");
+                DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+            }
+
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new DatabaseEntry(new byte[]{(byte)151}),
+                                new DatabaseEntry(new byte[] {1})));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new DatabaseEntry(new byte[]{(byte)152}),
+                                new DatabaseEntry(new byte[] {1})));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new DatabaseEntry(new byte[]{(byte)153}),
+                                new DatabaseEntry(new byte[] {1})));
+
+            if (DEBUG) {
+                DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+                System.out.println("</dump>");
+            }
+
+            /*
+             * These inserts make a tree where the right most mid-level IN
+             * has an idkey greater than its parent entry.
+             *
+             *     +---------------+
+             *     | id = 90       |
+             *     | 50 | 90 | 130 |
+             *     +---------------+
+             *       |     |    |
+             *                  |
+             *              +-----------------+
+             *              | id = 160        |
+             *              | 130 | 150 | 152 |
+             *              +-----------------+
+             *                 |      |    |
+             *                 |      |    +-----------+
+             *                 |      |                |
+             *       +-----------+  +-----------+ +-----------------+
+             *       | BIN       |  | BIN       | | BIN             |
+             *       | id = 130  |  | id = 150  | | id=160          |
+             *       | 130 | 140 |  | 150 | 151 | | 152 | 153 | 160 |
+             *       +-----------+  +-----------+ +-----------------+
+	     *
+             * Now delete records 130 and 140 to empty out the subtree with BIN
+             * with id=130.
+             */
+            assertEquals(OperationStatus.SUCCESS,
+                         db.delete(null,
+                                   new DatabaseEntry(new byte[]{(byte) 130})));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.delete(null,
+                                   new DatabaseEntry(new byte[]{(byte) 140})));
+            env.compress();
+
+            /*
+             * These deletes make the mid level IN's 0th entry > its parent
+             * reference.
+	     *
+             *     +---------------+
+             *     | id = 90       |
+             *     | 50 | 90 | 130 |
+             *     +---------------+
+             *       |     |    |
+             *                  |
+             *              +-----------+
+             *              | id = 160  |
+             *              | 150 | 152 |
+             *              +-----------+
+             *                 |      |
+             *                 |      |
+             *                 |      |
+             *       +-----------+ +-----------------+
+             *       | BIN       | | BIN             |
+             *       | id = 150  | | id=160          |
+             *       | 150 | 151 | | 152 | 153 | 160 |
+             *       +-----------+ +-----------------+
+             *
+             * Now insert 140 into BIN (id = 160) so that its first entry is
+             * less than the mid level IN.
+             */
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, new DatabaseEntry(new byte[]{(byte)140}),
+                                new DatabaseEntry(new byte[] {1})));
+
+            /*
+             * Now note that the mid level tree's 0th entry is greater than its
+             * reference in the root.
+             *
+             *     +---------------+
+             *     | id = 90       |
+             *     | 50 | 90 | 130 |
+             *     +---------------+
+             *       |     |    |
+             *                  |
+             *              +-----------+
+             *              | id = 160  |
+             *              | 150 | 152 |
+             *              +-----------+
+             *                 |      |
+             *                 |      |
+             *                 |      |
+             *   +----------------+ +-----------------+
+             *   | BIN            | | BIN             |
+             *   | id = 150       | | id=160          |
+             *   | 140 |150 | 151 | | 152 | 153 | 160 |
+             *   +----------------+ +-----------------+
+             *
+             * Now split the mid level node, putting the new child on the left.
+             */
+            for (int i = 154; i < 159; i++) {
+                assertEquals(OperationStatus.SUCCESS,
+                             db.put(null,
+                                    new DatabaseEntry(new byte[]{(byte)i}),
+                                    new DatabaseEntry(new byte[] {1})));
+            }
+
+            /*
+             * This used to result in the following broken tree, which would
+             * cause us to be unable to retrieve record 140. With the new
+             * split code, the entry in the root should stay 130 rather than
+             * becoming 150.
+	     *
+             *     +---------------------+
+             *     | id = 90             |
+             * | 50 | 90 | 150 | 154 |  NOTE: we've lost record 140
+             *     +---------------------+
+             *       |     |    |        \
+             *                  |         \
+             *              +-----------+  +----------+
+             *              | id = 150  |  |id=160    |
+             *              | 150 | 152 |  |154 | 156 |
+             *              +-----------+  +----------+
+             *                 |      |
+             *                 |      |
+             *                 |      |
+             *   +------------+ +-------+
+             *   | BIN        | | BIN   |
+             *   | id = 150   | | id=152|
+             *   | 140|150|151| |152|153|
+             *   +------------+ +-------+
+             */
+            DatabaseEntry data = new DatabaseEntry();
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, new DatabaseEntry(new byte[]
+			     { (byte) 140 }),
+                                data, LockMode.DEFAULT));
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/TreeDuplicateTest.java b/test/com/sleepycat/je/tree/TreeDuplicateTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..92e74a409f615a91813066834327c9605b1fec79
--- /dev/null
+++ b/test/com/sleepycat/je/tree/TreeDuplicateTest.java
@@ -0,0 +1,245 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeDuplicateTest.java,v 1.50.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.NullCursor;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.util.TestUtils;
+
+public class TreeDuplicateTest extends TreeTestBase {
+
+    public TreeDuplicateTest() {
+	super();
+    }
+
+    private static final int N_DUPLICATES_PER_KEY = N_KEYS;
+    private static final int N_TOP_LEVEL_KEYS = 10;
+
+    /**
+     * Rudimentary insert/retrieve test.
+     */
+    public void testSimpleTreeCreation()
+	throws Throwable {
+
+        try {
+            initEnv(true);
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            Locker txn = BasicLocker.
+		createBasicLocker(DbInternal.envGetEnvironmentImpl(env));
+            NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+            try {
+                byte[][] keys = new byte[N_TOP_LEVEL_KEYS][];
+                LN[][] lns = new LN[N_TOP_LEVEL_KEYS][];
+                for (int i = 0; i < N_TOP_LEVEL_KEYS; i++) {
+                    byte[] key = new byte[N_KEY_BYTES];
+                    keys[i] = key;
+                    lns[i] = new LN[N_DUPLICATES_PER_KEY];
+                    TestUtils.generateRandomAlphaBytes(key);
+                    for (int j = 0; j < N_DUPLICATES_PER_KEY; j++) {
+                        byte[] data = new byte[N_KEY_BYTES];
+                        TestUtils.generateRandomAlphaBytes(data);
+                        LN ln = new LN(data, envImpl, false);
+                        lns[i][j] = ln;
+                        insertAndRetrieveDuplicate(key, ln, cursor);
+                    }
+                }
+
+                for (int i = 0; i < N_TOP_LEVEL_KEYS; i++) {
+                    byte[] key = keys[i];
+                    for (int j = 0; j < N_DUPLICATES_PER_KEY; j++) {
+                        LN ln = lns[i][j];
+                        retrieveDuplicateLN(key, ln);
+                    }
+                }
+            } finally {
+                txn.operationEnd();
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Make sure that search returns null when the tree has a null root.
+     */
+    public void testNullRoot()
+	throws DatabaseException {
+
+        initEnv(false);
+        assertTrue(tree.search(null, Tree.SearchType.NORMAL,
+                               -1, null, CacheMode.DEFAULT) ==
+                   null);
+        TestUtils.checkLatchCount();
+    }
+
+    /**
+     * Insert a bunch of keys.  Validate that getFirstNode and getLastNode
+     * return the right values.
+     */
+    public void testGetFirstLast()
+	throws DatabaseException {
+
+        initEnv(true);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        Locker txn = BasicLocker.
+	    createBasicLocker(DbInternal.envGetEnvironmentImpl(env));
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+	/* Make sure IllegalArgumentException is thrown for null args. */
+        try {
+            TestUtils.checkLatchCount();
+            tree.getFirstNode(null, null);
+            fail("Tree.getFirstNode didn't throw IllegalArgumentException");
+        } catch (IllegalArgumentException IAE) {
+        }
+        TestUtils.checkLatchCount();
+
+        try {
+            TestUtils.checkLatchCount();
+            tree.getLastNode(null, null);
+            fail("Tree.getLastNode didn't throw IllegalArgumentException");
+        } catch (IllegalArgumentException IAE) {
+        }
+        TestUtils.checkLatchCount();
+
+        byte[][] keys = new byte[N_TOP_LEVEL_KEYS][];
+        LN[][] lns = new LN[N_TOP_LEVEL_KEYS][];
+	byte[][] minKeys = new byte[N_TOP_LEVEL_KEYS][];
+	byte[][] maxKeys = new byte[N_TOP_LEVEL_KEYS][];
+        for (int i = 0; i < N_TOP_LEVEL_KEYS; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+	    byte[] minKey = null;
+	    byte[] maxKey = null;
+            keys[i] = key;
+	    lns[i] = new LN[N_DUPLICATES_PER_KEY];
+            TestUtils.generateRandomAlphaBytes(key);
+	    for (int j = 0; j < N_DUPLICATES_PER_KEY; j++) {
+		byte[] data = new byte[N_KEY_BYTES];
+		TestUtils.generateRandomAlphaBytes(data);
+		byte[] dupKey = data;
+
+		if (minKey == null) {
+		    minKey = dupKey;
+		} else if (Key.compareKeys(dupKey, minKey, null) < 0) {
+		    minKey = dupKey;
+		}
+
+		if (maxKey == null) {
+		    maxKey = dupKey;
+		} else if (Key.compareKeys(maxKey, dupKey, null) < 0) {
+		    maxKey = dupKey;
+		}
+
+		LN ln = new LN(data, envImpl, false);
+		lns[i][j] = ln;
+		insertAndRetrieveDuplicate(key, ln, cursor);
+	    }
+	    minKeys[i] = minKey;
+	    maxKeys[i] = maxKey;
+        }
+
+        for (int i = 0; i < N_TOP_LEVEL_KEYS; i++) {
+	    byte[] key = keys[i];
+	    for (int j = 0; j < N_DUPLICATES_PER_KEY; j++) {
+		validateFirstLast(key, minKeys[i], maxKeys[i]);
+	    }
+        }
+        txn.operationEnd();
+    }
+
+    /**
+     * Find the first and last dup for key and make sure they match the minKey
+     * and maxKey args.
+     */
+    private void validateFirstLast(byte[] key, byte[] minKey, byte[] maxKey)
+	throws DatabaseException {
+
+        TestUtils.checkLatchCount();
+
+	/* find the top of the dup tree. */
+	IN dr = tree.search(key, Tree.SearchType.NORMAL, -1,
+                            null, CacheMode.DEFAULT);
+	if (!(dr instanceof BIN)) {
+	    fail("search didn't return a BIN for key: " + key);
+	}
+	BIN topBin = (BIN) dr;
+	int index = topBin.findEntry(key, false, true);
+	if (index == -1) {
+	    fail("Didn't read back key: " + key);
+	}
+	Node dupEntry = topBin.getTarget(index);
+	if (!(dupEntry instanceof DIN)) {
+	    fail("Didn't find a DIN");
+	}
+	topBin.releaseLatch();
+	DIN duplicateRoot = (DIN) dupEntry;
+	duplicateRoot.latch();
+
+	DBIN leftMostNode =
+            tree.getFirstNode(duplicateRoot, CacheMode.DEFAULT);
+
+        assertTrue(leftMostNode instanceof DBIN);
+        leftMostNode.releaseLatch();
+        assertTrue(Key.compareKeys(leftMostNode.getKey(0), minKey, null) == 0);
+
+	duplicateRoot.latch();
+	DBIN rightMostNode =
+            tree.getLastNode(duplicateRoot, CacheMode.DEFAULT);
+
+        assertTrue(rightMostNode instanceof DBIN);
+        rightMostNode.releaseLatch();
+        assertTrue(Key.compareKeys
+            (rightMostNode.getKey(rightMostNode.getNEntries() - 1), maxKey,
+             null) == 0);
+
+        TestUtils.checkLatchCount();
+    }
+
+    private void insertAndRetrieveDuplicate(byte[] key,
+					    LN ln,
+					    NullCursor cursor)
+	throws DatabaseException {
+
+        TestUtils.checkLatchCount();
+        assertTrue(tree.insert(ln, key, true, cursor,
+			       new LockResult(null, null),
+                               ReplicationContext.NO_REPLICATE));
+        TestUtils.checkLatchCount();
+        assertTrue(retrieveDuplicateLN(key, ln) == ln);
+    }
+
+    /**
+     * Helper routine to read the duplicate LN referred to by key.
+     */
+    private LN retrieveDuplicateLN(byte[] key, LN ln)
+	throws DatabaseException {
+
+        TreeLocation location = new TreeLocation();
+        try {
+            assertTrue(tree.getParentBINForChildLN
+		       (location, key, ln.getData(), ln,
+			false, false, false, CacheMode.DEFAULT));
+
+            return (LN) location.bin.getTarget(location.index);
+        } finally {
+            location.bin.releaseLatch();
+            TestUtils.checkLatchCount();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/TreeTest.java b/test/com/sleepycat/je/tree/TreeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..54ebb351ceecd08061bb6c8290bf1b321cf13c6c
--- /dev/null
+++ b/test/com/sleepycat/je/tree/TreeTest.java
@@ -0,0 +1,386 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeTest.java,v 1.94.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.IOException;
+
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.NullCursor;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.util.TestUtils;
+
+public class TreeTest extends TreeTestBase {
+
+    public TreeTest() {
+	super();
+    }
+
+    /**
+     * Rudimentary insert/retrieve test.
+     */
+    public void testSimpleTreeCreation()
+	throws DatabaseException {
+
+        initEnv(false);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        Locker txn = BasicLocker.
+	    createBasicLocker(DbInternal.envGetEnvironmentImpl(env));
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+        insertAndRetrieve(cursor, "aaaaa".getBytes(),
+                          new LN((byte[]) null,
+                                  envImpl,
+                                  false)); // replicated
+        insertAndRetrieve(cursor, "aaaab".getBytes(),
+                          new LN((byte[]) null,
+                                  envImpl,
+                                  false)); // replicated
+        insertAndRetrieve(cursor, "aaaa".getBytes(),
+                          new LN((byte[]) null,
+                                  envImpl,
+                                  false)); // replicated
+        insertAndRetrieve(cursor, "aaa".getBytes(),
+                          new LN((byte[]) null,
+                                  envImpl,
+                                  false)); // replicated
+        txn.operationEnd();
+    }
+
+    /**
+     * Slightly less rudimentary test inserting a handful of keys and LNs.
+     */
+    public void testMultipleInsertRetrieve0()
+	throws DatabaseException {
+
+        /*
+	 * Set the seed to reproduce a specific problem found while debugging:
+         * IN.split was splitting with the identifier key being on the right
+         * side.
+	 */
+        initEnv(false);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+        for (int i = 0; i < 21; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            TestUtils.generateRandomAlphaBytes(key);
+            insertAndRetrieve(cursor, key, new LN((byte[]) null,
+                                                  envImpl,
+                                                  false)); // replicated
+        }
+        txn.operationEnd();
+    }
+
+    /**
+     * Insert a bunch of keys and test that they can be retrieved correctly.
+     * While we insert, maintain the highest and lowest keys inserted.  Verify
+     * that getFirstNode and getLastNode return those two entries.  Lather,
+     * rinse, repeat.
+     */
+    public void testMultipleInsertRetrieve1()
+	throws DatabaseException, IOException {
+
+        initEnv(false);
+	doMultipleInsertRetrieve1();
+    }
+
+    /**
+     * Helper routine for above.
+     */
+    private void doMultipleInsertRetrieve1()
+	throws DatabaseException {
+
+        byte[][] keys = new byte[N_KEYS][];
+        LN[] lns = new LN[N_KEYS];
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+        for (int i = 0; i < N_KEYS; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            keys[i] = key;
+            lns[i] = new LN((byte[]) null, envImpl, false /* replicated */);
+            TestUtils.generateRandomAlphaBytes(key);
+            insertAndRetrieve(cursor, key, lns[i]);
+        }
+
+        for (int i = 0; i < N_KEYS; i++) {
+            assertTrue(retrieveLN(keys[i]) == lns[i]);
+        }
+
+        TestUtils.checkLatchCount();
+        IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT);
+
+        assertTrue(leftMostNode instanceof BIN);
+        BIN lmn = (BIN) leftMostNode;
+        lmn.releaseLatch();
+        TestUtils.checkLatchCount();
+        assertTrue(Key.compareKeys(lmn.getKey(0), minKey, null) == 0);
+
+        TestUtils.checkLatchCount();
+        IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT);
+
+        assertTrue(rightMostNode instanceof BIN);
+        BIN rmn = (BIN) rightMostNode;
+        rmn.releaseLatch();
+        TestUtils.checkLatchCount();
+        assertTrue(Key.compareKeys
+            (rmn.getKey(rmn.getNEntries() - 1), maxKey, null) == 0);
+        TreeStats ts = tree.getTreeStats();
+        assertTrue(ts.nRootSplits > 1);
+
+        txn.operationEnd();
+    }
+
+    /**
+     * Create a tree.  After creation, walk the BINs forward using getNextBin,
+     * counting the keys and validating that they are returned in ascending
+     * order.  Ensure that the correct number of keys is returned.
+     */
+    public void testCountAndValidateKeys()
+	throws DatabaseException, IOException {
+
+        initEnv(false);
+	doCountAndValidateKeys();
+    }
+
+    /**
+     * Helper routine for above test.
+     */
+    private void doCountAndValidateKeys()
+	throws DatabaseException {
+        byte[][] keys = new byte[N_KEYS][];
+        LN[] lns = new LN[N_KEYS];
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+        for (int i = 0; i < N_KEYS; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            keys[i] = key;
+            lns[i] = new LN((byte[]) null, envImpl, false /* replicated */);
+            TestUtils.generateRandomAlphaBytes(key);
+            insertAndRetrieve(cursor, key, lns[i]);
+        }
+        assertTrue(countAndValidateKeys(tree) == N_KEYS);
+        txn.operationEnd();
+    }
+
+    /**
+     * Create a tree.  After creation, walk the BINs backward using getPrevBin,
+     * counting the keys and validating that they are returned in descending
+     * order.  Ensure that the correct number of keys is returned.
+     */
+    public void testCountAndValidateKeysBackwards()
+	throws DatabaseException, IOException {
+
+        initEnv(false);
+	doCountAndValidateKeysBackwards();
+    }
+
+    /**
+     * Helper routine for above test.
+     */
+    private void doCountAndValidateKeysBackwards()
+	throws DatabaseException {
+
+        byte[][] keys = new byte[N_KEYS][];
+        LN[] lns = new LN[N_KEYS];
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+        for (int i = 0; i < N_KEYS; i++) {
+            byte[] key = new byte[N_KEY_BYTES];
+            keys[i] = key;
+            lns[i] = new LN((byte[]) null, envImpl, false /* replicated */);
+            TestUtils.generateRandomAlphaBytes(key);
+            insertAndRetrieve(cursor, key, lns[i]);
+        }
+        assertTrue(countAndValidateKeysBackwards(tree) == N_KEYS);
+        txn.operationEnd();
+    }
+
+    public void testAscendingInsertBalance()
+	throws DatabaseException {
+
+        initEnv(false);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+        /* Fill up a db with data */
+        for (int i = 0; i < N_KEYS; i++) {
+            byte[] keyBytes = new byte[4];
+            TestUtils.putUnsignedInt(keyBytes, TestUtils.alphaKey(i));
+            insertAndRetrieve(cursor, keyBytes,
+                              new LN((byte[]) null,
+                                     envImpl,
+                                     false)); // replicated
+        }
+
+        TestUtils.checkLatchCount();
+
+        /* Count the number of levels on the left. */
+        IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT);
+        assertTrue(leftMostNode instanceof BIN);
+        int leftSideLevels = 0;
+        do {
+            SearchResult result =
+		tree.getParentINForChildIN(leftMostNode, true,
+                                           CacheMode.DEFAULT);
+            leftMostNode = result.parent;
+            leftSideLevels++;
+        } while (leftMostNode != null);
+        TestUtils.checkLatchCount();
+
+        /* Count the number of levels on the right. */
+        IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT);
+        assertTrue(rightMostNode instanceof BIN);
+        int rightSideLevels = 0;
+        do {
+            SearchResult result =
+		tree.getParentINForChildIN(rightMostNode, true,
+                                           CacheMode.DEFAULT);
+            rightMostNode = result.parent;
+            rightSideLevels++;
+        } while (rightMostNode != null);
+        TestUtils.checkLatchCount();
+        if (leftSideLevels > 10 ||
+            rightSideLevels > 10) {
+            fail("Levels too high (" +
+                 leftSideLevels +
+                 "/" +
+                 rightSideLevels +
+                 ") on descending insert");
+        }
+        txn.operationEnd();
+    }
+
+    public void testDescendingInsertBalance()
+	throws DatabaseException {
+        initEnv(false);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+        for (int i = N_KEYS; i >= 0; --i) {
+            byte[] keyBytes = new byte[4];
+            TestUtils.putUnsignedInt(keyBytes, TestUtils.alphaKey(i));
+            insertAndRetrieve(cursor, keyBytes,
+                              new LN((byte[]) null,
+                                     envImpl,
+                                     false)); // replicated
+        }
+
+        TestUtils.checkLatchCount();
+        IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT);
+
+        assertTrue(leftMostNode instanceof BIN);
+        int leftSideLevels = 0;
+        do {
+            SearchResult result =
+		tree.getParentINForChildIN(leftMostNode, true,
+                                           CacheMode.DEFAULT);
+            leftMostNode = result.parent;
+            leftSideLevels++;
+        } while (leftMostNode != null);
+        TestUtils.checkLatchCount();
+
+        IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT);
+
+        assertTrue(rightMostNode instanceof BIN);
+        int rightSideLevels = 0;
+        do {
+            SearchResult result =
+		tree.getParentINForChildIN(rightMostNode, true,
+                                           CacheMode.DEFAULT);
+            rightMostNode = result.parent;
+            rightSideLevels++;
+        } while (rightMostNode != null);
+        TestUtils.checkLatchCount();
+        if (leftSideLevels > 10 ||
+            rightSideLevels > 10) {
+            fail("Levels too high (" +
+                 leftSideLevels +
+                 "/" +
+                 rightSideLevels +
+                 ") on descending insert");
+        }
+        txn.operationEnd();
+    }
+
+    /**
+     * Insert a bunch of keys.  Call verify and validate the results.
+     */
+    public void testVerify()
+	throws DatabaseException {
+
+        initEnv(false);
+	byte[][] keys = new byte[N_KEYS][];
+	LN[] lns = new LN[N_KEYS];
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        Locker txn = BasicLocker.createBasicLocker(envImpl);
+        NullCursor cursor = new NullCursor(tree.getDatabase(), txn);
+
+	for (int i = 0; i < N_KEYS; i++) {
+	    byte[] key = new byte[N_KEY_BYTES];
+	    keys[i] = key;
+	    lns[i] = new LN(new byte[1],
+                            envImpl,
+                            false); // replicated
+	    TestUtils.generateRandomAlphaBytes(key);
+	    insertAndRetrieve(cursor, key, lns[i]);
+	}
+
+        /*
+         * Note that verify will attempt to continue past errors, so assert
+         * on the boolean status it returns rather than expecting an
+         * exception.
+         */
+        assertTrue(env.verify(new VerifyConfig(), System.err));
+	DatabaseStats stats = db.verify(new VerifyConfig());
+	BtreeStats btStats = (BtreeStats) stats;
+
+	assertTrue(btStats.getInternalNodeCount() <
+		   btStats.getBottomInternalNodeCount());
+	assertTrue(btStats.getBottomInternalNodeCount() <
+		   btStats.getLeafNodeCount() +
+		   btStats.getDeletedLeafNodeCount());
+	assertTrue(btStats.getLeafNodeCount() +
+		   btStats.getDeletedLeafNodeCount() ==
+		   N_KEYS);
+        txn.operationEnd();
+
+        /* Now intentionally create LogFileNotFoundExceptions */
+        /*
+          db.close();
+          env.close();
+
+          This is disabled until the method for flipping files is
+          introduced. It's too hard to create a LogFileNotFoundException
+          by brute force deleting a file; often recovery doesn't work.
+          Instead, use a flipped file later on.
+
+        String[] jeFiles =
+            FileManager.listFiles(envHome,
+                                  new String[] {FileManager.JE_SUFFIX});
+        int targetIdx = jeFiles.length / 2;
+        assertTrue(targetIdx > 0);
+        File targetFile = new File(envHome, jeFiles[targetIdx]);
+        assertTrue(targetFile.delete());
+
+        initEnv(false);
+        assertFalse(env.verify(new VerifyConfig(), System.err));
+        */
+    }
+}
diff --git a/test/com/sleepycat/je/tree/TreeTestBase.java b/test/com/sleepycat/je/tree/TreeTestBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..0dc983eca44c64450f9d870961247b3028dd0da8
--- /dev/null
+++ b/test/com/sleepycat/je/tree/TreeTestBase.java
@@ -0,0 +1,226 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TreeTestBase.java,v 1.59.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.NullCursor;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.util.TestUtils;
+
+public class TreeTestBase extends TestCase {
+    static protected final boolean DEBUG = true;
+
+    static protected int N_KEY_BYTES = 10;
+    static protected int N_ITERS = 1;
+    static protected int N_KEYS = 10000;
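+    /*
+     * Deliberately small fanout: initEnv sets NODE_MAX to this value, so
+     * the Btree grows to several levels with only N_KEYS records, which the
+     * split, balance, and walk tests rely on.
+     */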
+    static protected int MAX_ENTRIES_PER_NODE = 6;
+
+    protected Tree tree = null;
+    protected byte[] minKey = null;
+    protected byte[] maxKey = null;
+    protected Database db = null;
+    protected Environment env = null;
+    protected File envHome = null;
+
+    public TreeTestBase() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException  {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    void initEnv(boolean duplicatesAllowed)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 Integer.toString(MAX_ENTRIES_PER_NODE));
+        envConfig.setAllowCreate(true);
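+        /*
+         * Boolean.getBoolean reads the system property named by
+         * TestUtils.NO_SYNC, letting the test harness disable txn fsyncs.
+         */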
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(duplicatesAllowed);
+        db = env.openDatabase(null, "foo", dbConfig);
+
+        tree = DbInternal.dbGetDatabaseImpl(db).getTree();
+        minKey = null;
+        maxKey = null;
+    }
+
+    public void tearDown()
+	throws DatabaseException, IOException {
+
+        db.close();
+        if (env != null) {
+            env.close();
+        }
+        env = null;
+        db = null;
+        tree = null;
+        minKey = null;
+        maxKey = null;
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
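+    /**
+     * Copies the given IN into a new IN constructed with level 2, preserving
+     * its node id, root flag, and entries.  (A helper for subclass tests,
+     * presumably for fabricating duplicate-tree INs.)
+     */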
+    protected IN makeDupIN(IN old) {
+        IN ret = new IN(DbInternal.dbGetDatabaseImpl(db),
+			old.getIdentifierKey(),
+                        MAX_ENTRIES_PER_NODE, 2);
+        ret.setNodeId(old.getNodeId());
+        ret.setIsRoot(old.isRoot());
+        for (int i = 0; i < old.getNEntries(); i++) {
+            ret.setEntry(i, old.getTarget(i), old.getKey(i),
+			 old.getLsn(i), old.getState(i));
+        }
+
+        return ret;
+    }
+
+    /**
+     * Helper routine to insert a key and immediately read it back.
+     */
+    protected void insertAndRetrieve(NullCursor cursor, byte[] key, LN ln)
+        throws DatabaseException {
+
+        if (minKey == null) {
+            minKey = key;
+        } else if (Key.compareKeys(key, minKey, null) < 0) {
+            minKey = key;
+        }
+
+        if (maxKey == null) {
+            maxKey = key;
+        } else if (Key.compareKeys(maxKey, key, null) < 0) {
+            maxKey = key;
+        }
+
+        TestUtils.checkLatchCount();
+        assertTrue(tree.insert(ln, key, false, cursor,
+			       new LockResult(null, null),
+                               ReplicationContext.NO_REPLICATE));
+        TestUtils.checkLatchCount();
+        assertTrue(retrieveLN(key) == ln);
+    }
+
+    /**
+     * Helper routine to read the LN referred to by key.
+     */
+    protected LN retrieveLN(byte[] key)
+        throws DatabaseException {
+
+        TestUtils.checkLatchCount();
+        IN n = tree.search(key, Tree.SearchType.NORMAL, -1,
+                           null, CacheMode.DEFAULT);
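+        /* tree.search returns the target BIN latched; the finally block
+           below releases that latch. */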
+        if (!(n instanceof BIN)) {
+            fail("search didn't return a BIN for key: " + key);
+        }
+        BIN bin = (BIN) n;
+        try {
+            int index = bin.findEntry(key, false, true);
+            if (index == -1) {
+                fail("Didn't read back key: " + key);
+            } else {
+                Node node = bin.getTarget(index);
+                if (node instanceof LN) {
+                    return (LN) node;
+                } else {
+                    fail("Didn't read back LN for: " + key);
+                }
+            }
+            /* We never get here, but the compiler doesn't know that. */
+            return null;
+        } finally {
+            bin.releaseLatch();
+            TestUtils.checkLatchCount();
+        }
+    }
+
+    /**
+     * Using getNextBin, count all the keys in the database.  Ensure that
+     * they're returned in ascending order.
+     */
+    protected int countAndValidateKeys(Tree tree)
+        throws DatabaseException {
+
+        TestUtils.checkLatchCount();
+        BIN nextBin = (BIN) tree.getFirstNode(CacheMode.DEFAULT);
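+        /* A single 0x00 byte sorts below every key inserted by these tests
+           (generateRandomAlphaBytes produces only alphabetic bytes), so it
+           is a safe initial "previous key". */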
+        byte[] prevKey = { 0x00 };
+
+        int cnt = 0;
+
+        while (nextBin != null) {
+            for (int i = 0; i < nextBin.getNEntries(); i++) {
+                byte[] curKey = nextBin.getKey(i);
+                if (Key.compareKeys(curKey, prevKey, null) <= 0) {
+                    throw new InconsistentNodeException
+                        ("keys are out of order");
+                }
+                cnt++;
+                prevKey = curKey;
+            }
+            nextBin = tree.getNextBin(nextBin,
+                                      false /*traverseWithinDupTree*/,
+                                      CacheMode.DEFAULT);
+        }
+        TestUtils.checkLatchCount();
+        return cnt;
+    }
+
+    /**
+     * Using getPrevBin, count all the keys in the database.  Ensure that
+     * they're returned in descending order.
+     */
+    protected int countAndValidateKeysBackwards(Tree tree)
+        throws DatabaseException {
+
+        TestUtils.checkLatchCount();
+        BIN nextBin = (BIN) tree.getLastNode(CacheMode.DEFAULT);
+        byte[] prevKey = null;
+
+        int cnt = 0;
+
+        while (nextBin != null) {
+            for (int i = nextBin.getNEntries() - 1; i >= 0; i--) {
+                byte[] curKey = nextBin.getKey(i);
+                if (prevKey != null &&
+                    Key.compareKeys(prevKey, curKey, null) <= 0) {
+                    throw new InconsistentNodeException
+                        ("keys are out of order");
+                }
+                cnt++;
+                prevKey = curKey;
+            }
+            nextBin = tree.getPrevBin(nextBin,
+                                      false /*traverseWithinDupTree*/,
+                                      CacheMode.DEFAULT);
+        }
+        return cnt;
+    }
+}
diff --git a/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java b/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e5cc7322d8faba1c5595118309d10596c02f1bc5
--- /dev/null
+++ b/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java
@@ -0,0 +1,159 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ValidateSubtreeDeleteTest.java,v 1.34.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class ValidateSubtreeDeleteTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private Database testDb;
+
+    public ValidateSubtreeDeleteTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        testDb = env.openDatabase(null, "Test", dbConfig);
+    }
+
+    public void tearDown() throws IOException, DatabaseException {
+        testDb.close();
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException E) {
+            }
+        }
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void testBasic()
+        throws Exception  {
+        try {
+            /* Make a 3 level tree full of data */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            byte[] testData = new byte[1];
+            testData[0] = 1;
+            data.setData(testData);
+
+            Transaction txn = env.beginTransaction(null, null);
+            for (int i = 0; i < 15; i ++) {
+                key.setData(TestUtils.getTestArray(i));
+                testDb.put(txn, key, data);
+            }
+
+            /* Should not be able to delete any of it. */
+            Tree tree = DbInternal.dbGetDatabaseImpl(testDb).getTree();
+            assertFalse(tree.validateDelete(0));
+            assertFalse(tree.validateDelete(1));
+
+            /*
+             * Should be able to delete both now that the txn is aborted and
+             * the data isn't there.
+             */
+            txn.abort();
+            assertTrue(tree.validateDelete(0));
+            assertTrue(tree.validateDelete(1));
+
+
+            /*
+             * Try explicit deletes.
+             */
+            txn = env.beginTransaction(null, null);
+            for (int i = 0; i < 15; i ++) {
+                key.setData(TestUtils.getTestArray(i));
+                testDb.put(txn, key, data);
+            }
+            for (int i = 0; i < 15; i ++) {
+                key.setData(TestUtils.getTestArray(i));
+                testDb.delete(txn, key);
+            }
+            assertFalse(tree.validateDelete(0));
+            assertFalse(tree.validateDelete(1));
+
+            // XXX, now commit the delete and compress and test that the
+            // subtree is deletable. Not finished yet! Also must test deletes.
+            txn.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    public void testDuplicates()
+        throws Exception  {
+        try {
+            /* Make a 3 level tree full of data */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            byte[] testData = new byte[1];
+            testData[0] = 1;
+            key.setData(testData);
+
+            Transaction txn = env.beginTransaction(null, null);
+            for (int i = 0; i < 4; i ++) {
+                data.setData(TestUtils.getTestArray(i));
+                testDb.put(txn, key, data);
+            }
+
+            /* Should not be able to delete any of it */
+            Tree tree = DbInternal.dbGetDatabaseImpl(testDb).getTree();
+            assertFalse(tree.validateDelete(0));
+
+            /*
+             * Should be able to delete now that the txn is aborted and the
+             * data isn't there.
+             */
+            txn.abort();
+            assertTrue(tree.validateDelete(0));
+
+            /*
+             * Try explicit deletes.  (Not exercised here yet; see the XXX
+             * note in testBasic.)
+             */
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+}
diff --git a/test/com/sleepycat/je/txn/CursorTxnTest.java b/test/com/sleepycat/je/txn/CursorTxnTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..84066966f15f0a95ffa5a0bb1ef542a722885643
--- /dev/null
+++ b/test/com/sleepycat/je/txn/CursorTxnTest.java
@@ -0,0 +1,226 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: CursorTxnTest.java,v 1.43.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DbEnvPool;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+public class CursorTxnTest extends TestCase {
+    private File envHome;
+    private Environment env;
+    private Database myDb;
+    private int initialEnvReadLocks;
+    private int initialEnvWriteLocks;
+    private boolean noLocking;
+
+    public CursorTxnTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        DbEnvPool.getInstance().clear();
+    }
+
+    public void setUp()
+	throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.setLoadPropertyFile(envConfig, false);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+	EnvironmentConfig envConfigAsSet = env.getConfig();
+	noLocking = !(envConfigAsSet.getLocking());
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        myDb = env.openDatabase(null, "test", dbConfig);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+        try {
+            myDb.close();
+        } catch (DatabaseException ignored) {}
+        try {
+            env.close();
+        } catch (DatabaseException ignored) {}
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Create a cursor with a null transaction.
+     */
+    public void testNullTxnLockRelease()
+        throws DatabaseException {
+
+        getInitialEnvStats();
+        Cursor cursor = myDb.openCursor(null, null);
+
+        /* First put() holds a write lock on the non-duplicate entry. */
+        insertData(cursor, 10, 1);
+        checkReadWriteLockCounts(cursor, 0, 1);
+
+        /* Check that count does not add more locks. */
+        int count = cursor.count();
+        assertEquals(1, count);
+        checkReadWriteLockCounts(cursor, 0, 1);
+
+        /*
+	 * Second put() holds a write lock on first record (since it
+	 * was write locked to discover that the duplicate tree is not
+	 * present yet) and a write lock on the duplicate entry and a
+	 * write lock on the DupCountLN.
+	 */
+        insertData(cursor, 10, 2);
+        checkReadWriteLockCounts(cursor, 0, 3);
+
+        /* Check that count does not add more locks. */
+        count = cursor.count();
+        assertEquals(2, count);
+        checkReadWriteLockCounts(cursor, 0, 3);
+
+        /*
+         * Third put() holds a write lock on the duplicate entry and a write
+         * lock on the DupCountLN.
+         */
+        insertData(cursor, 10, 3);
+        checkReadWriteLockCounts(cursor, 0, 2);
+
+        DatabaseEntry foundKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+
+        /* Check that read locks are held on forward traversal. */
+        OperationStatus status =
+	    cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+        checkReadWriteLockCounts(cursor, 1, 0);
+        int numSeen = 0;
+        while (status == OperationStatus.SUCCESS) {
+            numSeen++;
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+            checkReadWriteLockCounts(cursor, 1, 0);
+	    if (status != OperationStatus.SUCCESS) {
+		break;
+	    }
+
+            status = cursor.getCurrent(foundKey, foundData,
+                                       LockMode.DEFAULT);
+            checkReadWriteLockCounts(cursor, 1, 0);
+        }
+        assertEquals(30, numSeen);
+
+        /* Check that read locks are held on backwards traversal and count. */
+        status = cursor.getLast(foundKey, foundData, LockMode.DEFAULT);
+        checkReadWriteLockCounts(cursor, 1, 0);
+
+        while (status == OperationStatus.SUCCESS) {
+            count = cursor.count();
+            assertEquals("For key " +
+                         TestUtils.dumpByteArray(foundKey.getData()),
+                         3, count);
+            status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT);
+            checkReadWriteLockCounts(cursor, 1, 0);
+        }
+
+        /* Check that delete holds a write lock. */
+        status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+        while (status == OperationStatus.SUCCESS) {
+            assertEquals("For key " +
+                         TestUtils.dumpByteArray(foundKey.getData()),
+                         OperationStatus.SUCCESS, cursor.delete());
+            checkReadWriteLockCounts(cursor, 0, 2);
+            status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+            if (status == OperationStatus.SUCCESS) {
+                checkReadWriteLockCounts(cursor, 1, 0);
+            } else {
+                checkReadWriteLockCounts(cursor, 0, 2);
+            }
+        }
+
+        /* Check that count does not add more locks. */
+        count = cursor.count();
+        assertEquals(0, count);
+        checkReadWriteLockCounts(cursor, 0, 2);
+
+	cursor.close();
+    }
+
+    private void checkReadWriteLockCounts(Cursor cursor,
+                                          int expectReadLocks,
+                                          int expectWriteLocks)
+        throws DatabaseException {
+
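+	/*
+	 * When the environment is opened with locking disabled (noLocking is
+	 * set from the config in setUp), JE acquires no locks at all, so
+	 * every expected count collapses to zero.
+	 */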
+	if (noLocking) {
+	    expectReadLocks = expectWriteLocks = 0;
+	}
+
+        CursorImpl cursorImpl = DbTestProxy.dbcGetCursorImpl(cursor);
+        LockStats lockStats = cursorImpl.getLockStats();
+        assertEquals(expectReadLocks, lockStats.getNReadLocks());
+        assertEquals(expectWriteLocks, lockStats.getNWriteLocks());
+        lockStats = env.getLockStats(null);
+        assertEquals(initialEnvReadLocks + expectReadLocks,
+                     lockStats.getNReadLocks());
+        assertEquals(initialEnvWriteLocks + expectWriteLocks,
+                     lockStats.getNWriteLocks());
+    }
+
+    private void getInitialEnvStats()
+        throws DatabaseException {
+
+        LockStats lockStats = env.getLockStats(null);
+        initialEnvReadLocks = lockStats.getNReadLocks();
+        initialEnvWriteLocks = lockStats.getNWriteLocks();
+    }
+
+    private void insertData(Cursor cursor, int numRecords, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int i = 0; i < numRecords; i++) {
+            byte[] keyData = TestUtils.getTestArray(i);
+            byte[] dataData = new byte[1];
+            dataData[0] = (byte) dataVal;
+            key.setData(keyData);
+            data.setData(dataData);
+            OperationStatus status = cursor.putNoDupData(key, data);
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/txn/LockManagerTest.java b/test/com/sleepycat/je/txn/LockManagerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..df85886e51e5046630bbc7dcf7257e14ad1f2e13
--- /dev/null
+++ b/test/com/sleepycat/je/txn/LockManagerTest.java
@@ -0,0 +1,879 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockManagerTest.java,v 1.55.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.util.TestUtils;
+
+public class LockManagerTest extends TestCase {
+
+    private LockManager lockManager = null;
+    private Locker txn1;
+    private Locker txn2;
+    private Locker txn3;
+    private Locker txn4;
+    private Long nid;
+    private volatile int sequence;
+
+    private EnvironmentImpl env;
+    private File envHome;
+
+    public LockManagerTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.N_LOCK_TABLES.getName(),
+                                 "11");
+        envConfig.setAllowCreate(true);
+	envConfig.setTransactional(true);
+        env = new EnvironmentImpl(envHome,
+                                  envConfig,
+                                  null /*sharedCacheEnv*/,
+                                  false /*replicationIntended*/);
+
+        TxnManager txnManager = env.getTxnManager();
+	lockManager = txnManager.getLockManager();
+	txn1 = BasicLocker.createBasicLocker(env);
+	txn2 = BasicLocker.createBasicLocker(env);
+	txn3 = BasicLocker.createBasicLocker(env);
+	txn4 = BasicLocker.createBasicLocker(env);
+	nid = new Long(1);
+	sequence = 0;
+    }
+
+    public void tearDown()
+	throws DatabaseException {
+
+        txn1.operationEnd();
+        txn2.operationEnd();
+        txn3.operationEnd();
+        txn4.operationEnd();
+        env.close();
+    }
+
+    /*
+     * SR15926 showed a bug where nodeIds that are > 0x80000000 produce
+     * negative lock table indexes because of the modulo arithmetic in
+     * LockManager.getLockTableIndex().
+     */
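+    /*
+     * A minimal sketch of that failure mode (hypothetical computation shown
+     * only for illustration; the real getLockTableIndex() may differ):
+     *
+     *     int idx = ((int) nodeId) % nLockTables;
+     *     // (int) 0x80000000L == Integer.MIN_VALUE, so idx comes out
+     *     // negative, and indexing a lock table array with it would throw.
+     */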
+    public void testSR15926LargeNodeIds()
+        throws Exception {
+
+        try {
+            lockManager.lock(0x80000000L, txn1, LockType.WRITE,
+                             0, false, null);
+        } catch (Exception e) {
+            fail("shouldn't get exception " + e);
+        }
+    }
+
+    public void testNegatives()
+        throws Exception {
+
+	try {
+	    assertFalse(lockManager.isOwner(nid, txn1, LockType.READ));
+	    assertFalse(lockManager.isOwner(nid, txn1, LockType.WRITE));
+	    assertFalse(lockManager.isLocked(nid));
+	    assertFalse(lockManager.isWaiter(nid, txn1));
+	    lockManager.lock(1, txn1, LockType.READ, 0, false, null);
+
+	    /* already holds this lock */
+	    assertEquals(LockGrantType.EXISTING,
+                         lockManager.lock(1, txn1, LockType.READ, 0,
+					  false, null));
+	    assertFalse(lockManager.isOwner(nid, txn2, LockType.READ));
+	    assertFalse(lockManager.isOwner(nid, txn2, LockType.WRITE));
+	    assertTrue(lockManager.isLocked(nid));
+	    assertTrue(lockManager.nOwners(new Long(2)) == -1);
+	    assertTrue(lockManager.nWaiters(new Long(2)) == -1);
+
+            /* Lock 2 doesn't exist; releasing it shouldn't affect lock 1. */
+	    lockManager.release(2L, txn1);
+	    txn1.removeLock(2L);
+	    assertTrue(lockManager.isLocked(nid));
+
+            /* txn2 is not the owner, shouldn't release lock 1. */
+	    lockManager.release(1L, txn2);
+	    txn2.removeLock(1L);
+	    assertTrue(lockManager.isLocked(nid));
+            assertTrue(lockManager.isOwner(nid, txn1, LockType.READ));
+	    assertTrue(lockManager.nOwners(nid) == 1);
+
+            /* Now really release. */
+	    lockManager.release(1L, txn1);
+	    txn1.removeLock(1L);
+	    assertFalse(lockManager.isLocked(nid));
+            assertFalse(lockManager.isOwner(nid, txn1, LockType.READ));
+	    assertFalse(lockManager.nOwners(nid) == 1);
+
+	    lockManager.lock(1, txn1, LockType.WRITE, 0, false, null);
+	    /* holds write and subsequent request for READ is ok */
+	    lockManager.lock(1, txn1, LockType.READ, 0, false, null);
+	    /* already holds this lock */
+	    assertTrue(lockManager.lock(1, txn1, LockType.WRITE,
+					0, false, null) ==
+		       LockGrantType.EXISTING);
+	    assertFalse(lockManager.isWaiter(nid, txn1));
+	} catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+	}
+    }
+
+    /**
+     * Acquire three read locks and make sure that they share nicely.
+     */
+    public void testMultipleReaders()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testMultipleReaders1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			sequence++;
+			while (sequence < 3) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testMultipleReaders2") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn2, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			sequence++;
+			while (sequence < 3) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester3 =
+	    new JUnitThread("testMultipleReaders3") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn3, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn3, LockType.READ));
+			sequence++;
+			while (sequence < 3) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn3);
+			txn3.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester3.start();
+	tester1.finishTest();
+	tester2.finishTest();
+	tester3.finishTest();
+    }
+
+    /**
+     * Grab two read locks, hold them, and make sure that a write lock
+     * waits for them to be released.
+     */
+    public void testMultipleReadersSingleWrite1()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testMultipleReaders1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			assertTrue(lockManager.isWaiter(nid, txn3));
+			assertFalse(lockManager.isWaiter(nid, txn1));
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+			assertFalse
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testMultipleReaders2") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn2, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			assertTrue(lockManager.isWaiter(nid, txn3));
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester3 =
+	    new JUnitThread("testMultipleReaders3") {
+		public void testBody() {
+		    try {
+			while (lockManager.nOwners(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn3, LockType.WRITE, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn3, LockType.WRITE));
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester3.start();
+	tester1.finishTest();
+	tester2.finishTest();
+	tester3.finishTest();
+    }
+
+    /**
+     * Acquire two read locks, put a write locker behind the two
+     * read lockers, and then queue a read locker behind the writer.
+     * Ensure that the third reader is not granted until the writer
+     * releases the lock.
+     */
+    public void testMultipleReadersSingleWrite2()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testMultipleReaders1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			while (lockManager.nWaiters(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testMultipleReaders2") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn2, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			while (lockManager.nWaiters(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester3 =
+	    new JUnitThread("testMultipleReaders3") {
+		public void testBody() {
+		    try {
+			while (lockManager.nOwners(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn3, LockType.WRITE, 0,
+					 false, null);
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.isOwner(nid, txn3, LockType.WRITE));
+			lockManager.release(1L, txn3);
+			txn3.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester4 =
+	    new JUnitThread("testMultipleReaders4") {
+		public void testBody() {
+		    try {
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn4, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn4, LockType.READ));
+			lockManager.release(1L, txn4);
+			txn4.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester3.start();
+	tester4.start();
+	tester1.finishTest();
+	tester2.finishTest();
+	tester3.finishTest();
+	tester4.finishTest();
+    }
+
+    /**
+     * Acquire two read locks for two transactions, then request a write
+     * lock for a third transaction.  Then request a write lock for one
+     * of the first transactions that already has a read lock (i.e.
+     * request an upgrade lock).  Make sure it butts in front of the
+     * existing wait lock.
+     */
+    public void testUpgradeLock()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testUpgradeLock1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			while (lockManager.nWaiters(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testUpgradeLock2") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn2, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn2, LockType.WRITE, 0,
+					 false, null);
+			assertTrue(lockManager.nWaiters(nid) == 1);
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester3 =
+	    new JUnitThread("testUpgradeLock3") {
+		public void testBody() {
+		    try {
+			while (lockManager.nOwners(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn3, LockType.WRITE, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn3, LockType.WRITE));
+			lockManager.release(1L, txn3);
+			txn3.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester3.start();
+	tester1.finishTest();
+	tester2.finishTest();
+	tester3.finishTest();
+    }
+
+    /**
+     * Acquire a read lock, then request a write lock for a second
+     * transaction in non-blocking mode.  Make sure it fails.
+     */
+    public void testNonBlockingLock1()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testNonBlocking1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testNonBlocking2") {
+		public void testBody() {
+		    try {
+			/* wait for tester1 */
+			while (lockManager.nOwners(nid) < 1) {
+			    Thread.yield();
+			}
+                        LockGrantType grant = lockManager.lock
+                            (1, txn2, LockType.WRITE, 0, true, null);
+                        assertSame(LockGrantType.DENIED, grant);
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			sequence++;
+			/* wait for tester1 to release the lock */
+			while (lockManager.nOwners(nid) > 0) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.lock(1, txn2, LockType.WRITE, 0,
+                                              false, null) ==
+			     LockGrantType.NEW);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+
+    /**
+     * Acquire a write lock, then request a read lock for a second
+     * transaction in non-blocking mode.  Make sure it fails.
+     */
+    public void testNonBlockingLock2()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testNonBlocking1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.WRITE, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.WRITE));
+			sequence++;
+			while (sequence < 2) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testNonBlocking2") {
+		public void testBody() {
+		    try {
+			/* wait for tester1 */
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+                        LockGrantType grant = lockManager.lock
+                            (1, txn2, LockType.READ, 0, true, null);
+                        assertSame(LockGrantType.DENIED, grant);
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			sequence++;
+			/* wait for tester1 to release the lock */
+			while (lockManager.nOwners(nid) > 0) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.lock(1, txn2, LockType.READ, 0,
+                                              false, null) ==
+			     LockGrantType.NEW);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+
+    /**
+     * Acquire a write lock, then request a read lock for a second
+     * transaction in blocking mode.  Make sure it waits and then times out.
+     */
+    public void testWaitingLock()
+	throws Throwable {
+
+	JUnitThread tester1 =
+	    new JUnitThread("testBlocking1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.WRITE, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.WRITE));
+			sequence++;
+			while (sequence < 2) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testBlocking2") {
+		public void testBody() {
+		    try {
+			/* wait for tester1 */
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			try {
+			    lockManager.lock(1, txn2, LockType.READ, 500,
+                                             false, null);
+			    fail("didn't time out");
+			} catch (DeadlockException e) {
+                            assertTrue(TestUtils.skipVersion(e).startsWith("Lock "));
+			}
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			sequence++;
+			/* wait for tester1 to release the lock */
+			while (lockManager.nOwners(nid) > 0) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.lock(1, txn2, LockType.READ, 0,
+                                              false, null) ==
+			     LockGrantType.NEW);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			assertFalse
+			    (lockManager.isOwner(nid, txn2, LockType.WRITE));
+			assertTrue(lockManager.nWaiters(nid) == 0);
+			assertTrue(lockManager.nOwners(nid) == 1);
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester1.finishTest();
+	tester2.finishTest();
+    }
+
+    /**
+     * Test that DeadlockException has the correct owners and waiters when
+     * it is thrown due to a timeout.
+     *
+     * Create five threads, the first two of which take a readlock and the
+     * second two of which try for a write lock backed up behind the two
+     * read locks.  Then have a fifth thread try for a read lock which backs
+     * up behind all of them.  The first two threads (read lockers) are owners
+     * and the second two threads are waiters.  When the fifth thread catches
+     * the DeadlockException make sure that it contains the txn ids for the
+     * two readers in the owners array and the txn ids for the two writers
+     * in the waiters array.
+     */
+    public void testDeadlock()
+	throws Throwable {
+
+	/* Get rid of these inferior BasicLockers -- we want real Txns. */
+        txn1.operationEnd();
+        txn2.operationEnd();
+        txn3.operationEnd();
+        txn4.operationEnd();
+
+	TransactionConfig config = new TransactionConfig();
+	txn1 = Txn.createTxn(env, config, ReplicationContext.NO_REPLICATE);
+	txn2 = Txn.createTxn(env, config, ReplicationContext.NO_REPLICATE);
+	txn3 = Txn.createTxn(env, config, ReplicationContext.NO_REPLICATE);
+	txn4 = Txn.createTxn(env, config, ReplicationContext.NO_REPLICATE);
+	final Txn txn5 =
+	    Txn.createTxn(env, config, ReplicationContext.NO_REPLICATE);
+
+	sequence = 0;
+	JUnitThread tester1 =
+	    new JUnitThread("testMultipleReaders1") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn1, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn1, LockType.READ));
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn1);
+			txn1.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester2 =
+	    new JUnitThread("testMultipleReaders2") {
+		public void testBody() {
+		    try {
+			lockManager.lock(1, txn2, LockType.READ, 0,
+					 false, null);
+			assertTrue
+			    (lockManager.isOwner(nid, txn2, LockType.READ));
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			lockManager.release(1L, txn2);
+			txn2.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester3 =
+	    new JUnitThread("testMultipleReaders3") {
+		public void testBody() {
+		    try {
+			while (lockManager.nOwners(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn3, LockType.WRITE, 0,
+					 false, null);
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.isOwner(nid, txn3, LockType.WRITE));
+			lockManager.release(1L, txn3);
+			txn3.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester4 =
+	    new JUnitThread("testDeadlock4") {
+		public void testBody() {
+		    try {
+			while (lockManager.nOwners(nid) < 2) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn4, LockType.WRITE, 0,
+					 false, null);
+			while (sequence < 1) {
+			    Thread.yield();
+			}
+			assertTrue
+			    (lockManager.isOwner(nid, txn4, LockType.WRITE));
+			lockManager.release(1L, txn4);
+			txn4.removeLock(1L);
+		    } catch (DatabaseException DBE) {
+                        DBE.printStackTrace();
+			fail("caught DatabaseException " + DBE);
+		    }
+		}
+	    };
+
+	JUnitThread tester5 =
+	    new JUnitThread("testDeadlock5") {
+		public void testBody() {
+		    try {
+			while (lockManager.nWaiters(nid) < 1) {
+			    Thread.yield();
+			}
+			lockManager.lock(1, txn5, LockType.READ, 900,
+					 false, null);
+			fail("expected DeadlockException");
+		    } catch (DeadlockException DLE) {
+
+			long[] owners = DLE.getOwnerTxnIds();
+			long[] waiters = DLE.getWaiterTxnIds();
+
+			assertTrue((owners[0] == txn1.getId() &&
+				    owners[1] == txn2.getId()) ||
+				   (owners[1] == txn1.getId() &&
+				    owners[0] == txn2.getId()));
+
+			assertTrue((waiters[0] == txn3.getId() &&
+				    waiters[1] == txn4.getId()) ||
+				   (waiters[1] == txn3.getId() &&
+				    waiters[0] == txn4.getId()));
+
+		    } catch (DatabaseException DBE) {
+			DBE.printStackTrace(System.out);
+			fail("expected DeadlockException");
+		    }
+		    sequence = 1;
+		}
+	    };
+
+	tester1.start();
+	tester2.start();
+	tester3.start();
+	tester4.start();
+	tester5.start();
+	tester1.finishTest();
+	tester2.finishTest();
+	tester3.finishTest();
+	tester4.finishTest();
+	tester5.finishTest();
+	((Txn) txn1).abort(false);
+	((Txn) txn2).abort(false);
+	((Txn) txn3).abort(false);
+	((Txn) txn4).abort(false);
+	txn5.abort(false);
+    }
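+
+    /*
+     * A minimal sketch (not part of the test) of how application code might
+     * consume the diagnostic arrays verified above; only the
+     * getOwnerTxnIds() and getWaiterTxnIds() accessors used in testDeadlock
+     * are assumed, and nodeId/timeoutMillis are hypothetical names:
+     *
+     *     try {
+     *         lockManager.lock(nodeId, txn, LockType.READ, timeoutMillis,
+     *                          false, null);
+     *     } catch (DeadlockException e) {
+     *         long[] owners = e.getOwnerTxnIds();   // txns holding the lock
+     *         long[] waiters = e.getWaiterTxnIds(); // txns queued behind it
+     *         // Log the conflicting txn ids, then abort and retry.
+     *     }
+     */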
+}
diff --git a/test/com/sleepycat/je/txn/LockTest.java b/test/com/sleepycat/je/txn/LockTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..59f750bdb2a57c86b7cd444e392245c47a4bc0c8
--- /dev/null
+++ b/test/com/sleepycat/je/txn/LockTest.java
@@ -0,0 +1,1035 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: LockTest.java,v 1.61.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.util.TestUtils;
+
+public class LockTest extends TestCase {
+    private EnvironmentImpl envImpl;
+    private File envHome;
+
+    public LockTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        envImpl = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+    }
+
+    public void tearDown()
+        throws DatabaseException {
+
+        envImpl.close();
+    }
+
+    public void testLockConflicts()
+        throws Exception {
+
+        Locker txn1 = BasicLocker.createBasicLocker(envImpl);
+        Locker txn2 = BasicLocker.createBasicLocker(envImpl);
+        Locker txn3 = BasicLocker.createBasicLocker(envImpl);
+
+        MemoryBudget mb = envImpl.getMemoryBudget();
+        try {
+
+            /*
+             * Start fresh. Ask for a read lock from txn1 twice,
+             * should only be one owner. Then add multiple
+             * would-be-writers as waiters.
+             */
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.EXISTING,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+
+            /* txn1 has a READ lock. */
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+
+            /* txn2 asks for a read lock, gets it. */
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+
+            /* txn1 asks for WRITE, must wait */
+            assertEquals(LockGrantType.WAIT_PROMOTION,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+
+            /* txn2 write request must wait */
+            assertEquals(LockGrantType.WAIT_PROMOTION,
+                         lock.lock(LockType.WRITE, txn2, false, mb, 0).
+                         lockGrant);
+
+            /* Two read locks, two write waiters */
+            assertEquals(2, lock.nOwners());
+            assertEquals(2, lock.nWaiters());
+
+            /*
+             * Release txn1 read lock, which causes txn2's read lock to be
+             * promoted to a write lock.
+             */
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            assertEquals(1, lock.nOwners());
+            assertEquals(1, lock.nWaiters());
+
+            /* Release txn2 write lock, now txn1 will get its write lock. */
+            lock.release(txn2, mb, 0 /* lockTableIndex */);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+
+            /* Release txn1's write lock. */
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            assertEquals(0, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+
+            /* Start fresh. Get a write lock, then get a read lock. */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.EXISTING,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /* Start fresh. Get a read lock, upgrade to a write lock. */
+            lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.PROMOTION,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /*
+             * Start fresh. Get a read lock, then ask for a non-blocking
+             * write lock. The latter should be denied.
+             */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.DENIED,
+                         lock.lock(LockType.WRITE, txn2, true, mb, 0).
+                         lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /* Two write requests, should be one owner. */
+            lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.EXISTING,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /*
+             * Ensure that a read request queued behind a waiting write
+             * request also waits, even though the read is compatible with
+             * the current read owner.
+             */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.WRITE, txn2, false, mb, 0).
+                         lockGrant);
+
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn3, false, mb, 0).
+                         lockGrant);
+
+            assertEquals(1, lock.nOwners());
+            assertEquals(2, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            lock.release(txn2, mb, 0 /* lockTableIndex */);
+            lock.release(txn3, mb, 0 /* lockTableIndex */);
+
+            /* Check non-blocking requests. */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+
+            /* Since this is a non-blocking request, it fails and does not
+               go on the wait queue. */
+            assertEquals(LockGrantType.DENIED,
+                         lock.lock(LockType.WRITE, txn2, true, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn3, true, mb, 0).
+                         lockGrant);
+            assertEquals(2, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            lock.release(txn3, mb, 0 /* lockTableIndex */);
+
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn3, false, mb, 0).
+                         lockGrant);
+            assertEquals(3, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            lock.release(txn2, mb, 0 /* lockTableIndex */);
+            lock.release(txn3, mb, 0 /* lockTableIndex */);
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+        }
+    }
+
+    public void testOwners()
+        throws Exception {
+
+        Locker txn1 = BasicLocker.createBasicLocker(envImpl);
+        Locker txn2 = BasicLocker.createBasicLocker(envImpl);
+        Locker txn3 = BasicLocker.createBasicLocker(envImpl);
+        Locker txn4 = BasicLocker.createBasicLocker(envImpl);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /*
+             * Build up 3 owners for a lock, to test the lazy
+             * initialization and optimization for a single owner/waiter.
+             */
+            Lock lock = new LockImpl();
+            /* should be no writer. */
+            assertTrue(lock.getWriteOwnerLocker() == null);
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn3, false, mb, 0).
+                         lockGrant);
+
+            /* should be no writer. */
+            assertTrue(lock.getWriteOwnerLocker() == null);
+
+            /* expect 3 owners, 0 waiters. */
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn3, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* release the first locker. */
+            lock.release(txn1, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn3, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* Add more. */
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn4, false, mb, 0).
+                         lockGrant);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn3, LockType.READ));
+            expectedOwners.add(new LockInfo(txn4, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* release */
+            lock.release(txn2, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn3, LockType.READ));
+            expectedOwners.add(new LockInfo(txn4, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* release */
+            lock.release(txn3, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn4, LockType.READ));
+            /* Only 1 locker in the owner set, and it is not a write owner. */
+            checkOwners(expectedOwners, lock, 0);
+            assertTrue(lock.getWriteOwnerLocker() == null);
+
+            /* release */
+            lock.release(txn4, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            checkOwners(expectedOwners, lock, 0);
+
+            /* Add owners again. */
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+        }
+    }
+
+    public void testWaiters()
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn2 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn3 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn4 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn5 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /*
+             * Build up 2 owners and 3 waiters for a lock, to test the
+             * lazy initialization and optimization for a single owner/waiter.
+             */
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.WRITE, txn3, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.WRITE, txn4, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_PROMOTION,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+
+            /* should be no writer. */
+            assertTrue(lock.getWriteOwnerLocker() == null);
+
+            /* expect 2 owners, 3 waiters. */
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            checkOwners(expectedOwners, lock, 3);
+
+            List<LockInfo> waiters = new ArrayList<LockInfo>();
+            waiters.add(new LockInfo(txn1, LockType.WRITE));
+            waiters.add(new LockInfo(txn3, LockType.WRITE));
+            waiters.add(new LockInfo(txn4, LockType.WRITE));
+            checkWaiters(waiters, lock);
+
+            /* release a waiter, shouldn't change anything. */
+            lock.release(txn4, mb, 0);
+            checkWaiters(waiters, lock);
+
+            /*
+             * Release the other read lock, expect txn1 to be promoted to a
+             * write lock.
+             */
+            lock.release(txn2, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 2);
+
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /* release */
+            lock.release(txn1, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn3, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 1);
+
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /*
+             * Add multiple read lock waiters so that we can test promoting
+             * multiple waiters at once.
+             */
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn5, false, mb, 0).
+                         lockGrant);
+
+            checkOwners(expectedOwners, lock, 4);
+            waiters.add(new LockInfo(txn1, LockType.READ));
+            waiters.add(new LockInfo(txn2, LockType.READ));
+            waiters.add(new LockInfo(txn5, LockType.READ));
+            checkWaiters(waiters, lock);
+
+            /* flush one of the waiters. */
+            lock.flushWaiter(txn5, mb, 0);
+            waiters.remove(3);
+            checkWaiters(waiters, lock);
+
+            /* re-add. */
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn5, false, mb, 0).
+                         lockGrant);
+            waiters.add(new LockInfo(txn5, LockType.READ));
+
+            /* release txn3 */
+            lock.release(txn3, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn4, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 3);
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /* release txn4, expect all read locks to promote. */
+            lock.release(txn4, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn5, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+            waiters.clear();
+            checkWaiters(waiters, lock);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+            lock.release(txn5, mb, 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+            txn5.operationEnd();
+        }
+    }
+
+    public void testPromotion()
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn2 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn3 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn4 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn5 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /*
+             * Build up 1 owner and 3 read waiters for a lock. Then
+             * check that all the waiters promote properly.
+             */
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn3, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn4, false, mb, 0).
+                         lockGrant);
+
+            /* Check that 1 owner, 3 waiters exist. */
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 3);
+
+            List<LockInfo> waiters = new ArrayList<LockInfo>();
+            waiters.add(new LockInfo(txn2, LockType.READ));
+            waiters.add(new LockInfo(txn3, LockType.READ));
+            waiters.add(new LockInfo(txn4, LockType.READ));
+            checkWaiters(waiters, lock);
+
+            /* Release the writer, expect all 3 waiters to promote. */
+            lock.release(txn1, mb, 0);
+            expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn3, LockType.READ));
+            expectedOwners.add(new LockInfo(txn4, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+            waiters.clear();
+            checkWaiters(waiters, lock);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn2, mb, 0);
+            lock.release(txn3, mb, 0);
+            lock.release(txn4, mb, 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+            txn5.operationEnd();
+        }
+    }
+
+    /**
+     * Tests conflicts between range locks and all other lock types.
+     */
+    public void testRangeConflicts()
+        throws Exception {
+
+        /* No owner */
+        checkConflict(null,
+                      LockType.RANGE_READ,
+                      LockGrantType.NEW);
+        checkConflict(null,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.NEW);
+        checkConflict(null,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.NEW);
+
+        /* Owner has READ */
+        checkConflict(LockType.READ,
+                      LockType.RANGE_READ,
+                      LockGrantType.NEW);
+        checkConflict(LockType.READ,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.READ,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.NEW);
+
+        /* Owner has WRITE */
+        checkConflict(LockType.WRITE,
+                      LockType.RANGE_READ,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.WRITE,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.WRITE,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.NEW);
+
+        /* Owner has RANGE_READ */
+        checkConflict(LockType.RANGE_READ,
+                      LockType.READ,
+                      LockGrantType.NEW);
+        checkConflict(LockType.RANGE_READ,
+                      LockType.WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_READ,
+                      LockType.RANGE_READ,
+                      LockGrantType.NEW);
+        checkConflict(LockType.RANGE_READ,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_READ,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.WAIT_NEW);
+
+        /* Owner has RANGE_WRITE */
+        checkConflict(LockType.RANGE_WRITE,
+                      LockType.READ,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_WRITE,
+                      LockType.WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_WRITE,
+                      LockType.RANGE_READ,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_WRITE,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.WAIT_NEW);
+        checkConflict(LockType.RANGE_WRITE,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.WAIT_NEW);
+
+        /* Owner has RANGE_INSERT */
+        checkConflict(LockType.RANGE_INSERT,
+                      LockType.READ,
+                      LockGrantType.NEW);
+        checkConflict(LockType.RANGE_INSERT,
+                      LockType.WRITE,
+                      LockGrantType.NEW);
+        checkConflict(LockType.RANGE_INSERT,
+                      LockType.RANGE_READ,
+                      LockGrantType.WAIT_RESTART);
+        checkConflict(LockType.RANGE_INSERT,
+                      LockType.RANGE_WRITE,
+                      LockGrantType.WAIT_RESTART);
+        checkConflict(LockType.RANGE_INSERT,
+                      LockType.RANGE_INSERT,
+                      LockGrantType.NEW);
+    }
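+
+    /*
+     * For reference, a summary of the conflict matrix exercised above
+     * (rows = lock held by txn1, columns = lock requested by txn2;
+     * N = NEW, W = WAIT_NEW, R = WAIT_RESTART, "-" = not exercised here):
+     *
+     *                  READ  WRITE  R_READ  R_WRITE  R_INSERT
+     *   (no owner)       -     -      N        N        N
+     *   READ             -     -      N        W        N
+     *   WRITE            -     -      W        W        N
+     *   RANGE_READ       N     W      N        W        W
+     *   RANGE_WRITE      W     W      W        W        W
+     *   RANGE_INSERT     N     N      R        R        N
+     */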
+
+    /**
+     * Tests that when firstRequest is held by one locker and secondRequest
+     * is then requested by another, secondGrantType is the expected result.
+     */
+    private void checkConflict(LockType firstRequest, LockType secondRequest,
+                               LockGrantType secondGrantType)
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn2 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            Lock lock = new LockImpl();
+
+            if (firstRequest != null) {
+                assertEquals(LockGrantType.NEW,
+                             lock.lock(firstRequest, txn1, false, mb, 0).
+                             lockGrant);
+            }
+            LockGrantType typeGranted =
+                lock.lock(secondRequest, txn2, false, mb, 0).
+                lockGrant;
+            assertEquals(secondGrantType, typeGranted);
+
+            boolean wait = (typeGranted == LockGrantType.WAIT_NEW ||
+                            typeGranted == LockGrantType.WAIT_PROMOTION ||
+                            typeGranted == LockGrantType.WAIT_RESTART);
+            boolean given = (typeGranted == LockGrantType.NEW);
+            boolean restart = (typeGranted == LockGrantType.WAIT_RESTART);
+
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            List<LockInfo> expectedWaiters = new ArrayList<LockInfo>();
+
+            if (firstRequest != null) {
+                expectedOwners.add(new LockInfo(txn1, firstRequest));
+            }
+            if (given) {
+                expectedOwners.add(new LockInfo(txn2, secondRequest));
+            } else if (wait) {
+                if (restart) {
+                    expectedWaiters.add(new LockInfo(txn2, LockType.RESTART));
+                } else {
+                    expectedWaiters.add(new LockInfo(txn2, secondRequest));
+                }
+            }
+
+            checkOwners(expectedOwners, lock, expectedWaiters.size());
+            checkWaiters(expectedWaiters, lock);
+
+            lock.release(txn1, mb, 0);
+            if (wait) {
+                if (restart) {
+                    checkOwners(new HashSet<LockInfo>(), lock, 0);
+                } else {
+                    checkOwners(new HashSet<LockInfo>(expectedWaiters), lock, 0);
+                }
+            }
+            lock.release(txn2, mb, 0);
+            assertEquals(0, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+        }
+    }
+
+    /**
+     * Tests upgrades between range locks and all other lock types.
+     */
+    public void testRangeUpgrades()
+        throws Exception {
+
+        /* Owner has READ */
+        checkUpgrade(LockType.READ,
+                     LockType.RANGE_READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_READ);
+        checkUpgrade(LockType.READ,
+                     LockType.RANGE_WRITE,
+                     LockGrantType.PROMOTION,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.READ,
+                     LockType.RANGE_INSERT,
+                     null,
+                     LockType.READ);
+
+        /* Owner has WRITE */
+        checkUpgrade(LockType.WRITE,
+                     LockType.RANGE_READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.WRITE,
+                     LockType.RANGE_WRITE,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.WRITE,
+                     LockType.RANGE_INSERT,
+                     null,
+                     LockType.WRITE);
+
+        /* Owner has RANGE_READ */
+        checkUpgrade(LockType.RANGE_READ,
+                     LockType.READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_READ);
+        checkUpgrade(LockType.RANGE_READ,
+                     LockType.WRITE,
+                     LockGrantType.PROMOTION,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_READ,
+                     LockType.RANGE_READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_READ);
+        checkUpgrade(LockType.RANGE_READ,
+                     LockType.RANGE_WRITE,
+                     LockGrantType.PROMOTION,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_READ,
+                     LockType.RANGE_INSERT,
+                     null,
+                     LockType.RANGE_READ);
+
+        /* Owner has RANGE_WRITE */
+        checkUpgrade(LockType.RANGE_WRITE,
+                     LockType.READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_WRITE,
+                     LockType.WRITE,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_WRITE,
+                     LockType.RANGE_READ,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_WRITE,
+                     LockType.RANGE_WRITE,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_WRITE);
+        checkUpgrade(LockType.RANGE_WRITE,
+                     LockType.RANGE_INSERT,
+                     null,
+                     LockType.RANGE_WRITE);
+
+        /* Owner has RANGE_INSERT */
+        checkUpgrade(LockType.RANGE_INSERT,
+                     LockType.READ,
+                     null,
+                     LockType.RANGE_INSERT);
+        checkUpgrade(LockType.RANGE_INSERT,
+                     LockType.WRITE,
+                     null,
+                     LockType.RANGE_INSERT);
+        checkUpgrade(LockType.RANGE_INSERT,
+                     LockType.RANGE_READ,
+                     null,
+                     LockType.RANGE_INSERT);
+        checkUpgrade(LockType.RANGE_INSERT,
+                     LockType.RANGE_WRITE,
+                     null,
+                     LockType.RANGE_INSERT);
+        checkUpgrade(LockType.RANGE_INSERT,
+                     LockType.RANGE_INSERT,
+                     LockGrantType.EXISTING,
+                     LockType.RANGE_INSERT);
+    }
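+
+    /*
+     * For reference, a summary of the upgrade matrix exercised above (the
+     * same locker holds the row type and requests the column type;
+     * E = EXISTING, P = PROMOTION, ! = AssertionError expected, with the
+     * resulting held type in parentheses; "." = not exercised here;
+     * RR/RW/RI = RANGE_READ/RANGE_WRITE/RANGE_INSERT):
+     *
+     *                  READ    WRITE   R_READ  R_WRITE R_INSERT
+     *   READ             .       .     E (RR)  P (RW)  ! (READ)
+     *   WRITE            .       .     E (RW)  E (RW)  ! (WRITE)
+     *   RANGE_READ     E (RR)  P (RW)  E (RR)  P (RW)  ! (RR)
+     *   RANGE_WRITE    E (RW)  E (RW)  E (RW)  E (RW)  ! (RW)
+     *   RANGE_INSERT   ! (RI)  ! (RI)  ! (RI)  ! (RI)  E (RI)
+     */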
+
+    /**
+     * Tests that when firstRequest is held and secondRequest is then
+     * requested by the same locker, secondGrantType is the expected result
+     * and finalType is then held.  A null secondGrantType arg means that an
+     * AssertionError is expected.
+     */
+    private void checkUpgrade(LockType firstRequest, LockType secondRequest,
+                              LockGrantType secondGrantType,
+                              LockType finalType)
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            Lock lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(firstRequest, txn1, false, mb, 0).
+                         lockGrant);
+            LockGrantType typeGranted = null;
+            try {
+                typeGranted = lock.lock(secondRequest, txn1, false, mb, 0).
+                    lockGrant;
+                if (secondGrantType == null) {
+                    fail("expected AssertionError");
+                }
+            } catch (AssertionError e) {
+                if (secondGrantType != null) {
+                    fail(e.toString());
+                }
+            }
+            assertEquals(secondGrantType, typeGranted);
+
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, finalType));
+            checkOwners(expectedOwners, lock, 0);
+            lock.release(txn1, mb, 0);
+            assertEquals(0, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+        }
+    }
+
+    /**
+     * Tests that when a range read/write is requested, and a range insert is
+     * waiting but not held, a WAIT_RESTART occurs.  This requires that the
+     * waiter list is examined by Lock.lock().
+     */
+    public void testRangeInsertWaiterConflict()
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn2 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn3 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.RANGE_READ, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.RANGE_INSERT, txn2, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_RESTART,
+                         lock.lock(LockType.RANGE_READ, txn3, false, mb, 0).
+                         lockGrant);
+
+            /* Check that 1 owner, 1 waiter exist. */
+
+            Set<LockInfo> expectedOwners = new HashSet<LockInfo>();
+            expectedOwners.add(new LockInfo(txn1, LockType.RANGE_READ));
+            checkOwners(expectedOwners, lock, 2);
+
+            List<LockInfo> waiters = new ArrayList<LockInfo>();
+            waiters.add(new LockInfo(txn2, LockType.RANGE_INSERT));
+            waiters.add(new LockInfo(txn3, LockType.RESTART));
+            checkWaiters(waiters, lock);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+            lock.release(txn3, mb, 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+        }
+    }
+
+    private void checkOwners(Set<LockInfo> expectedOwners,
+                             Lock lock,
+                             int numExpectedWaiters) {
+
+        /* check number of owners. */
+        Set owners = lock.getOwnersClone();
+        assertEquals(expectedOwners.size(), owners.size());
+
+        /* check number of waiters. */
+        assertEquals(numExpectedWaiters, lock.nWaiters());
+
+        /* Make sure that isOwner returns the right thing. */
+        Iterator<LockInfo> iter = expectedOwners.iterator();
+        while (iter.hasNext()) {
+            LockInfo info = iter.next();
+
+            /* Make sure it's an owner, of the right type of lock. */
+            assertEquals(info.getLockType().isWriteLock(),
+                         lock.isOwnedWriteLock(info.getLocker()));
+            assertTrue(lock.isOwner(info.getLocker(), info.getLockType()));
+        }
+    }
+
+    private void checkWaiters(List<LockInfo> expectedWaiters,
+                              Lock lock) {
+        List waiters = lock.getWaitersListClone();
+        assertEquals(expectedWaiters.size(), waiters.size());
+
+        /* check order of the list. */
+        for (int i = 0; i < expectedWaiters.size(); i++) {
+            LockInfo info = expectedWaiters.get(i);
+            LockInfo waiterInfo = (LockInfo) waiters.get(i);
+            assertEquals("i=" + i, info.getLocker(), waiterInfo.getLocker());
+            assertEquals("i=" + i,
+                         info.getLockType(), waiterInfo.getLockType());
+            assertFalse(lock.isOwner(info.getLocker(), info.getLockType()));
+            assertTrue(lock.isWaiter(info.getLocker()));
+        }
+    }
+
+    public void testTransfer()
+        throws Exception {
+
+        Locker txn1 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn2 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn3 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn4 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        Locker txn5 = Txn.createAutoTxn(envImpl, new TransactionConfig(),
+                                        false, /*noAPIReadLock*/
+                                        ReplicationContext.NO_REPLICATE);
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /* Transfer from one locker to another locker. */
+            Long nid = new Long(1);
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                         lock.lock(LockType.WRITE, txn1, false, mb, 0).
+                         lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                         lock.lock(LockType.READ, txn2, false, mb, 0).
+                         lockGrant);
+            assertTrue(lock.isOwner(txn1, LockType.WRITE));
+            assertFalse(lock.isOwner(txn2, LockType.READ));
+
+            lock.transfer(nid, txn1, txn2, mb, 0);
+            assertFalse(lock.isOwner(txn1, LockType.WRITE));
+            assertFalse(lock.isOwner(txn1, LockType.READ));
+            assertTrue(lock.isOwnedWriteLock(txn2));
+
+            /* Transfer to multiple lockers. */
+            Locker[] destLockers = new Locker[3];
+            destLockers[0] = txn3;
+            destLockers[1] = txn4;
+            destLockers[2] = txn5;
+            lock.demote(txn2);
+            lock.transferMultiple(nid, txn2, destLockers, mb, 0);
+            assertFalse(lock.isOwner(txn2, LockType.WRITE));
+            assertFalse(lock.isOwner(txn2, LockType.READ));
+
+            for (int i = 0; i < destLockers.length; i++) {
+                assertTrue(lock.isOwner(destLockers[i], LockType.READ));
+                assertFalse(lock.isOwner(destLockers[i], LockType.WRITE));
+                lock.release(destLockers[i], mb, 0);
+            }
+
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+            txn5.operationEnd();
+        }
+    }
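+
+    /*
+     * For reference, the ownership-transfer sequence exercised above (only
+     * methods actually called in this test are listed):
+     *
+     *     lock.transfer(nid, fromLocker, toLocker, mb, 0); // single dest
+     *     lock.demote(locker);                    // write -> read first
+     *     lock.transferMultiple(nid, fromLocker, destLockers, mb, 0);
+     */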
+}
diff --git a/test/com/sleepycat/je/txn/TwoPCTest.java b/test/com/sleepycat/je/txn/TwoPCTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f109f6b6bb9a2e9ce744d4074567a4faba4b64bb
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TwoPCTest.java
@@ -0,0 +1,215 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TwoPCTest.java,v 1.10.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+
+import javax.transaction.xa.XAResource;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionStats;
+import com.sleepycat.je.XAEnvironment;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogUtils.XidImpl;
+import com.sleepycat.je.util.StringDbt;
+import com.sleepycat.je.util.TestUtils;
+
+/*
+ * Simple 2PC transaction testing.
+ */
+public class TwoPCTest extends TestCase {
+    private File envHome;
+    private XAEnvironment env;
+    private Database db;
+
+    public TwoPCTest()
+        throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new XAEnvironment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        db.close();
+        env.close();
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Basic Two Phase Commit calls.
+     */
+    public void testBasic2PC()
+        throws Throwable {
+
+        try {
+            TransactionStats stats =
+                env.getTransactionStats(TestUtils.FAST_STATS);
+            int numBegins = 2; // 1 for setting up the XA env, 1 for opening the db
+            int numCommits = 2;
+            int numXAPrepares = 0;
+            int numXACommits = 0;
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+
+            Transaction txn = env.beginTransaction(null, null);
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            numBegins++;
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+            assertEquals(1, stats.getNActive());
+
+            XidImpl xid = new XidImpl(1, "TwoPCTest1".getBytes(), null);
+            env.setXATransaction(xid, txn);
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+            assertEquals(1, stats.getNActive());
+
+            StringDbt key = new StringDbt("key");
+            StringDbt data = new StringDbt("data");
+            db.put(txn, key, data);
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+            assertEquals(1, stats.getNActive());
+
+            env.prepare(xid);
+            numXAPrepares++;
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+            assertEquals(1, stats.getNActive());
+
+            env.commit(xid, false);
+            numCommits++;
+            numXACommits++;
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(numXAPrepares, stats.getNXAPrepares());
+            assertEquals(numXACommits, stats.getNXACommits());
+            assertEquals(0, stats.getNActive());
+        } catch (Exception E) {
+            /* Rethrow so a failure is reported instead of being swallowed. */
+            E.printStackTrace();
+            throw E;
+        }
+    }
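+
+    /*
+     * For reference, the XA lifecycle exercised above, in outline (only the
+     * calls actually made by this test are listed):
+     *
+     *     Transaction txn = env.beginTransaction(null, null);
+     *     env.setXATransaction(xid, txn); // associate the txn with an Xid
+     *     db.put(txn, key, data);         // transactional work
+     *     env.prepare(xid);               // phase 1; increments NXAPrepares
+     *     env.commit(xid, false);         // phase 2; increments NCommits
+     *                                     // and NXACommits
+     */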
+
+    /**
+     * Basic readonly-prepare.
+     */
+    public void testROPrepare()
+        throws Throwable {
+
+        try {
+            Transaction txn = env.beginTransaction(null, null);
+            XidImpl xid = new XidImpl(1, "TwoPCTest1".getBytes(), null);
+            env.setXATransaction(xid, txn);
+
+            assertEquals(XAResource.XA_RDONLY, env.prepare(xid));
+        } catch (Exception E) {
+            /* Rethrow so a failure is reported instead of being swallowed. */
+            E.printStackTrace();
+            throw E;
+        }
+    }
+
+    /**
+     * Test calling prepare twice (should throw exception).
+     */
+    public void testTwicePreparedTransaction()
+        throws Throwable {
+
+        Transaction txn = env.beginTransaction(null, null);
+        XidImpl xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+        env.setXATransaction(xid, txn);
+        StringDbt key = new StringDbt("key");
+        StringDbt data = new StringDbt("data");
+        db.put(txn, key, data);
+
+        try {
+            env.prepare(xid);
+            env.prepare(xid);
+            fail("should not be able to prepare twice");
+        } catch (Exception E) {
+            env.commit(xid, false);
+        }
+    }
+
+    /**
+     * Test calling rollback(xid) on an unregistered xa txn.
+     */
+    public void testRollbackNonExistent()
+        throws Throwable {
+
+        Transaction txn = env.beginTransaction(null, null);
+        StringDbt key = new StringDbt("key");
+        StringDbt data = new StringDbt("data");
+        db.put(txn, key, data);
+        XidImpl xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+
+        try {
+            env.rollback(xid);
+            fail("should not be able to call rollback on an unknown xid");
+        } catch (Exception E) {
+            /* Expected: the xid was never registered with the environment. */
+        }
+        txn.abort();
+    }
+
+    /**
+     * Test calling commit(xid) on an unregistered xa txn.
+     */
+    public void testCommitNonExistent()
+        throws Throwable {
+
+        Transaction txn = env.beginTransaction(null, null);
+        StringDbt key = new StringDbt("key");
+        StringDbt data = new StringDbt("data");
+        db.put(txn, key, data);
+        XidImpl xid = new XidImpl(1, "TwoPCTest2".getBytes(), null);
+
+        try {
+            env.commit(xid, false);
+            fail("should not be able to call commit on an unknown xid");
+        } catch (Exception E) {
+            /* Expected: the xid was never registered with the environment. */
+        }
+        txn.abort();
+    }
+}
diff --git a/test/com/sleepycat/je/txn/TxnEndTest.java b/test/com/sleepycat/je/txn/TxnEndTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1dead1e6b15f23e871a417f94642f5680800c47
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TxnEndTest.java
@@ -0,0 +1,688 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnEndTest.java,v 1.74.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Date;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionStats;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/*
+ * Test transaction aborts and commits.
+ */
+public class TxnEndTest extends TestCase {
+    private static final int NUM_DBS = 1;
+    private Environment env;
+    private File envHome;
+    private Database[] dbs;
+    private Cursor[] cursors;
+
+    public TxnEndTest()
+	throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        /*
+         * Run the environment with the INCompressor turned off so we can
+         * check the compressor queue in a deterministic way.
+         */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.
+				 ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("tearDown: " + e);
+            }
+        }
+        env = null;
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    private void createDbs()
+        throws DatabaseException {
+
+        // Make databases
+        dbs = new Database[NUM_DBS];
+        cursors = new Cursor[NUM_DBS];
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        for (int i = 0; i < NUM_DBS; i++) {
+            dbs[i] = env.openDatabase(null, "testDB" + i, dbConfig);
+        }
+    }
+
+    private void closeAll()
+        throws DatabaseException {
+
+        for (int i = 0; i < NUM_DBS; i++) {
+            dbs[i].close();
+        }
+        dbs = null;
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Create cursors with this owning transaction
+     */
+    private void createCursors(Transaction txn)
+        throws DatabaseException {
+
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i] = dbs[i].openCursor(txn, null);
+        }
+    }
+
+    /**
+     * Close the current set of cursors
+     */
+    private void closeCursors()
+        throws DatabaseException {
+
+        for (int i = 0; i < cursors.length; i++) {
+            cursors[i].close();
+        }
+    }
+
+    /**
+     * Insert keys from i = start to i < end using a cursor.
+     */
+    private void cursorInsertData(int start, int end)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < NUM_DBS; i++) {
+            for (int d = start; d < end; d++) {
+                key.setData(TestUtils.getTestArray(d));
+                data.setData(TestUtils.getTestArray(d));
+                cursors[i].put(key, data);
+            }
+        }
+    }
+
+    /**
+     * Insert keys from i = start to i < end using a db.
+     */
+    private void dbInsertData(int start, int end, Transaction txn)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < NUM_DBS; i++) {
+            for (int d = start; d < end; d++) {
+                key.setData(TestUtils.getTestArray(d));
+                data.setData(TestUtils.getTestArray(d));
+                dbs[i].put(txn, key, data);
+            }
+        }
+    }
+
+    /**
+     * Modify keys from i = start to i < end.
+     */
+    private void cursorModifyData(int start, int end, int valueOffset)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < NUM_DBS; i++) {
+            OperationStatus status =
+		cursors[i].getFirst(key, data, LockMode.DEFAULT);
+            for (int d = start; d < end; d++) {
+                assertEquals(OperationStatus.SUCCESS, status);
+                byte[] changedVal =
+                    TestUtils.getTestArray(d + valueOffset);
+                data.setData(changedVal);
+                cursors[i].putCurrent(data);
+                status = cursors[i].getNext(key, data, LockMode.DEFAULT);
+            }
+        }
+    }
+
+    /**
+     * Delete records from i = start; i < end.
+     */
+    private void cursorDeleteData(int start, int end)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+        for (int i = 0; i < NUM_DBS; i++) {
+            for (int d = start; d < end; d++) {
+                byte[] searchValue =
+                    TestUtils.getTestArray(d);
+                key.setData(searchValue);
+                OperationStatus status =
+		    cursors[i].getSearchKey(key, foundData, LockMode.DEFAULT);
+                assertEquals(OperationStatus.SUCCESS, status);
+                assertEquals(OperationStatus.SUCCESS, cursors[i].delete());
+            }
+        }
+    }
+
+    /**
+     * Delete records with a db.
+     */
+    private void dbDeleteData(int start, int end, Transaction txn)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        for (int i = 0; i < NUM_DBS; i++) {
+            for (int d = start; d < end; d++) {
+                byte[] searchValue =
+                    TestUtils.getTestArray(d);
+                key.setData(searchValue);
+                dbs[i].delete(txn, key);
+            }
+        }
+    }
+
+    /**
+     * Check that there are numKeys records in each db, and their value
+     * is i + offset.
+     */
+    private void verifyData(int numKeys, int valueOffset)
+        throws DatabaseException {
+
+        for (int i = 0; i < NUM_DBS; i++) {
+            /* Run verify */
+            DatabaseImpl dbImpl = DbInternal.dbGetDatabaseImpl(dbs[i]);
+            assertTrue(dbImpl.verify(new VerifyConfig(),
+                                      dbImpl.getEmptyStats()));
+
+            Cursor verifyCursor =
+		dbs[i].openCursor(null, CursorConfig.READ_UNCOMMITTED);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status =
+		verifyCursor.getFirst(key, data, LockMode.DEFAULT);
+            for (int d = 0; d < numKeys; d++) {
+                assertEquals("key=" + d, OperationStatus.SUCCESS, status);
+                byte[] expected = TestUtils.getTestArray(d + valueOffset);
+                assertTrue(Arrays.equals(expected, key.getData()));
+                assertTrue("Expected= " + TestUtils.dumpByteArray(expected) +
+                           " saw=" + TestUtils.dumpByteArray(data.getData()),
+                           Arrays.equals(expected, data.getData()));
+                status = verifyCursor.getNext(key, data, LockMode.DEFAULT);
+            }
+            // should be the end of this database
+            assertTrue("More data than expected",
+		       (status != OperationStatus.SUCCESS));
+            verifyCursor.close();
+        }
+    }
+
+    /**
+     * Test basic commits, aborts with cursors
+     */
+    public void testBasicCursor()
+        throws Throwable {
+
+        try {
+            int numKeys = 7;
+            createDbs();
+
+            // Insert more data with a user transaction, commit
+            Transaction txn = env.beginTransaction(null, null);
+            createCursors(txn);
+            cursorInsertData(0, numKeys*2);
+            closeCursors();
+            txn.commit();
+            verifyData(numKeys*2, 0);
+
+            // Insert more data, abort, check that data is unchanged
+            txn = env.beginTransaction(null, null);
+            createCursors(txn);
+            cursorInsertData(numKeys*2, numKeys*3);
+            closeCursors();
+            txn.abort();
+            verifyData(numKeys*2, 0);
+
+            /*
+             * Check the IN compressor queue; we should have some number of
+             * BINs on it. If the queue size is 0, check the processed stats
+             * instead; the IN compressor thread may have already woken up and
+             * dealt with the entries.
+             */
+            EnvironmentStats envStat = env.getStats(TestUtils.FAST_STATS);
+            long queueSize = envStat.getInCompQueueSize();
+            assertTrue(queueSize > 0);
+
+            // Modify data, abort, check that data is unchanged
+            txn = env.beginTransaction(null, null);
+            createCursors(txn);
+            cursorModifyData(0, numKeys * 2, 1);
+            closeCursors();
+            txn.abort();
+            verifyData(numKeys*2, 0);
+
+            // Delete data, abort, check that data is still there
+            txn = env.beginTransaction(null, null);
+            createCursors(txn);
+            cursorDeleteData(numKeys+1, numKeys*2);
+            closeCursors();
+            txn.abort();
+            verifyData(numKeys*2, 0);
+            // Check the IN compressor queue; nothing new should have been added.
+            envStat = env.getStats(TestUtils.FAST_STATS);
+            assertEquals(queueSize, envStat.getInCompQueueSize());
+
+            // Delete data, commit, check that data is gone
+            txn = env.beginTransaction(null, null);
+            createCursors(txn);
+            cursorDeleteData(numKeys, numKeys*2);
+            closeCursors();
+            txn.commit();
+            verifyData(numKeys, 0);
+
+            // Check the IN compressor queue; there should be more entries.
+            envStat = env.getStats(TestUtils.FAST_STATS);
+            assertTrue(envStat.getInCompQueueSize() > queueSize);
+
+            closeAll();
+
+        } catch (Throwable t) {
+            // print stacktrace before attempt to run tearDown
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test that a transaction cannot commit while its cursors are still open.
+     */
+    public void testTxnClose()
+        throws DatabaseException {
+
+        createDbs();
+        Transaction txn = env.beginTransaction(null, null);
+        createCursors(txn);
+        try {
+            txn.commit();
+            fail("Commit should fail");
+        } catch (DatabaseException e) {
+        }
+	closeCursors();
+        closeAll();
+    }
+
+    class CascadingAbortTestJUnitThread extends JUnitThread {
+	Transaction txn = null;
+	Database db = null;
+
+	CascadingAbortTestJUnitThread(Transaction txn,
+				      Database db) {
+	    super("testCascadingAborts");
+	    this.txn = txn;
+	    this.db = db;
+	}
+    }
+
+    /**
+     * Test cascading aborts in the face of deletes.
+     * [work in progress: cwl 1/15/04]
+     */
+    public void xtestCascadingAborts()
+	throws Throwable {
+
+        Database db = null;
+
+	try {
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+	    db = env.openDatabase(null, "testDB", dbConfig);
+
+	    DatabaseEntry key = new DatabaseEntry();
+	    DatabaseEntry data = new DatabaseEntry();
+
+	    Transaction txn = env.beginTransaction(null, null);
+	    key.setData("abb".getBytes());
+	    data.setData("def".getBytes());
+	    //db.put(txn, key, data, null);
+	    key.setData("abc".getBytes());
+	    data.setData("def".getBytes());
+	    db.put(txn, key, data);
+	    txn.commit();
+
+	    //DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+
+	    Transaction txn1 = env.beginTransaction(null, null);
+	    Transaction txn2 = env.beginTransaction(null, null);
+
+	    CascadingAbortTestJUnitThread tester1 =
+		new CascadingAbortTestJUnitThread(txn2, db) {
+		    public void testBody()
+			throws Throwable {
+
+			Cursor c = db.openCursor(txn, null);
+			DatabaseEntry data = new DatabaseEntry();
+			try {
+			    Thread.yield();
+			    DatabaseEntry key = new DatabaseEntry();
+			    key.setData("abc".getBytes());
+			    OperationStatus status =
+				c.getSearchKeyRange(key, data, LockMode.DEFAULT);
+			    assertEquals(status, OperationStatus.SUCCESS);
+			    status = c.delete();
+			    assertEquals(status, OperationStatus.SUCCESS);
+			} catch (Throwable T) {
+			    T.printStackTrace();
+			} finally {
+			    c.close();
+			}
+		    }
+		};
+
+	    tester1.start();
+	    Thread.yield();
+	    key.setData("abc".getBytes());
+	    OperationStatus status = db.delete(txn1, key);
+	    assertEquals(OperationStatus.SUCCESS, status);
+
+	    txn1.abort();
+	    Thread.yield();
+
+	    txn2.abort();
+	    tester1.finishTest();
+
+	    //DbInternal.dbGetDatabaseImpl(db).getTree().dump();
+
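+	    /*
+	     * Disabled block: re-opens the environment to examine the on-disk
+	     * state; presumably kept here for manual debugging of this
+	     * work-in-progress test.
+	     */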
+	    if (false) {
+		db.close();
+		env.close();
+		EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+		envConfig.setTransactional(true);
+		envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+					 "6");
+		envConfig.setConfigParam(EnvironmentParams.
+					 ENV_RUN_INCOMPRESSOR.
+					 getName(),
+					 "false");
+		envConfig.setAllowCreate(true);
+		env = new Environment(envHome, envConfig);
+		db = env.openDatabase(null, "testDB", dbConfig);
+	    }
+
+	    txn = env.beginTransaction(null, null);
+	    System.out.println(db.getSearchBoth(txn, key, data,
+                                                LockMode.DEFAULT));
+	    txn.commit();
+	} catch (Throwable T) {
+	    T.printStackTrace();
+	} finally {
+	    db.close();
+	}
+    }
+
+    /**
+     * Test use through db.
+     */
+    public void testBasicDb()
+        throws Throwable {
+
+        try {
+            TransactionStats stats =
+                env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(0, stats.getNAborts());
+            int initialCommits = 1; // 1 commit for adding the UP database
+            assertEquals(initialCommits, stats.getNCommits());
+
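+            /*
+             * Exercise the TransactionStats setters, getters and toString()
+             * directly; these values are local to this stats object and do
+             * not affect the environment.
+             */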
+	    long locale = new Date().getTime();
+	    TransactionStats.Active[] at = new TransactionStats.Active[4];
+	
+	    for(int i = 0; i < 4; i++) {
+                at[i] = new TransactionStats.Active("TransactionStatForTest",
+			                     	    i, i - 1);
+	    }
+
+	    stats.setActiveTxns(at);
+	    stats.setLastCheckpointTime(locale);
+	    stats.setLastTxnId(3);
+	    stats.setNAborts(12);
+	    stats.setNXAAborts(15);
+	    stats.setNActive(20);
+	    stats.setNBegins(25);
+	    stats.setNCommits(1);
+	    stats.setNXACommits(30);
+	    stats.setNXAPrepares(20);
+
+	    TransactionStats.Active[] at1 = stats.getActiveTxns();
+	
+	    for(int i = 0; i < 4; i++) {
+		assertEquals("TransactionStatForTest", at1[i].getName());
+		assertEquals(i, at1[i].getId());
+		assertEquals(i - 1, at1[i].getParentId());
+		at1[i].toString();
+	    }
+	    assertEquals(locale, stats.getLastCheckpointTime());
+	    assertEquals(3, stats.getLastTxnId());
+	    assertEquals(12, stats.getNAborts());
+	    assertEquals(15, stats.getNXAAborts());
+	    assertEquals(20, stats.getNActive());
+	    assertEquals(25, stats.getNBegins());
+	    assertEquals(1, stats.getNCommits());
+	    assertEquals(30, stats.getNXACommits());
+	    assertEquals(20, stats.getNXAPrepares());
+	    stats.toString();
+	
+	    stats.setActiveTxns(null);
+	    stats.toString();
+
+            int numKeys = 7;
+            createDbs();
+
+            // Insert data with autocommit
+            dbInsertData(0, numKeys, null);
+            verifyData(numKeys, 0);
+
+            // Insert data with a txn
+            Transaction txn = env.beginTransaction(null, null);
+            dbInsertData(numKeys, numKeys*2, txn);
+            txn.commit();
+            verifyData(numKeys*2, 0);
+
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(0, stats.getNAborts());
+            assertEquals((initialCommits + 1 +  // 1 explicit commit above
+                          (1 * NUM_DBS) +       // 1 per create/open
+                          (numKeys*NUM_DBS)),   // 1 per record, using autotxn
+			 stats.getNCommits());
+
+            // delete data with a txn, abort
+            txn = env.beginTransaction(null, null);
+            dbDeleteData(numKeys, numKeys * 2, txn);
+            verifyData(numKeys, 0);  // verify w/dirty read
+            txn.abort();
+
+            closeAll();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test TransactionStats.
+     */
+    public void testTxnStats()
+        throws Throwable {
+
+        try {
+            TransactionStats stats =
+                env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(0, stats.getNAborts());
+            int numBegins = 1; // 1 begin for adding the UP database
+            int numCommits = 1; // 1 commit for adding the UP database
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+
+            int numKeys = 7;
+            createDbs();
+            numBegins += NUM_DBS; // 1 begin per database
+            numCommits += NUM_DBS; // 1 commit per database
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+
+            /* Insert data with autocommit. */
+            dbInsertData(0, numKeys, null);
+            numBegins += (numKeys * NUM_DBS);
+            numCommits += (numKeys * NUM_DBS);
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            verifyData(numKeys, 0);
+
+            /* Insert data with a txn. */
+            Transaction txn = env.beginTransaction(null, null);
+            numBegins++;
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(1, stats.getNActive());
+            dbInsertData(numKeys, numKeys*2, txn);
+            txn.commit();
+            numCommits++;
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(0, stats.getNActive());
+            verifyData(numKeys*2, 0);
+
+            /* Delete data with a txn, abort. */
+            txn = env.beginTransaction(null, null);
+            numBegins++;
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(1, stats.getNActive());
+
+            dbDeleteData(numKeys, numKeys * 2, txn);
+            verifyData(numKeys, 0);  // verify w/dirty read
+            txn.abort();
+            stats = env.getTransactionStats(TestUtils.FAST_STATS);
+            assertEquals(numBegins, stats.getNBegins());
+            assertEquals(numCommits, stats.getNCommits());
+            assertEquals(1, stats.getNAborts());
+            assertEquals(0, stats.getNActive());
+
+            closeAll();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test db creation and deletion.
+     */
+    public void testDbCreation()
+        throws DatabaseException {
+
+        Transaction txnA = env.beginTransaction(null, null);
+        Transaction txnB = env.beginTransaction(null, null);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Database dbA =
+	    env.openDatabase(txnA, "foo", dbConfig);
+
+        // Try to see this database with another txn -- we should not see it
+
+        dbConfig.setAllowCreate(false);
+
+        try {
+            txnB.setLockTimeout(1000);
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Shouldn't be able to open foo");
+        } catch (DatabaseException e) {
+        }
+	/* txnB must be aborted since openDatabase timed out. */
+	txnB.abort();
+
+        // Open this database with the same txn and another handle
+        Database dbC =
+	    env.openDatabase(txnA, "foo", dbConfig);
+
+        // Now commit txnA; txnB should then be able to open this.
+        txnA.commit();
+	txnB = env.beginTransaction(null, null);
+        Database dbB =
+	    env.openDatabase(txnB, "foo", dbConfig);
+        txnB.commit();
+
+        // XXX, test db deletion
+
+        dbA.close();
+        dbB.close();
+        dbC.close();
+    }
+
+    /* Test that the transaction is unusable after a close. */
+    public void testClose()
+        throws DatabaseException {
+
+        Transaction txnA = env.beginTransaction(null, null);
+        txnA.commit();
+
+        try {
+            env.openDatabase(txnA, "foo", null);
+            fail("Should not be able to use a closed exception");
+        } catch (DatabaseException expected) {
+        }
+    }
+
+}
diff --git a/test/com/sleepycat/je/txn/TxnFSyncTest.java b/test/com/sleepycat/je/txn/TxnFSyncTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0258cc5ccf6865a960bade0fe30a1529f70ddeca
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TxnFSyncTest.java
@@ -0,0 +1,124 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnFSyncTest.java,v 1.15.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbEnvPool;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.TxnTestCase;
+
+/*
+ * Make sure that transactions sync to disk. Mimic a crash by neither closing
+ * the environment nor explicitly flushing the log manager. If we haven't
+ * properly written and synced data to disk, we'll have unflushed data and
+ * we won't find the expected data in the log.
+ *
+ * Note that this test is run with the TxnTestCase framework and will
+ * be exercised with app-created and autocommit txns.
+ */
+
+public class TxnFSyncTest extends TxnTestCase {
+
+    private static final int NUM_RECS = 5;
+
+    private static EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+    static {
+        envConfig.setAllowCreate(true);
+        setupEnvConfig(envConfig);
+    }
+
+    private static void setupEnvConfig(EnvironmentConfig envConfig) {
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(
+            EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+    }
+
+    public static Test suite() {
+        /* Run these tests with user and autocommit txns. */
+        return txnTestSuite(TxnFSyncTest.class,
+                            envConfig,
+                            new String[] {TxnTestCase.TXN_USER,
+                                          TxnTestCase.TXN_AUTO});
+    }
+
+    public void testFSyncButNoClose()
+        throws Exception {
+
+        try {
+            /* Create a database. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(isTransactional);
+            dbConfig.setAllowCreate(true);
+            Transaction txn = txnBegin();
+            Database db = env.openDatabase(txn, "foo", dbConfig);
+
+            /* Insert data. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = 0; i < NUM_RECS; i++) {
+                Integer val = new Integer(i);
+                key.setData(val.toString().getBytes());
+                data.setData(val.toString().getBytes());
+
+                assertEquals(OperationStatus.SUCCESS,
+                             db.putNoOverwrite(txn, key, data));
+            }
+            txnCommit(txn);
+
+            /*
+             * Now throw away this environment WITHOUT flushing the log
+             * manager. We do need to release the environment file lock
+             * and all file handles so we can recover in this test and
+             * run repeated test cases within this one test program.
+             */
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            envImpl.getFileManager().clear(); // release file handles
+            envImpl.getFileManager().close(); // release file lock
+            env = null;
+            DbEnvPool.getInstance().clear();
+
+            /*
+             * Open the environment and database again. The database should
+             * exist.
+             */
+            EnvironmentConfig envConfig2 = TestUtils.initEnvConfig();
+            setupEnvConfig(envConfig2);
+            env = new Environment(envHome, envConfig2);
+            dbConfig.setAllowCreate(false);
+            db = env.openDatabase(null, "foo", dbConfig);
+
+            /* Read all the data. */
+            for (int i = 0; i < NUM_RECS; i++) {
+                Integer val = new Integer(i);
+                key.setData(val.toString().getBytes());
+
+                assertEquals(OperationStatus.SUCCESS,
+                             db.get(null, key, data, LockMode.DEFAULT));
+                /* add test of data. */
+            }
+            db.close();
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+}
+
diff --git a/test/com/sleepycat/je/txn/TxnMemoryTest.java b/test/com/sleepycat/je/txn/TxnMemoryTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..629e97178c3e0c0c6fcd3c86bdf0adba3a1edd43
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TxnMemoryTest.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnMemoryTest.java,v 1.17.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.util.TestUtils;
+
+public class TxnMemoryTest extends TestCase {
+    private static final boolean DEBUG = false;
+    private static final String DB_NAME = "foo";
+
+    private static final String LOCK_AUTOTXN = "lock-autotxn";
+    private static final String LOCK_USERTXN  = "lock-usertxn";
+    private static final String LOCK_NOTXN  = "lock-notxn";
+    private static final String[] LOCK_MODE = {LOCK_AUTOTXN,
+                                               LOCK_USERTXN,
+                                               LOCK_NOTXN};
+    private static final String COMMIT = "commit";
+    private static final String ABORT = "abort";
+    private static final String[] END_MODE = {COMMIT, ABORT};
+
+    private File envHome;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private MemoryBudget mb;
+    private Database db;
+    private DatabaseEntry keyEntry = new DatabaseEntry();
+    private DatabaseEntry dataEntry = new DatabaseEntry();
+    private String lockMode;
+    private String endMode;
+
+    private long beforeAction;
+    private long afterTxnsCreated;
+    private long afterAction;
+    private Transaction[] txns;
+
+    private int numTxns = 2;
+    private int numRecordsPerTxn = 30;
+
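+    /**
+     * Builds a suite that runs every test method once for each combination
+     * of lock mode (autotxn, user txn, no txn) and end mode (commit, abort).
+     */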
+    public static Test suite() {
+        TestSuite allTests = new TestSuite();
+        for (int i = 0; i < LOCK_MODE.length; i += 1) {
+            for (int eMode = 0; eMode < END_MODE.length; eMode ++) {
+                TestSuite suite = new TestSuite(TxnMemoryTest.class);
+                Enumeration e = suite.tests();
+                while (e.hasMoreElements()) {
+                    TxnMemoryTest test = (TxnMemoryTest) e.nextElement();
+                    test.init(LOCK_MODE[i], END_MODE[eMode]);
+                    allTests.addTest(test);
+                }
+            }
+        }
+        return allTests;
+    }
+
+    public TxnMemoryTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    private void init(String lockMode, String endMode) {
+        this.lockMode = lockMode;
+        this.endMode = endMode;
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
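+        /*
+         * Zero the accumulation limits so memory budget updates are applied
+         * immediately rather than batched, keeping the memory usage checks
+         * in this test deterministic.
+         */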
+	IN.ACCUMULATED_LIMIT = 0;
+	Txn.ACCUMULATED_LIMIT = 0;
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, FileManager.DEL_SUFFIX);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(lockMode + '/' + endMode + ":" + getName());
+
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        try {
+            TestUtils.removeLogFiles("tearDown", envHome, true);
+            TestUtils.removeFiles("tearDown", envHome, FileManager.DEL_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+
+        db = null;
+        env = null;
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig config = TestUtils.initEnvConfig();
+
+        /*
+         * ReadCommitted isolation is not allowed by this test because we
+         * expect no locks/memory to be freed when using a transaction.
+         */
+        DbInternal.setTxnReadCommitted(config, false);
+
+        /* Cleaner detail tracking adds to the memory budget; disable it. */
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+
+        config.setTransactional(true);
+        config.setAllowCreate(true);
+        env = new Environment(envHome, config);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+        mb = envImpl.getMemoryBudget();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(!lockMode.equals(LOCK_NOTXN));
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv(boolean doCheckpoint)
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Insert and then update some records. Measure memory usage at different
+     * points in this sequence, asserting that the memory usage count is
+     * properly decremented.
+     */
+    public void testWriteLocks()
+        throws DatabaseException {
+
+        loadData();
+
+        /*
+         * Now update the database transactionally. This should not change
+         * the node related memory, but should add txn related cache
+         * consumption. If this is a user transaction, we should
+         * hold locks and consume more memory.
+         */
+        for (int t = 0; t < numTxns; t++) {
+            for (int i = 0; i < numRecordsPerTxn; i++) {
+                int value = i + (t*numRecordsPerTxn);
+                IntegerBinding.intToEntry(value, keyEntry);
+                IntegerBinding.intToEntry(value+1, dataEntry);
+                assertEquals(db.put(txns[t], keyEntry, dataEntry),
+                             OperationStatus.SUCCESS);
+            }
+        }
+        afterAction = mb.getLockMemoryUsage();
+
+        closeTxns(true);
+    }
+
+    /**
+     * Insert and then scan some records. Measure memory usage at different
+     * points in this sequence, asserting that the memory usage count is
+     * properly decremented.
+     */
+    public void testReadLocks()
+        throws DatabaseException {
+
+        loadData();
+
+        /*
+         * Now scan the database. Make sure all locking overhead is
+         * released.
+         */
+        for (int t = 0; t < numTxns; t++) {
+            Cursor c = db.openCursor(txns[t], null);
+            while (c.getNext(keyEntry, dataEntry, null) ==
+                   OperationStatus.SUCCESS) {
+            }
+            c.close();
+        }
+        afterAction = mb.getLockMemoryUsage();
+
+        closeTxns(false);
+    }
+
+    private void loadData()
+        throws DatabaseException {
+
+        openEnv();
+
+        /* Build up a database to establish a given cache size. */
+        for (int t = 0; t < numTxns; t++) {
+            for (int i = 0; i < numRecordsPerTxn; i++) {
+
+                int value = i + (t*numRecordsPerTxn);
+                IntegerBinding.intToEntry(value, keyEntry);
+                IntegerBinding.intToEntry(value, dataEntry);
+                assertEquals(db.put(null, keyEntry, dataEntry),
+                             OperationStatus.SUCCESS);
+            }
+        }
+
+        beforeAction = mb.getLockMemoryUsage();
+
+        /* Make some transactions. */
+        txns = new Transaction[numTxns];
+        if (lockMode.equals(LOCK_USERTXN)) {
+            for (int t = 0; t < numTxns; t++) {
+                txns[t] = env.beginTransaction(null, null);
+            }
+
+            afterTxnsCreated = mb.getLockMemoryUsage();
+            assertTrue( "afterTxns=" + afterTxnsCreated +
+                        "beforeUpdate=" + beforeAction,
+                        (afterTxnsCreated > beforeAction));
+        }
+    }
+
+    private void closeTxns(boolean writesDone)
+        throws DatabaseException {
+
+	assertTrue(afterAction > afterTxnsCreated);
+
+        /*
+         * If this is not a user transactional lock, we should be done
+         * with all locking overhead. If it is a user transaction, we
+         * only release memory after locks are released at commit or
+         * abort.
+         */
+        if (lockMode.equals(LOCK_USERTXN)) {
+
+            /*
+             * Note: expectedLockUsage is annoyingly fragile. If we change
+             * the lock implementation, this may not be the right number
+             * to check.
+             */
+            long expectedLockUsage =
+                   (numRecordsPerTxn * numTxns *
+		    MemoryBudget.THINLOCKIMPL_OVERHEAD);
+
+            assertTrue((afterAction - afterTxnsCreated) >= expectedLockUsage);
+
+            for (int t = 0; t < numTxns; t++) {
+                Transaction txn = txns[t];
+                if (endMode.equals(COMMIT)) {
+                    txn.commit();
+                } else {
+                    txn.abort();
+                }
+            }
+
+            long afterTxnEnd = mb.getLockMemoryUsage();
+
+            assertTrue("lockMode=" + lockMode +
+		       " endMode=" + endMode +
+		       " afterTxnEnd=" + afterTxnEnd +
+		       " beforeAction=" + beforeAction,
+		       (afterTxnEnd <= beforeAction));
+        }
+        if (DEBUG) {
+            System.out.println("afterUpdate = " + afterAction +
+                               " before=" + beforeAction);
+        }
+
+        closeEnv(true);
+    }
+}
diff --git a/test/com/sleepycat/je/txn/TxnTest.java b/test/com/sleepycat/je/txn/TxnTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..302922252307a6ca904931cb4cb588d40c86b246
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TxnTest.java
@@ -0,0 +1,931 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnTest.java,v 1.74.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockNotGrantedException;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+
+/*
+ * Simple transaction testing
+ */
+public class TxnTest extends TestCase {
+    private File envHome;
+    private Environment env;
+    private Database db;
+
+    public TxnTest()
+        throws DatabaseException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException, DatabaseException {
+
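+        /*
+         * Zero the accumulation limits so memory budget updates are applied
+         * immediately rather than batched, keeping the memory usage checks
+         * in this test deterministic.
+         */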
+	IN.ACCUMULATED_LIMIT = 0;
+	Txn.ACCUMULATED_LIMIT = 0;
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+    }
+
+    public void tearDown()
+        throws IOException, DatabaseException {
+
+        db.close();
+        env.close();
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+    }
+
+    /**
+     * Test transaction locking and releasing.
+     */
+    public void testBasicLocking()
+        throws Throwable {
+
+        try {
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            LN ln = new LN(new byte[0], envImpl, false);
+
+            /*
+             * Make a null txn that will lock. Take a lock and then end the
+             * operation.
+             */
+            MemoryBudget mb = envImpl.getMemoryBudget();
+
+            long beforeLock = mb.getCacheMemoryUsage();
+            Locker nullTxn = BasicLocker.createBasicLocker(envImpl);
+
+            LockGrantType lockGrant = nullTxn.lock
+                (ln.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.NEW, lockGrant);
+            long afterLock = mb.getCacheMemoryUsage();
+            checkHeldLocks(nullTxn, 1, 0);
+
+            nullTxn.releaseNonTxnLocks();
+            long afterRelease = mb.getCacheMemoryUsage();
+            checkHeldLocks(nullTxn, 0, 0);
+            checkCacheUsage(beforeLock, afterLock, afterRelease,
+                            LockManager.TOTAL_THINLOCKIMPL_OVERHEAD);
+
+            /* Take a lock, release it. */
+            beforeLock = mb.getCacheMemoryUsage();
+            lockGrant = nullTxn.lock
+                (ln.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            afterLock = mb.getCacheMemoryUsage();
+            assertEquals(LockGrantType.NEW, lockGrant);
+            checkHeldLocks(nullTxn, 1, 0);
+
+            nullTxn.releaseLock(ln.getNodeId());
+            checkHeldLocks(nullTxn, 0, 0);
+            afterRelease = mb.getCacheMemoryUsage();
+            checkCacheUsage(beforeLock, afterLock, afterRelease,
+                            LockManager.TOTAL_THINLOCKIMPL_OVERHEAD);
+
+            /*
+             * Make a user transaction, check lock and release.
+             */
+            beforeLock = mb.getCacheMemoryUsage();
+            Txn userTxn = Txn.createTxn(envImpl, new TransactionConfig(),
+					ReplicationContext.NO_REPLICATE);
+            lockGrant = userTxn.lock
+                (ln.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            afterLock = mb.getCacheMemoryUsage();
+
+            assertEquals(LockGrantType.NEW, lockGrant);
+            checkHeldLocks(userTxn, 1, 0);
+
+            /* Try demoting, nothing should happen. */
+            try {
+                userTxn.demoteLock(ln.getNodeId());
+                fail("exception not thrown on phoney demoteLock");
+            } catch (AssertionError e){
+            }
+            checkHeldLocks(userTxn, 1, 0);
+            long afterDemotion = mb.getCacheMemoryUsage();
+            assertEquals(afterLock, afterDemotion);
+
+            /* Make it a write lock, then demote. */
+            lockGrant = userTxn.lock
+                (ln.getNodeId(), LockType.WRITE, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.PROMOTION, lockGrant);
+            long afterWriteLock = mb.getCacheMemoryUsage();
+            assertTrue(afterWriteLock > afterLock);
+            assertTrue(afterLock > beforeLock);
+
+            checkHeldLocks(userTxn, 0, 1);
+            userTxn.demoteLock(ln.getNodeId());
+            checkHeldLocks(userTxn, 1, 0);
+
+            /* Shouldn't release at operation end. */
+            userTxn.operationEnd();
+            checkHeldLocks(userTxn, 1, 0);
+
+            userTxn.releaseLock(ln.getNodeId());
+            checkHeldLocks(userTxn, 0, 0);
+            userTxn.commit(TransactionConfig.SYNC);
+            afterRelease = mb.getCacheMemoryUsage();
+            assertTrue(afterLock > beforeLock);
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test lock mutation.
+     */
+    public void testLockMutation()
+        throws Throwable {
+
+        try {
+
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            LN ln = new LN(new byte[0], envImpl, false);
+
+            MemoryBudget mb = envImpl.getMemoryBudget();
+
+            long beforeLock = mb.getCacheMemoryUsage();
+            Txn userTxn1 = Txn.createTxn(envImpl, new TransactionConfig());
+            Txn userTxn2 = Txn.createTxn(envImpl, new TransactionConfig());
+
+	    LockStats envStats = env.getLockStats(null);
+	    assertEquals(1, envStats.getNTotalLocks());
+            LockGrantType lockGrant1 = userTxn1.lock
+                (ln.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.NEW, lockGrant1);
+            checkHeldLocks(userTxn1, 1, 0);
+	    envStats = env.getLockStats(null);
+	    assertEquals(2, envStats.getNTotalLocks());
+
+	    try {
+		    userTxn2.lock(ln.getNodeId(), LockType.WRITE, false,
+		                  DbInternal.dbGetDatabaseImpl(db)).getLockGrant();
+	    } catch (DeadlockException DE) {
+		// ok
+	    }
+	    envStats = env.getLockStats(null);
+	    assertEquals(2, envStats.getNTotalLocks());
+            checkHeldLocks(userTxn2, 0, 0);
+
+            userTxn1.commit();
+            userTxn2.abort(false);
+
+            long afterRelease = mb.getCacheMemoryUsage();
+            assertEquals(beforeLock, afterRelease);
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
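+    /**
+     * Asserts that the locker currently holds the given numbers of read and
+     * write locks.
+     */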
+    private void checkHeldLocks(Locker txn,
+				int numReadLocks,
+				int numWriteLocks)
+        throws DatabaseException {
+
+        LockStats stat = txn.collectStats(new LockStats());
+        assertEquals(numReadLocks, stat.getNReadLocks());
+        assertEquals(numWriteLocks, stat.getNWriteLocks());
+    }
+
+    /**
+     * Test transaction commit, from the locking point of view.
+     */
+    public void testCommit()
+        throws Throwable {
+
+        try {
+            EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+            LN ln1 = new LN(new byte[0], envImpl, false);
+            LN ln2 = new LN(new byte[0], envImpl, false);
+
+            Txn userTxn = Txn.createTxn(envImpl, new TransactionConfig(),
+					ReplicationContext.NO_REPLICATE);
+
+            /* Get read lock 1. */
+            LockGrantType lockGrant = userTxn.lock
+                (ln1.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.NEW, lockGrant);
+            checkHeldLocks(userTxn, 1, 0);
+
+            /* Get read lock 2. */
+            lockGrant = userTxn.lock
+                (ln2.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.NEW, lockGrant);
+            checkHeldLocks(userTxn, 2, 0);
+
+            /* Upgrade read lock 2 to a write. */
+            lockGrant = userTxn.lock
+                (ln2.getNodeId(), LockType.WRITE, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.PROMOTION, lockGrant);
+            checkHeldLocks(userTxn, 1, 1);
+
+            /* Read lock 1 again, shouldn't increase count. */
+            lockGrant = userTxn.lock
+                (ln1.getNodeId(), LockType.READ, false,
+                 DbInternal.dbGetDatabaseImpl(db)).
+		getLockGrant();
+            assertEquals(LockGrantType.EXISTING, lockGrant);
+            checkHeldLocks(userTxn, 1, 1);
+
+
+            /* 
+             * The commit won't actually write a log record if this 
+             * transaction has never done an update, so fake it out and simulate
+             * a write.
+             */
+            userTxn.addLogInfo(DbLsn.makeLsn(0, 100));
+            long commitLsn = userTxn.commit(TransactionConfig.SYNC);
+            checkHeldLocks(userTxn, 0, 0);
+
+            TxnCommit commitRecord =
+                (TxnCommit) envImpl.getLogManager().get(commitLsn);
+
+            assertEquals(userTxn.getId(), commitRecord.getId());
+            assertEquals(userTxn.getLastLsn(), commitRecord.getLastLsn());
+        } catch (Throwable t) {
+            /* Print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Make sure an abort never tries to split the tree.
+     */
+    public void testAbortNoSplit()
+        throws Throwable {
+
+        try {
+            Transaction txn = env.beginTransaction(null, null);
+
+            DatabaseEntry keyDbt = new DatabaseEntry();
+            DatabaseEntry dataDbt = new DatabaseEntry();
+            dataDbt.setData(new byte[1]);
+
+            /* Insert enough data so that the tree is ripe for a split. */
+            int numForSplit = 25;
+            for (int i = 0; i < numForSplit; i++) {
+                keyDbt.setData(TestUtils.getTestArray(i));
+                db.put(txn, keyDbt, dataDbt);
+            }
+
+            /* Check that we're ready for a split. */
+            DatabaseImpl database = DbInternal.dbGetDatabaseImpl(db);
+            CheckReadyToSplit splitChecker = new CheckReadyToSplit(database);
+            database.getTree().withRootLatchedShared(splitChecker);
+            assertTrue(splitChecker.getReadyToSplit());
+
+            /*
+             * Make another txn that will get a read lock on the map
+             * LSN. Then abort the first txn. It shouldn't try to do a
+             * split, if it does, we'll run into the
+             * no-latches-while-locking check.
+             */
+            Transaction txnSpoiler = env.beginTransaction(null, null);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            Database dbSpoiler = env.openDatabase(txnSpoiler, "foo", dbConfig);
+
+            txn.abort();
+
+            /*
+             * The database should be empty
+             */
+            Cursor cursor = dbSpoiler.openCursor(txnSpoiler, null);
+
+            assertTrue(cursor.getFirst(keyDbt, dataDbt, LockMode.DEFAULT) !=
+                       OperationStatus.SUCCESS);
+            cursor.close();
+            txnSpoiler.abort();
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testTransactionName()
+        throws Throwable {
+
+        try {
+            Transaction txn = env.beginTransaction(null, null);
+	    txn.setName("blort");
+	    assertEquals("blort", txn.getName());
+            txn.abort();
+
+            /*
+             * [#14349] Make sure the txn is printable after closing. We
+             * once had a NullPointerException.
+             */
+            txn.toString();
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test all combinations of sync, nosync, and writeNoSync for txn
+     * commits.
+     */
+
+    /* SyncCombo expresses all the combinations of txn sync properties. */
+    private static class SyncCombo {
+        private boolean envNoSync;
+        private boolean envWriteNoSync;
+        private boolean txnNoSync;
+        private boolean txnWriteNoSync;
+        private boolean txnSync;
+        boolean expectSync;
+        boolean expectWrite;
+
+        SyncCombo(int envWriteNoSync,
+                  int envNoSync,
+                  int txnSync,
+                  int txnWriteNoSync,
+                  int txnNoSync,
+                  boolean expectSync,
+                  boolean expectWrite) {
+            this.envNoSync = (envNoSync != 0);
+            this.envWriteNoSync = (envWriteNoSync != 0);
+            this.txnNoSync = (txnNoSync != 0);
+            this.txnWriteNoSync = (txnWriteNoSync != 0);
+            this.txnSync = (txnSync != 0);
+            this.expectSync = expectSync;
+            this.expectWrite = expectWrite;
+        }
+
+        TransactionConfig getTxnConfig() {
+            TransactionConfig txnConfig = new TransactionConfig();
+            txnConfig.setSync(txnSync);
+            txnConfig.setWriteNoSync(txnWriteNoSync);
+            txnConfig.setNoSync(txnNoSync);
+            return txnConfig;
+        }
+
+        void setEnvironmentMutableConfig(Environment env)
+            throws DatabaseException {
+            EnvironmentMutableConfig config = env.getMutableConfig();
+            config.setTxnNoSync(envNoSync);
+            config.setTxnWriteNoSync(envWriteNoSync);
+            env.setMutableConfig(config);
+        }
+    }
+
+    public void testSyncCombo()
+        throws Throwable {
+
+        RandomAccessFile logFile =
+            new RandomAccessFile(new File(envHome, "00000000.jdb"), "r");
+        try {
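+            /*
+             * The expectations below reflect the intended precedence: any
+             * txn-level setting overrides the env-level settings, and within
+             * a level Sync takes precedence over WriteNoSync, which takes
+             * precedence over NoSync.
+             */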
+            SyncCombo[] testCombinations = {
+            /*            Env    Env    Txn    Txn    Txn    Expect Expect
+             *            WrNoSy NoSy   Sync  WrNoSy  NoSyc  Sync   Write */
+            new SyncCombo(  0,     0,     0,     0,     0,    true,  true),
+            new SyncCombo(  0,     0,     0,     0,     1,   false, false),
+            new SyncCombo(  0,     0,     0,     1,     0,   false,  true),
+            new SyncCombo(  0,     0,     0,     1,     1,   false,  true),
+            new SyncCombo(  0,     0,     1,     0,     0,    true,  true),
+            new SyncCombo(  0,     0,     1,     0,     1,    true,  true),
+            new SyncCombo(  0,     0,     1,     1,     0,    true,  true),
+            new SyncCombo(  0,     0,     1,     1,     1,    true,  true),
+            new SyncCombo(  0,     1,     0,     0,     0,   false, false),
+            new SyncCombo(  0,     1,     0,     0,     1,   false, false),
+            new SyncCombo(  0,     1,     0,     1,     0,   false,  true),
+            new SyncCombo(  0,     1,     0,     1,     1,   false,  true),
+            new SyncCombo(  0,     1,     1,     0,     0,    true,  true),
+            new SyncCombo(  0,     1,     1,     0,     1,    true,  true),
+            new SyncCombo(  0,     1,     1,     1,     0,    true,  true),
+            new SyncCombo(  0,     1,     1,     1,     1,    true,  true),
+            new SyncCombo(  1,     0,     0,     0,     0,   false,  true),
+            new SyncCombo(  1,     0,     0,     0,     1,   false, false),
+            new SyncCombo(  1,     0,     0,     1,     0,   false,  true),
+            new SyncCombo(  1,     0,     0,     1,     1,   false,  true),
+            new SyncCombo(  1,     0,     1,     0,     0,    true,  true),
+            new SyncCombo(  1,     0,     1,     0,     1,    true,  true),
+            new SyncCombo(  1,     0,     1,     1,     0,    true,  true),
+            new SyncCombo(  1,     0,     1,     1,     1,    true,  true),
+            new SyncCombo(  1,     1,     0,     0,     0,   false,  true),
+            new SyncCombo(  1,     1,     0,     0,     1,   false, false),
+            new SyncCombo(  1,     1,     0,     1,     0,   false,  true),
+            new SyncCombo(  1,     1,     0,     1,     1,   false,  true),
+            new SyncCombo(  1,     1,     1,     0,     0,    true,  true),
+            new SyncCombo(  1,     1,     1,     0,     1,    true,  true),
+            new SyncCombo(  1,     1,     1,     1,     0,    true,  true),
+            new SyncCombo(  1,     1,     1,     1,     1,    true,  true)};
+
+            /* envNoSync=false with default env config */
+            assertTrue(!env.getMutableConfig().getTxnNoSync());
+
+            /* envWriteNoSync=false with default env config */
+            assertTrue(!env.getMutableConfig().getTxnWriteNoSync());
+
+            /*
+             * For each combination of settings, call commit and
+             * check that we have the expected sync and log
+             * write. Make sure that commitSync(), commitNoSync always
+             * override all preferences.
+             */
+            for (int i = 0; i < testCombinations.length; i++) {
+                SyncCombo combo = testCombinations[i];
+                TransactionConfig txnConfig = combo.getTxnConfig();
+                combo.setEnvironmentMutableConfig(env);
+                syncExplicit(logFile, txnConfig,
+                             combo.expectSync, combo.expectWrite);
+            }
+
+            SyncCombo[] autoCommitCombinations = {
+            /*            Env    Env    Txn    Txn    Txn    Expect Expect
+             *            WrNoSy NoSy   Sync  WrNoSy  NoSyc  Sync   Write */
+            new SyncCombo(  0,     0,     0,     0,     0,    true,  true),
+            new SyncCombo(  0,     1,     0,     0,     0,   false, false),
+            new SyncCombo(  1,     0,     0,     0,     0,   false,  true),
+            new SyncCombo(  1,     1,     0,     0,     0,   false,  true)};
+
+            for (int i = 0; i < autoCommitCombinations.length; i++) {
+                SyncCombo combo = autoCommitCombinations[i];
+                combo.setEnvironmentMutableConfig(env);
+                syncAutoCommit(logFile, combo.expectSync, combo.expectWrite);
+            }
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        } finally {
+            logFile.close();
+        }
+    }
+
+    enum DurabilityAPI {SYNC_API, DUR_API, DEFAULT_API};
+
+    /*
+     * Returns true if there is mixed-mode usage across the two APIs.
+     */
+    private boolean mixedModeUsage(DurabilityAPI outerAPI,
+                                   DurabilityAPI innerAPI) {
+        if ((innerAPI == DurabilityAPI.DEFAULT_API) ||
+             (outerAPI == DurabilityAPI.DEFAULT_API)){
+            return false;
+        }
+
+        if (innerAPI == outerAPI) {
+            return false;
+        }
+        /* Mix of sync and durability APIs */
+        return true;
+    }
+
+    /*
+     * Does a three-level check at the env, transaction config, and
+     * transaction levels for mixed-mode usage.
+     */
+    boolean mixedModeUsage(DurabilityAPI envAPI,
+                           DurabilityAPI tconfigAPI,
+                           DurabilityAPI transAPI) {
+        DurabilityAPI outerAPI;
+        if (tconfigAPI == DurabilityAPI.DEFAULT_API) {
+            outerAPI = envAPI;
+        } else {
+            outerAPI = tconfigAPI;
+        }
+        return mixedModeUsage(outerAPI, transAPI);
+    }
+
+    /*
+     * Test local mixed mode operations on MutableConfig and TransactionConfig
+     */
+    public void testOneLevelDurabilityComboErrors()
+        throws Throwable {
+
+        EnvironmentMutableConfig config = new EnvironmentMutableConfig();
+        config.setTxnNoSync(true);
+        try {
+            config.setDurability(TransactionConfig.NO_SYNC);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+        config =  new EnvironmentMutableConfig();
+        config.setDurability(TransactionConfig.NO_SYNC);
+        try {
+            config.setTxnNoSync(true);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setNoSync(true);
+        try {
+            txnConfig.setDurability(TransactionConfig.NO_SYNC);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+
+        txnConfig = new TransactionConfig();
+        txnConfig.setDurability(TransactionConfig.NO_SYNC);
+        try {
+            txnConfig.setNoSync(true);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+    }
+
+    /*
+     * Test for exceptions resulting from mixed mode usage.
+     */
+    public void testMultiLevelLocalDurabilityComboErrors()
+        throws Throwable {
+
+        for (DurabilityAPI envAPI: DurabilityAPI.values()) {
+            EnvironmentMutableConfig config =  new EnvironmentMutableConfig();
+            switch (envAPI) {
+                case SYNC_API:
+                    config.setTxnNoSync(true);
+                    break;
+                case DUR_API:
+                    config.setDurability(TransactionConfig.NO_SYNC);
+                    break;
+                case DEFAULT_API:
+                    break;
+            }
+            env.setMutableConfig(config);
+            for (DurabilityAPI tconfigAPI: DurabilityAPI.values()) {
+                TransactionConfig txnConfig = new TransactionConfig();
+                switch (tconfigAPI) {
+                    case SYNC_API:
+                        txnConfig.setNoSync(true);
+                        break;
+                    case DUR_API:
+                        txnConfig.setDurability(TransactionConfig.NO_SYNC);
+                        break;
+                    case DEFAULT_API:
+                        txnConfig = null;
+                        break;
+                    }
+                try {
+                    Transaction txn = env.beginTransaction(null, txnConfig);
+                    txn.abort();
+                    assertFalse(mixedModeUsage(envAPI,tconfigAPI));
+                    for (DurabilityAPI transAPI : DurabilityAPI.values()) {
+                        Transaction t = env.beginTransaction(null, txnConfig);
+                        try {
+                            switch (transAPI) {
+                                case SYNC_API:
+                                    t.commitNoSync();
+                                    break;
+                                case DUR_API:
+                                    t.commit(TransactionConfig.NO_SYNC);
+                                    break;
+                                case DEFAULT_API:
+                                    t.commit();
+                                    break;
+                            }
+                            assertFalse(mixedModeUsage(envAPI,
+                                                       tconfigAPI,
+                                                       transAPI));
+                        } catch (IllegalArgumentException e) {
+                            t.abort();
+                            assertTrue(mixedModeUsage(envAPI,
+                                                      tconfigAPI,
+                                                      transAPI));
+                        }
+                    }
+                } catch (IllegalArgumentException e) {
+                    assertTrue(mixedModeUsage(envAPI,tconfigAPI));
+                }
+            }
+        }
+
+    }
+
+    public void testLocalDurabilityCombo()
+        throws Throwable {
+
+        RandomAccessFile logFile =
+            new RandomAccessFile(new File(envHome, "00000000.jdb"), "r");
+        Durability[] localDurabilities = new Durability[] {
+                    TransactionConfig.SYNC,
+                    TransactionConfig.WRITE_NO_SYNC,
+                    TransactionConfig.NO_SYNC,
+                    null /* Use the default */
+                    };
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+
+        try {
+            for (Durability envDurability : localDurabilities) {
+                EnvironmentMutableConfig config =  env.getMutableConfig();
+                config.setDurability(envDurability);
+                env.setMutableConfig(config);
+                for (Durability transConfigDurability : localDurabilities) {
+                    TransactionConfig txnConfig = null;
+                    if (transConfigDurability != null) {
+                        txnConfig = new TransactionConfig();
+                        txnConfig.setDurability(transConfigDurability);
+                    }
+                    for (Durability transDurability : localDurabilities) {
+                        long beforeSyncs = getNSyncs();
+                        Transaction txn = env.beginTransaction(null, txnConfig);
+                        db.put(txn, key, data);
+                        long beforeLength = logFile.length();
+                        if (transDurability == null) {
+                            txn.commit();
+                        } else {
+                            txn.commit(transDurability);
+                        }
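+                        /*
+                         * Effective durability: the commit-time durability
+                         * overrides the TransactionConfig durability, which
+                         * overrides the environment durability; SYNC is the
+                         * final default.
+                         */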
+                        Durability effectiveDurability =
+                            (transDurability != null) ?
+                            transDurability :
+                            ((transConfigDurability != null) ?
+                             transConfigDurability :
+                             ((envDurability != null) ?
+                              envDurability :
+                              TransactionConfig.SYNC));
+
+                        long afterSyncs = getNSyncs();
+                        long afterLength = logFile.length();
+                        boolean syncOccurred = afterSyncs > beforeSyncs;
+                        boolean writeOccurred = afterLength > beforeLength;
+                        switch (effectiveDurability.getLocalSync()) {
+                            case SYNC:
+                                assertTrue(syncOccurred);
+                                assertTrue(writeOccurred);
+                                break;
+                            case NO_SYNC:
+                                assertFalse(syncOccurred);
+                                assertFalse(writeOccurred);
+                                break;
+                            case WRITE_NO_SYNC:
+                                assertFalse(syncOccurred);
+                                assertTrue(writeOccurred);
+                                break;
+                        }
+                    }
+                }
+            }
+        } finally {
+            logFile.close();
+        }
+    }
+
+
+    /**
+     * Does an explicit commit and checks whether an fsync and log write
+     * occurred as expected.
+     */
+    private void syncExplicit(RandomAccessFile lastLogFile,
+                              TransactionConfig config,
+                              boolean expectSync,
+                              boolean expectWrite)
+        throws DatabaseException, IOException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+
+        long beforeSyncs = getNSyncs();
+        Transaction txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        long beforeLength = lastLogFile.length();
+        txn.commit();
+        long afterSyncs = getNSyncs();
+        long afterLength = lastLogFile.length();
+        boolean syncOccurred = afterSyncs > beforeSyncs;
+        boolean writeOccurred = afterLength > beforeLength;
+        assertEquals(expectSync, syncOccurred);
+        assertEquals(expectWrite, writeOccurred);
+
+        /*
+         * Make sure explicit sync/noSync/writeNoSync always works.
+         */
+
+        /* Expect a sync and write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs > beforeSyncs);
+        assert(afterLength > beforeLength);
+
+        /* Expect neither a sync nor write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitNoSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs == beforeSyncs);
+        assert(afterLength == beforeLength);
+
+        /* Expect no sync but do expect a write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitWriteNoSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs == beforeSyncs);
+        assert(afterLength > beforeLength);
+    }
+
+    /**
+     * Does an auto-commit and checks whether an fsync and log write
+     * occurred as expected.
+     */
+    private void syncAutoCommit(RandomAccessFile lastLogFile,
+                                boolean expectSync,
+                                boolean expectWrite)
+        throws DatabaseException, IOException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+        long beforeSyncs = getNSyncs();
+        long beforeLength = lastLogFile.length();
+        db.put(null, key, data);
+        long afterLength = lastLogFile.length();
+        long afterSyncs = getNSyncs();
+        boolean syncOccurred = afterSyncs > beforeSyncs;
+        assertEquals(expectSync, syncOccurred);
+        assertEquals(expectWrite, (afterLength > beforeLength));
+    }
+
+    /**
+     * Returns the number-of-fsyncs statistic.
+     */
+    private long getNSyncs() {
+        return DbInternal.envGetEnvironmentImpl(env)
+                         .getFileManager()
+                         .getNFSyncs();
+    }
+
+    public void testNoWaitConfig()
+        throws Throwable {
+
+        try {
+            TransactionConfig defaultConfig = new TransactionConfig();
+            TransactionConfig noWaitConfig = new TransactionConfig();
+            noWaitConfig.setNoWait(true);
+            Transaction txn;
+
+            /* noWait=false */
+
+            assertTrue(!isNoWaitTxn(null));
+
+            txn = env.beginTransaction(null, null);
+            assertTrue(!isNoWaitTxn(txn));
+            txn.abort();
+
+            txn = env.beginTransaction(null, defaultConfig);
+            assertTrue(!isNoWaitTxn(txn));
+            txn.abort();
+
+            /* noWait=true */
+
+            txn = env.beginTransaction(null, noWaitConfig);
+            assertTrue(isNoWaitTxn(txn));
+            txn.abort();
+
+        } catch (Throwable t) {
+            /* print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Returns whether the given txn is a no-wait txn, or if the txn parameter
+     * is null returns whether an auto-commit txn is a no-wait txn.
+     */
+    private boolean isNoWaitTxn(Transaction txn)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+
+        /* Use a wait txn to get a write lock. */
+        Transaction txn2 = env.beginTransaction(null, null);
+        db.put(txn2, key, data);
+
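+        /*
+         * A no-wait txn should fail immediately with LockNotGrantedException,
+         * while a waiting txn should eventually get a DeadlockException when
+         * it times out.
+         */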
+        try {
+            db.put(txn, key, data);
+            throw new IllegalStateException
+                ("Lock should not have been granted");
+        } catch (LockNotGrantedException e) {
+            return true;
+        } catch (DeadlockException e) {
+            return false;
+        } finally {
+            txn2.abort();
+        }
+    }
+
+    /*
+     * Assert that cache utilization is correctly incremented by locks and
+     * txns, and decremented after release.
+     */
+    private void checkCacheUsage(long beforeLock,
+                                 long afterLock,
+                                 long afterRelease,
+                                 long expectedSize) {
+        assertEquals(beforeLock, afterRelease);
+        assertEquals(afterLock, (beforeLock + expectedSize));
+    }
+
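+    /*
+     * WithRootLatched callback that records whether the root IN currently
+     * needs splitting.
+     */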
+    class CheckReadyToSplit implements WithRootLatched {
+        private boolean readyToSplit;
+        private DatabaseImpl database;
+
+        CheckReadyToSplit(DatabaseImpl database) {
+            readyToSplit = false;
+            this.database = database;
+        }
+
+        public boolean getReadyToSplit() {
+            return readyToSplit;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            IN rootIN = (IN) root.fetchTarget(database, null);
+            readyToSplit = rootIN.needsSplitting();
+            return null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/txn/TxnTimeoutTest.java b/test/com/sleepycat/je/txn/TxnTimeoutTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..c75bc5932fa9b42be472f8a2b8ebb8642d3af678
--- /dev/null
+++ b/test/com/sleepycat/je/txn/TxnTimeoutTest.java
@@ -0,0 +1,504 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnTimeoutTest.java,v 1.34.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+
+/*
+ * Test transaction and lock timeouts.
+ */
+public class TxnTimeoutTest extends TestCase {
+
+    private Environment env;
+    private File envHome;
+
+    public TxnTimeoutTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+    }
+
+    public void tearDown() {
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        try {
+            TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        env = null;
+        envHome = null;
+    }
+
+    private void createEnv(boolean setTimeout,
+                           long txnTimeoutVal,
+                           long lockTimeoutVal)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        if (setTimeout) {
+            envConfig.setTxnTimeout(txnTimeoutVal);
+            envConfig.setLockTimeout(lockTimeoutVal);
+        }
+
+        env = new Environment(envHome, envConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Test timeout set at txn level.
+     */
+    public void testTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        createEnv(false, 0, 0);
+
+        Transaction txnA = env.beginTransaction(null, null);
+
+        /* Grab a lock */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        env.openDatabase(txnA, "foo", dbConfig);
+
+        /* Now make a second txn so we can induce some blocking. */
+        Transaction txnB = env.beginTransaction(null, null);
+        txnB.setTxnTimeout(300000);  // microseconds
+        txnB.setLockTimeout(9000000);
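+        /*
+         * Sleep past the 300 ms txn timeout so the blocked open below fails
+         * with a txn (not lock) timeout.
+         */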
+        Thread.sleep(400);
+
+        try {
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Should time out");
+        } catch (DeadlockException e) {
+            /* Skip the version string. */
+            assertTrue
+                (TestUtils.skipVersion(e).startsWith("Transaction "));
+            assertEquals(300, e.getTimeoutMillis());
+            /* Good, expect this exception */
+            txnB.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        /* Now try a lock timeout. */
+        txnB = env.beginTransaction(null, null);
+        txnB.setLockTimeout(100000);
+
+        try {
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Should time out");
+        } catch (DeadlockException e) {
+            assertTrue(TestUtils.skipVersion(e).startsWith("Lock "));
+            assertEquals(100, e.getTimeoutMillis());
+            /* Good, expect this exception */
+            txnB.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        txnA.abort();
+        LockStats stats = env.getLockStats(TestUtils.FAST_STATS);
+        assertEquals(2, stats.getNWaits());
+
+        closeEnv();
+    }
+
+    /**
+     * Use Txn.setTimeout(), expect a txn timeout.
+     */
+    public void testPerTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(false, true, true, 300000, 9000000, false);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(), expect a txn timeout.
+     */
+    public void testEnvTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(true, true, true, 300000, 9000000, false);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(), use
+     * EnvironmentConfig.setLockTimeout(0), expect a txn timeout.
+     */
+    public void testEnvNoLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(true, true, true, 300000, 0, false);
+    }
+
+    /**
+     * Use Txn.setLockTimeout(), expect a lock timeout.
+     */
+    public void testPerLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(false, false, true, 0, 100000, true);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(0) and
+     * EnvironmentConfig.setLockTimeout(xxx), expect a lock timeout.
+     */
+    public void testEnvLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(true, false, true, 0, 100000, true);
+    }
+
+    /**
+     * @param setEnvConfigTimeout
+     * if true, use EnvironmentConfig.set{Lock,Txn}Timeout
+     * @param setPerTxnTimeout if true, use Txn.setTxnTimeout()
+     * @param setPerLockTimeout if true, use Txn.setLockTimeout()
+     * @param txnTimeout value for the txn timeout, in microseconds
+     * @param lockTimeout value for the lock timeout, in microseconds
+     * @param expectLockException if true, expect a lock timeout; if
+     * false, expect a txn timeout
+     */
+    private void doEnvTimeout(boolean setEnvConfigTimeout,
+                              boolean setPerTxnTimeout,
+                              boolean setPerLockTimeout,
+                              long txnTimeout,
+                              long lockTimeout,
+                              boolean expectLockException)
+        throws DatabaseException, InterruptedException {
+
+        createEnv(setEnvConfigTimeout, txnTimeout, lockTimeout);
+
+        Transaction txnA = env.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database dbA = env.openDatabase(txnA, "foo", dbConfig);
+
+        /*
+         * Now make a second txn so we can induce some blocking. Make the
+         * txn timeout environment wide.
+         */
+        Transaction txnB = env.beginTransaction(null, null);
+        long expectTxnTimeoutMillis;
+        long expectLockTimeoutMillis;
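+
+        /*
+         * Timeouts are specified in microseconds; DeadlockException reports
+         * milliseconds, hence the division by 1000 below.
+         */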
+        if (setEnvConfigTimeout) {
+            expectTxnTimeoutMillis = txnTimeout / 1000;
+            expectLockTimeoutMillis = lockTimeout / 1000;
+        } else {
+            if (setPerTxnTimeout) {
+                txnB.setTxnTimeout(300000);
+                expectTxnTimeoutMillis = 300;
+            } else {
+                expectTxnTimeoutMillis = 500;
+            }
+            if (setPerLockTimeout) {
+                txnB.setLockTimeout(9000000);
+                expectLockTimeoutMillis = 9000;
+            } else {
+                expectLockTimeoutMillis = 500;
+            }
+        }
+
+        Thread.sleep(400);
+
+        try {
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Should time out");
+        } catch (DeadlockException e) {
+            if (expectLockException) {
+                assertTrue(TestUtils.skipVersion(e).startsWith("Lock "));
+                assertEquals(expectLockTimeoutMillis,
+                             e.getTimeoutMillis());
+            } else {
+                assertTrue
+                    (TestUtils.skipVersion(e).startsWith("Transaction "));
+                assertEquals(expectTxnTimeoutMillis, e.getTimeoutMillis());
+            }
+
+            /* Good, expect this exception */
+            txnB.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        dbA.close();
+        txnA.abort();
+
+        closeEnv();
+    }
+
+    /**
+     * Use Locker.setTxnTimeout(), check timeout behavior and argument
+     * validation.
+     */
+    public void testPerLockerTimeout()
+        throws DatabaseException, InterruptedException {
+       
+        createEnv(true, 500000000, 0);
+    
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        /*
+         * Create our Locker object and set the transaction timeout to 0.
+         * Zero should mean no timeout, per the Berkeley DB API docs.
+         */
+        Locker locker = BasicLocker.createBasicLocker(envImpl);
+        locker.setTxnTimeout(0);
+        /* Wait for a short period. */
+        Thread.sleep(100);
+        /* With a timeout of zero, the locker should never be timed out. */
+        assertFalse(locker.isTimedOut());
+
+        /* Set timeout to 10 milliseconds. */
+        locker.setTxnTimeout(10);
+        /* Wait for 100 milliseconds. */
+        Thread.sleep(100);
+        /* Should be timed out. */
+        assertTrue(locker.isTimedOut());
+
+        try {
+
+            /*
+             * Set the timeout to a negative value and expect an
+             * IllegalArgumentException.
+             */
+            locker.setTxnTimeout(-1000);
+            fail("should get an exception");
+        } catch (IllegalArgumentException ie) {
+            assertTrue(ie.
+                       getMessage().
+                       contains("the timeout value cannot be negative"));
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        try {
+
+            /*
+             * Set the timeout to a value greater than 2^32 and expect an
+             * IllegalArgumentException.
+             */
+            long timeout = (long) Math.pow(2, 33);
+            locker.setTxnTimeout(timeout);
+            fail("should get an exception");
+        } catch (IllegalArgumentException ie) {
+            assertTrue(ie.getMessage().contains
+                    ("the timeout value cannot be greater than 2^32"));
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        closeEnv();
+    }
+
+    public void testReadCommittedTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doReadCommittedTimeout(true);
+    }
+
+    public void testReadCommittedLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doReadCommittedTimeout(false);
+    }
+
+    /**
+     * Tests that Transaction.setTxnTimeout and setLockTimeout work with the
+     * BuddyLocker used for ReadCommitted reads.  [#16017]
+     */
+    private void doReadCommittedTimeout(boolean useTxnTimeout)
+        throws DatabaseException, InterruptedException {
+       
+        createEnv(false, 0, 0);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setReadCommitted(true);
+
+        Transaction txnA = null;
+        Transaction txnB = null;
+
+        try {
+            /* Insert a record with txnA and keep it write-locked. */
+            txnA = env.beginTransaction(null, txnConfig);
+            key.setData(new byte[1]);
+            data.setData(new byte[1]);
+            OperationStatus status = db.put(txnA, key, data);
+            assertSame(OperationStatus.SUCCESS, status);
+
+            /*
+             * A read with txnB will block because the record is write-locked
+             * by txnA.
+             */
+            txnB = env.beginTransaction(null, txnConfig);
+            if (useTxnTimeout) {
+                txnB.setTxnTimeout(100 * 1000);
+                txnB.setLockTimeout(9000 * 1000);
+                /* Ensure txn timeout triggers before waiting. */
+                Thread.sleep(150);
+            } else {
+                txnB.setTxnTimeout(9000 * 1000);
+                txnB.setLockTimeout(100 * 1000);
+            }
+            key.setData(new byte[1]);
+            try {
+                db.get(txnB, key, data, null);
+                fail();
+            } catch (DeadlockException e) {
+                assertTrue(e.toString(), TestUtils.skipVersion(e).startsWith
+                            (useTxnTimeout ? "Transaction " : "Lock "));
+                assertEquals(100, e.getTimeoutMillis());
+            }
+        } finally {
+            if (txnB != null) {
+                txnB.abort();
+            }
+            if (txnA != null) {
+                txnA.abort();
+            }
+        }
+
+        db.close();
+        closeEnv();
+    }
+
+    public void testSerializableTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doSerializableTimeout(true);
+    }
+
+    public void testSerializableLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doSerializableTimeout(false);
+    }
+
+    /**
+     * Tests that Transaction.setTxnTimeout and setLockTimeout work with the
+     * BuddyLocker used for Serializable inserts. [#16017]
+     */
+    private void doSerializableTimeout(boolean useTxnTimeout)
+        throws DatabaseException, InterruptedException {
+       
+        createEnv(false, 0, 0);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setSerializableIsolation(true);
+
+        Transaction txnA = null;
+        Transaction txnB = null;
+
+        try {
+            /* Lock virtual EOF node with txnA by scanning an empty DB. */
+            txnA = env.beginTransaction(null, txnConfig);
+            Cursor c = db.openCursor(txnA, null);
+            OperationStatus status = c.getFirst(key, data, null);
+            assertSame(OperationStatus.NOTFOUND, status);
+            c.close();
+
+            /*
+             * Insert with txnB will block because entire range is locked by
+             * txnA.
+             */
+            txnB = env.beginTransaction(null, txnConfig);
+            if (useTxnTimeout) {
+                txnB.setTxnTimeout(100 * 1000);
+                txnB.setLockTimeout(9000 * 1000);
+                /* Ensure txn timeout triggers before waiting. */
+                Thread.sleep(150);
+            } else {
+                txnB.setTxnTimeout(9000 * 1000);
+                txnB.setLockTimeout(100 * 1000);
+            }
+            key.setData(new byte[1]);
+            data.setData(new byte[1]);
+            try {
+                db.put(txnB, key, data);
+                fail();
+            } catch (DeadlockException e) {
+                assertTrue(e.toString(), TestUtils.skipVersion(e).startsWith
+                            (useTxnTimeout ? "Transaction " : "Lock "));
+                assertEquals(100, e.getTimeoutMillis());
+            }
+        } finally {
+            if (txnB != null) {
+                txnB.abort();
+            }
+            if (txnA != null) {
+                txnA.abort();
+            }
+        }
+
+        db.close();
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/util/Adler32Test.java b/test/com/sleepycat/je/util/Adler32Test.java
new file mode 100644
index 0000000000000000000000000000000000000000..412068a3d58ad5511d440f68c4d96dbcdc5561a7
--- /dev/null
+++ b/test/com/sleepycat/je/util/Adler32Test.java
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Adler32Test.java,v 1.13.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.util.Random;
+import java.util.zip.Checksum;
+
+import junit.framework.TestCase;
+
+public class Adler32Test extends TestCase {
+
+    static private int N_ITERS = 1000;
+
+    public void testRandomAdler32ByteArray() {
+	Checksum javaChecksum = new java.util.zip.Adler32();
+	Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32();
+	Checksum chunkingChecksum =
+	    new com.sleepycat.je.utilint.Adler32.ChunkingAdler32(128);
+	Random rnd = new Random();
+	for (int i = 0; i < N_ITERS; i++) {
+	    int nBytes = rnd.nextInt(65535);
+	    byte[] b = new byte[nBytes];
+	    rnd.nextBytes(b);
+	    javaChecksum.reset();
+	    jeChecksum.reset();
+	    chunkingChecksum.reset();
+	    javaChecksum.update(b, 0, nBytes);
+	    jeChecksum.update(b, 0, nBytes);
+	    chunkingChecksum.update(b, 0, nBytes);
+	    assertEquals(javaChecksum.getValue(), jeChecksum.getValue());
+	    assertEquals(javaChecksum.getValue(), chunkingChecksum.getValue());
+	}
+    }
+
+    public void xtestRandomAdler32ByteArrayPerformance() {
+	Checksum javaChecksum = new java.util.zip.Adler32();
+	Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32();
+	Random rnd = new Random();
+	byte[][] baa = new byte[N_ITERS][];
+	int[] lengths = new int[N_ITERS];
+	long totalBytes = 0;
+	for (int i = 0; i < N_ITERS; i++) {
+	    int nBytes = rnd.nextInt(65535);
+	    byte[] b = new byte[nBytes];
+	    baa[i] = b;
+	    lengths[i] = nBytes;
+	    totalBytes += nBytes;
+	    rnd.nextBytes(b);
+	}
+	long jeChecksumTime =
+	    measureChecksum(baa, lengths, jeChecksum, false);
+	long javaChecksumTime =
+	    measureChecksum(baa, lengths, javaChecksum, false);
+	long jeChecksumTimeByteAtATime =
+	    measureChecksum(baa, lengths, jeChecksum, true);
+	long javaChecksumTimeByteAtATime =
+	    measureChecksum(baa, lengths, javaChecksum, true);
+	System.out.println(N_ITERS + " Iterations, " +
+			   totalBytes + " bytes:\n " +
+			   javaChecksumTime + " millis. for java\n" +
+			   jeChecksumTime + " millis. for je\n" +
+			   javaChecksumTimeByteAtATime +
+			   " millis. for java byte at a time\n" +
+			   jeChecksumTimeByteAtATime +
+			   " millis. for je byte at a time");
+    }
+
+    private long measureChecksum(byte[][] baa,
+				 int[] lengths,
+				 Checksum cksum,
+				 boolean byteAtATime) {
+	long startTime = System.currentTimeMillis();
+	for (int i = 0; i < N_ITERS; i++) {
+	    byte[] b = baa[i];
+	    int len = lengths[i];
+	    cksum.reset();
+	    if (byteAtATime) {
+		for (int j = 0; j < len; j++) {
+		    cksum.update(b[j]);
+		}
+	    } else {
+		cksum.update(b, 0, len);
+	    }
+	}
+	long endTime = System.currentTimeMillis();
+	return (endTime - startTime);
+    }
+
+    public void testRandomAdler32SingleBytes() {
+	Checksum javaChecksum = new java.util.zip.Adler32();
+	Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32();
+	Random rnd = new Random();
+	for (int i = 0; i < N_ITERS; i++) {
+	    int nBytes = rnd.nextInt(65535);
+	    javaChecksum.reset();
+	    jeChecksum.reset();
+	    for (int j = 0; j < nBytes; j++) {
+		byte b = (byte) (rnd.nextInt(256) & 0xff);
+		javaChecksum.update(b);
+		jeChecksum.update(b);
+	    }
+	    assertEquals(javaChecksum.getValue(), jeChecksum.getValue());
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/util/BadFileFilter.java b/test/com/sleepycat/je/util/BadFileFilter.java
new file mode 100644
index 0000000000000000000000000000000000000000..1b91cc00c67b0d8d837c44df908e5228543f7517
--- /dev/null
+++ b/test/com/sleepycat/je/util/BadFileFilter.java
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BadFileFilter.java,v 1.14.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.StringTokenizer;
+
+public class BadFileFilter implements FilenameFilter {
+
+    /**
+     * Accept files of this format:
+     * <nnnnnnnn>.bad.<n>
+     */
+    public boolean accept(File dir, String name) {
+        boolean ok = false;
+        StringTokenizer tokenizer = new StringTokenizer(name, ".");
+        /* There should be three parts. */
+        if (tokenizer.countTokens() == 3) {
+            String fileNumber = tokenizer.nextToken();
+            String fileSuffix = tokenizer.nextToken();
+            String repeat = tokenizer.nextToken();
+
+            /* Check the length and the suffix. */
+            if ((fileNumber.length() == 8) &&
+                (fileSuffix.equals("bad"))) {
+
+                /* The first and third parts should be numbers. */
+                try {
+                    Integer.parseInt(fileNumber);
+                    Integer.parseInt(repeat);
+                    ok = true;
+                } catch (NumberFormatException e) {
+                    ok = false;
+                }
+            }
+        }
+
+        return ok;
+    }
+}
+
diff --git a/test/com/sleepycat/je/util/DbBackupTest.java b/test/com/sleepycat/je/util/DbBackupTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..75381267ee9c4f990ddd9b9f4a4929efb03c0f8f
--- /dev/null
+++ b/test/com/sleepycat/je/util/DbBackupTest.java
@@ -0,0 +1,566 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbBackupTest.java,v 1.13.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class DbBackupTest extends TestCase {
+
+    private static StatsConfig CLEAR_CONFIG = new StatsConfig();
+    static {
+        CLEAR_CONFIG.setClear(true);
+    }
+
+    private static CheckpointConfig FORCE_CONFIG = new CheckpointConfig();
+    static {
+        FORCE_CONFIG.setForce(true);
+    }
+
+    private static final String SAVE1 = "save1";
+    private static final String SAVE2 = "save2";
+    private static final String SAVE3 = "save3";
+    private static final int NUM_RECS = 60;
+
+    private File envHome;
+    private Environment env;
+    private FileManager fileManager;
+
+    public DbBackupTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        deleteSaveDir(SAVE1);
+        deleteSaveDir(SAVE2);
+        deleteSaveDir(SAVE3);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        deleteSaveDir(SAVE1);
+        deleteSaveDir(SAVE2);
+        deleteSaveDir(SAVE3);
+    }
+
+    /**
+     * Test basic backup; make sure log cleaning doesn't delete files in the
+     * backup set while the backup is in progress.
+     */
+    public void testBackupVsCleaning()
+        throws Throwable {
+
+        env = createEnv(false, envHome); /* read-write env */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        fileManager = envImpl.getFileManager();
+
+        boolean success = false;
+        try {
+
+            /*
+             * Grow files, creating obsolete entries to create cleaner
+             * opportunity.
+             */
+            growFiles("db1", env, 8);
+
+            /* Start backup. */
+            DbBackup backupHelper = new DbBackup(env);
+            backupHelper.startBackup();
+
+            long lastFileNum =  backupHelper.getLastFileInBackupSet();
+            long checkLastFileNum = lastFileNum;
+
+            /* Copy the backup set. */
+            saveFiles(backupHelper, -1, lastFileNum, SAVE1);
+
+            /*
+             * Try to clean and checkpoint. Check that the logs grew as
+             * a result.
+             */
+            batchClean(0);
+            long newLastFileNum = (fileManager.getLastFileNum()).longValue();
+            assertTrue(checkLastFileNum < newLastFileNum);
+            checkLastFileNum = newLastFileNum;
+
+            /* Copy the backup set after attempting cleaning */
+            saveFiles(backupHelper, -1, lastFileNum, SAVE2);
+
+            /* Insert more data. */
+            growFiles("db2", env, 8);
+
+            /*
+             * Try to clean and checkpoint. Check that the logs grew as
+             * a result.
+             */
+            batchClean(0);
+            newLastFileNum = fileManager.getLastFileNum().longValue();
+            assertTrue(checkLastFileNum < newLastFileNum);
+            checkLastFileNum = newLastFileNum;
+
+            /* Copy the backup set after inserting more data */
+            saveFiles(backupHelper, -1, lastFileNum, SAVE3);
+
+            /* Check the membership of the saved set. */
+            long lastFile =  backupHelper.getLastFileInBackupSet();
+            String[] backupSet = backupHelper.getLogFilesInBackupSet();
+            assertEquals((lastFile + 1), backupSet.length);
+
+            /* End backup. */
+            backupHelper.endBackup();
+
+            /*
+             * Run cleaning, and verify that quite a few files are deleted.
+             */
+            long numCleaned = batchClean(100);
+            assertTrue(numCleaned > 5);
+            env.close();
+            env = null;
+
+            /* Verify backups. */
+            TestUtils.removeLogFiles("Verify", envHome, false);
+            verifyDb1(SAVE1, true);
+            TestUtils.removeLogFiles("Verify", envHome, false);
+            verifyDb1(SAVE2, true);
+            TestUtils.removeLogFiles("Verify", envHome, false);
+            verifyDb1(SAVE3, true);
+            success = true;
+        } finally {
+            if (env != null) {
+                try {
+                    env.close();
+                } catch (Exception e) {
+                    /* 
+                     * Don't bother with this exception if there is another
+                     * earlier problem.
+                     */
+                    if (success) {
+                        throw e;
+                    } 
+                }
+            }
+        }
+    }
+
+    /**
+     * Test multiple backup passes
+     */
+    public void testIncrementalBackup()
+        throws Throwable {
+
+        env = createEnv(false, envHome); /* read-write env */
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        fileManager = envImpl.getFileManager();
+
+        try {
+
+            /*
+             * Grow files, creating obsolete entries to create cleaner
+             * opportunity.
+             */
+            growFiles("db1", env, 8);
+
+            /* Backup1. */
+            DbBackup backupHelper = new DbBackup(env);
+            backupHelper.startBackup();
+            long b1LastFile =  backupHelper.getLastFileInBackupSet();
+            saveFiles(backupHelper, -1, b1LastFile, SAVE1);
+            String lastName = fileManager.getFullFileName(b1LastFile,
+                                                 FileManager.JE_SUFFIX);
+            File f = new File(lastName);
+            long savedLength = f.length();
+            backupHelper.endBackup();
+
+            /*
+             * Add more data. Check that the file did flip, and is not modified
+             * by the additional data.
+             */
+            growFiles("db2", env, 8);
+            checkFileLen(b1LastFile, savedLength);
+
+            /* Backup2. */
+            backupHelper.startBackup();
+            long b2LastFile =  backupHelper.getLastFileInBackupSet();
+            saveFiles(backupHelper, b1LastFile, b2LastFile, SAVE2);
+            backupHelper.endBackup();
+
+            env.close();
+            env = null;
+
+            /* Verify backups. */
+            TestUtils.removeLogFiles("Verify", envHome, false);
+            verifyDb1(SAVE1, false);
+            TestUtils.removeLogFiles("Verify", envHome, false);
+            verifyBothDbs(SAVE1, SAVE2);
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
+
+    public void testBadUsage()
+        throws Exception {
+
+        Environment env = createEnv(false, envHome); /* read-write env */
+
+        try {
+            DbBackup backup = new DbBackup(env);
+
+            /* end can only be called after start. */
+            try {
+                backup.endBackup();
+                fail("should fail");
+            } catch (DatabaseException expected) {
+            }
+
+            /* start can't be called twice. */
+            backup.startBackup();
+            try {
+                backup.startBackup();
+                fail("should fail");
+            } catch (DatabaseException expected) {
+            }
+
+            /*
+             * You can only get the backup set when you're in between start
+             * and end.
+             */
+            backup.endBackup();
+
+            try {
+                backup.getLastFileInBackupSet();
+                fail("should fail");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                backup.getLogFilesInBackupSet();
+                fail("should fail");
+            } catch (DatabaseException expected) {
+            }
+
+            try {
+                backup.getLogFilesInBackupSet(0);
+                fail("should fail");
+            } catch (DatabaseException expected) {
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    /*
+     * This test can't be run by default, because it makes a directory
+     * read-only, and Java doesn't support a way to make it writable again
+     * except in Mustang. There's no way to clean up a read-only directory.
+     */
+    public void xtestReadOnly()
+        throws Exception {
+
+        /* Make a read-only handle on a read-write environment directory. */
+        Environment env = createEnv(true, envHome);
+
+        try {
+            @SuppressWarnings("unused")
+            DbBackup backup = new DbBackup(env);
+            fail("Should fail because env is read/only.");
+        } catch (DatabaseException expected) {
+        }
+
+        env.close();
+
+        /*
+         * Make a read-only handle on a read-only environment directory. Use a
+         * new environment directory because we're going to set it read-only and
+         * there doesn't seem to be a way of undoing that.
+         */
+        File tempEnvDir = new File(envHome, SAVE1);
+        assertTrue(tempEnvDir.mkdirs());
+        env = createEnv(false, tempEnvDir);
+        growFiles("db1", env, 8);
+        env.close();
+        //assertTrue(tempEnvDir.setReadOnly());
+
+        env = createEnv(true, tempEnvDir);
+
+        DbBackup backupHelper = new DbBackup(env);
+        backupHelper.startBackup();
+
+        FileManager fileManager =
+            DbInternal.envGetEnvironmentImpl(env).getFileManager();
+        long lastFile = fileManager.getLastFileNum().longValue();
+        assertEquals(lastFile, backupHelper.getLastFileInBackupSet());
+
+        backupHelper.endBackup();
+        env.close();
+        assertTrue(tempEnvDir.delete());
+    }
+
+    private Environment createEnv(boolean readOnly, File envDir)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setAllowCreate(true);
+        envConfig.setReadOnly(readOnly);
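+
+        /*
+         * Use a very small log file size so tests span many files, and
+         * disable the cleaner thread so cleaning runs only when the tests
+         * invoke it explicitly.
+         */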
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                                 "400");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+
+        Environment env = new Environment(envDir, envConfig);
+
+        return env;
+    }
+
+    private long growFiles(String dbName,
+                           Environment env,
+                           int minNumFiles)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, dbName, dbConfig);
+        FileManager fileManager =
+            DbInternal.envGetEnvironmentImpl(env).getFileManager();
+        long startLastFileNum =
+            DbLsn.getFileNumber(fileManager.getLastUsedLsn());
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        /* Update twice, in order to create plenty of cleaning opportunity. */
+        for (int i = 0; i < NUM_RECS; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        for (int i = 0; i < NUM_RECS; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i+5, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        db.close();
+
+        long endLastFileNum =
+            DbLsn.getFileNumber(fileManager.getLastUsedLsn());
+        assertTrue((endLastFileNum -
+                    startLastFileNum) >= minNumFiles);
+        return endLastFileNum;
+    }
+
+    private long batchClean(int expectedDeletions)
+        throws DatabaseException {
+
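+        /* Clear stats so the deletion count reflects only this batch. */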
+        EnvironmentStats stats = env.getStats(CLEAR_CONFIG);
+        while (env.cleanLog() > 0) {
+        }
+        env.checkpoint(FORCE_CONFIG);
+        stats = env.getStats(CLEAR_CONFIG);
+        assertTrue(stats.getNCleanerDeletions() <= expectedDeletions);
+
+        return stats.getNCleanerDeletions();
+    }
+
+    private void saveFiles(DbBackup backupHelper,
+                           long lastFileFromPrevBackup,
+                           long lastFileNum,
+                           String saveDirName)
+        throws IOException, DatabaseException {
+
+        /* Check that the backup set contains only the files it should have. */
+        String[] fileList =
+            backupHelper.getLogFilesInBackupSet(lastFileFromPrevBackup);
+        assertEquals(lastFileNum,
+                     fileManager.getNumFromName(fileList[fileList.length-1]).
+                     longValue());
+
+        /* Make a new save directory. */
+        File saveDir = new File(envHome, saveDirName);
+        assertTrue(saveDir.mkdir());
+        copyFiles(envHome, saveDir, fileList);
+    }
+
+    private void copyFiles(File sourceDir, File destDir, String[] fileList)
+        throws DatabaseException {
+
+        try {
+            for (int i = 0; i < fileList.length; i++) {
+                File source = new File(sourceDir, fileList[i]);
+                FileChannel sourceChannel =
+                    new FileInputStream(source).getChannel();
+                File save = new File(destDir, fileList[i]);
+                FileChannel saveChannel =
+                    new FileOutputStream(save).getChannel();
+
+                saveChannel.transferFrom(sourceChannel, 0,
+                                         sourceChannel.size());
+
+                // Close the channels
+                sourceChannel.close();
+                saveChannel.close();
+            }
+        } catch (IOException e) {
+            throw new DatabaseException(e);
+        }
+    }
+
+    /**
+     * Delete all the contents and the directory itself.
+     */
+    private void deleteSaveDir(String saveDirName)
+        throws IOException {
+
+        File saveDir = new File(envHome, saveDirName);
+        if (saveDir.exists()) {
+            String[] savedFiles = saveDir.list();
+            if (savedFiles != null) {
+                for (int i = 0; i < savedFiles.length; i++) {
+                    File f = new File(saveDir, savedFiles[i]);
+                    assertTrue(f.delete());
+                }
+                assertTrue(saveDir.delete());
+            }
+        }
+    }
+
+    /**
+     * Copy the saved files in, check values.
+     */
+    private void verifyDb1(String saveDirName, boolean rename)
+        throws DatabaseException {
+
+        File saveDir = new File(envHome, saveDirName);
+        String[] savedFiles = saveDir.list();
+        if (rename){
+            for (int i = 0; i < savedFiles.length; i++) {
+                File saved = new File(saveDir, savedFiles[i]);
+                File dest = new File(envHome, savedFiles[i]);
+                assertTrue(saved.renameTo(dest));
+            }
+        } else {
+            /* copy. */
+            copyFiles(saveDir, envHome, savedFiles);
+        }
+        env = createEnv(false, envHome);
+        try {
+            checkDb("db1");
+
+            /* Db 2 should not exist. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            try {
+                @SuppressWarnings("unused")
+                Database db = env.openDatabase(null, "db2", dbConfig);
+                fail("db2 should not exist");
+            } catch (DatabaseException expected) {
+            }
+
+        } finally {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Copy the saved files in, check values.
+     */
+    private void verifyBothDbs(String saveDirName1, String saveDirName2)
+        throws DatabaseException {
+
+        File saveDir = new File(envHome, saveDirName1);
+        String[] savedFiles = saveDir.list();
+        for (int i = 0; i < savedFiles.length; i++) {
+            File saved = new File(saveDir, savedFiles[i]);
+            File dest = new File(envHome, savedFiles[i]);
+            assertTrue(saved.renameTo(dest));
+        }
+
+        saveDir = new File(envHome, saveDirName2);
+        savedFiles = saveDir.list();
+        for (int i = 0; i < savedFiles.length; i++) {
+            File saved = new File(saveDir, savedFiles[i]);
+            File dest = new File(envHome, savedFiles[i]);
+            assertTrue(saved.renameTo(dest));
+        }
+
+        env = createEnv(false, envHome);
+        try {
+            checkDb("db1");
+            checkDb("db2");
+        } finally {
+            env.close();
+            env = null;
+        }
+    }
+
+    private void checkDb(String dbName)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        Database db = env.openDatabase(null, dbName, dbConfig);
+        Cursor c = null;
+        try {
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            c = db.openCursor(null, null);
+
+            for (int i = 0; i < NUM_RECS; i++) {
+                assertEquals(OperationStatus.SUCCESS,
+                             c.getNext(key, data, LockMode.DEFAULT));
+                assertEquals(i, IntegerBinding.entryToInt(key));
+                assertEquals(i + 5, IntegerBinding.entryToInt(data));
+            }
+            assertEquals(OperationStatus.NOTFOUND,
+                         c.getNext(key, data, LockMode.DEFAULT));
+        } finally {
+            if (c != null)
+                c.close();
+            db.close();
+        }
+    }
+
+    private void checkFileLen(long fileNum, long length)
+        throws IOException {
+        String fileName = fileManager.getFullFileName(fileNum,
+                                                      FileManager.JE_SUFFIX);
+        File f = new File(fileName);
+        assertEquals(length, f.length());
+    }
+}
+
+
diff --git a/test/com/sleepycat/je/util/DbDumpTest.java b/test/com/sleepycat/je/util/DbDumpTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e6e0b5094d3d86b53f54ce42db0285345174617
--- /dev/null
+++ b/test/com/sleepycat/je/util/DbDumpTest.java
@@ -0,0 +1,272 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbDumpTest.java,v 1.49.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.Key;
+
+public class DbDumpTest extends TestCase {
+
+    private File envHome;
+
+    private static final int N_KEYS = 100;
+    private static final int N_KEY_BYTES = 1000;
+    private static final String dbName = "testDB";
+
+    private Environment env;
+
+    public DbDumpTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+	throws IOException {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    /**
+     * A simple test to check if JE's dump format matches Core.
+     */
+    public void testMatchCore()
+        throws Throwable {
+
+        try {
+            /* Set up a new environment. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setAllowCreate(true);
+            env = new Environment(envHome, envConfig);
+
+            /*
+             * Make a stream holding a small dump in a format known to be
+             * the same as Core DB.
+             */
+            ByteArrayOutputStream dumpInfo = new ByteArrayOutputStream();
+            PrintStream dumpStream = new PrintStream(dumpInfo);
+            dumpStream.println("VERSION=3");
+            dumpStream.println("format=print");
+            dumpStream.println("type=btree");
+            dumpStream.println("dupsort=0");
+            dumpStream.println("HEADER=END");
+            dumpStream.println(" abc");
+            dumpStream.println(" firstLetters");
+            dumpStream.println(" xyz");
+            dumpStream.println(" lastLetters");
+            dumpStream.println("DATA=END");
+
+            /* load it */
+            DbLoad loader = new DbLoad();
+            loader.setEnv(env);
+            loader.setInputReader(new BufferedReader(new InputStreamReader(
+                new ByteArrayInputStream(dumpInfo.toByteArray()))));
+            loader.setNoOverwrite(false);
+	    loader.setDbName("foobar");
+            loader.load();
+
+            /* Make sure we retrieve the expected data. */
+            Database checkDb = env.openDatabase(null, "foobar", null);
+            Cursor cursor = checkDb.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getNext(key, data, LockMode.DEFAULT));
+            assertEquals("abc", new String(key.getData()));
+            assertEquals("firstLetters", new String(data.getData()));
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getNext(key, data, LockMode.DEFAULT));
+            assertEquals("xyz", new String(key.getData()));
+            assertEquals("lastLetters", new String(data.getData()));
+            assertEquals(OperationStatus.NOTFOUND,
+                         cursor.getNext(key, data, LockMode.DEFAULT));
+            cursor.close();
+            checkDb.close();
+
+            /* Check that a dump of the database matches the input file. */
+            ByteArrayOutputStream dump2 = new ByteArrayOutputStream();
+            DbDump dumper2 = new DbDump(env, "foobar",
+                                        new PrintStream(dump2), true);
+            dumper2.dump();
+            assertEquals(dumpInfo.toString(), dump2.toString());
+
+            env.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDumpLoadBinary()
+        throws Throwable {
+
+        try {
+            doDumpLoadTest(false, 1);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    public void testDumpLoadPrintable()
+        throws IOException, DatabaseException {
+
+	doDumpLoadTest(true, 1);
+    }
+
+    public void testDumpLoadTwo()
+        throws IOException, DatabaseException {
+
+	doDumpLoadTest(false, 2);
+    }
+
+    public void testDumpLoadThree()
+        throws IOException, DatabaseException {
+
+	doDumpLoadTest(true, 3);
+    }
+
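+    /**
+     * Round-trip check: dump nDumps databases into one stream, load them
+     * all back from that same stream, verify each database against its
+     * data map, then dump everything again and require the two dump
+     * outputs to be byte-for-byte identical.
+     */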
+    private void doDumpLoadTest(boolean printable, int nDumps)
+	throws IOException, DatabaseException {
+
+	Hashtable[] dataMaps = new Hashtable[nDumps];
+        for (int i = 0; i < nDumps; i += 1) {
+            dataMaps[i] = new Hashtable();
+        }
+	initDbs(nDumps, dataMaps);
+	ByteArrayOutputStream baos = new ByteArrayOutputStream();
+	PrintStream out = new PrintStream(baos);
+        for (int i = 0; i < nDumps; i += 1) {
+            DbDump dumper =
+		new DbDump(env, dbName + i, out, printable);
+            dumper.dump();
+        }
+	byte[] baosba = baos.toByteArray();
+        BufferedReader rdr = new BufferedReader
+            (new InputStreamReader(new ByteArrayInputStream(baosba)));
+        for (int i = 0; i < nDumps; i += 1) {
+            DbLoad loader = new DbLoad();
+            loader.setEnv(env);
+            loader.setInputReader(rdr);
+            loader.setNoOverwrite(false);
+	    loader.setDbName(dbName + i);
+            loader.load();
+            verifyDb(dataMaps[i], i);
+        }
+
+        ByteArrayOutputStream baos2 = new ByteArrayOutputStream();
+        PrintStream out2 = new PrintStream(baos2);
+        for (int i = 0; i < nDumps; i += 1) {
+            DbDump dumper2 =
+		new DbDump(env, dbName + i, out2, printable);
+            dumper2.dump();
+        }
+        assertEquals(0, Key.compareKeys(baosba, baos2.toByteArray(), null));
+
+	env.close();
+    }
+
+    /**
+     * Set up the environment and db.
+     */
+    private void initDbs(int nDumps, Hashtable[] dataMaps)
+	throws DatabaseException {
+
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /* Make a db and open it. */
+        for (int i = 0; i < nDumps; i += 1) {
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setSortedDuplicates(true);
+            Database myDb = env.openDatabase(null, dbName + i, dbConfig);
+            Cursor cursor = myDb.openCursor(null, null);
+            doLargePut(dataMaps[i], cursor, N_KEYS);
+            cursor.close();
+            myDb.close();
+        }
+    }
+
+    private void verifyDb(Hashtable<String,String> dataMap, int dumpIndex)
+	throws DatabaseException {
+
+        DatabaseConfig config = new DatabaseConfig();
+        config.setReadOnly(true);
+        DbInternal.setUseExistingConfig(config, true);
+	Database myDb = env.openDatabase(null, dbName + dumpIndex, config);
+	Cursor cursor = myDb.openCursor(null, null);
+	StringDbt foundKey = new StringDbt();
+	StringDbt foundData = new StringDbt();
+	OperationStatus status =
+	    cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+	while (status == OperationStatus.SUCCESS) {
+	    String foundKeyString = foundKey.getString();
+	    String foundDataString = foundData.getString();
+	    if (dataMap.get(foundKeyString) != null) {
+                assertEquals(dataMap.get(foundKeyString), foundDataString);
+		dataMap.remove(foundKeyString);
+	    } else {
+		fail("didn't find key in the data map (" +
+		     foundKeyString + ")");
+	    }
+	    status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+	}
+	assertEquals(0, dataMap.size());
+        cursor.close();
+        myDb.close();
+    }
+
+    private void doLargePut(Hashtable<String, String> dataMap, Cursor cursor, int nKeys)
+	throws DatabaseException {
+
+	for (int i = 0; i < nKeys; i++) {
+	    byte[] key = new byte[N_KEY_BYTES];
+	    TestUtils.generateRandomAlphaBytes(key);
+	    String keyString = new String(key);
+	    String dataString = Integer.toString(i);
+	    OperationStatus status =
+		cursor.put(new StringDbt(key),
+                           new StringDbt(dataString));
+	    assertEquals(OperationStatus.SUCCESS, status);
+	    if (dataMap != null) {
+		dataMap.put(keyString, dataString);
+	    }
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/util/DbLsnTest.java b/test/com/sleepycat/je/util/DbLsnTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7133d15a8f9f6db297ad9b06281a4fd4be5d7b68
--- /dev/null
+++ b/test/com/sleepycat/je/util/DbLsnTest.java
@@ -0,0 +1,158 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbLsnTest.java,v 1.20.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class DbLsnTest extends TestCase {
+    long[] values = { 0xFF, 0xFFFF, 0xFFFFFF, 0x7FFFFFFF, 0xFFFFFFFFL };
+
+    public void testDbLsn() {
+	for (int i = 0; i < values.length; i++) {
+	    long value = values[i];
+	    long lsn = DbLsn.makeLsn(value, value);
+	    assertTrue((DbLsn.getFileNumber(lsn) == value) &&
+		       (DbLsn.getFileOffset(lsn) == value));
+	}
+    }
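+
+    /*
+     * Note: nothing below depends on the exact bit layout, but a plausible
+     * packing (an assumption, not a documented contract) is:
+     *
+     *     long lsn = (fileNumber << 32) | (fileOffset & 0xFFFFFFFFL);
+     *
+     * which is why 0xFFFFFFFF in both halves collides with NULL_LSN in the
+     * tests that follow.
+     */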
+
+    public void testComparableEquality() {
+	/* Test equality */
+
+	/* Don't bother with last values[] entry -- it makes NULL_LSN. */
+	int lastValue = values.length - 1;
+	for (int i = 0; i < lastValue; i++) {
+	    long value = values[i];
+	    long lsn1 = DbLsn.makeLsn(value, value);
+	    long lsn2 = DbLsn.makeLsn(value, value);
+	    assertTrue(DbLsn.compareTo(lsn1, lsn2) == 0);
+	}
+
+	/* Check NULL_LSN. */
+	assertTrue(DbLsn.makeLsn(values[lastValue],
+				 values[lastValue]) ==
+		   DbLsn.makeLsn(values[lastValue],
+				 values[lastValue]));
+    }
+
+    public void testComparableNullPointerException() {
+	/* Check that compareTo throws NullPointerException */
+
+	try {
+	    long lsn1 = DbLsn.makeLsn(0, 0);
+	    DbLsn.compareTo(lsn1, DbLsn.NULL_LSN);
+	    fail("compareTo(lsn1, NULL_LSN) didn't throw NullPointerException");
+	} catch (NullPointerException NPE) {
+	}
+
+	try {
+	    long lsn1 = DbLsn.makeLsn(0, 0);
+	    DbLsn.compareTo(DbLsn.NULL_LSN, lsn1);
+	    fail("compareTo(NULL_LSN, lsn1) didn't throw NullPointerException");
+	} catch (NullPointerException NPE) {
+	}
+    }
+
+    public void testComparableInequalityFileNumber() {
+	/* Check for inequality in the file number */
+
+	/* Don't bother with last values[] entry -- it makes NULL_LSN. */
+	int lastValue = values.length - 1;
+	for (int i = 0; i < lastValue; i++) {
+	    long value = values[i];
+	    long lsn1 = DbLsn.makeLsn(value, value);
+	    long lsn2 = DbLsn.makeLsn(0, value);
+	    assertTrue(DbLsn.compareTo(lsn1, lsn2) == 1);
+	    assertTrue(DbLsn.compareTo(lsn2, lsn1) == -1);
+	}
+
+	/* Check against NULL_LSN: compareTo must reject it. */
+	long lsn1 = DbLsn.makeLsn(values[lastValue], values[lastValue]);
+	long lsn2 = DbLsn.makeLsn(0, values[lastValue]);
+	try {
+	    DbLsn.compareTo(lsn1, lsn2);
+	    fail("compareTo(NULL_LSN, lsn2) didn't throw " +
+		 "NullPointerException");
+	} catch (NullPointerException NPE) {
+	}
+
+	try {
+	    DbLsn.compareTo(lsn2, lsn1);
+	    fail("compareTo(lsn2, NULL_LSN) didn't throw " +
+		 "NullPointerException");
+	} catch (NullPointerException NPE) {
+	}
+    }
+
+    public void testComparableInequalityFileOffset() {
+	/* Check for inequality in the file offset */
+
+	for (int i = 0; i < values.length - 1; i++) {
+	    long value = values[i];
+	    long lsn1 = DbLsn.makeLsn(value, value);
+	    long lsn2 = DbLsn.makeLsn(value, 0);
+	    /* Can't compareTo(NULL_LSN). */
+	    if (lsn1 != DbLsn.NULL_LSN &&
+		lsn2 != DbLsn.NULL_LSN) {
+		assertTrue(DbLsn.compareTo(lsn1, lsn2) == 1);
+		assertTrue(DbLsn.compareTo(lsn2, lsn1) == -1);
+	    }
+	}
+    }
+
+    public void testSubtractNoCleaning() {
+        long a = DbLsn.makeLsn(1, 10);
+        long b = DbLsn.makeLsn(3, 40);
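+        /*
+         * With a 100-byte file size, the distance from (1,10) to (3,40) is
+         * the rest of file 1 (100 - 10 = 90), all of file 2 (100), plus 40
+         * bytes of file 3: 90 + 100 + 40 = 230, in either argument order.
+         */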
+        assertEquals(230, DbLsn.getNoCleaningDistance(b, a, 100));
+        assertEquals(230, DbLsn.getNoCleaningDistance(a, b, 100));
+
+        long c = DbLsn.makeLsn(1, 50);
+        assertEquals(40, DbLsn.getNoCleaningDistance(a, c, 100));
+        assertEquals(40, DbLsn.getNoCleaningDistance(c, a, 100));
+    }
+
+    public void testSubtractWithCleaning()
+        throws Exception {
+
+        /* Try with non-consecutive files (due to cleaning). */
+
+        File envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        TestUtils.removeLogFiles("TestSubtract", envHome, false);
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+
+        try {
+            File file1 = new File (envHome, "00000001.jdb");
+            File file2 = new File (envHome, "00000003.jdb");
+            file1.createNewFile();
+            file2.createNewFile();
+            long a = DbLsn.makeLsn(1, 10);
+            long b = DbLsn.makeLsn(3, 40);
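+            /*
+             * File 2 was never created (as if cleaned away), so the
+             * distance from (1,10) to (3,40) skips it entirely:
+             * (100 - 10) + 40 = 130.
+             */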
+            FileManager fileManager =
+                DbInternal.envGetEnvironmentImpl(env).getFileManager();
+            assertEquals(130, DbLsn.getWithCleaningDistance
+			 (b, fileManager, a, 100));
+            assertEquals(130, DbLsn.getWithCleaningDistance
+			 (a, fileManager, b, 100));
+
+            long c = DbLsn.makeLsn(1, 50);
+            assertEquals(40, DbLsn.getWithCleaningDistance
+			 (a, fileManager, c, 100));
+            assertEquals(40, DbLsn.getWithCleaningDistance
+			 (c, fileManager, a, 100));
+        } finally {
+            env.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/util/DbScavengerTest.java b/test/com/sleepycat/je/util/DbScavengerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e3027645236a7ce76d0cd4e6a1e408fe46bf54d2
--- /dev/null
+++ b/test/com/sleepycat/je/util/DbScavengerTest.java
@@ -0,0 +1,651 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DbScavengerTest.java,v 1.21.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.RandomAccessFile;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseNotFoundException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.utilint.DbLsn;
+
+public class DbScavengerTest extends TestCase {
+
+    private static final int TRANSACTIONAL = 1 << 0;
+    private static final int WRITE_MULTIPLE = 1 << 1;
+    private static final int PRINTABLE = 1 << 2;
+    private static final int ABORT_BEFORE = 1 << 3;
+    private static final int ABORT_AFTER = 1 << 4;
+    private static final int CORRUPT_LOG = 1 << 5;
+    private static final int DELETE_DATA = 1 << 6;
+    private static final int AGGRESSIVE = 1 << 7;
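+    /* The flags above are or'ed into the config mask that is decoded by
+     * doScavengerTest(). */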
+
+    private static final int N_DBS = 3;
+    private static final int N_KEYS = 100;
+    private static final int N_DATA_BYTES = 100;
+    private static final int LOG_SIZE = 10000;
+
+    private String envHomeName;
+    private File envHome;
+
+    private Environment env;
+
+    private Database[] dbs = new Database[N_DBS];
+
+    private boolean duplicatesAllowed = true;
+
+    public DbScavengerTest() {
+	envHomeName = System.getProperty(TestUtils.DEST_DIR);
+        envHome = new File(envHomeName);
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+        TestUtils.removeFiles("Setup", envHome, ".dump");
+    }
+
+    public void tearDown()
+	throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.out.println("TearDown: " + e);
+            }
+            env = null;
+        }
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+        TestUtils.removeFiles("Teardown", envHome, ".dump");
+    }
+
+    public void testScavenger1()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE | TRANSACTIONAL |
+			    ABORT_BEFORE | ABORT_AFTER);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger2()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE | TRANSACTIONAL | ABORT_BEFORE);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger3()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE | TRANSACTIONAL | ABORT_AFTER);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger4()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE | TRANSACTIONAL);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger5()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE | WRITE_MULTIPLE | TRANSACTIONAL);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger6()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(PRINTABLE);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger7()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(TRANSACTIONAL | ABORT_BEFORE | ABORT_AFTER);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger8()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(TRANSACTIONAL | ABORT_BEFORE);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger9()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(TRANSACTIONAL);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger10()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(TRANSACTIONAL | ABORT_AFTER);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger11()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(0);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger12()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(CORRUPT_LOG);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger13()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(DELETE_DATA);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
+    public void testScavenger14()
+        throws Throwable {
+
+	try {
+	    doScavengerTest(AGGRESSIVE);
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
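+    /**
+     * Databases created inside an aborted txn must not survive a scavenge
+     * and reload cycle: opening any "abortedDb*" database afterwards must
+     * throw DatabaseNotFoundException.
+     */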
+    public void testScavengerAbortedDbLevelOperations()
+        throws Throwable {
+
+	try {
+	    createEnv(true, true);
+	    boolean doAbort = true;
+	    byte[] dataBytes = new byte[N_DATA_BYTES];
+	    DatabaseEntry key = new DatabaseEntry();
+	    DatabaseEntry data = new DatabaseEntry(dataBytes);
+	    IntegerBinding.intToEntry(1, key);
+	    TestUtils.generateRandomAlphaBytes(dataBytes);
+	    for (int i = 0; i < 2; i++) {
+		Transaction txn = env.beginTransaction(null, null);
+		for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) {
+		    String databaseName = null;
+		    if (doAbort) {
+			databaseName = "abortedDb" + dbCnt;
+		    } else {
+			databaseName = "simpleDb" + dbCnt;
+		    }
+		    DatabaseConfig dbConfig = new DatabaseConfig();
+		    dbConfig.setAllowCreate(true);
+		    dbConfig.setSortedDuplicates(duplicatesAllowed);
+		    dbConfig.setTransactional(true);
+		    if (dbs[dbCnt] != null) {
+			throw new DatabaseException("database already open");
+		    }
+		    Database db =
+			env.openDatabase(txn, databaseName, dbConfig);
+		    dbs[dbCnt] = db;
+		    db.put(txn, key, data);
+		}
+		if (doAbort) {
+		    txn.abort();
+		    dbs = new Database[N_DBS];
+		} else {
+		    txn.commit();
+		}
+		doAbort = !doAbort;
+	    }
+
+	    closeEnv();
+	    createEnv(false, false);
+	    openDbs(false, false, duplicatesAllowed, null);
+	    dumpDbs(false, false);
+
+	    /* Close the environment, delete it completely from the disk. */
+	    closeEnv();
+	    TestUtils.removeLogFiles("doScavengerTest", envHome, false);
+
+	    /* Recreate and reload the environment from the scavenger files. */
+	    createEnv(true, true);
+	    loadDbs();
+
+	    /* Verify that the data is the same as when it was created. */
+	    for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) {
+		String databaseName = "abortedDb" + dbCnt;
+		DatabaseConfig dbConfig = new DatabaseConfig();
+		dbConfig.setAllowCreate(false);
+		try {
+		    env.openDatabase(null, databaseName, dbConfig);
+		    fail("expected DatabaseNotFoundException");
+		} catch (DatabaseNotFoundException DNFE) {
+		    /* Expected. */
+		}
+	    }
+	    closeEnv();
+
+	} catch (Throwable T) {
+	    System.out.println("caught " + T);
+	    T.printStackTrace();
+	    throw T;
+	}
+    }
+
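+    /**
+     * Exercise a full scavenge cycle: create an environment and populate
+     * N_DBS databases (optionally writing twice, aborting around each
+     * write, corrupting log files, or deleting the data), scavenge the log
+     * into .dump files, delete the environment, reload it from the dumps,
+     * and verify the surviving records against the expected data maps.
+     */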
+    private void doScavengerTest(int config)
+	throws DatabaseException, IOException {
+
+	boolean printable = (config & PRINTABLE) != 0;
+	boolean transactional = (config & TRANSACTIONAL) != 0;
+	boolean writeMultiple = (config & WRITE_MULTIPLE) != 0;
+	boolean abortBefore = (config & ABORT_BEFORE) != 0;
+	boolean abortAfter = (config & ABORT_AFTER) != 0;
+	boolean corruptLog = (config & CORRUPT_LOG) != 0;
+	boolean deleteData = (config & DELETE_DATA) != 0;
+	boolean aggressive = (config & AGGRESSIVE) != 0;
+
+	assert transactional ||
+	    (!abortBefore && !abortAfter);
+
+	Map[] dataMaps = new Map[N_DBS];
+	Set<Long> lsnsToCorrupt = new HashSet<Long>();
+	/* Create the environment and some data. */
+	createEnvAndDbs(dataMaps,
+			writeMultiple,
+			transactional,
+			abortBefore,
+			abortAfter,
+			corruptLog,
+			lsnsToCorrupt,
+			deleteData);
+	closeEnv();
+	createEnv(false, false);
+	if (corruptLog) {
+	    corruptFiles(lsnsToCorrupt);
+	}
+	openDbs(false, false, duplicatesAllowed, null);
+	dumpDbs(printable, aggressive);
+
+	/* Close the environment, delete it completely from the disk. */
+	closeEnv();
+        TestUtils.removeLogFiles("doScavengerTest", envHome, false);
+
+	/* Recreate the environment and load it from the scavenger files. */
+	createEnv(true, transactional);
+	loadDbs();
+
+	/* Verify that the data is the same as when it was created. */
+	openDbs(false, false, duplicatesAllowed, null);
+	verifyDbs(dataMaps);
+	closeEnv();
+    }
+
+    private void closeEnv()
+	throws DatabaseException {
+
+	for (int i = 0; i < N_DBS; i++) {
+	    if (dbs[i] != null) {
+		dbs[i].close();
+		dbs[i] = null;
+	    }
+	}
+
+	env.close();
+        env = null;
+    }
+
+    private void createEnv(boolean create, boolean transactional)
+	throws DatabaseException {
+
+	EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+	DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+				 "false");
+        envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+				 "" + LOG_SIZE);
+	envConfig.setTransactional(transactional);
+	envConfig.setAllowCreate(create);
+	env = new Environment(envHome, envConfig);
+    }
+
+    private void createEnvAndDbs(Map[] dataMaps,
+				 boolean writeMultiple,
+				 boolean transactional,
+				 boolean abortBefore,
+				 boolean abortAfter,
+				 boolean corruptLog,
+				 Set<Long> lsnsToCorrupt,
+				 boolean deleteData)
+	throws DatabaseException {
+
+	createEnv(true, transactional);
+	Transaction txn = null;
+	if (transactional) {
+	    txn = env.beginTransaction(null, null);
+	}
+
+	openDbs(true, transactional, duplicatesAllowed, txn);
+
+	if (transactional) {
+	    txn.commit();
+	}
+
+	long lastCorruptedFile = -1;
+	for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) {
+	    Map<Integer, String> dataMap = new HashMap<Integer, String>();
+	    dataMaps[dbCnt] = dataMap;
+	    Database db = dbs[dbCnt];
+
+	    for (int i = 0; i < N_KEYS; i++) {
+		byte[] dataBytes = new byte[N_DATA_BYTES];
+		DatabaseEntry key = new DatabaseEntry();
+		DatabaseEntry data = new DatabaseEntry(dataBytes);
+		IntegerBinding.intToEntry(i, key);
+		TestUtils.generateRandomAlphaBytes(dataBytes);
+
+		boolean corruptedThisEntry = false;
+
+		if (transactional) {
+		    txn = env.beginTransaction(null, null);
+		}
+
+		if (transactional &&
+		    abortBefore) {
+		    assertEquals(OperationStatus.SUCCESS,
+				 db.put(txn, key, data));
+		    txn.abort();
+		    txn = env.beginTransaction(null, null);
+		}
+
+		assertEquals(OperationStatus.SUCCESS,
+			     db.put(txn, key, data));
+		if (corruptLog) {
+		    long currentLsn = getLastLsn();
+		    long fileNumber = DbLsn.getFileNumber(currentLsn);
+		    long fileOffset = DbLsn.getFileOffset(currentLsn);
+		    if (fileOffset > (LOG_SIZE >> 1) &&
+			/* We're writing in the second half of the file. */
+			fileNumber > lastCorruptedFile) {
+			/* Corrupt this file. */
+			lsnsToCorrupt.add(new Long(currentLsn));
+			lastCorruptedFile = fileNumber;
+			corruptedThisEntry = true;
+		    }
+		}
+
+		if (writeMultiple) {
+		    assertEquals(OperationStatus.SUCCESS,
+				 db.delete(txn, key));
+		    assertEquals(OperationStatus.SUCCESS,
+				 db.put(txn, key, data));
+		}
+
+		if (deleteData) {
+		    assertEquals(OperationStatus.SUCCESS,
+				 db.delete(txn, key));
+		    /* Reuse this flag so deleted records are also
+		       excluded from the expected data map. */
+		    corruptedThisEntry = true;
+		}
+
+		if (!corruptedThisEntry) {
+		    dataMap.put(new Integer(i), new String(dataBytes));
+		}
+
+		if (transactional) {
+		    txn.commit();
+		}
+
+		if (transactional &&
+		    abortAfter) {
+		    txn = env.beginTransaction(null, null);
+		    assertEquals(OperationStatus.SUCCESS,
+				 db.put(txn, key, data));
+		    txn.abort();
+		}
+	    }
+	}
+    }
+
+    private void openDbs(boolean create,
+			 boolean transactional,
+			 boolean duplicatesAllowed,
+			 Transaction txn)
+	throws DatabaseException {
+
+	for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) {
+	    String databaseName = "simpleDb" + dbCnt;
+	    DatabaseConfig dbConfig = new DatabaseConfig();
+	    dbConfig.setAllowCreate(create);
+	    dbConfig.setSortedDuplicates(duplicatesAllowed);
+	    dbConfig.setTransactional(transactional);
+	    if (dbs[dbCnt] != null) {
+		throw new DatabaseException("database already open");
+	    }
+	    dbs[dbCnt] = env.openDatabase(txn, databaseName, dbConfig);
+	}
+    }
+
+    private void dumpDbs(boolean printable, boolean aggressive)
+	throws DatabaseException {
+
+	try {
+	    DbScavenger scavenger =
+                new DbScavenger(env, envHomeName, printable, aggressive,
+                                false /* verbose */);
+	    scavenger.dump();
+	} catch (IOException IOE) {
+	    throw new DatabaseException(IOE);
+	}
+    }
+
+    private void loadDbs()
+	throws DatabaseException {
+
+	try {
+	    String dbNameBase = "simpleDb";
+	    for (int i = 0; i < N_DBS; i++) {
+		DbLoad loader = new DbLoad();
+		File file = new File(envHomeName, dbNameBase + i + ".dump");
+		FileInputStream is = new FileInputStream(file);
+		BufferedReader reader =
+		    new BufferedReader(new InputStreamReader(is));
+		loader.setEnv(env);
+		loader.setInputReader(reader);
+		loader.setNoOverwrite(false);
+		loader.setDbName(dbNameBase + i);
+		loader.load();
+		is.close();
+	    }
+        } catch (IOException IOE) {
+	    throw new DatabaseException(IOE);
+	}
+    }
+
+    private void verifyDbs(Map[] dataMaps)
+	throws DatabaseException {
+
+	for (int i = 0; i < N_DBS; i++) {
+	    Map dataMap = dataMaps[i];
+	    Cursor cursor = dbs[i].openCursor(null, null);
+	    DatabaseEntry key = new DatabaseEntry();
+	    DatabaseEntry data = new DatabaseEntry();
+	    while (cursor.getNext(key, data, null) ==
+		   OperationStatus.SUCCESS) {
+		Integer keyInt =
+		    new Integer(IntegerBinding.entryToInt(key));
+		String databaseString = new String(data.getData());
+		String originalString = (String) dataMap.get(keyInt);
+		if (originalString == null) {
+		    fail("couldn't find " + keyInt);
+		} else if (databaseString.equals(originalString)) {
+		    dataMap.remove(keyInt);
+		} else {
+		    fail(" Mismatch: key=" + keyInt +
+			 " Expected: " + originalString +
+			 " Found: " + databaseString);
+		}
+	    }
+
+	    if (dataMap.size() > 0) {
+		fail("entries still remain");
+	    }
+
+	    cursor.close();
+	}
+    }
+
+    private static DumpFileFilter dumpFileFilter = new DumpFileFilter();
+
+    static class DumpFileFilter implements FilenameFilter {
+
+	/**
+	 * Accept files of this format:
+	 * *.dump
+	 */
+	public boolean accept(File dir, String name) {
+	    StringTokenizer tokenizer = new StringTokenizer(name, ".");
+	    /* There should be two parts. */
+	    if (tokenizer.countTokens() == 2) {
+		tokenizer.nextToken();
+		String fileSuffix = tokenizer.nextToken();
+
+		/* Check the suffix. */
+		if (fileSuffix.equals("dump")) {
+		    return true;
+		}
+	    }
+
+	    return false;
+	}
+    }
+
+    private long getLastLsn()
+	throws DatabaseException {
+
+	return DbInternal.envGetEnvironmentImpl(env).
+	    getFileManager().getLastUsedLsn();
+    }
+
+    private void corruptFiles(Set<Long> lsnsToCorrupt)
+	throws DatabaseException {
+
+	Iterator<Long> iter = lsnsToCorrupt.iterator();
+	while (iter.hasNext()) {
+	    long lsn = iter.next().longValue();
+	    corruptFile(DbLsn.getFileNumber(lsn),
+			DbLsn.getFileOffset(lsn));
+	}
+    }
+
+    private void corruptFile(long fileNumber, long fileOffset)
+	throws DatabaseException {
+
+	String fileName = DbInternal.envGetEnvironmentImpl(env).
+	    getFileManager().getFullFileName(fileNumber,
+					     FileManager.JE_SUFFIX);
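+	/*
+	 * Bump a single byte at the given offset; this presumably
+	 * invalidates the checksum of the log entry written there.
+	 */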
+	/*
+	System.out.println("corrupting 1 byte at " +
+			   DbLsn.makeLsn(fileNumber, fileOffset));
+	*/
+	try {
+	    RandomAccessFile raf = new RandomAccessFile(fileName, "rw");
+	    raf.seek(fileOffset);
+	    int current = raf.read();
+	    raf.seek(fileOffset);
+	    raf.write(current + 1);
+	    raf.close();
+	} catch (IOException IOE) {
+	    throw new DatabaseException(IOE);
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/util/DebugRecordTest.java b/test/com/sleepycat/je/util/DebugRecordTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..378a6510ae3f99c82a9a14dd1702dea0709362f3
--- /dev/null
+++ b/test/com/sleepycat/je/util/DebugRecordTest.java
@@ -0,0 +1,229 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DebugRecordTest.java,v 1.51.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.ParsePosition;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.logging.Level;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.SearchFileReader;
+import com.sleepycat.je.recovery.RecoveryInfo;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Tracer;
+import com.sleepycat.je.utilint.TracerFormatter;
+
+public class DebugRecordTest extends TestCase {
+    private File envHome;
+    private EnvironmentImpl env;
+
+    public DebugRecordTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        env = null;
+    }
+
+    public void setUp()
+	throws IOException {
+
+        TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles(envHome, new InfoFileFilter());
+    }
+
+    public void tearDown()
+	throws IOException {
+
+        TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX);
+        TestUtils.removeFiles(envHome, new InfoFileFilter());
+    }
+
+    public void testDebugLogging()
+	throws DatabaseException, IOException {
+
+        try {
+
+            /*
+	     * Turn on the txt file and db log logging, turn off the console.
+	     */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setConfigParam
+		(EnvironmentParams.JE_LOGGING_FILE.getName(), "true");
+            envConfig.setConfigParam
+		(EnvironmentParams.JE_LOGGING_CONSOLE.getName(),
+		 "false");
+            envConfig.setConfigParam
+		(EnvironmentParams.JE_LOGGING_LEVEL.getName(), "CONFIG");
+            envConfig.setConfigParam
+		(EnvironmentParams.JE_LOGGING_DBLOG.getName(), "true");
+            envConfig.setConfigParam
+		(EnvironmentParams.NODE_MAX.getName(), "6");
+	    envConfig.setAllowCreate(true);
+            /* Disable noisy UtilizationProfile database creation. */
+            DbInternal.setCreateUP(envConfig, false);
+            /* Don't run the cleaner without a UtilizationProfile. */
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+	
+            env = new EnvironmentImpl(envHome,
+                                      envConfig,
+                                      null /*sharedCacheEnv*/,
+                                      false /*replicationIntended*/);
+
+            List<Tracer> expectedRecords = new ArrayList<Tracer>();
+
+            /* Recovery itself will log two messages. */
+            RecoveryInfo info = new RecoveryInfo();
+            expectedRecords.add(new Tracer("Recovery w/no files."));
+            expectedRecords.add(new Tracer
+				("Checkpoint 1: source=recovery" +
+				 " success=true nFullINFlushThisRun=0" +
+				 " nDeltaINFlushThisRun=0"));
+            expectedRecords.add(new Tracer("Recovery finished: "  +
+					   info.toString()));
+
+            /* Log a message. */
+            Tracer.trace(Level.INFO, env, "hi there");
+            expectedRecords.add(new Tracer("hi there"));
+
+            /* Log an exception. */
+            DatabaseException e = new DatabaseException("fake exception");
+            Tracer.trace(env, "DebugRecordTest", "testException", "foo", e);
+            expectedRecords.add(new Tracer("foo\n" + Tracer.getStackTrace(e)));
+
+            /* Flush the log to disk and close the logger. */
+            env.getLogManager().flush();
+            env.getFileManager().clear();
+            env.closeLogger();
+
+            /* Verify. */
+            checkDatabaseLog(expectedRecords);
+            checkTextFile(expectedRecords);
+
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
+
+    /**
+     * Check what's in the database log.
+     */
+    private void checkDatabaseLog(List<Tracer> expectedList)
+        throws DatabaseException, IOException {
+
+        SearchFileReader searcher =
+            new SearchFileReader(env, 1000, true, DbLsn.NULL_LSN,
+				 DbLsn.NULL_LSN, LogEntryType.LOG_TRACE);
+
+        int numSeen = 0;
+        while (searcher.readNextEntry()) {
+            Tracer dRec = (Tracer) searcher.getLastObject();
+            assertEquals("Should see this as " + numSeen + " record: ",
+			 expectedList.get(numSeen).getMessage(),
+                         dRec.getMessage());
+            numSeen++;
+        }
+
+        assertEquals("Should see this many debug records",
+                     expectedList.size(), numSeen);
+    }
+
+    /**
+     * Check what's in the text file.
+     */
+    private void checkTextFile(List<Tracer> expectedList)
+        throws IOException {
+
+        FileReader fr = null;
+        BufferedReader br = null;
+        try {
+            String textFileName = envHome + File.separator + "je.info.0";
+            fr = new FileReader(textFileName);
+            br = new BufferedReader(fr);
+
+            String line = br.readLine();
+            int numSeen = 0;
+
+            /*
+	     * Read the file, checking only lines that start with valid Levels.
+	     */
+            while (line != null) {
+                try {
+                    /* The line should start with a valid date. */
+                    ParsePosition pp = new ParsePosition(0);
+                    DateFormat ff = TracerFormatter.makeDateFormat();
+                    ff.parse(line, pp);
+
+                    /* There should be a java.util.logging.level next. */
+                    int dateEnd = pp.getIndex();
+                    int levelEnd = line.indexOf(" ", dateEnd + 1);
+                    String possibleLevel = line.substring(dateEnd + 1,
+                                                          levelEnd);
+                    Level.parse(possibleLevel);
+
+                    String expected =
+                        expectedList.get(numSeen).getMessage();
+                    StringBuffer seen = new StringBuffer();
+                    seen.append(line.substring(levelEnd + 1));
+                    /*
+                     * Assemble the log message by reading the right number
+                     * of lines
+                     */
+                    StringTokenizer st =
+                        new StringTokenizer(expected,
+                                            Character.toString('\n'), false);
+
+                    for (int i = 1; i < st.countTokens(); i++) {
+                        seen.append('\n');
+                        String l = br.readLine();
+                        seen.append(l);
+                        if (i == (st.countTokens() -1)) {
+                            seen.append('\n');
+                        }
+                    }
+                    /* XXX, diff of multiline stuff isn't right yet. */
+                    if (st.countTokens() == 1) {
+                        assertEquals("Line " + numSeen + " should be the same",
+                                     expected, seen.toString());
+                    }
+                    numSeen++;
+                } catch (Exception e) {
+                    /* Skip this line, not a message. */
+                }
+                line = br.readLine();
+            }
+            assertEquals("Should see this many debug records",
+                         expectedList.size(), numSeen);
+        } finally {
+	    if (br != null) {
+		br.close();
+	    }
+
+	    if (fr != null) {
+		fr.close();
+	    }
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/util/HexFormatterTest.java b/test/com/sleepycat/je/util/HexFormatterTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e03c29450c77cd369337114b13972385ba310b5b
--- /dev/null
+++ b/test/com/sleepycat/je/util/HexFormatterTest.java
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: HexFormatterTest.java,v 1.14.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.utilint.HexFormatter;
+
+/**
+ * Tests HexFormatter, the trivial formatting class that sticks leading 0's
+ * on the front of a hex number.
+ */
+public class HexFormatterTest extends TestCase {
+    public void testFormatLong() {
+	assertTrue(HexFormatter.formatLong(0).equals("0x0000000000000000"));
+	assertTrue(HexFormatter.formatLong(1).equals("0x0000000000000001"));
+	assertTrue(HexFormatter.formatLong(0x1234567890ABCDEFL).equals("0x1234567890abcdef"));
+	assertTrue(HexFormatter.formatLong(0x1234567890L).equals("0x0000001234567890"));
+	assertTrue(HexFormatter.formatLong(0xffffffffffffffffL).equals("0xffffffffffffffff"));
+    }
+}
diff --git a/test/com/sleepycat/je/util/InfoFileFilter.java b/test/com/sleepycat/je/util/InfoFileFilter.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e1f5baf184db0c82cc300a7c770e64f352709fc
--- /dev/null
+++ b/test/com/sleepycat/je/util/InfoFileFilter.java
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: InfoFileFilter.java,v 1.13.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.StringTokenizer;
+
+public class InfoFileFilter implements FilenameFilter {
+
+    /**
+     * Accept files of this format:
+     * je.info.#
+     */
+    public boolean accept(File dir, String name) {
+        boolean ok = false;
+        StringTokenizer tokenizer = new StringTokenizer(name, ".");
+        // There should be three parts: "je", "info", and a number.
+        if (tokenizer.countTokens() == 3) {
+            String filePrefix = tokenizer.nextToken();
+            String fileSuffix = tokenizer.nextToken();
+            String repeat = tokenizer.nextToken();
+
+            // Check the prefix and the suffix.
+            if (filePrefix.equals("je") && fileSuffix.equals("info")) {
+                // The last part should be a number
+                try {
+                    Integer.parseInt(repeat);
+                    ok = true;
+                } catch (NumberFormatException e) {
+                    ok = false;
+                }
+            }
+        }
+
+        return ok;
+    }
+}
+
diff --git a/test/com/sleepycat/je/util/MiniPerf.java b/test/com/sleepycat/je/util/MiniPerf.java
new file mode 100644
index 0000000000000000000000000000000000000000..6e55001e347ffb0a733e346cb1130794608f6231
--- /dev/null
+++ b/test/com/sleepycat/je/util/MiniPerf.java
@@ -0,0 +1,171 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: MiniPerf.java,v 1.23.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
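+/**
+ * Hand-run micro-benchmark: invoke with a record count to create and
+ * populate a database, then invoke with no arguments to time full cursor
+ * iterations and repeated getSearchKey() lookups against it.
+ */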
+public class MiniPerf {
+
+    private File envHome;
+    private Environment exampleEnv;
+    private Database exampleDb;
+    private Cursor cursor;
+
+    static int nKeys;
+
+    public static void main(String[] argv)
+	throws DatabaseException, IOException, NumberFormatException {
+
+	boolean create = false;
+	if (argv.length > 0) {
+	    nKeys = Integer.parseInt(argv[0]);
+	    create = true;
+	} else {
+	    create = false;
+	}
+	new MiniPerf().doit(create);
+    }
+
+    void doit(boolean create)
+	throws DatabaseException, IOException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        setUp(create);
+	testIterationPerformance(create);
+	tearDown();
+    }
+
+    public void setUp(boolean create)
+	throws IOException, DatabaseException {
+
+	if (create) {
+	    TestUtils.removeLogFiles("Setup", envHome, false);
+	}
+
+        // Set up an environment
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(create);
+        exampleEnv = new Environment(envHome, envConfig);
+
+        // Set up a database
+        String databaseName = "simpleDb";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        exampleDb = exampleEnv.openDatabase(null, databaseName, dbConfig);
+
+        // Set up cursors
+        cursor = exampleDb.openCursor(null, null);
+    }
+
+    public void tearDown()
+	throws IOException, DatabaseException {
+
+	exampleEnv.sync();
+
+	if (exampleDb != null) {
+	    exampleDb.close();
+	    exampleDb = null;
+	}
+        if (exampleEnv != null) {
+	    try {
+		exampleEnv.close();
+	    } catch (DatabaseException DE) {
+		/*
+		 * Ignore this exception.  It's caused by us calling
+		 * tearDown() within the test.  Each tearDown() call
+		 * forces the database closed.  So when the call from
+		 * junit comes along, it's already closed.
+		 */
+	    }
+            exampleEnv = null;
+        }
+
+        cursor = null;
+    }
+
+    public void testIterationPerformance(boolean create)
+        throws IOException, DatabaseException {
+
+	final int N_KEY_BYTES = 10;
+	final int N_DATA_BYTES = 20;
+
+	if (create) {
+	    System.out.print("Creating...");
+	    for (int i = 0; i < nKeys; i++) {
+		if (i % 100000 == 0) {
+		    System.out.println(i);
+		}
+		byte[] key = new byte[N_KEY_BYTES];
+		TestUtils.generateRandomAlphaBytes(key);
+		String keyString = new String(key);
+
+		byte[] data = new byte[N_DATA_BYTES];
+		TestUtils.generateRandomAlphaBytes(data);
+		String dataString = new String(data);
+		cursor.put(new StringDbt(keyString),
+                           new StringDbt(dataString));
+	    }
+	    System.out.print("done.");
+	} else {
+	    String middleKey = null;
+	    int middleEntry = -1;
+	    int count = 0;
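+	    /*
+	     * The first pass only counts records (middleEntry is still -1);
+	     * later passes capture the key near the middle of the database
+	     * for the getSearchKey() timing loop below.
+	     */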
+	    for (int i = 0; i < 3; i++) {
+		System.out.print("Iterating...");
+		StringDbt foundKey = new StringDbt();
+		StringDbt foundData = new StringDbt();
+		
+		long startTime = System.currentTimeMillis();
+		OperationStatus status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+
+		count = 0;
+		while (status == OperationStatus.SUCCESS) {
+		    status =
+			cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+		    count++;
+		    if (count == middleEntry) {
+			middleKey = foundKey.getString();
+		    }
+		}
+		long endTime = System.currentTimeMillis();
+		System.out.println("done.");
+		System.out.println(count + " records found.");
+		middleEntry = count >> 1;
+		System.out.println((endTime - startTime) + " millis");
+	    }
+
+	    System.out.println("Middle key: " + middleKey);
+
+	    StringDbt searchKey = new StringDbt(middleKey);
+	    StringDbt searchData = new StringDbt();
+	    for (int j = 0; j < 3; j++) {
+		long startTime = System.currentTimeMillis();
+		for (int i = 0; i < count; i++) {
+		    if (cursor.getSearchKey(searchKey,
+					    searchData,
+					    LockMode.DEFAULT) != OperationStatus.SUCCESS) {
+			System.out.println("non-0 return");
+		    }
+		}
+		long endTime = System.currentTimeMillis();
+		System.out.println((endTime - startTime) + " millis");
+	    }
+	}
+    }
+}
diff --git a/test/com/sleepycat/je/util/PropUtilTest.java b/test/com/sleepycat/je/util/PropUtilTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..ab1ea0f038529a11480d6e5d396366019f6b0146
--- /dev/null
+++ b/test/com/sleepycat/je/util/PropUtilTest.java
@@ -0,0 +1,71 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PropUtilTest.java,v 1.22.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.utilint.PropUtil;
+
+public class PropUtilTest extends TestCase {
+    public void testGetBoolean() {
+        Properties props = new Properties();
+
+        props.setProperty("foo", "true");
+        props.setProperty("bar", "True");
+        props.setProperty("baz", "false");
+
+        assertTrue(PropUtil.getBoolean(props, "foo"));
+        assertTrue(PropUtil.getBoolean(props, "bar"));
+        assertFalse(PropUtil.getBoolean(props, "baz"));
+    }
+
+    public void testValidate()
+        throws DatabaseException {
+
+        Properties props = new Properties();
+
+        props.setProperty("foo", "true");
+        props.setProperty("bar", "True");
+        props.setProperty("baz", "false");
+
+        Set<String> allowedSet = new HashSet<String>();
+        allowedSet.add("foo");
+        allowedSet.add("bar");
+        allowedSet.add("baz");
+
+        PropUtil.validateProps(props, allowedSet, "test");
+
+        // test negative case
+        allowedSet.remove("foo");
+
+        try {
+            PropUtil.validateProps(props, allowedSet, "test");
+            fail();
+        } catch (DatabaseException e) {
+            //System.out.println(e);
+            assertEquals(DatabaseException.getVersionHeader() +
+                         "foo is not a valid property for test",
+                         e.getMessage());
+        }
+    }
+
+    public void testMicrosToMillis() {
+
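+        /* The conversion rounds up: any nonzero number of microseconds
+         * maps to at least one millisecond. */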
+        assertEquals(0, PropUtil.microsToMillis(0));
+        assertEquals(1, PropUtil.microsToMillis(1));
+        assertEquals(1, PropUtil.microsToMillis(999));
+        assertEquals(1, PropUtil.microsToMillis(1000));
+        assertEquals(2, PropUtil.microsToMillis(1001));
+    }
+}
diff --git a/test/com/sleepycat/je/util/RecordSearch.java b/test/com/sleepycat/je/util/RecordSearch.java
new file mode 100644
index 0000000000000000000000000000000000000000..98604f5420da58d652596b121a29e00afa797ceb
--- /dev/null
+++ b/test/com/sleepycat/je/util/RecordSearch.java
@@ -0,0 +1,164 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RecordSearch.java,v 1.28.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.util.logging.Level;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * RecordSearch is a debugging aid that searches the database for a given
+ * record.
+ */
+public class RecordSearch {
+
+    public static void main(String[] argv) {
+        try {
+            int whichArg = 0;
+            DatabaseEntry searchKey = null;
+            String dbName = null;
+            String keyVal = null;
+            String levelVal = "SEVERE";
+            boolean dumpAll = false;
+	    boolean searchKeyRange = false;
+
+            /*
+             * Usage: -h <envHomeDir> (optional)
+             *        -db <db name>
+             *        -ks <key to search for, as a string>
+             *        -ksr <key to range search for, as a string>
+             *        -a  <if true, dump the whole db>
+             *        -l  <logging level>
+             */
+            String envHome = "."; // default to current directory
+            while (whichArg < argv.length) {
+                String nextArg = argv[whichArg];
+
+                if (nextArg.equals("-h")) {
+                    whichArg++;
+                    envHome = CmdUtil.getArg(argv, whichArg);
+                } else if (nextArg.equals("-db")) {
+                    whichArg++;
+                    dbName = CmdUtil.getArg(argv, whichArg);
+                } else if (nextArg.equals("-ks")) {
+                    whichArg++;
+                    keyVal = CmdUtil.getArg(argv, whichArg);
+                    searchKey = new DatabaseEntry(keyVal.getBytes());
+                } else if (nextArg.equals("-ksr")) {
+                    whichArg++;
+                    keyVal = CmdUtil.getArg(argv, whichArg);
+                    searchKey = new DatabaseEntry(keyVal.getBytes());
+		    searchKeyRange = true;
+                } else if (nextArg.equals("-l")) {
+                    whichArg++;
+                    levelVal = CmdUtil.getArg(argv, whichArg);
+                    Level.parse(levelVal); // sanity check level
+                } else if (nextArg.equals("-a")) {
+                    whichArg++;
+                    String dumpVal = CmdUtil.getArg(argv, whichArg);
+                    dumpAll = Boolean.valueOf(dumpVal).booleanValue();
+                } else {
+                    throw new IllegalArgumentException
+                        (nextArg + " is not a supported option.");
+                }
+                whichArg++;
+            }
+
+            if (dbName == null) {
+                usage();
+                System.exit(1);
+            }
+
+            /* Make a read only environment */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+            // Don't debug log to the database log.
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_DBLOG.getName(), "false");
+
+            // Do debug log to the console
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_CONSOLE.getName(), "true");
+
+            // Set logging level to only show errors
+            envConfig.setConfigParam
+                (EnvironmentParams.JE_LOGGING_LEVEL.getName(), levelVal);
+
+            envConfig.setReadOnly(true);
+
+            Environment envHandle = new Environment(new File(envHome),
+						    envConfig);
+
+            /* Open the db. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            DbInternal.setUseExistingConfig(dbConfig, true);
+            Database db = envHandle.openDatabase(null, dbName, dbConfig);
+
+            DatabaseEntry foundData = new DatabaseEntry();
+            if (dumpAll) {
+                Cursor cursor = db.openCursor(null, null);
+                DatabaseEntry foundKey = new DatabaseEntry();
+                int i = 0;
+                while (cursor.getNext(foundKey, foundData,
+                                      LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+                    System.out.println(i + ":key=" +
+                                       new String(foundKey.getData()));
+                    i++;
+                }
+		cursor.close();
+            } else if (searchKeyRange) {
+                /* Range Search for the key. */
+		Cursor cursor = db.openCursor(null, null);
+		OperationStatus status = cursor.getSearchKeyRange(searchKey,
+								  foundData,
+								  LockMode.DEFAULT);
+		cursor.close();
+                System.out.println("Range Search for key " + keyVal +
+                                   " status = " + status + " => " +
+				   new String(searchKey.getData()));
+            } else {
+                /* Search for the key. */
+                OperationStatus status = db.get(null, searchKey, foundData,
+						LockMode.DEFAULT);
+                System.out.println("Search for key " + keyVal +
+                                   " status = " + status);
+	    }
+            db.close();
+            envHandle.close();
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.out.println(e.getMessage());
+            usage();
+            System.exit(1);
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: RecordSearch");
+        System.out.println("  -h <environment home>");
+        System.out.println("  -a <true if dump all>");
+        System.out.println("  -db <db name>");
+        System.out.println("  -l <logging level>");
+        System.out.println("  -ks <key to search for, as a string>");
+        System.out.println("  -ksr <key to range search for, as a string>");
+    }
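+
+    /*
+     * Example invocation (values are illustrative only):
+     *
+     *   java RecordSearch -h /tmp/envHome -db myDb -ks someKey -l SEVERE
+     */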
+}
diff --git a/test/com/sleepycat/je/util/StringDbt.java b/test/com/sleepycat/je/util/StringDbt.java
new file mode 100644
index 0000000000000000000000000000000000000000..48cb7a56d3f250fd2bed77aad052f37b74140748
--- /dev/null
+++ b/test/com/sleepycat/je/util/StringDbt.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: StringDbt.java,v 1.15.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import com.sleepycat.je.DatabaseEntry;
+
+public class StringDbt extends DatabaseEntry {
+    public StringDbt() {
+    }
+
+    public StringDbt(String value) {
+        setString(value);
+    }
+
+    public StringDbt(byte[] value) {
+        setData(value);
+    }
+
+    public void setString(String value) {
+        byte[] data = value.getBytes();
+        setData(data);
+    }
+
+    public String getString() {
+        return new String(getData(), 0, getSize());
+    }
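+
+    /*
+     * Illustrative round trip: new StringDbt("abc").getString() returns
+     * "abc"; both directions use the platform default charset.
+     */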
+
+    public String toString() {
+        return getString();
+    }
+}
+
diff --git a/test/com/sleepycat/je/util/TestUtils.java b/test/com/sleepycat/je/util/TestUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..cce35549af110fb17f9cb4cecf63a4c0b06fbb1e
--- /dev/null
+++ b/test/com/sleepycat/je/util/TestUtils.java
@@ -0,0 +1,568 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestUtils.java,v 1.88.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.text.NumberFormat;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ExceptionEvent;
+import com.sleepycat.je.ExceptionListener;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.util.test.SharedTestUtils;
+
+public class TestUtils {
+    public static String DEST_DIR = SharedTestUtils.DEST_DIR;
+    public static String NO_SYNC = SharedTestUtils.NO_SYNC;
+
+    public static final String LOG_FILE_NAME = "00000000.jdb";
+
+    public static final StatsConfig FAST_STATS;
+
+    static {
+        FAST_STATS = new StatsConfig();
+        FAST_STATS.setFast(true);
+    }
+
+    private static final boolean DEBUG = true;
+    private static Random rnd = new Random();
+
+    public void debugMsg(String message) {
+
+        if (DEBUG) {
+            System.out.println
+                (Thread.currentThread().toString() + " " + message);
+        }
+    }
+
+    static public void setRandomSeed(int seed) {
+
+        rnd = new Random(seed);
+    }
+
+    static public void generateRandomAlphaBytes(byte[] bytes) {
+
+        byte[] aAndZ = "AZ".getBytes();
+        int range = aAndZ[1] - aAndZ[0] + 1;
+
+        for (int i = 0; i < bytes.length; i++) {
+            bytes[i] = (byte) (rnd.nextInt(range) + aAndZ[0]);
+        }
+    }
+
+    static public void checkLatchCount() {
+        TestCase.assertTrue(LatchSupport.countLatchesHeld() == 0);
+    }
+
+    static public void printLatchCount(String msg) {
+        System.out.println(msg + " : " + LatchSupport.countLatchesHeld());
+    }
+
+    static public void printLatches(String msg) {
+        System.out.println(msg + " : ");
+        LatchSupport.dumpLatchesHeld();
+    }
+
+    /**
+     * Generate a synthetic base 26 four byte alpha key from an int.
+     * The bytes of the key are between 'A' and 'Z', inclusive.  0 maps
+     * to 'AAAA', 1 to 'AAAB', etc.
+     */
+    static public int alphaKey(int i) {
+
+        int ret = 0;
+        for (int j = 0; j < 4; j++) {
+            byte b = (byte) (i % 26);
+            ret <<= 8;
+            ret |= (b + 65);
+            i /= 26;
+        }
+
+        return ret;
+    }
+
+    /**
+     * Marshall an unsigned int (long) into a four byte buffer, least
+     * significant byte first.
+     */
+    static public void putUnsignedInt(byte[] buf, long value) {
+
+        int i = 0;
+        buf[i++] = (byte) (value >>> 0);
+        buf[i++] = (byte) (value >>> 8);
+        buf[i++] = (byte) (value >>> 16);
+        buf[i] =   (byte) (value >>> 24);
+    }
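+
+    /*
+     * Illustrative sketch: alphaKey and putUnsignedInt are meant to be
+     * used together.  Because putUnsignedInt writes the least significant
+     * byte first,
+     *
+     *   byte[] keyBuf = new byte[4];
+     *   putUnsignedInt(keyBuf, alphaKey(1));
+     *
+     * leaves keyBuf holding {'A','A','A','B'}, i.e. the string "AAAB".
+     */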
+
+    /**
+     * All flavors of removeLogFiles should check whether the remove has
+     * been disabled.  (Used for debugging, so that the tester can dump the
+     * log files.)
+     */
+    private static boolean removeDisabled() {
+
+        String doRemove = System.getProperty("removeLogFiles");
+        return ((doRemove != null) && doRemove.equalsIgnoreCase("false"));
+    }
+
+    /**
+     * Remove je log files from the home directory.  Disabled if the unit
+     * test is run with -DremoveLogFiles=false.
+     * @param msg prefix for error messages
+     * @param envFile environment directory
+     */
+    public static void removeLogFiles(String msg,
+                                      File envFile,
+                                      boolean checkRemove)
+        throws IOException {
+
+        removeFiles(msg, envFile, FileManager.JE_SUFFIX, checkRemove);
+    }
+
+    /**
+     * Remove files with this suffix from the je home directory
+     * @param msg prefix for error messages
+     * @param envFile environment directory
+     * @param suffix files with this suffix will be removed
+     */
+    public static void removeFiles(String msg,
+				   File envFile,
+				   String suffix)
+        throws IOException {
+
+        removeFiles(msg, envFile, suffix, false);
+    }
+
+    /**
+     * Remove files with this suffix from the je home directory
+     * @param msg prefix for error messages
+     * @param envFile environment directory
+     * @param suffix files with this suffix will be removed
+     * @param checkRemove if true, check the -DremoveLogFiles system
+     *  property before removing.
+     */
+    public static void removeFiles(String msg,
+                                   File envFile,
+                                   String suffix,
+                                   boolean checkRemove)
+        throws IOException {
+
+        if (checkRemove && removeDisabled()) {
+            return;
+        }
+
+        String[] suffixes = new String[] { suffix };
+        String[] names = FileManager.listFiles(envFile, suffixes);
+
+        /* Clean up any target files in this directory. */
+        for (int i = 0; i < names.length; i++) {
+            File oldFile = new File(envFile, names[i]);
+            boolean done = oldFile.delete();
+            assert done :
+                msg + " couldn't delete " + names[i] + " out of " +
+                names.length + " files";
+        }
+    }
+
+    /**
+     * Remove files with the pattern indicated by the filename filter from the
+     * environment home directory.
+     * Note that BadFileFilter looks for this pattern: NNNNNNNN.bad.#
+     *           InfoFileFilter looks for this pattern: je.info.#
+     * @param envFile environment directory
+     */
+    public static void removeFiles(File envFile, FilenameFilter filter)
+        throws IOException {
+
+        if (removeDisabled()) {
+            return;
+        }
+
+        File[] targetFiles = envFile.listFiles(filter);
+
+        // Clean up any target files in this directory
+        for (int i = 0; i < targetFiles.length; i++) {
+            boolean done = targetFiles[i].delete();
+            if (!done) {
+                System.out.println
+                    ("Warning, couldn't delete " + targetFiles[i] +
+                     " out of " + targetFiles.length + " files");
+            }
+        }
+    }
+
+    /**
+     * Useful utility for generating byte arrays with a known order.
+     * Vary the length just to introduce more variability.
+     * @return a byte array of length val % 10 (at least 4) holding
+     * the value of "val" in its first four bytes
+     */
+    public static byte[] getTestArray(int val) {
+
+        int length = val % 10;
+        length = length < 4 ? 4 : length;
+        byte[] test = new byte[length];
+        test[3] = (byte) ((val >>> 0) & 0xff);
+        test[2] = (byte) ((val >>> 8) & 0xff);
+        test[1] = (byte) ((val >>> 16) & 0xff);
+        test[0] = (byte) ((val >>> 24) & 0xff);
+        return test;
+    }
+
+    /**
+     * Return the value of a test data array generated with getTestArray
+     * as an int
+     */
+    public static int getTestVal(byte[] testArray) {
+
+        int val = 0;
+        val |= (testArray[3] & 0xff);
+        val |= ((testArray[2] & 0xff) << 8);
+        val |= ((testArray[1] & 0xff) << 16);
+        val |= ((testArray[0] & 0xff) << 24);
+        return val;
+    }
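+
+    /*
+     * Round-trip sketch (illustrative): getTestVal inverts getTestArray,
+     * so getTestVal(getTestArray(42)) == 42; both methods use the first
+     * four bytes, most significant byte first.
+     */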
+
+    /**
+     * @return length and data of a byte array, printed as decimal numbers
+     */
+    public static String dumpByteArray(byte[] b) {
+
+        StringBuffer sb = new StringBuffer();
+        sb.append("<byteArray len = ");
+        sb.append(b.length);
+        sb.append(" data = \"");
+        for (int i = 0; i < b.length; i++) {
+            sb.append(b[i]).append(",");
+        }
+        sb.append("\"/>");
+        return sb.toString();
+    }
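+
+    /*
+     * For example, dumpByteArray(new byte[] {1, 2}) returns:
+     *   <byteArray len = 2 data = "1,2,"/>
+     */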
+
+    /**
+     * @return a copy of the passed in byte array
+     */
+    public static byte[] byteArrayCopy(byte[] ba) {
+
+        int len = ba.length;
+        byte[] ret = new byte[len];
+        System.arraycopy(ba, 0, ret, 0, len);
+        return ret;
+    }
+
+    /*
+     * Check that the stored memory count for all INs on the inlist
+     * matches their computed count. The environment mem usage check
+     * may be run with assertions or not.
+     *
+     * In a multithreaded environment (or one with daemons running),
+     * you can't be sure that the cached size will equal the calculated size.
+     *
+     * Nodes, txns, and locks are all counted within the memory budget.
+     */
+    public static long validateNodeMemUsage(EnvironmentImpl envImpl,
+                                            boolean assertOnError)
+        throws DatabaseException {
+
+        long total = tallyNodeMemUsage(envImpl);
+        long nodeCacheUsage = envImpl.getMemoryBudget().getTreeMemoryUsage();
+        NumberFormat formatter = NumberFormat.getNumberInstance();
+        if (assertOnError) {
+            assert (total == nodeCacheUsage) :
+                  "calculatedTotal=" + formatter.format(total) +
+                  " envCacheUsage=" + formatter.format(nodeCacheUsage);
+        } else {
+            if (DEBUG) {
+                if (nodeCacheUsage != total) {
+                    long diff = Math.abs(nodeCacheUsage - total);
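+                    /* Report only when the discrepancy exceeds 5% of the
+                       cache usage. */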
+                    if (((double) diff / nodeCacheUsage) > .05) {
+                        System.out.println("calculatedTotal=" +
+                                           formatter.format(total) +
+                                           " envCacheUsage=" +
+                                           formatter.format(nodeCacheUsage));
+                    }
+                }
+            }
+        }
+
+        return nodeCacheUsage;
+    }
+
+    public static long tallyNodeMemUsage(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        long total = 0;
+        for (IN in : envImpl.getInMemoryINs()) {
+            in.latch();
+            try {
+
+                /*
+                 * verifyMemorySize cannot currently be called for BINs
+                 * containing FileSummaryLNs because the parent IN's in-memory
+                 * size does not reflect changes to the FileSummaryLN's
+                 * ObsoleteOffsets.
+                 */
+                if ((in.getDatabase() !=
+                     envImpl.getUtilizationProfile().getFileSummaryDb()) ||
+                    !(in instanceof BIN)) {
+                    assert in.verifyMemorySize():
+                        "in nodeId=" + in.getNodeId() +
+                        ' ' + in.getClass().getName();
+                }
+                total += in.getBudgetedMemorySize();
+            } finally {
+                in.releaseLatch();
+            }
+        }
+        return total;
+    }
+
+    /**
+     * Called by each unit test to enforce isolation level settings specified
+     * in the isolationLevel system property.  Other system properties or
+     * default settings may be applied in the future.
+     */
+    public static EnvironmentConfig initEnvConfig() {
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        String val = System.getProperty("isolationLevel");
+        if (val != null && val.length() > 0) {
+            if ("serializable".equals(val)) {
+                config.setTxnSerializableIsolation(true);
+            } else if ("readCommitted".equals(val)) {
+                DbInternal.setTxnReadCommitted(config, true);
+            } else {
+                throw new IllegalArgumentException
+                    ("Unknown isolationLevel system property value: " + val);
+            }
+        }
+        return config;
+    }
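+
+    /*
+     * For example, running the unit tests with
+     * -DisolationLevel=serializable makes every environment created via
+     * initEnvConfig use serializable isolation, while
+     * -DisolationLevel=readCommitted selects read-committed.
+     */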
+
+    /**
+     * If a unit test needs to override the isolation level, it should call
+     * this method after calling initEnvConfig.
+     */
+    public static void clearIsolationLevel(EnvironmentConfig config) {
+        DbInternal.setTxnReadCommitted(config, false);
+        config.setTxnSerializableIsolation(false);
+    }
+
+    /**
+     * Loads the given resource relative to the given class, and copies it to
+     * log file zero in the given directory.
+     */
+    public static void loadLog(Class<?> cls, String resourceName, File envHome)
+        throws IOException {
+
+        loadLog(cls, resourceName, envHome, LOG_FILE_NAME);
+    }
+
+    /**
+     * Loads the given resource relative to the given class, and copies it to
+     * the given log file in the given directory.
+     */
+    public static void loadLog(Class<?> cls,
+                               String resourceName,
+                               File envHome,
+                               String logFileName)
+        throws IOException {
+
+        File logFile = new File(envHome, logFileName);
+        InputStream is = cls.getResourceAsStream(resourceName);
+        OutputStream os = new FileOutputStream(logFile);
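+        /* Assumes available() reports the full resource length; the length
+           check below fails fast if the read comes up short. */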
+        byte[] buf = new byte[is.available()];
+        int len = is.read(buf);
+        if (buf.length != len) {
+            throw new IllegalStateException();
+        }
+        os.write(buf, 0, len);
+        is.close();
+        os.close();
+    }
+
+    /**
+     * Logs the BIN at the cursor provisionally and the parent IN
+     * non-provisionally.  Used to simulate a partial checkpoint or eviction.
+     */
+    public static void logBINAndIN(Environment env, Cursor cursor)
+        throws DatabaseException {
+
+        BIN bin = getBIN(cursor);
+        Tree tree = bin.getDatabase().getTree();
+
+        /* Log the BIN and update its parent entry. */
+        bin.latch();
+        SearchResult result = tree.getParentINForChildIN(bin, true,
+                                                         CacheMode.DEFAULT);
+        assert result.parent != null;
+        assert result.exactParentFound;
+        IN binParent = result.parent;
+        long binLsn = logIN(env, bin, true, binParent);
+        binParent.updateNode(result.index, bin, binLsn, null /*lnSlotKey*/);
+        result.parent.releaseLatch();
+
+        /* Log the BIN parent and update its parent entry. */
+        binParent.latch();
+        result =
+            tree.getParentINForChildIN(binParent, true, CacheMode.DEFAULT);
+        IN inParent = null;
+        if (result.parent != null) {
+            result.parent.releaseLatch();
+            assert result.exactParentFound;
+            inParent = result.parent;
+            inParent.latch();
+        }
+        final long inLsn = logIN(env, binParent, false, null);
+        if (inParent != null) {
+            inParent.updateNode(result.index, binParent, inLsn,
+                                null /*lnSlotKey*/);
+            inParent.releaseLatch();
+        } else {
+            tree.withRootLatchedExclusive(new WithRootLatched() {
+                public IN doWork(ChildReference root)
+                    throws DatabaseException {
+                    root.setLsn(inLsn);
+                    return null;
+                }
+            });
+        }
+    }
+
+    /**
+     * Logs the given IN.
+     */
+    public static long logIN(Environment env,
+                             IN in,
+                             boolean provisional,
+                             IN parent)
+        throws DatabaseException {
+
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+        in.latch();
+        long lsn;
+        if (provisional) {
+            lsn = in.log(envImpl.getLogManager(),
+                         false,  // allowDeltas
+                         true,   // isProvisional
+                         false,  // proactiveMigration
+                         false,  // backgroundIO
+                         parent);// provisional parent
+        } else {
+            lsn = in.log(envImpl.getLogManager());
+        }
+        in.releaseLatch();
+        return lsn;
+    }
+
+    /**
+     * Returns the parent IN of the given IN.
+     */
+    public static IN getIN(IN in)
+        throws DatabaseException {
+
+        Tree tree = in.getDatabase().getTree();
+        in.latch();
+        SearchResult result =
+            tree.getParentINForChildIN(in, true, CacheMode.DEFAULT);
+        assert result.parent != null;
+        result.parent.releaseLatch();
+        assert result.exactParentFound;
+        return result.parent;
+    }
+
+    /**
+     * Returns the target BIN for the given cursor.
+     */
+    public static BIN getBIN(Cursor cursor)
+        throws DatabaseException {
+
+        CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor);
+        BIN bin = impl.getDupBIN();
+        if (bin == null) {
+            bin = impl.getBIN();
+            assert bin != null;
+        }
+        return bin;
+    }
+
+    /**
+     * Check that the tree is the given depth.  Use to ensure that data
+     * setups are as expected.
+     * @return true if the tree depth equals desiredDepth
+     */
+    public static boolean checkTreeDepth(Database db, int desiredDepth)
+        throws DatabaseException {
+
+        Tree tree = DbInternal.dbGetDatabaseImpl(db).getTree();
+        IN rootIN = tree.getRootIN(CacheMode.UNCHANGED);
+        int level = 0;
+        if (rootIN != null) {
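+            /* Mask off the non-level bits to get the tree level. */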
+            level = rootIN.getLevel() & IN.LEVEL_MASK;
+            rootIN.releaseLatch();
+        }
+
+        return (desiredDepth == level);
+    }
+
+    /**
+     * @return true if long running tests are enabled.
+     */
+    static public boolean runLongTests() {
+        return SharedTestUtils.runLongTests();
+    }
+
+    /**
+     * Skip over the JE version number at the start of the exception
+     * message for tests which are looking for a specific message.
+     */
+    public static String skipVersion(Exception e) {
+        int versionHeaderLen = DatabaseException.getVersionHeader().length();
+        return (e.getMessage().substring(versionHeaderLen));
+    }
+
+    /**
+     * Dump any exception messages to stderr.
+     */
+    public static class StdErrExceptionListener
+        implements ExceptionListener {
+
+        public void exceptionThrown(ExceptionEvent event) {
+            System.err.println(Thread.currentThread() +
+                               " received " +
+                               event);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/utilint/BitMapTest.java b/test/com/sleepycat/je/utilint/BitMapTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f9ad4b3ce28ef6ef776da4b9542ef38a5f09390
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/BitMapTest.java
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BitMapTest.java,v 1.7.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import junit.framework.TestCase;
+
+public class BitMapTest extends TestCase {
+
+    public void testSegments() {
+
+        BitMap bmap = new BitMap();
+        int startBit = 15;
+        int endBit = 62;
+        assertEquals(0, bmap.cardinality());
+        assertEquals(0, bmap.getNumSegments());
+
+        assertFalse(bmap.get(1001L));
+        assertEquals(0, bmap.getNumSegments());
+
+        /* set one bit in each of several different segments. */
+        for (int i = startBit; i <= endBit; i++) {
+            long index = 1L << i;
+            index += 17;
+            bmap.set(index);
+        }
+
+        assertEquals((endBit - startBit + 1), bmap.cardinality());
+        assertEquals((endBit - startBit + 1), bmap.getNumSegments());
+
+        /* should be set. */
+        for (int i = startBit; i <= endBit; i++) {
+            long index = 1L << i;
+            index += 17;
+            assertTrue(bmap.get(index));
+        }
+
+        /* should be clear. */
+        for (int i = startBit; i <= endBit; i++) {
+            long index = 7 + (1L << i);
+            assertFalse(bmap.get(index));
+        }
+
+        /* checking for non-set bits should not create more segments. */
+        assertEquals((endBit - startBit + 1), bmap.cardinality());
+        assertEquals((endBit - startBit + 1), bmap.getNumSegments());
+    }
+
+    public void testNegative() {
+        BitMap bMap = new BitMap();
+
+        try {
+            bMap.set(-300);
+            fail("should have thrown exception");
+        } catch (IndexOutOfBoundsException expected) {
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/utilint/ExceptionListenerTest.java b/test/com/sleepycat/je/utilint/ExceptionListenerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..f3d414d0503c3ea6b21c3f07dacd1d2368be9081
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/ExceptionListenerTest.java
@@ -0,0 +1,128 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionListenerTest.java,v 1.11.2.2 2010/01/04 15:30:48 cwl Exp $
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ExceptionEvent;
+import com.sleepycat.je.ExceptionListener;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+
+public class ExceptionListenerTest extends TestCase {
+
+    private File envHome;
+
+    private volatile boolean exceptionThrownCalled = false;
+
+    private DaemonThread dt = null;
+
+    public ExceptionListenerTest() {
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+    }
+
+    public void setUp()
+        throws IOException {
+
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        TestUtils.removeLogFiles("TearDown", envHome, false);
+    }
+
+    public void testExceptionListener()
+        throws Exception {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setExceptionListener(new MyExceptionListener());
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+        EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getCheckpointer().getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getINCompressor().getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getEvictor().getExceptionListener());
+
+        dt = new MyDaemonThread(0, Environment.CLEANER_NAME, envImpl);
+        dt.setExceptionListener(envImpl.getExceptionListener());
+        dt.stifleExceptionChatter = true;
+        dt.runOrPause(true);
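+        /* Poll until the listener requests shutdown, or time out after
+           ten seconds. */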
+        long startTime = System.currentTimeMillis();
+        while (!dt.isShutdownRequested() &&
+               System.currentTimeMillis() - startTime < 10 * 1000) {
+            Thread.yield();
+        }
+        assertTrue("ExceptionListener apparently not called",
+                   exceptionThrownCalled);
+
+        /* Change the exception listener. */
+        envConfig = env.getConfig();
+        exceptionThrownCalled = false;
+        envConfig.setExceptionListener(new MyExceptionListener());
+        env.setMutableConfig(envConfig);
+
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getCheckpointer().getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getINCompressor().getExceptionListener());
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getEvictor().getExceptionListener());
+
+        dt = new MyDaemonThread(0, Environment.CLEANER_NAME, envImpl);
+        dt.setExceptionListener(envImpl.getExceptionListener());
+        dt.stifleExceptionChatter = true;
+        dt.runOrPause(true);
+        startTime = System.currentTimeMillis();
+        while (!dt.isShutdownRequested() &&
+               System.currentTimeMillis() - startTime < 10 * 1000) {
+            Thread.yield();
+        }
+        assertTrue("ExceptionListener apparently not called",
+                   exceptionThrownCalled);
+    }
+
+    private class MyDaemonThread extends DaemonThread {
+        MyDaemonThread(long waitTime, String name, EnvironmentImpl envImpl) {
+            super(waitTime, name, envImpl);
+        }
+
+        protected void onWakeup()
+            throws DatabaseException {
+
+            throw new RuntimeException("test exception listener");
+        }
+    }
+
+    private class MyExceptionListener implements ExceptionListener {
+        public void exceptionThrown(ExceptionEvent event) {
+            assertEquals("daemonName should be CLEANER_NAME",
+                         Environment.CLEANER_NAME,
+                         event.getThreadName());
+            dt.requestShutdown();
+            exceptionThrownCalled = true;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/BindingTest.java b/test/com/sleepycat/persist/test/BindingTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a468baa6d756b66d56dba52bf03372792495f0e5
--- /dev/null
+++ b/test/com/sleepycat/persist/test/BindingTest.java
@@ -0,0 +1,2382 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BindingTest.java,v 1.33.2.3 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignMultiKeyNullifier;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.persist.impl.PersistCatalog;
+import com.sleepycat.persist.impl.PersistComparator;
+import com.sleepycat.persist.impl.PersistEntityBinding;
+import com.sleepycat.persist.impl.PersistKeyBinding;
+import com.sleepycat.persist.impl.PersistKeyCreator;
+import com.sleepycat.persist.impl.SimpleCatalog;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.PrimaryKeyMetadata;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class BindingTest extends TestCase {
+
+    private static final String STORE_PREFIX = "persist#foo#";
+
+    private File envHome;
+    private Environment env;
+    private EntityModel model;
+    private PersistCatalog catalog;
+    private DatabaseEntry keyEntry;
+    private DatabaseEntry dataEntry;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+        SharedTestUtils.emptyDir(envHome);
+        keyEntry = new DatabaseEntry();
+        dataEntry = new DatabaseEntry();
+    }
+
+    public void tearDown()
+        throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        envHome = null;
+        env = null;
+        catalog = null;
+        keyEntry = null;
+        dataEntry = null;
+    }
+
+    private void open()
+        throws IOException, DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        openCatalog();
+    }
+
+    private void openCatalog()
+        throws DatabaseException {
+
+        model = new AnnotationModel();
+        model.registerClass(LocalizedTextProxy.class);
+        model.registerClass(LocaleProxy.class);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        DbCompat.setTypeBtree(dbConfig);
+        catalog = new PersistCatalog
+            (null, env, STORE_PREFIX, STORE_PREFIX + "catalog", dbConfig,
+             model, null, false /*rawAccess*/, null /*Store*/);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        /* Close/open/close catalog to test checks for class evolution. */
+        catalog.close();
+        PersistCatalog.expectNoClassChanges = true;
+        try {
+            openCatalog();
+        } finally {
+            PersistCatalog.expectNoClassChanges = false;
+        }
+        catalog.close();
+        catalog = null;
+
+        env.close();
+        env = null;
+    }
+
+    public void testBasic()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(Basic.class,
+                    new Basic(1, "one", 2.2, "three"));
+        checkEntity(Basic.class,
+                    new Basic(0, null, 0, null));
+        checkEntity(Basic.class,
+                    new Basic(-1, "xxx", -2, "xxx"));
+
+        checkMetadata(Basic.class.getName(), new String[][] {
+                          {"id", "long"},
+                          {"one", "java.lang.String"},
+                          {"two", "double"},
+                          {"three", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class Basic implements MyEntity {
+
+        @PrimaryKey
+        private long id;
+        private String one;
+        private double two;
+        private String three;
+
+        private Basic() { }
+
+        private Basic(long id, String one, double two, String three) {
+            this.id = id;
+            this.one = one;
+            this.two = two;
+            this.three = three;
+        }
+
+        public String getBasicOne() {
+            return one;
+        }
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        public void validate(Object other) {
+            Basic o = (Basic) other;
+            TestCase.assertEquals(id, o.id);
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+            TestCase.assertEquals(two, o.two);
+            TestCase.assertTrue(nullOrEqual(three, o.three));
+            if (one == three) {
+                TestCase.assertSame(o.one, o.three);
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "" + id + ' ' + one + ' ' + two;
+        }
+    }
+
+    public void testSimpleTypes()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(SimpleTypes.class, new SimpleTypes());
+
+        checkMetadata(SimpleTypes.class.getName(), new String[][] {
+                          {"f0", "boolean"},
+                          {"f1", "char"},
+                          {"f2", "byte"},
+                          {"f3", "short"},
+                          {"f4", "int"},
+                          {"f5", "long"},
+                          {"f6", "float"},
+                          {"f7", "double"},
+                          {"f8", "java.lang.String"},
+                          {"f9", "java.math.BigInteger"},
+                          //{"f10", "java.math.BigDecimal"},
+                          {"f11", "java.util.Date"},
+                          {"f12", "java.lang.Boolean"},
+                          {"f13", "java.lang.Character"},
+                          {"f14", "java.lang.Byte"},
+                          {"f15", "java.lang.Short"},
+                          {"f16", "java.lang.Integer"},
+                          {"f17", "java.lang.Long"},
+                          {"f18", "java.lang.Float"},
+                          {"f19", "java.lang.Double"},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class SimpleTypes implements MyEntity {
+
+        @PrimaryKey
+        private boolean f0 = true;
+        private char f1 = 'a';
+        private byte f2 = 123;
+        private short f3 = 123;
+        private int f4 = 123;
+        private long f5 = 123;
+        private float f6 = 123.4f;
+        private double f7 = 123.4;
+        private String f8 = "xxx";
+        private BigInteger f9 = BigInteger.valueOf(123);
+        //private BigDecimal f10 = BigDecimal.valueOf(123.4);
+        private Date f11 = new Date();
+        private Boolean f12 = true;
+        private Character f13 = 'a';
+        private Byte f14 = 123;
+        private Short f15 = 123;
+        private Integer f16 = 123;
+        private Long f17 = 123L;
+        private Float f18 = 123.4f;
+        private Double f19 = 123.4;
+
+        SimpleTypes() { }
+
+        public Object getPriKeyObject() {
+            return f0;
+        }
+
+        public void validate(Object other) {
+            SimpleTypes o = (SimpleTypes) other;
+            TestCase.assertEquals(f0, o.f0);
+            TestCase.assertEquals(f1, o.f1);
+            TestCase.assertEquals(f2, o.f2);
+            TestCase.assertEquals(f3, o.f3);
+            TestCase.assertEquals(f4, o.f4);
+            TestCase.assertEquals(f5, o.f5);
+            TestCase.assertEquals(f6, o.f6);
+            TestCase.assertEquals(f7, o.f7);
+            TestCase.assertEquals(f8, o.f8);
+            TestCase.assertEquals(f9, o.f9);
+            //TestCase.assertEquals(f10, o.f10);
+            TestCase.assertEquals(f11, o.f11);
+            TestCase.assertEquals(f12, o.f12);
+            TestCase.assertEquals(f13, o.f13);
+            TestCase.assertEquals(f14, o.f14);
+            TestCase.assertEquals(f15, o.f15);
+            TestCase.assertEquals(f16, o.f16);
+            TestCase.assertEquals(f17, o.f17);
+            TestCase.assertEquals(f18, o.f18);
+            TestCase.assertEquals(f19, o.f19);
+        }
+    }
+
+    public void testArrayTypes()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(ArrayTypes.class, new ArrayTypes());
+
+        checkMetadata(ArrayTypes.class.getName(), new String[][] {
+                          {"id", "int"},
+                          {"f0", boolean[].class.getName()},
+                          {"f1", char[].class.getName()},
+                          {"f2", byte[].class.getName()},
+                          {"f3", short[].class.getName()},
+                          {"f4", int[].class.getName()},
+                          {"f5", long[].class.getName()},
+                          {"f6", float[].class.getName()},
+                          {"f7", double[].class.getName()},
+                          {"f8", String[].class.getName()},
+                          {"f9", Address[].class.getName()},
+                          {"f10", boolean[][][].class.getName()},
+                          {"f11", String[][][].class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class ArrayTypes implements MyEntity {
+
+        @PrimaryKey
+        private int id = 1;
+        private boolean[] f0 =  {false, true};
+        private char[] f1 = {'a', 'b'};
+        private byte[] f2 = {1, 2};
+        private short[] f3 = {1, 2};
+        private int[] f4 = {1, 2};
+        private long[] f5 = {1, 2};
+        private float[] f6 = {1.1f, 2.2f};
+        private double[] f7 = {1.1, 2.2};
+        private String[] f8 = {"xxx", null, "yyy"};
+        private Address[] f9 = {new Address("city", "state", 123),
+                                null,
+                                new Address("x", "y", 444)};
+        private boolean[][][] f10 =
+        {
+            {
+                {false, true},
+                {false, true},
+            },
+            null,
+            {
+                {false, true},
+                {false, true},
+            },
+        };
+        private String[][][] f11 =
+        {
+            {
+                {"xxx", null, "yyy"},
+                null,
+                {"xxx", null, "yyy"},
+            },
+            null,
+            {
+                {"xxx", null, "yyy"},
+                null,
+                {"xxx", null, "yyy"},
+            },
+        };
+
+        ArrayTypes() { }
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        public void validate(Object other) {
+            ArrayTypes o = (ArrayTypes) other;
+            TestCase.assertEquals(id, o.id);
+            TestCase.assertTrue(Arrays.equals(f0, o.f0));
+            TestCase.assertTrue(Arrays.equals(f1, o.f1));
+            TestCase.assertTrue(Arrays.equals(f2, o.f2));
+            TestCase.assertTrue(Arrays.equals(f3, o.f3));
+            TestCase.assertTrue(Arrays.equals(f4, o.f4));
+            TestCase.assertTrue(Arrays.equals(f5, o.f5));
+            TestCase.assertTrue(Arrays.equals(f6, o.f6));
+            TestCase.assertTrue(Arrays.equals(f7, o.f7));
+            TestCase.assertTrue(Arrays.equals(f8, o.f8));
+            TestCase.assertTrue(Arrays.deepEquals(f9, o.f9));
+            TestCase.assertTrue(Arrays.deepEquals(f10, o.f10));
+            TestCase.assertTrue(Arrays.deepEquals(f11, o.f11));
+        }
+    }
+
+    public void testEnumTypes()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(EnumTypes.class, new EnumTypes());
+
+        checkMetadata(EnumTypes.class.getName(), new String[][] {
+                          {"f0", "int"},
+                          {"f1", Thread.State.class.getName()},
+                          {"f2", EnumTypes.MyEnum.class.getName()},
+                          {"f3", Object.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class EnumTypes implements MyEntity {
+
+        private enum MyEnum { ONE, TWO }
+
+        @PrimaryKey
+        private int f0 = 1;
+        private Thread.State f1 = Thread.State.RUNNABLE;
+        private MyEnum f2 = MyEnum.ONE;
+        private Object f3 = MyEnum.TWO;
+
+        EnumTypes() { }
+
+        public Object getPriKeyObject() {
+            return f0;
+        }
+
+        public void validate(Object other) {
+            EnumTypes o = (EnumTypes) other;
+            TestCase.assertEquals(f0, o.f0);
+            TestCase.assertSame(f1, o.f1);
+            TestCase.assertSame(f2, o.f2);
+            TestCase.assertSame(f3, o.f3);
+        }
+    }
+
+    public void testProxyTypes()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(ProxyTypes.class, new ProxyTypes());
+
+        checkMetadata(ProxyTypes.class.getName(), new String[][] {
+                          {"f0", "int"},
+                          {"f1", Locale.class.getName()},
+                          {"f2", Set.class.getName()},
+                          {"f3", Set.class.getName()},
+                          {"f4", Object.class.getName()},
+                          {"f5", HashMap.class.getName()},
+                          {"f6", TreeMap.class.getName()},
+                          {"f7", List.class.getName()},
+                          {"f8", LinkedList.class.getName()},
+                          {"f9", LocalizedText.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class ProxyTypes implements MyEntity {
+
+        @PrimaryKey
+        private int f0 = 1;
+        private Locale f1 = Locale.getDefault();
+        private Set<Integer> f2 = new HashSet<Integer>();
+        private Set<Integer> f3 = new TreeSet<Integer>();
+        private Object f4 = new HashSet<Address>();
+        private HashMap<String,Integer> f5 = new HashMap<String,Integer>();
+        private TreeMap<String,Address> f6 = new TreeMap<String,Address>();
+        private List<Integer> f7 = new ArrayList<Integer>();
+        private LinkedList<Integer> f8 = new LinkedList<Integer>();
+        private LocalizedText f9 = new LocalizedText(f1, "xyz");
+
+        ProxyTypes() {
+            f2.add(123);
+            f2.add(456);
+            f3.add(456);
+            f3.add(123);
+            HashSet<Address> s = (HashSet) f4;
+            s.add(new Address("city", "state", 11111));
+            s.add(new Address("city2", "state2", 22222));
+            s.add(new Address("city3", "state3", 33333));
+            f5.put("one", 111);
+            f5.put("two", 222);
+            f5.put("three", 333);
+            f6.put("one", new Address("city", "state", 11111));
+            f6.put("two", new Address("city2", "state2", 22222));
+            f6.put("three", new Address("city3", "state3", 33333));
+            f7.add(123);
+            f7.add(456);
+            f8.add(123);
+            f8.add(456);
+        }
+
+        public Object getPriKeyObject() {
+            return f0;
+        }
+
+        public void validate(Object other) {
+            ProxyTypes o = (ProxyTypes) other;
+            TestCase.assertEquals(f0, o.f0);
+            TestCase.assertEquals(f1, o.f1);
+            TestCase.assertEquals(f2, o.f2);
+            TestCase.assertEquals(f3, o.f3);
+            TestCase.assertEquals(f4, o.f4);
+            TestCase.assertEquals(f5, o.f5);
+            TestCase.assertEquals(f6, o.f6);
+            TestCase.assertEquals(f7, o.f7);
+            TestCase.assertEquals(f8, o.f8);
+            TestCase.assertEquals(f9, o.f9);
+        }
+    }
+
+    @Persistent(proxyFor=Locale.class)
+    static class LocaleProxy implements PersistentProxy<Locale> {
+
+        String language;
+        String country;
+        String variant;
+
+        private LocaleProxy() {}
+
+        public void initializeProxy(Locale object) {
+            language = object.getLanguage();
+            country = object.getCountry();
+            variant = object.getVariant();
+        }
+
+        public Locale convertProxy() {
+            return new Locale(language, country, variant);
+        }
+    }
+
+    static class LocalizedText {
+
+        Locale locale;
+        String text;
+
+        LocalizedText(Locale locale, String text) {
+            this.locale = locale;
+            this.text = text;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            LocalizedText o = (LocalizedText) other;
+            return text.equals(o.text) &&
+                   locale.equals(o.locale);
+        }
+    }
+
+    @Persistent(proxyFor=LocalizedText.class)
+    static class LocalizedTextProxy implements PersistentProxy<LocalizedText> {
+
+        Locale locale;
+        String text;
+
+        private LocalizedTextProxy() {}
+
+        public void initializeProxy(LocalizedText object) {
+            locale = object.locale;
+            text = object.text;
+        }
+
+        public LocalizedText convertProxy() {
+            return new LocalizedText(locale, text);
+        }
+    }
+
+    public void testEmbedded()
+        throws IOException, DatabaseException {
+
+        open();
+
+        Address a1 = new Address("city", "state", 123);
+        Address a2 = new Address("Wikieup", "AZ", 85360);
+
+        checkEntity(Embedded.class,
+                    new Embedded("x", a1, a2));
+        checkEntity(Embedded.class,
+                    new Embedded("y", a1, null));
+        checkEntity(Embedded.class,
+                    new Embedded("", a2, a2));
+
+        checkMetadata(Embedded.class.getName(), new String[][] {
+                        {"id", "java.lang.String"},
+                        {"idShadow", "java.lang.String"},
+                        {"one", Address.class.getName()},
+                        {"two", Address.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkMetadata(Address.class.getName(), new String[][] {
+                        {"street", "java.lang.String"},
+                        {"city", "java.lang.String"},
+                        {"zip", "int"},
+                      },
+                      -1 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Entity
+    static class Embedded implements MyEntity {
+
+        @PrimaryKey
+        private String id;
+        private String idShadow;
+        private Address one;
+        private Address two;
+
+        private Embedded() { }
+
+        private Embedded(String id, Address one, Address two) {
+            this.id = id;
+            idShadow = id;
+            this.one = one;
+            this.two = two;
+        }
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        public void validate(Object other) {
+            Embedded o = (Embedded) other;
+            TestCase.assertEquals(id, o.id);
+            if (one != null) {
+                one.validate(o.one);
+            } else {
+                assertNull(o.one);
+            }
+            if (two != null) {
+                two.validate(o.two);
+            } else {
+                assertNull(o.two);
+            }
+            TestCase.assertSame(o.id, o.idShadow);
+            if (one == two) {
+                TestCase.assertSame(o.one, o.two);
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "" + id + ' ' + one + ' ' + two;
+        }
+    }
+
+    @Persistent
+    static class Address {
+
+        private String street;
+        private String city;
+        private int zip;
+
+        private Address() {}
+
+        Address(String street, String city, int zip) {
+            this.street = street;
+            this.city = city;
+            this.zip = zip;
+        }
+
+        void validate(Address o) {
+            TestCase.assertTrue(nullOrEqual(street, o.street));
+            TestCase.assertTrue(nullOrEqual(city, o.city));
+            TestCase.assertEquals(zip, o.zip);
+        }
+
+        @Override
+        public String toString() {
+            return "" + street + ' ' + city + ' ' + zip;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other == null) {
+                return false;
+            }
+            Address o = (Address) other;
+            return nullOrEqual(street, o.street) &&
+                   nullOrEqual(city, o.city) &&
+                   nullOrEqual(zip, o.zip);
+        }
+
+        @Override
+        public int hashCode() {
+            return zip;
+        }
+    }
+
+    public void testSubclass()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(Basic.class,
+                    new Subclass(-1, "xxx", -2, "xxx", "xxx", true));
+
+        checkMetadata(Basic.class.getName(), new String[][] {
+                          {"id", "long"},
+                          {"one", "java.lang.String"},
+                          {"two", "double"},
+                          {"three", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+        checkMetadata(Subclass.class.getName(), new String[][] {
+                          {"one", "java.lang.String"},
+                          {"two", "boolean"},
+                      },
+                      -1 /*priKeyIndex*/, Basic.class.getName());
+
+        close();
+    }
+
+    @Persistent
+    static class Subclass extends Basic {
+
+        private String one;
+        private boolean two;
+
+        private Subclass() {
+        }
+
+        private Subclass(long id, String one, double two, String three,
+                         String subOne, boolean subTwo) {
+            super(id, one, two, three);
+            this.one = subOne;
+            this.two = subTwo;
+        }
+
+        public void validate(Object other) {
+            super.validate(other);
+            Subclass o = (Subclass) other;
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+            TestCase.assertEquals(two, o.two);
+            if (one == getBasicOne()) {
+                TestCase.assertSame(o.one, o.getBasicOne());
+            }
+        }
+    }
+
+    public void testSuperclass()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(UseSuperclass.class,
+                    new UseSuperclass(33, "xxx"));
+
+        checkMetadata(Superclass.class.getName(), new String[][] {
+                          {"id", "int"},
+                          {"one", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+        checkMetadata(UseSuperclass.class.getName(), new String[][] {
+                      },
+                      -1 /*priKeyIndex*/, Superclass.class.getName());
+
+        close();
+    }
+
+    @Persistent
+    static class Superclass implements MyEntity {
+
+        @PrimaryKey
+        private int id;
+        private String one;
+
+        private Superclass() { }
+
+        private Superclass(int id, String one) {
+            this.id = id;
+            this.one = one;
+        }
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        public void validate(Object other) {
+            Superclass o = (Superclass) other;
+            TestCase.assertEquals(id, o.id);
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+        }
+    }
+
+    @Entity
+    static class UseSuperclass extends Superclass {
+
+        private UseSuperclass() { }
+
+        private UseSuperclass(int id, String one) {
+            super(id, one);
+        }
+    }
+
+    public void testAbstract()
+        throws IOException, DatabaseException {
+
+        open();
+
+        checkEntity(EntityUseAbstract.class,
+                    new EntityUseAbstract(33, "xxx"));
+
+        checkMetadata(Abstract.class.getName(), new String[][] {
+                          {"one", "java.lang.String"},
+                      },
+                      -1 /*priKeyIndex*/, null);
+        checkMetadata(EmbeddedUseAbstract.class.getName(), new String[][] {
+                          {"two", "java.lang.String"},
+                      },
+                      -1 /*priKeyIndex*/, Abstract.class.getName());
+        checkMetadata(EntityUseAbstract.class.getName(), new String[][] {
+                          {"id", "int"},
+                          {"f1", EmbeddedUseAbstract.class.getName()},
+                          {"f2", Abstract.class.getName()},
+                          {"f3", Object.class.getName()},
+                          {"f4", Interface.class.getName()},
+                          {"a1", EmbeddedUseAbstract[].class.getName()},
+                          {"a2", Abstract[].class.getName()},
+                          {"a3", Abstract[].class.getName()},
+                          {"a4", Object[].class.getName()},
+                          {"a5", Interface[].class.getName()},
+                          {"a6", Interface[].class.getName()},
+                          {"a7", Interface[].class.getName()},
+                      },
+                      0 /*priKeyIndex*/, Abstract.class.getName());
+
+        close();
+    }
+
+    @Persistent
+    static abstract class Abstract implements Interface {
+
+        String one;
+
+        private Abstract() { }
+
+        private Abstract(String one) {
+            this.one = one;
+        }
+
+        public void validate(Object other) {
+            Abstract o = (Abstract) other;
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            Abstract o = (Abstract) other;
+            return nullOrEqual(one, o.one);
+        }
+    }
+
+    interface Interface {
+        void validate(Object other);
+    }
+
+    @Persistent
+    static class EmbeddedUseAbstract extends Abstract {
+
+        private String two;
+
+        private EmbeddedUseAbstract() { }
+
+        private EmbeddedUseAbstract(String one, String two) {
+            super(one);
+            this.two = two;
+        }
+
+        @Override
+        public void validate(Object other) {
+            super.validate(other);
+            EmbeddedUseAbstract o = (EmbeddedUseAbstract) other;
+            TestCase.assertTrue(nullOrEqual(two, o.two));
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (!super.equals(other)) {
+                return false;
+            }
+            EmbeddedUseAbstract o = (EmbeddedUseAbstract) other;
+            return nullOrEqual(two, o.two);
+        }
+    }
+
+    @Entity
+    static class EntityUseAbstract extends Abstract implements MyEntity {
+
+        @PrimaryKey
+        private int id;
+
+        private EmbeddedUseAbstract f1;
+        private Abstract f2;
+        private Object f3;
+        private Interface f4;
+        private EmbeddedUseAbstract[] a1;
+        private Abstract[] a2;
+        private Abstract[] a3;
+        private Object[] a4;
+        private Interface[] a5;
+        private Interface[] a6;
+        private Interface[] a7;
+
+        private EntityUseAbstract() { }
+
+        private EntityUseAbstract(int id, String one) {
+            super(one);
+            this.id = id;
+            f1 = new EmbeddedUseAbstract(one, one);
+            f2 = new EmbeddedUseAbstract(one + "x", one + "y");
+            f3 = new EmbeddedUseAbstract(null, null);
+            f4 = new EmbeddedUseAbstract(null, null);
+            a1 = new EmbeddedUseAbstract[3];
+            a2 = new EmbeddedUseAbstract[3];
+            a3 = new Abstract[3];
+            a4 = new Object[3];
+            a5 = new EmbeddedUseAbstract[3];
+            a6 = new Abstract[3];
+            a7 = new Interface[3];
+            for (int i = 0; i < 3; i += 1) {
+                a1[i] = new EmbeddedUseAbstract("1" + i, null);
+                a2[i] = new EmbeddedUseAbstract("2" + i, null);
+                a3[i] = new EmbeddedUseAbstract("3" + i, null);
+                a4[i] = new EmbeddedUseAbstract("4" + i, null);
+                a5[i] = new EmbeddedUseAbstract("5" + i, null);
+                a6[i] = new EmbeddedUseAbstract("6" + i, null);
+                a7[i] = new EmbeddedUseAbstract("7" + i, null);
+            }
+        }
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        @Override
+        public void validate(Object other) {
+            super.validate(other);
+            EntityUseAbstract o = (EntityUseAbstract) other;
+            TestCase.assertEquals(id, o.id);
+            f1.validate(o.f1);
+            assertSame(o.one, o.f1.one);
+            assertSame(o.f1.one, o.f1.two);
+            f2.validate(o.f2);
+            ((Abstract) f3).validate(o.f3);
+            f4.validate(o.f4);
+            assertTrue(arrayToString(a1) + ' ' + arrayToString(o.a1),
+                       Arrays.equals(a1, o.a1));
+            assertTrue(Arrays.equals(a2, o.a2));
+            assertTrue(Arrays.equals(a3, o.a3));
+            assertTrue(Arrays.equals(a4, o.a4));
+            assertTrue(Arrays.equals(a5, o.a5));
+            assertTrue(Arrays.equals(a6, o.a6));
+            assertTrue(Arrays.equals(a7, o.a7));
+            assertSame(EmbeddedUseAbstract.class, f2.getClass());
+            assertSame(EmbeddedUseAbstract.class, f3.getClass());
+            assertSame(EmbeddedUseAbstract[].class, a1.getClass());
+            assertSame(EmbeddedUseAbstract[].class, a2.getClass());
+            assertSame(Abstract[].class, a3.getClass());
+            assertSame(Object[].class, a4.getClass());
+            assertSame(EmbeddedUseAbstract[].class, a5.getClass());
+            assertSame(Abstract[].class, a6.getClass());
+            assertSame(Interface[].class, a7.getClass());
+        }
+    }
+
+    public void testCompositeKey()
+        throws IOException, DatabaseException {
+
+        open();
+
+        CompositeKey key =
+            new CompositeKey(123, 456L, "xyz", BigInteger.valueOf(789));
+        checkEntity(UseCompositeKey.class,
+                    new UseCompositeKey(key, "one"));
+
+        checkMetadata(UseCompositeKey.class.getName(), new String[][] {
+                          {"key", CompositeKey.class.getName()},
+                          {"one", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkMetadata(CompositeKey.class.getName(), new String[][] {
+                        {"f1", "int"},
+                        {"f2", "java.lang.Long"},
+                        {"f3", "java.lang.String"},
+                        {"f4", "java.math.BigInteger"},
+                      },
+                      -1 /*priKeyIndex*/, null);
+
+        close();
+    }
+
+    @Persistent
+    static class CompositeKey {
+        @KeyField(3)
+        private int f1;
+        @KeyField(2)
+        private Long f2;
+        @KeyField(1)
+        private String f3;
+        @KeyField(4)
+        private BigInteger f4;
+
+        private CompositeKey() {}
+
+        CompositeKey(int f1, Long f2, String f3, BigInteger f4) {
+            this.f1 = f1;
+            this.f2 = f2;
+            this.f3 = f3;
+            this.f4 = f4;
+        }
+
+        void validate(CompositeKey o) {
+            TestCase.assertEquals(f1, o.f1);
+            TestCase.assertTrue(nullOrEqual(f2, o.f2));
+            TestCase.assertTrue(nullOrEqual(f3, o.f3));
+            TestCase.assertTrue(nullOrEqual(f4, o.f4));
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            CompositeKey o = (CompositeKey) other;
+            return f1 == o.f1 &&
+                   nullOrEqual(f2, o.f2) &&
+                   nullOrEqual(f3, o.f3) &&
+                   nullOrEqual(f4, o.f4);
+        }
+
+        @Override
+        public int hashCode() {
+            return f1;
+        }
+
+        @Override
+        public String toString() {
+            return "" + f1 + ' ' + f2 + ' ' + f3 + ' ' + f4;
+        }
+    }
+
+    @Entity
+    static class UseCompositeKey implements MyEntity {
+
+        @PrimaryKey
+        private CompositeKey key;
+        private String one;
+
+        private UseCompositeKey() { }
+
+        private UseCompositeKey(CompositeKey key, String one) {
+            this.key = key;
+            this.one = one;
+        }
+
+        public Object getPriKeyObject() {
+            return key;
+        }
+
+        public void validate(Object other) {
+            UseCompositeKey o = (UseCompositeKey) other;
+            TestCase.assertNotNull(key);
+            TestCase.assertNotNull(o.key);
+            key.validate(o.key);
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+        }
+    }
+
+    public void testComparableKey()
+        throws IOException, DatabaseException {
+
+        open();
+
+        ComparableKey key = new ComparableKey(123, 456);
+        checkEntity(UseComparableKey.class,
+                    new UseComparableKey(key, "one"));
+
+        checkMetadata(UseComparableKey.class.getName(), new String[][] {
+                          {"key", ComparableKey.class.getName()},
+                          {"one", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkMetadata(ComparableKey.class.getName(), new String[][] {
+                        {"f1", "int"},
+                        {"f2", "int"},
+                      },
+                      -1 /*priKeyIndex*/, null);
+
+        ClassMetadata classMeta =
+            model.getClassMetadata(UseComparableKey.class.getName());
+        assertNotNull(classMeta);
+
+        PersistKeyBinding binding = new PersistKeyBinding
+            (catalog, ComparableKey.class.getName(), false);
+
+        PersistComparator comparator = new PersistComparator
+            (ComparableKey.class.getName(),
+             classMeta.getCompositeKeyFields(),
+             binding);
+
+        compareKeys(comparator, binding, new ComparableKey(1, 1),
+                                         new ComparableKey(1, 1), 0);
+        compareKeys(comparator, binding, new ComparableKey(1, 2),
+                                         new ComparableKey(1, 1), -1);
+        compareKeys(comparator, binding, new ComparableKey(2, 1),
+                                         new ComparableKey(1, 1), -1);
+        compareKeys(comparator, binding, new ComparableKey(2, 1),
+                                         new ComparableKey(3, 1), 1);
+
+        close();
+    }
+
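+    /**
+     * Converts both keys to entry bytes with the given binding and checks
+     * the comparator result against the expected value.
+     */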
+    private void compareKeys(Comparator<byte[]> comparator,
+                             EntryBinding binding,
+                             Object key1,
+                             Object key2,
+                             int expectResult) {
+        DatabaseEntry entry1 = new DatabaseEntry();
+        DatabaseEntry entry2 = new DatabaseEntry();
+        binding.objectToEntry(key1, entry1);
+        binding.objectToEntry(key2, entry2);
+        int result = comparator.compare(entry1.getData(), entry2.getData());
+        assertEquals(expectResult, result);
+    }
+
+    @Persistent
+    static class ComparableKey implements Comparable<ComparableKey> {
+        @KeyField(2)
+        private int f1;
+        @KeyField(1)
+        private int f2;
+
+        private ComparableKey() {}
+
+        ComparableKey(int f1, int f2) {
+            this.f1 = f1;
+            this.f2 = f2;
+        }
+
+        void validate(ComparableKey o) {
+            TestCase.assertEquals(f1, o.f1);
+            TestCase.assertEquals(f2, o.f2);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            ComparableKey o = (ComparableKey) other;
+            return f1 == o.f1 && f2 == o.f2;
+        }
+
+        @Override
+        public int hashCode() {
+            return f1 + f2;
+        }
+
+        @Override
+        public String toString() {
+            return "" + f1 + ' ' + f2;
+        }
+
+        /** Compare f1 then f2, in reverse integer order. */
+        public int compareTo(ComparableKey o) {
+            if (f1 != o.f1) {
+                return o.f1 - f1;
+            } else {
+                return o.f2 - f2;
+            }
+        }
+    }
+
+    @Entity
+    static class UseComparableKey implements MyEntity {
+
+        @PrimaryKey
+        private ComparableKey key;
+        private String one;
+
+        private UseComparableKey() { }
+
+        private UseComparableKey(ComparableKey key, String one) {
+            this.key = key;
+            this.one = one;
+        }
+
+        public Object getPriKeyObject() {
+            return key;
+        }
+
+        public void validate(Object other) {
+            UseComparableKey o = (UseComparableKey) other;
+            TestCase.assertNotNull(key);
+            TestCase.assertNotNull(o.key);
+            key.validate(o.key);
+            TestCase.assertTrue(nullOrEqual(one, o.one));
+        }
+    }
+
+    public void testSecKeys()
+        throws IOException, DatabaseException {
+
+        open();
+
+        SecKeys obj = new SecKeys();
+        checkEntity(SecKeys.class, obj);
+
+        checkMetadata(SecKeys.class.getName(), new String[][] {
+                          {"id", "long"},
+                          {"f0", "boolean"},
+                          {"g0", "boolean"},
+                          {"f1", "char"},
+                          {"g1", "char"},
+                          {"f2", "byte"},
+                          {"g2", "byte"},
+                          {"f3", "short"},
+                          {"g3", "short"},
+                          {"f4", "int"},
+                          {"g4", "int"},
+                          {"f5", "long"},
+                          {"g5", "long"},
+                          {"f6", "float"},
+                          {"g6", "float"},
+                          {"f7", "double"},
+                          {"g7", "double"},
+                          {"f8", "java.lang.String"},
+                          {"g8", "java.lang.String"},
+                          {"f9", "java.math.BigInteger"},
+                          {"g9", "java.math.BigInteger"},
+                          //{"f10", "java.math.BigDecimal"},
+                          //{"g10", "java.math.BigDecimal"},
+                          {"f11", "java.util.Date"},
+                          {"g11", "java.util.Date"},
+                          {"f12", "java.lang.Boolean"},
+                          {"g12", "java.lang.Boolean"},
+                          {"f13", "java.lang.Character"},
+                          {"g13", "java.lang.Character"},
+                          {"f14", "java.lang.Byte"},
+                          {"g14", "java.lang.Byte"},
+                          {"f15", "java.lang.Short"},
+                          {"g15", "java.lang.Short"},
+                          {"f16", "java.lang.Integer"},
+                          {"g16", "java.lang.Integer"},
+                          {"f17", "java.lang.Long"},
+                          {"g17", "java.lang.Long"},
+                          {"f18", "java.lang.Float"},
+                          {"g18", "java.lang.Float"},
+                          {"f19", "java.lang.Double"},
+                          {"g19", "java.lang.Double"},
+                          {"f20", CompositeKey.class.getName()},
+                          {"g20", CompositeKey.class.getName()},
+                          {"f21", int[].class.getName()},
+                          {"g21", int[].class.getName()},
+                          {"f22", Integer[].class.getName()},
+                          {"g22", Integer[].class.getName()},
+                          {"f23", Set.class.getName()},
+                          {"g23", Set.class.getName()},
+                          {"f24", CompositeKey[].class.getName()},
+                          {"g24", CompositeKey[].class.getName()},
+                          {"f25", Set.class.getName()},
+                          {"g25", Set.class.getName()},
+                          {"f31", "java.util.Date"},
+                          {"f32", "java.lang.Boolean"},
+                          {"f33", "java.lang.Character"},
+                          {"f34", "java.lang.Byte"},
+                          {"f35", "java.lang.Short"},
+                          {"f36", "java.lang.Integer"},
+                          {"f37", "java.lang.Long"},
+                          {"f38", "java.lang.Float"},
+                          {"f39", "java.lang.Double"},
+                          {"f40", CompositeKey.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkSecKey(obj, "f0", obj.f0, Boolean.class);
+        checkSecKey(obj, "f1", obj.f1, Character.class);
+        checkSecKey(obj, "f2", obj.f2, Byte.class);
+        checkSecKey(obj, "f3", obj.f3, Short.class);
+        checkSecKey(obj, "f4", obj.f4, Integer.class);
+        checkSecKey(obj, "f5", obj.f5, Long.class);
+        checkSecKey(obj, "f6", obj.f6, Float.class);
+        checkSecKey(obj, "f7", obj.f7, Double.class);
+        checkSecKey(obj, "f8", obj.f8, String.class);
+        checkSecKey(obj, "f9", obj.f9, BigInteger.class);
+        //checkSecKey(obj, "f10", obj.f10, BigDecimal.class);
+        checkSecKey(obj, "f11", obj.f11, Date.class);
+        checkSecKey(obj, "f12", obj.f12, Boolean.class);
+        checkSecKey(obj, "f13", obj.f13, Character.class);
+        checkSecKey(obj, "f14", obj.f14, Byte.class);
+        checkSecKey(obj, "f15", obj.f15, Short.class);
+        checkSecKey(obj, "f16", obj.f16, Integer.class);
+        checkSecKey(obj, "f17", obj.f17, Long.class);
+        checkSecKey(obj, "f18", obj.f18, Float.class);
+        checkSecKey(obj, "f19", obj.f19, Double.class);
+        checkSecKey(obj, "f20", obj.f20, CompositeKey.class);
+
+        checkSecMultiKey(obj, "f21", toSet(obj.f21), Integer.class);
+        checkSecMultiKey(obj, "f22", toSet(obj.f22), Integer.class);
+        checkSecMultiKey(obj, "f23", toSet(obj.f23), Integer.class);
+        checkSecMultiKey(obj, "f24", toSet(obj.f24), CompositeKey.class);
+        checkSecMultiKey(obj, "f25", toSet(obj.f25), CompositeKey.class);
+
+        nullifySecKey(obj, "f8", obj.f8, String.class);
+        nullifySecKey(obj, "f9", obj.f9, BigInteger.class);
+        //nullifySecKey(obj, "f10", obj.f10, BigDecimal.class);
+        nullifySecKey(obj, "f11", obj.f11, Date.class);
+        nullifySecKey(obj, "f12", obj.f12, Boolean.class);
+        nullifySecKey(obj, "f13", obj.f13, Character.class);
+        nullifySecKey(obj, "f14", obj.f14, Byte.class);
+        nullifySecKey(obj, "f15", obj.f15, Short.class);
+        nullifySecKey(obj, "f16", obj.f16, Integer.class);
+        nullifySecKey(obj, "f17", obj.f17, Long.class);
+        nullifySecKey(obj, "f18", obj.f18, Float.class);
+        nullifySecKey(obj, "f19", obj.f19, Double.class);
+        nullifySecKey(obj, "f20", obj.f20, CompositeKey.class);
+
+        nullifySecMultiKey(obj, "f21", obj.f21, Integer.class);
+        nullifySecMultiKey(obj, "f22", obj.f22, Integer.class);
+        nullifySecMultiKey(obj, "f23", obj.f23, Integer.class);
+        nullifySecMultiKey(obj, "f24", obj.f24, CompositeKey.class);
+        nullifySecMultiKey(obj, "f25", obj.f25, CompositeKey.class);
+
+        nullifySecKey(obj, "f31", obj.f31, Date.class);
+        nullifySecKey(obj, "f32", obj.f32, Boolean.class);
+        nullifySecKey(obj, "f33", obj.f33, Character.class);
+        nullifySecKey(obj, "f34", obj.f34, Byte.class);
+        nullifySecKey(obj, "f35", obj.f35, Short.class);
+        nullifySecKey(obj, "f36", obj.f36, Integer.class);
+        nullifySecKey(obj, "f37", obj.f37, Long.class);
+        nullifySecKey(obj, "f38", obj.f38, Float.class);
+        nullifySecKey(obj, "f39", obj.f39, Double.class);
+        nullifySecKey(obj, "f40", obj.f40, CompositeKey.class);
+
+        close();
+    }
+
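+    /* Helpers that normalize an array or collection to a Set for comparison. */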
+    static Set<Integer> toSet(int[] a) {
+        Set<Integer> set = new HashSet<Integer>();
+        for (int i : a) {
+            set.add(i);
+        }
+        return set;
+    }
+
+    static <T> Set<T> toSet(T[] a) {
+        return new HashSet<T>(Arrays.asList(a));
+    }
+
+    static <T> Set<T> toSet(Set<T> s) {
+        return s;
+    }
+
+    @Entity
+    static class SecKeys implements MyEntity {
+
+        @PrimaryKey
+        long id;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private boolean f0 = false;
+        private boolean g0 = false;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private char f1 = '1';
+        private char g1 = '1';
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private byte f2 = 2;
+        private byte g2 = 2;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private short f3 = 3;
+        private short g3 = 3;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private int f4 = 4;
+        private int g4 = 4;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private long f5 = 5;
+        private long g5 = 5;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private float f6 = 6.6f;
+        private float g6 = 6.6f;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private double f7 = 7.7;
+        private double g7 = 7.7;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private String f8 = "8";
+        private String g8 = "8";
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private BigInteger f9;
+        private BigInteger g9;
+
+        //@SecondaryKey(relate=MANY_TO_ONE)
+        //private BigDecimal f10;
+        //private BigDecimal g10;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Date f11 = new Date(11);
+        private Date g11 = new Date(11);
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Boolean f12 = true;
+        private Boolean g12 = true;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Character f13 = '3';
+        private Character g13 = '3';
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Byte f14 = 14;
+        private Byte g14 = 14;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Short f15 = 15;
+        private Short g15 = 15;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Integer f16 = 16;
+        private Integer g16 = 16;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Long f17 = 17L;
+        private Long g17 = 17L;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Float f18 = 18.18f;
+        private Float g18 = 18.18f;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Double f19 = 19.19;
+        private Double g19 = 19.19;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private CompositeKey f20 =
+            new CompositeKey(20, 20L, "20", BigInteger.valueOf(20));
+        private CompositeKey g20 =
+            new CompositeKey(20, 20L, "20", BigInteger.valueOf(20));
+
+        private static int[] arrayOfInt = { 100, 101, 102 };
+
+        private static Integer[] arrayOfInteger = { 100, 101, 102 };
+
+        private static CompositeKey[] arrayOfCompositeKey = {
+            new CompositeKey(100, 100L, "100", BigInteger.valueOf(100)),
+            new CompositeKey(101, 101L, "101", BigInteger.valueOf(101)),
+            new CompositeKey(102, 102L, "102", BigInteger.valueOf(102)),
+        };
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private int[] f21 = arrayOfInt;
+        private int[] g21 = f21;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Integer[] f22 = arrayOfInteger;
+        private Integer[] g22 = f22;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Set<Integer> f23 = toSet(arrayOfInteger);
+        private Set<Integer> g23 = f23;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private CompositeKey[] f24 = arrayOfCompositeKey;
+        private CompositeKey[] g24 = f24;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Set<CompositeKey> f25 = toSet(arrayOfCompositeKey);
+        private Set<CompositeKey> g25 = f25;
+
+        /* Repeated key values to test shared references. */
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Date f31 = f11;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Boolean f32 = f12;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Character f33 = f13;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Byte f34 = f14;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Short f35 = f15;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Integer f36 = f16;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Long f37 = f17;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Float f38 = f18;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Double f39 = f19;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private CompositeKey f40 = f20;
+
+        public Object getPriKeyObject() {
+            return id;
+        }
+
+        public void validate(Object other) {
+            SecKeys o = (SecKeys) other;
+            TestCase.assertEquals(id, o.id);
+
+            TestCase.assertEquals(f0, o.f0);
+            TestCase.assertEquals(f1, o.f1);
+            TestCase.assertEquals(f2, o.f2);
+            TestCase.assertEquals(f3, o.f3);
+            TestCase.assertEquals(f4, o.f4);
+            TestCase.assertEquals(f5, o.f5);
+            TestCase.assertEquals(f6, o.f6);
+            TestCase.assertEquals(f7, o.f7);
+            TestCase.assertEquals(f8, o.f8);
+            TestCase.assertEquals(f9, o.f9);
+            //TestCase.assertEquals(f10, o.f10);
+            TestCase.assertEquals(f11, o.f11);
+            TestCase.assertEquals(f12, o.f12);
+            TestCase.assertEquals(f13, o.f13);
+            TestCase.assertEquals(f14, o.f14);
+            TestCase.assertEquals(f15, o.f15);
+            TestCase.assertEquals(f16, o.f16);
+            TestCase.assertEquals(f17, o.f17);
+            TestCase.assertEquals(f18, o.f18);
+            TestCase.assertEquals(f19, o.f19);
+            TestCase.assertEquals(f20, o.f20);
+            TestCase.assertTrue(Arrays.equals(f21, o.f21));
+            TestCase.assertTrue(Arrays.equals(f22, o.f22));
+            TestCase.assertEquals(f23, o.f23);
+            TestCase.assertTrue(Arrays.equals(f24, o.f24));
+            TestCase.assertEquals(f25, o.f25);
+
+            TestCase.assertEquals(g0, o.g0);
+            TestCase.assertEquals(g1, o.g1);
+            TestCase.assertEquals(g2, o.g2);
+            TestCase.assertEquals(g3, o.g3);
+            TestCase.assertEquals(g4, o.g4);
+            TestCase.assertEquals(g5, o.g5);
+            TestCase.assertEquals(g6, o.g6);
+            TestCase.assertEquals(g7, o.g7);
+            TestCase.assertEquals(g8, o.g8);
+            TestCase.assertEquals(g9, o.g9);
+            //TestCase.assertEquals(g10, o.g10);
+            TestCase.assertEquals(g11, o.g11);
+            TestCase.assertEquals(g12, o.g12);
+            TestCase.assertEquals(g13, o.g13);
+            TestCase.assertEquals(g14, o.g14);
+            TestCase.assertEquals(g15, o.g15);
+            TestCase.assertEquals(g16, o.g16);
+            TestCase.assertEquals(g17, o.g17);
+            TestCase.assertEquals(g18, o.g18);
+            TestCase.assertEquals(g19, o.g19);
+            TestCase.assertEquals(g20, o.g20);
+            TestCase.assertTrue(Arrays.equals(g21, o.g21));
+            TestCase.assertTrue(Arrays.equals(g22, o.g22));
+            TestCase.assertEquals(g23, o.g23);
+            TestCase.assertTrue(Arrays.equals(g24, o.g24));
+            TestCase.assertEquals(g25, o.g25);
+
+            TestCase.assertEquals(f31, o.f31);
+            TestCase.assertEquals(f32, o.f32);
+            TestCase.assertEquals(f33, o.f33);
+            TestCase.assertEquals(f34, o.f34);
+            TestCase.assertEquals(f35, o.f35);
+            TestCase.assertEquals(f36, o.f36);
+            TestCase.assertEquals(f37, o.f37);
+            TestCase.assertEquals(f38, o.f38);
+            TestCase.assertEquals(f39, o.f39);
+            TestCase.assertEquals(f40, o.f40);
+
+            checkSameIfNonNull(o.f31, o.f11);
+            checkSameIfNonNull(o.f32, o.f12);
+            checkSameIfNonNull(o.f33, o.f13);
+            checkSameIfNonNull(o.f34, o.f14);
+            checkSameIfNonNull(o.f35, o.f15);
+            checkSameIfNonNull(o.f36, o.f16);
+            checkSameIfNonNull(o.f37, o.f17);
+            checkSameIfNonNull(o.f38, o.f18);
+            checkSameIfNonNull(o.f39, o.f19);
+            checkSameIfNonNull(o.f40, o.f20);
+        }
+    }
+
+    public void testSecKeyRefToPriKey()
+        throws IOException, DatabaseException {
+
+        open();
+
+        SecKeyRefToPriKey obj = new SecKeyRefToPriKey();
+        checkEntity(SecKeyRefToPriKey.class, obj);
+
+        checkMetadata(SecKeyRefToPriKey.class.getName(), new String[][] {
+                          {"priKey", "java.lang.String"},
+                          {"secKey1", "java.lang.String"},
+                          {"secKey2", String[].class.getName()},
+                          {"secKey3", Set.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkSecKey(obj, "secKey1", obj.secKey1, String.class);
+        checkSecMultiKey(obj, "secKey2", toSet(obj.secKey2), String.class);
+        checkSecMultiKey(obj, "secKey3", toSet(obj.secKey3), String.class);
+
+        close();
+    }
+
+    @Entity
+    static class SecKeyRefToPriKey implements MyEntity {
+
+        @PrimaryKey
+        private String priKey;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        private String secKey1;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private String[] secKey2;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Set<String> secKey3 = new HashSet<String>();
+
+        private SecKeyRefToPriKey() {
+            priKey = "sharedValue";
+            secKey1 = priKey;
+            secKey2 = new String[] { priKey };
+            secKey3.add(priKey);
+        }
+
+        public Object getPriKeyObject() {
+            return priKey;
+        }
+
+        public void validate(Object other) {
+            SecKeyRefToPriKey o = (SecKeyRefToPriKey) other;
+            TestCase.assertEquals(priKey, o.priKey);
+            TestCase.assertNotNull(o.secKey1);
+            TestCase.assertEquals(1, o.secKey2.length);
+            TestCase.assertEquals(1, o.secKey3.size());
+            TestCase.assertSame(o.secKey1, o.priKey);
+            TestCase.assertSame(o.secKey2[0], o.priKey);
+            TestCase.assertSame(o.secKey3.iterator().next(), o.priKey);
+        }
+    }
+
+    public void testSecKeyInSuperclass()
+        throws IOException, DatabaseException {
+
+        open();
+
+        SecKeyInSuperclassEntity obj = new SecKeyInSuperclassEntity();
+        checkEntity(SecKeyInSuperclassEntity.class, obj);
+
+        checkMetadata(SecKeyInSuperclass.class.getName(), new String[][] {
+                          {"priKey", "java.lang.String"},
+                          {"secKey1", String.class.getName()},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkMetadata(SecKeyInSuperclassEntity.class.getName(), new String[][] {
+                          {"secKey2", "java.lang.String"},
+                      },
+                      -1 /*priKeyIndex*/, SecKeyInSuperclass.class.getName());
+
+        checkSecKey
+            (obj, SecKeyInSuperclassEntity.class, "secKey1", obj.secKey1,
+             String.class);
+        checkSecKey
+            (obj, SecKeyInSuperclassEntity.class, "secKey2", obj.secKey2,
+             String.class);
+
+        close();
+    }
+
+    @Persistent
+    static class SecKeyInSuperclass implements MyEntity {
+
+        @PrimaryKey
+        String priKey = "1";
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String secKey1 = "1";
+
+        public Object getPriKeyObject() {
+            return priKey;
+        }
+
+        public void validate(Object other) {
+            SecKeyInSuperclass o = (SecKeyInSuperclass) other;
+            TestCase.assertEquals(secKey1, o.secKey1);
+        }
+    }
+
+    @Entity
+    static class SecKeyInSuperclassEntity extends SecKeyInSuperclass {
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String secKey2 = "2";
+
+        public void validate(Object other) {
+            super.validate(other);
+            SecKeyInSuperclassEntity o = (SecKeyInSuperclassEntity) other;
+            TestCase.assertEquals(priKey, o.priKey);
+            TestCase.assertEquals(secKey2, o.secKey2);
+        }
+    }
+
+    public void testSecKeyInSubclass()
+        throws IOException, DatabaseException {
+
+        open();
+
+        SecKeyInSubclass obj = new SecKeyInSubclass();
+        checkEntity(SecKeyInSubclassEntity.class, obj);
+
+        checkMetadata(SecKeyInSubclassEntity.class.getName(), new String[][] {
+                          {"priKey", "java.lang.String"},
+                          {"secKey1", "java.lang.String"},
+                      },
+                      0 /*priKeyIndex*/, null);
+
+        checkMetadata(SecKeyInSubclass.class.getName(), new String[][] {
+                          {"secKey2", String.class.getName()},
+                      },
+                      -1 /*priKeyIndex*/,
+                      SecKeyInSubclassEntity.class.getName());
+
+        checkSecKey
+            (obj, SecKeyInSubclassEntity.class, "secKey1", obj.secKey1,
+             String.class);
+        checkSecKey
+            (obj, SecKeyInSubclassEntity.class, "secKey2", obj.secKey2,
+             String.class);
+
+        close();
+    }
+
+    @Entity
+    static class SecKeyInSubclassEntity implements MyEntity {
+
+        @PrimaryKey
+        String priKey = "1";
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String secKey1;
+
+        public Object getPriKeyObject() {
+            return priKey;
+        }
+
+        public void validate(Object other) {
+            SecKeyInSubclassEntity o = (SecKeyInSubclassEntity) other;
+            TestCase.assertEquals(priKey, o.priKey);
+            TestCase.assertEquals(secKey1, o.secKey1);
+        }
+    }
+
+    @Persistent
+    static class SecKeyInSubclass extends SecKeyInSubclassEntity {
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String secKey2 = "2";
+
+        public void validate(Object other) {
+            super.validate(other);
+            SecKeyInSubclass o = (SecKeyInSubclass) other;
+            TestCase.assertEquals(secKey2, o.secKey2);
+        }
+    }
+
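+    /** Asserts that two references are identical unless either is null. */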
+    private static void checkSameIfNonNull(Object o1, Object o2) {
+        if (o1 != null && o2 != null) {
+            assertSame(o1, o2);
+        }
+    }
+
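+    /**
+     * Round-trips the entity and its primary key through the regular and raw
+     * bindings, validating the result after each conversion.
+     */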
+    private void checkEntity(Class entityCls, MyEntity entity)
+        throws DatabaseException {
+
+        Object priKey = entity.getPriKeyObject();
+        Class keyCls = priKey.getClass();
+        DatabaseEntry keyEntry2 = new DatabaseEntry();
+        DatabaseEntry dataEntry2 = new DatabaseEntry();
+
+        /* Write object, read it back and validate (compare) it. */
+        PersistEntityBinding entityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), false);
+        entityBinding.objectToData(entity, dataEntry);
+        entityBinding.objectToKey(entity, keyEntry);
+        Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+        entity.validate(entity2);
+
+        /* Read back the primary key and validate it. */
+        PersistKeyBinding keyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), false);
+        Object priKey2 = keyBinding.entryToObject(keyEntry);
+        assertEquals(priKey, priKey2);
+        keyBinding.objectToEntry(priKey2, keyEntry2);
+        assertEquals(keyEntry, keyEntry2);
+
+        /* Check raw entity binding. */
+        PersistEntityBinding rawEntityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), true);
+        RawObject rawEntity =
+            (RawObject) rawEntityBinding.entryToObject(keyEntry, dataEntry);
+        rawEntityBinding.objectToKey(rawEntity, keyEntry2);
+        rawEntityBinding.objectToData(rawEntity, dataEntry2);
+        entity2 = entityBinding.entryToObject(keyEntry2, dataEntry2);
+        entity.validate(entity2);
+        RawObject rawEntity2 =
+            (RawObject) rawEntityBinding.entryToObject(keyEntry2, dataEntry2);
+        assertEquals(rawEntity, rawEntity2);
+        assertEquals(dataEntry, dataEntry2);
+        assertEquals(keyEntry, keyEntry2);
+
+        /* Check that raw entity can be converted to a regular entity. */
+        entity2 = catalog.convertRawObject(rawEntity, null);
+        entity.validate(entity2);
+
+        /* Check raw key binding. */
+        PersistKeyBinding rawKeyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), true);
+        Object rawKey = rawKeyBinding.entryToObject(keyEntry);
+        rawKeyBinding.objectToEntry(rawKey, keyEntry2);
+        priKey2 = keyBinding.entryToObject(keyEntry2);
+        assertEquals(priKey, priKey2);
+        assertEquals(keyEntry, keyEntry2);
+    }
+
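+    /**
+     * Extracts a single-valued secondary key from the entity bytes with a
+     * PersistKeyCreator and round-trips it through the key binding.
+     */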
+    private void checkSecKey(MyEntity entity,
+                             String keyName,
+                             Object keyValue,
+                             Class keyCls)
+        throws DatabaseException {
+
+        checkSecKey(entity, entity.getClass(), keyName, keyValue, keyCls);
+    }
+
+    private void checkSecKey(MyEntity entity,
+                             Class entityCls,
+                             String keyName,
+                             Object keyValue,
+                             Class keyCls)
+        throws DatabaseException {
+
+        /* Get entity metadata. */
+        EntityMetadata entityMeta =
+            model.getEntityMetadata(entityCls.getName());
+        assertNotNull(entityMeta);
+
+        /* Get secondary key metadata. */
+        SecondaryKeyMetadata secKeyMeta =
+            entityMeta.getSecondaryKeys().get(keyName);
+        assertNotNull(secKeyMeta);
+
+        /* Create key creator/nullifier. */
+        SecondaryKeyCreator keyCreator = new PersistKeyCreator
+            (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+             false /*rawAccess*/);
+
+        /* Convert entity to bytes. */
+        PersistEntityBinding entityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), false);
+        entityBinding.objectToData(entity, dataEntry);
+        entityBinding.objectToKey(entity, keyEntry);
+
+        /* Extract secondary key bytes from entity bytes. */
+        DatabaseEntry secKeyEntry = new DatabaseEntry();
+        boolean isKeyPresent = keyCreator.createSecondaryKey
+            (null, keyEntry, dataEntry, secKeyEntry);
+        assertEquals(keyValue != null, isKeyPresent);
+
+        /* Convert secondary key bytes back to an object. */
+        PersistKeyBinding keyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), false);
+        if (isKeyPresent) {
+            Object keyValue2 = keyBinding.entryToObject(secKeyEntry);
+            assertEquals(keyValue, keyValue2);
+            DatabaseEntry secKeyEntry2 = new DatabaseEntry();
+            keyBinding.objectToEntry(keyValue2, secKeyEntry2);
+            assertEquals(secKeyEntry, secKeyEntry2);
+        }
+    }
+
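+    /**
+     * Extracts a multi-valued secondary key from the entity bytes and checks
+     * the extracted set against the expected key values.
+     */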
+    private void checkSecMultiKey(MyEntity entity,
+                                  String keyName,
+                                  Set keyValues,
+                                  Class keyCls)
+        throws DatabaseException {
+
+        /* Get entity metadata. */
+        Class entityCls = entity.getClass();
+        EntityMetadata entityMeta =
+            model.getEntityMetadata(entityCls.getName());
+        assertNotNull(entityMeta);
+
+        /* Get secondary key metadata. */
+        SecondaryKeyMetadata secKeyMeta =
+            entityMeta.getSecondaryKeys().get(keyName);
+        assertNotNull(secKeyMeta);
+
+        /* Create key creator/nullifier. */
+        SecondaryMultiKeyCreator keyCreator = new PersistKeyCreator
+            (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+             false /*rawAccess*/);
+
+        /* Convert entity to bytes. */
+        PersistEntityBinding entityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), false);
+        entityBinding.objectToData(entity, dataEntry);
+        entityBinding.objectToKey(entity, keyEntry);
+
+        /* Extract secondary key bytes from entity bytes. */
+        Set<DatabaseEntry> results = new HashSet<DatabaseEntry>();
+        keyCreator.createSecondaryKeys
+            (null, keyEntry, dataEntry, results);
+        assertEquals(keyValues.size(), results.size());
+
+        /* Convert secondary key bytes back to objects. */
+        PersistKeyBinding keyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), false);
+        Set keyValues2 = new HashSet();
+        for (DatabaseEntry secKeyEntry : results) {
+            Object keyValue2 = keyBinding.entryToObject(secKeyEntry);
+            keyValues2.add(keyValue2);
+        }
+        assertEquals(keyValues, keyValues2);
+    }
+
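+    /**
+     * Nullifies a single-valued secondary key within the entity bytes and
+     * re-validates the entity against the nullified field.
+     */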
+    private void nullifySecKey(MyEntity entity,
+                              String keyName,
+                              Object keyValue,
+                              Class keyCls)
+        throws DatabaseException {
+
+        /* Get entity metadata. */
+        Class entityCls = entity.getClass();
+        EntityMetadata entityMeta =
+            model.getEntityMetadata(entityCls.getName());
+        assertNotNull(entityMeta);
+
+        /* Get secondary key metadata. */
+        SecondaryKeyMetadata secKeyMeta =
+            entityMeta.getSecondaryKeys().get(keyName);
+        assertNotNull(secKeyMeta);
+
+        /* Create key creator/nullifier. */
+        ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator
+            (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+             false /*rawAccess*/);
+
+        /* Convert entity to bytes. */
+        PersistEntityBinding entityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), false);
+        entityBinding.objectToData(entity, dataEntry);
+        entityBinding.objectToKey(entity, keyEntry);
+
+        /* Convert secondary key to bytes. */
+        PersistKeyBinding keyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), false);
+        DatabaseEntry secKeyEntry = new DatabaseEntry();
+        if (keyValue != null) {
+            keyBinding.objectToEntry(keyValue, secKeyEntry);
+        }
+
+        /* Nullify secondary key bytes within entity bytes. */
+        boolean isKeyPresent = keyNullifier.nullifyForeignKey
+            (null, keyEntry, dataEntry, secKeyEntry);
+        assertEquals(keyValue != null, isKeyPresent);
+
+        /* Convert modified entity bytes back to an entity. */
+        Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+        setFieldToNull(entity, keyName);
+        entity.validate(entity2);
+
+        /* Do a full check after nullifying it. */
+        checkSecKey(entity, keyName, null, keyCls);
+    }
+
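+    /**
+     * Nullifies the elements of a multi-valued secondary key one at a time,
+     * re-validating the entity after each removal.
+     */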
+    private void nullifySecMultiKey(MyEntity entity,
+                                    String keyName,
+                                    Object keyValue,
+                                    Class keyCls)
+        throws DatabaseException {
+
+        /* Get entity metadata. */
+        Class entityCls = entity.getClass();
+        EntityMetadata entityMeta =
+            model.getEntityMetadata(entityCls.getName());
+        assertNotNull(entityMeta);
+
+        /* Get secondary key metadata. */
+        SecondaryKeyMetadata secKeyMeta =
+            entityMeta.getSecondaryKeys().get(keyName);
+        assertNotNull(secKeyMeta);
+
+        /* Create key creator/nullifier. */
+        ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator
+            (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+             false /*rawAccess*/);
+
+        /* Convert entity to bytes. */
+        PersistEntityBinding entityBinding =
+            new PersistEntityBinding(catalog, entityCls.getName(), false);
+        entityBinding.objectToData(entity, dataEntry);
+        entityBinding.objectToKey(entity, keyEntry);
+
+        /* Get secondary key binding. */
+        PersistKeyBinding keyBinding =
+            new PersistKeyBinding(catalog, keyCls.getName(), false);
+        DatabaseEntry secKeyEntry = new DatabaseEntry();
+
+        /* Nullify one key value at a time until all of them are gone. */
+        while (true) {
+            Object fieldObj = getField(entity, keyName);
+            fieldObj = nullifyFirstElement(fieldObj, keyBinding, secKeyEntry);
+            if (fieldObj == null) {
+                break;
+            }
+            setField(entity, keyName, fieldObj);
+
+            /* Nullify secondary key bytes within entity bytes. */
+            boolean isKeyPresent = keyNullifier.nullifyForeignKey
+                (null, keyEntry, dataEntry, secKeyEntry);
+            assertEquals(keyValue != null, isKeyPresent);
+
+            /* Convert modified entity bytes back to an entity. */
+            Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+            entity.validate(entity2);
+
+            /* Do a full check after nullifying it. */
+            Set keyValues;
+            if (fieldObj instanceof Set) {
+                keyValues = (Set) fieldObj;
+            } else if (fieldObj instanceof Object[]) {
+                keyValues = toSet((Object[]) fieldObj);
+            } else if (fieldObj instanceof int[]) {
+                keyValues = toSet((int[]) fieldObj);
+            } else {
+                throw new IllegalStateException(fieldObj.getClass().getName());
+            }
+            checkSecMultiKey(entity, keyName, keyValues, keyCls);
+        }
+    }
+
+    /**
+     * Nullifies the first element of an array or collection object by removing
+     * it from the array or collection.  Returns the resulting array or
+     * collection.  Also outputs the removed element to the keyEntry using the
+     * keyBinding.
+     */
+    private Object nullifyFirstElement(Object obj,
+                                       EntryBinding keyBinding,
+                                       DatabaseEntry keyEntry) {
+        if (obj instanceof Collection) {
+            Iterator i = ((Collection) obj).iterator();
+            if (i.hasNext()) {
+                Object elem = i.next();
+                i.remove();
+                keyBinding.objectToEntry(elem, keyEntry);
+                return obj;
+            } else {
+                return null;
+            }
+        } else if (obj instanceof Object[]) {
+            Object[] a1 = (Object[]) obj;
+            if (a1.length > 0) {
+                Object[] a2 = (Object[]) Array.newInstance
+                    (obj.getClass().getComponentType(), a1.length - 1);
+                System.arraycopy(a1, 1, a2, 0, a2.length);
+                keyBinding.objectToEntry(a1[0], keyEntry);
+                return a2;
+            } else {
+                return null;
+            }
+        } else if (obj instanceof int[]) {
+            int[] a1 = (int[]) obj;
+            if (a1.length > 0) {
+                int[] a2 = new int[a1.length - 1];
+                System.arraycopy(a1, 1, a2, 0, a2.length);
+                keyBinding.objectToEntry(a1[0], keyEntry);
+                return a2;
+            } else {
+                return null;
+            }
+        } else {
+            throw new IllegalStateException(obj.getClass().getName());
+        }
+    }
+
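+    /**
+     * Checks metadata against both the live model and a newly opened catalog
+     * that uses the stored model.
+     */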
+    private void checkMetadata(String clsName,
+                               String[][] nameTypePairs,
+                               int priKeyIndex,
+                               String superClsName)
+        throws DatabaseException {
+
+        /* Check metadata/types against the live model. */
+        checkMetadata
+            (catalog, model, clsName, nameTypePairs, priKeyIndex,
+             superClsName);
+
+        /*
+         * Open a catalog that uses the stored model.
+         */
+        PersistCatalog storedCatalog = new PersistCatalog
+            (null, env, STORE_PREFIX, STORE_PREFIX + "catalog", null, null,
+             null, false /*useCurrentModel*/, null /*Store*/);
+        EntityModel storedModel = storedCatalog.getResolvedModel();
+
+        /* Check metadata/types against the stored catalog/model. */
+        checkMetadata
+            (storedCatalog, storedModel, clsName, nameTypePairs, priKeyIndex,
+             superClsName);
+
+        storedCatalog.close();
+    }
+
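+    /**
+     * Checks the class metadata, primary key metadata and raw type
+     * information for a class against the given catalog and model.
+     */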
+    private void checkMetadata(PersistCatalog checkCatalog,
+                               EntityModel checkModel,
+                               String clsName,
+                               String[][] nameTypePairs,
+                               int priKeyIndex,
+                               String superClsName)
+        throws DatabaseException {
+
+        ClassMetadata classMeta = checkModel.getClassMetadata(clsName);
+        assertNotNull(clsName, classMeta);
+
+        PrimaryKeyMetadata priKeyMeta = classMeta.getPrimaryKey();
+        if (priKeyIndex >= 0) {
+            assertNotNull(priKeyMeta);
+            String fieldName = nameTypePairs[priKeyIndex][0];
+            String fieldType = nameTypePairs[priKeyIndex][1];
+            assertEquals(priKeyMeta.getName(), fieldName);
+            assertEquals(priKeyMeta.getClassName(), fieldType);
+            assertEquals(priKeyMeta.getDeclaringClassName(), clsName);
+            assertNull(priKeyMeta.getSequenceName());
+        } else {
+            assertNull(priKeyMeta);
+        }
+
+        RawType type = checkCatalog.getFormat(clsName);
+        assertNotNull(type);
+        assertEquals(clsName, type.getClassName());
+        assertEquals(0, type.getVersion());
+        assertTrue(!type.isSimple());
+        assertTrue(!type.isPrimitive());
+        assertTrue(!type.isEnum());
+        assertNull(type.getEnumConstants());
+        assertTrue(!type.isArray());
+        assertEquals(0, type.getDimensions());
+        assertNull(type.getComponentType());
+        RawType superType = type.getSuperType();
+        if (superClsName != null) {
+            assertNotNull(superType);
+            assertEquals(superClsName, superType.getClassName());
+        } else {
+            assertNull(superType);
+        }
+
+        Map<String,RawField> fields = type.getFields();
+        assertNotNull(fields);
+
+        int nFields = nameTypePairs.length;
+        assertEquals(nFields, fields.size());
+
+        for (String[] pair : nameTypePairs) {
+            String fieldName = pair[0];
+            String fieldType = pair[1];
+            Class fieldCls;
+            try {
+                fieldCls = SimpleCatalog.classForName(fieldType);
+            } catch (ClassNotFoundException e) {
+                fail(e.toString());
+                return; /* For compiler */
+            }
+            RawField field = fields.get(fieldName);
+            assertNotNull(field);
+            assertEquals(fieldName, field.getName());
+            type = field.getType();
+            assertNotNull(type);
+            int dim = getArrayDimensions(fieldType);
+            while (dim > 0) {
+                assertEquals(dim, type.getDimensions());
+                assertEquals(dim, getArrayDimensions(fieldType));
+                assertEquals(true, type.isArray());
+                assertEquals(fieldType, type.getClassName());
+                assertEquals(0, type.getVersion());
+                assertTrue(!type.isSimple());
+                assertTrue(!type.isPrimitive());
+                assertTrue(!type.isEnum());
+                assertNull(type.getEnumConstants());
+                fieldType = getArrayComponent(fieldType, dim);
+                type = type.getComponentType();
+                assertNotNull(fieldType, type);
+                dim -= 1;
+            }
+            assertEquals(fieldType, type.getClassName());
+            List<String> enums = getEnumConstants(fieldType);
+            assertEquals(isSimpleType(fieldType), type.isSimple());
+            assertEquals(isPrimitiveType(fieldType), type.isPrimitive());
+            assertNull(type.getComponentType());
+            assertTrue(!type.isArray());
+            assertEquals(0, type.getDimensions());
+            if (enums != null) {
+                assertTrue(type.isEnum());
+                assertEquals(enums, type.getEnumConstants());
+                assertNull(type.getSuperType());
+            } else {
+                assertTrue(!type.isEnum());
+                assertNull(type.getEnumConstants());
+            }
+        }
+    }
+
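+    /**
+     * Returns the constant names for an enum class, or null if the class is
+     * not an enum or is a primitive type.
+     */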
+    private List<String> getEnumConstants(String clsName) {
+        if (isPrimitiveType(clsName)) {
+            return null;
+        }
+        Class cls;
+        try {
+            cls = Class.forName(clsName);
+        } catch (ClassNotFoundException e) {
+            fail(e.toString());
+            return null; /* Never happens. */
+        }
+        if (!cls.isEnum()) {
+            return null;
+        }
+        List<String> enums = new ArrayList<String>();
+        Object[] vals = cls.getEnumConstants();
+        for (Object val : vals) {
+            enums.add(val.toString());
+        }
+        return enums;
+    }
+
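+    /**
+     * Returns the component type name for a JVM array type descriptor such
+     * as "[I" or "[Ljava.lang.String;", stripping a single dimension.
+     */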
+    private String getArrayComponent(String clsName, int dim) {
+        clsName = clsName.substring(1);
+        if (dim > 1) {
+            return clsName;
+        }
+        if (clsName.charAt(0) == 'L' &&
+            clsName.charAt(clsName.length() - 1) == ';') {
+            return clsName.substring(1, clsName.length() - 1);
+        }
+        if (clsName.length() != 1) {
+            fail();
+        }
+        switch (clsName.charAt(0)) {
+        case 'Z': return "boolean";
+        case 'B': return "byte";
+        case 'C': return "char";
+        case 'D': return "double";
+        case 'F': return "float";
+        case 'I': return "int";
+        case 'J': return "long";
+        case 'S': return "short";
+        default: fail();
+        }
+        return null; /* Should never happen. */
+    }
+
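+    /** Returns the array dimensions by counting leading '[' characters. */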
+    private static int getArrayDimensions(String clsName) {
+        int i = 0;
+        while (clsName.charAt(i) == '[') {
+            i += 1;
+        }
+        return i;
+    }
+
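+    /* The simple types below mirror those supported by the DPL catalog. */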
+    private static boolean isSimpleType(String clsName) {
+        return isPrimitiveType(clsName) ||
+               clsName.equals("java.lang.Boolean") ||
+               clsName.equals("java.lang.Character") ||
+               clsName.equals("java.lang.Byte") ||
+               clsName.equals("java.lang.Short") ||
+               clsName.equals("java.lang.Integer") ||
+               clsName.equals("java.lang.Long") ||
+               clsName.equals("java.lang.Float") ||
+               clsName.equals("java.lang.Double") ||
+               clsName.equals("java.lang.String") ||
+               clsName.equals("java.math.BigInteger") ||
+               //clsName.equals("java.math.BigDecimal") ||
+               clsName.equals("java.util.Date");
+    }
+
+    private static boolean isPrimitiveType(String clsName) {
+        return clsName.equals("boolean") ||
+               clsName.equals("char") ||
+               clsName.equals("byte") ||
+               clsName.equals("short") ||
+               clsName.equals("int") ||
+               clsName.equals("long") ||
+               clsName.equals("float") ||
+               clsName.equals("double");
+    }
+
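+    /** Common contract implemented by the test entity classes. */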
+    interface MyEntity {
+        Object getPriKeyObject();
+        void validate(Object other);
+    }
+
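+    /** Null-safe equality check. */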
+    private static boolean nullOrEqual(Object o1, Object o2) {
+        return (o1 != null) ? o1.equals(o2) : (o2 == null);
+    }
+
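+    /** Formats a possibly nested array for assertion failure messages. */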
+    private static String arrayToString(Object[] array) {
+        StringBuffer buf = new StringBuffer();
+        buf.append('[');
+        for (Object o : array) {
+            if (o instanceof Object[]) {
+                buf.append(arrayToString((Object[]) o));
+            } else {
+                buf.append(o);
+            }
+            buf.append(',');
+        }
+        buf.append(']');
+        return buf.toString();
+    }
+
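+    /* Reflection helpers for reading and writing private key fields. */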
+    private void setFieldToNull(Object obj, String fieldName) {
+        try {
+            Field field = obj.getClass().getDeclaredField(fieldName);
+            field.setAccessible(true);
+            field.set(obj, null);
+        } catch (NoSuchFieldException e) {
+            fail(e.toString());
+        } catch (IllegalAccessException e) {
+            fail(e.toString());
+        }
+    }
+
+    private void setField(Object obj, String fieldName, Object fieldValue) {
+        try {
+            Field field = obj.getClass().getDeclaredField(fieldName);
+            field.setAccessible(true);
+            field.set(obj, fieldValue);
+        } catch (NoSuchFieldException e) {
+            throw new IllegalStateException(e.toString());
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e.toString());
+        }
+    }
+
+    private Object getField(Object obj, String fieldName) {
+        try {
+            Field field = obj.getClass().getDeclaredField(fieldName);
+            field.setAccessible(true);
+            return field.get(obj);
+        } catch (NoSuchFieldException e) {
+            throw new IllegalStateException(e.toString());
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException(e.toString());
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/ConvertAndAddTest.java b/test/com/sleepycat/persist/test/ConvertAndAddTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..76d7a97eea966f35a4f0b38e7ca2e034bf4f2f5a
--- /dev/null
+++ b/test/com/sleepycat/persist/test/ConvertAndAddTest.java
@@ -0,0 +1,188 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ConvertAndAddTest.java,v 1.6.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Conversion;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Test a bug fix where an IndexOutOfBoundsException occurs when adding a field
+ * and converting another field, where the latter field is alphabetically
+ * higher than the former.  This is also tested by
+ * EvolveClasses.FieldAddAndConvert, but that class does not test evolving an
+ * entity that was created by catalog version 0.  [#15797]
+ *
+ * A modified version of this program was run manually with JE 3.2.30 to
+ * produce a log, which is the result of the testSetup() test.  The sole log
+ * file was renamed from 00000000.jdb to ConvertAndAddTest.jdb and added to CVS
+ * in this directory.  When that log file is opened here, the bug is
+ * reproduced.  The modifications to this program for 3.2.30 are:
+ *
+ *  + testSetup enabled by removing its 'x' prefix
+ *  + testConvertAndAddField disabled by adding an 'x' prefix
+ *  + don't remove log files in tearDown
+ *  + @Entity version is 0
+ *  + removed field MyEntity.a
+ *
+ * This test should be excluded from the BDB build because it uses a stored JE
+ * log file and it tests a fix for a bug that was never present in BDB.
+ *
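+ * As a sketch (reconstructed from the modification list above, not from the
+ * actual 3.2.30 source), the version-0 entity looked roughly like:
+ *
+ * <pre>
+ *  {@literal @}Entity(version=0)
+ *  static class MyEntity {
+ *      {@literal @}PrimaryKey
+ *      long key;
+ *      int b;   // field 'a' did not exist in version 0
+ *  }
+ * </pre>
+ *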
+ * @author Mark Hayes
+ */
+public class ConvertAndAddTest extends TestCase {
+
+    private static final String STORE_NAME = "test";
+
+    private File envHome;
+    private Environment env;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Error e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+        env = null;
+    }
+
+    private EntityStore open(boolean addConverter)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        Mutations mutations = new Mutations();
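+        /*
+         * Convert field "b" of MyEntity version 0.  MyConversion below is an
+         * identity conversion; only its presence matters for this test.
+         */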
+        mutations.addConverter(new Converter
+            (MyEntity.class.getName(), 0, "b", new MyConversion()));
+
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        if (addConverter) {
+            storeConfig.setMutations(mutations);
+        }
+        return new EntityStore(env, "foo", storeConfig);
+    }
+
+    private void close(EntityStore store)
+        throws DatabaseException {
+
+        store.close();
+        env.close();
+        env = null;
+    }
+
+    public void testConvertAndAddField()
+        throws DatabaseException, IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog(getClass(), "ConvertAndAddTest.jdb", envHome);
+
+        EntityStore store = open(true /*addConverter*/);
+
+        PrimaryIndex<Long, MyEntity> index =
+            store.getPrimaryIndex(Long.class, MyEntity.class);
+
+        MyEntity entity = index.get(1L);
+        assertNotNull(entity);
+        assertEquals(123, entity.b);
+
+        close(store);
+    }
+
+    public void xtestSetup()
+        throws DatabaseException {
+
+        EntityStore store = open(false /*addConverter*/);
+
+        PrimaryIndex<Long, MyEntity> index =
+            store.getPrimaryIndex(Long.class, MyEntity.class);
+
+        MyEntity entity = new MyEntity();
+        entity.key = 1;
+        entity.b = 123;
+        index.put(entity);
+
+        close(store);
+    }
+
+    @Entity(version=1)
+    static class MyEntity {
+
+        @PrimaryKey
+        long key;
+
+        int a; // added in version 1
+        int b;
+
+        private MyEntity() {}
+    }
+
+    @SuppressWarnings("serial")
+    public static class MyConversion implements Conversion {
+
+        public void initialize(EntityModel model) {
+        }
+
+        public Object convert(Object fromValue) {
+            return fromValue;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MyConversion;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb b/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..1dd55ac27dfa7567a71329aa3c144b559cca606f
Binary files /dev/null and b/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb differ
diff --git a/test/com/sleepycat/persist/test/DevolutionTest.java b/test/com/sleepycat/persist/test/DevolutionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0e989b46e185e197dbe87dae9491154f8fb53002
--- /dev/null
+++ b/test/com/sleepycat/persist/test/DevolutionTest.java
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: DevolutionTest.java,v 1.1.2.3 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Test a bug fix for an evolution error when a class is evolved and then 
+ * changed back to its original version.  Say there are two versions of a
+ * class A1 and A2 in the catalog, plus a new version A3 of the class.  The
+ * problem occurs when A2 is different from A3 and must be evolved, but A1
+ * happens to be identical to A3 and no evolution is needed.  In that case, A3
+ * was never added to the format list in the catalog (never assigned a format
+ * ID), but was still used as the "latest version" of A2.  This caused all
+ * kinds of trouble since the class catalog was effectively corrupt.  [#16467]
+ *
+ * We reproduce this scenario using type Other[], which is represented using
+ * ArrayObjectFormat internally.  By renaming Other to Other2, and then back to
+ * Other, we create the scenario described above for the array format itself.
+ * Array formats are only evolved if their component class name has changed
+ * (see ArrayObjectFormat.evolve).
+ *
+ * A modified version of this program was run manually with JE 3.3.71 to
+ * produce a log, which is the result of the xtestSetup() method.  The sole log
+ * file was renamed from 00000000.jdb to DevolutionTest.jdb and added to CVS
+ * in this directory.  When that log file is opened here, the bug is
+ * reproduced.
+ *
+ * This test should be excluded from the BDB build because it uses a stored JE
+ * log file and it tests a fix for a bug that was never present in BDB.
+ *
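+ * As a sketch (versions 0 and 1 are reconstructed; version 2 is the actual
+ * class below), the rename cycle for Other is:
+ *
+ * <pre>
+ *  version 0:  {@literal @}Persistent            class Other  { }
+ *  version 1:  {@literal @}Persistent(version=1) class Other2 { }  // renamed
+ *  version 2:  {@literal @}Persistent(version=2) class Other  { }  // renamed back
+ * </pre>
+ *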
+ * @author Mark Hayes
+ */
+public class DevolutionTest extends TestCase {
+
+    private static final String STORE_NAME = "test";
+
+    private File envHome;
+    private Environment env;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(TestUtils.DEST_DIR));
+        TestUtils.removeLogFiles("Setup", envHome, false);
+    }
+
+    public void tearDown()
+        throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+        env = null;
+    }
+
+    private EntityStore open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        /*
+         * When version 0 of Other is used, no renamer is configured.  When
+         * version 1 is used, a renamer from Other version 0 to Other2 is used.
+         * For version 2, the current version, a renamer from Other2 version 1
+         * to Other is used.
+         */
+        String clsName = getClass().getName() + "$Other";
+        Renamer renamer = new Renamer(clsName + '2', 1, clsName);
+        Mutations mutations = new Mutations();
+        mutations.addRenamer(renamer);
+
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        storeConfig.setMutations(mutations);
+        return new EntityStore(env, "foo", storeConfig);
+    }
+
+    private void close(EntityStore store)
+        throws DatabaseException {
+
+        store.close();
+        env.close();
+        env = null;
+    }
+
+    public void testDevolution()
+        throws DatabaseException, IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog(getClass(), "DevolutionTest.jdb", envHome);
+
+        EntityStore store = open();
+
+        PrimaryIndex<Long, MyEntity> index =
+            store.getPrimaryIndex(Long.class, MyEntity.class);
+
+        MyEntity entity = index.get(1L);
+        assertNotNull(entity);
+        assertEquals(123, entity.b);
+
+        close(store);
+    }
+
+    public void xtestSetup()
+        throws DatabaseException {
+
+        EntityStore store = open();
+
+        PrimaryIndex<Long, MyEntity> index =
+            store.getPrimaryIndex(Long.class, MyEntity.class);
+
+        MyEntity entity = new MyEntity();
+        entity.key = 1L;
+        entity.b = 123;
+        index.put(entity);
+
+        close(store);
+    }
+
+    /**
+     * This class name is changed from Other to Other2 in version 1 and back to
+     * Other in version 2.  xtestSetup is executed for versions 0 and 1,
+     * which evolves the format.  testDevolution is run with version 2.
+     */
+    @Persistent(version=2)
+    static class Other {
+    }
+
+    @Entity(version=0)
+    static class MyEntity {
+
+        @PrimaryKey
+        long key;
+
+        Other[] a;
+
+        int b;
+
+        private MyEntity() {}
+    }
+}
diff --git a/test/com/sleepycat/persist/test/DevolutionTest.jdb b/test/com/sleepycat/persist/test/DevolutionTest.jdb
new file mode 100644
index 0000000000000000000000000000000000000000..8f7456f02181c7ad5ad08ee74f70819572da7866
Binary files /dev/null and b/test/com/sleepycat/persist/test/DevolutionTest.jdb differ
diff --git a/test/com/sleepycat/persist/test/Enhanced0.java b/test/com/sleepycat/persist/test/Enhanced0.java
new file mode 100644
index 0000000000000000000000000000000000000000..d5892864d268239f85bab0ac7a1a449e7ec45eb5
--- /dev/null
+++ b/test/com/sleepycat/persist/test/Enhanced0.java
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Enhanced0.java,v 1.6.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * For running ASMifier -- before any enhancements.
+ */
+@Entity
+class Enhanced0 {
+
+    @PrimaryKey
+    private String f1;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private int f2;
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String f3;
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String f4;
+
+    private int f5;
+    private String f6;
+    private String f7;
+}
diff --git a/test/com/sleepycat/persist/test/Enhanced1.java b/test/com/sleepycat/persist/test/Enhanced1.java
new file mode 100644
index 0000000000000000000000000000000000000000..ed840261fa6471980b228ad0480ce217d44c3f86
--- /dev/null
+++ b/test/com/sleepycat/persist/test/Enhanced1.java
@@ -0,0 +1,259 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Enhanced1.java,v 1.10.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import com.sleepycat.persist.impl.Enhanced;
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * For running ASMifier -- adds minimal enhancements.
+ */
+@Entity
+class Enhanced1 implements Enhanced {
+
+    @PrimaryKey
+    private String f1;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private int f2;
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String f3;
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String f4;
+
+    private int f5;
+    private String f6;
+    private String f7;
+    private int f8;
+    private int f9;
+    private int f10;
+    private int f11;
+    private int f12;
+
+    static {
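+        /* Register a prototype instance with EnhancedAccessor at class load. */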
+        EnhancedAccessor.registerClass(null, new Enhanced1());
+    }
+
+    public Object bdbNewInstance() {
+        return new Enhanced1();
+    }
+
+    public Object bdbNewArray(int len) {
+        return new Enhanced1[len];
+    }
+
+    public boolean bdbIsPriKeyFieldNullOrZero() {
+        return f1 == null;
+    }
+
+    public void bdbWritePriKeyField(EntityOutput output, Format format) {
+        output.writeKeyObject(f1, format);
+    }
+
+    public void bdbReadPriKeyField(EntityInput input, Format format) {
+        f1 = (String) input.readKeyObject(format);
+    }
+
+    public void bdbWriteSecKeyFields(EntityOutput output) {
+        /* If primary key is an object: */
+        output.registerPriKeyObject(f1);
+        /* Always: */
+        output.writeInt(f2);
+        output.writeObject(f3, null);
+        output.writeObject(f4, null);
+    }
+
+    public void bdbReadSecKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+        /* If primary key is an object: */
+        input.registerPriKeyObject(f1);
+
+        if (superLevel <= 0) {
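+            /*
+             * Cases fall through intentionally: fields startField through
+             * endField are read in order.
+             */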
+            switch (startField) {
+            case 0:
+                f2 = input.readInt();
+                if (endField == 0) break;
+            case 1:
+                f3 = (String) input.readObject();
+                if (endField == 1) break;
+            case 2:
+                f4 = (String) input.readObject();
+            }
+        }
+    }
+
+    public void bdbWriteNonKeyFields(EntityOutput output) {
+        output.writeInt(f5);
+        output.writeObject(f6, null);
+        output.writeObject(f7, null);
+        output.writeInt(f8);
+        output.writeInt(f9);
+        output.writeInt(f10);
+        output.writeInt(f11);
+        output.writeInt(f12);
+    }
+
+    public void bdbReadNonKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+        if (superLevel <= 0) {
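+            /* Same intentional fall-through as bdbReadSecKeyFields. */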
+            switch (startField) {
+            case 0:
+                f5 = input.readInt();
+                if (endField == 0) break;
+            case 1:
+                f6 = (String) input.readObject();
+                if (endField == 1) break;
+            case 2:
+                f7 = (String) input.readObject();
+                if (endField == 2) break;
+            case 3:
+                f8 = input.readInt();
+                if (endField == 3) break;
+            case 4:
+                f9 = input.readInt();
+                if (endField == 4) break;
+            case 5:
+                f10 = input.readInt();
+                if (endField == 5) break;
+            case 6:
+                f11 = input.readInt();
+                if (endField == 6) break;
+            case 7:
+                f12 = input.readInt();
+            }
+        }
+    }
+
+    public boolean bdbNullifyKeyField(Object o,
+                                      int field,
+                                      int superLevel,
+                                      boolean isSecField,
+                                      Object keyElement) {
+        if (superLevel > 0) {
+            return false;
+        } else if (isSecField) {
+            switch (field) {
+            case 1:
+                if (f3 != null) {
+                    f3 = null;
+                    return true;
+                } else {
+                    return false;
+                }
+            case 2:
+                if (f4 != null) {
+                    f4 = null;
+                    return true;
+                } else {
+                    return false;
+                }
+            default:
+                return false;
+            }
+        } else {
+            switch (field) {
+            case 1:
+                if (f6 != null) {
+                    f6 = null;
+                    return true;
+                } else {
+                    return false;
+                }
+            case 2:
+                if (f7 != null) {
+                    f7 = null;
+                    return true;
+                } else {
+                    return false;
+                }
+            default:
+                return false;
+            }
+        }
+    }
+
+    public Object bdbGetField(Object o,
+                              int field,
+                              int superLevel,
+                              boolean isSecField) {
+        if (superLevel > 0) {
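+            /* No persistent superclass; falls through to return null. */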
+        } else if (isSecField) {
+            switch (field) {
+            case 0:
+                return Integer.valueOf(f2);
+            case 1:
+                return f3;
+            case 2:
+                return f4;
+            }
+        } else {
+            switch (field) {
+            case 0:
+                return Integer.valueOf(f5);
+            case 1:
+                return f6;
+            case 2:
+                return f7;
+            }
+        }
+        return null;
+    }
+
+    public void bdbSetField(Object o,
+                            int field,
+                            int superLevel,
+                            boolean isSecField,
+                            Object value) {
+        if (superLevel > 0) {
+        } else if (isSecField) {
+            switch (field) {
+            case 0:
+                f2 = ((Integer) value).intValue();
+                return;
+            case 1:
+                f3 = (String) value;
+                return;
+            case 2:
+                f4 = (String) value;
+                return;
+            }
+        } else {
+            switch (field) {
+            case 0:
+                f5 = ((Integer) value).intValue();
+                return;
+            case 1:
+                f6 = (String) value;
+                return;
+            case 2:
+                f7 = (String) value;
+                return;
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/Enhanced2.java b/test/com/sleepycat/persist/test/Enhanced2.java
new file mode 100644
index 0000000000000000000000000000000000000000..0acc4a87d577061eafdc98ed1e12ffe1661e195e
--- /dev/null
+++ b/test/com/sleepycat/persist/test/Enhanced2.java
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Enhanced2.java,v 1.8.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.Persistent;
+
+/**
+ * For running ASMifier -- entity subclass.
+ */
+@Persistent
+class Enhanced2 extends Enhanced1 {
+
+    static {
+        EnhancedAccessor.registerClass(null, new Enhanced2());
+    }
+
+    public Object bdbNewInstance() {
+        return new Enhanced2();
+    }
+
+    public Object bdbNewArray(int len) {
+        return new Enhanced2[len];
+    }
+
+    public boolean bdbIsPriKeyFieldNullOrZero() {
+        return super.bdbIsPriKeyFieldNullOrZero();
+    }
+
+    public void bdbWritePriKeyField(EntityOutput output, Format format) {
+        super.bdbWritePriKeyField(output, format);
+    }
+
+    public void bdbReadPriKeyField(EntityInput input, Format format) {
+        super.bdbReadPriKeyField(input, format);
+    }
+
+    public void bdbWriteSecKeyFields(EntityOutput output) {
+        super.bdbWriteSecKeyFields(output);
+    }
+
+    public void bdbReadSecKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+        if (superLevel != 0) {
+            super.bdbReadSecKeyFields
+                (input, startField, endField, superLevel - 1);
+        }
+    }
+
+    public void bdbWriteNonKeyFields(EntityOutput output) {
+        super.bdbWriteNonKeyFields(output);
+    }
+
+    public void bdbReadNonKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+        if (superLevel != 0) {
+            super.bdbReadNonKeyFields
+                (input, startField, endField, superLevel - 1);
+        }
+    }
+
+    public boolean bdbNullifyKeyField(Object o,
+                                      int field,
+                                      int superLevel,
+                                      boolean isSecField,
+                                      Object keyElement) {
+        if (superLevel > 0) {
+            return super.bdbNullifyKeyField
+                (o, field, superLevel - 1, isSecField, keyElement);
+        } else {
+            return false;
+        }
+    }
+
+    public Object bdbGetField(Object o,
+                              int field,
+                              int superLevel,
+                              boolean isSecField) {
+        if (superLevel > 0) {
+            return super.bdbGetField
+                (o, field, superLevel - 1, isSecField);
+        } else {
+            return null;
+        }
+    }
+
+    public void bdbSetField(Object o,
+                            int field,
+                            int superLevel,
+                            boolean isSecField,
+                            Object value) {
+        if (superLevel > 0) {
+            super.bdbSetField
+                (o, field, superLevel - 1, isSecField, value);
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/Enhanced3.java b/test/com/sleepycat/persist/test/Enhanced3.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc4f2ed58725f72156ddbf78505def3f483c4c88
--- /dev/null
+++ b/test/com/sleepycat/persist/test/Enhanced3.java
@@ -0,0 +1,162 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: Enhanced3.java,v 1.8.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+/*
+import java.math.BigInteger;
+import java.math.BigDecimal;
+*/
+import java.util.Date;
+
+import com.sleepycat.persist.impl.Enhanced;
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+
+/**
+ * For running ASMifier -- a composite key class using all simple data types;
+ * it does not follow the pattern of the previous EnhancedN.java files.
+ */
+@Persistent
+class Enhanced3 implements Enhanced {
+
+    @KeyField(1) boolean z;
+    @KeyField(2) char c;
+    @KeyField(3) byte b;
+    @KeyField(4) short s;
+    @KeyField(5) int i;
+    @KeyField(6) long l;
+    @KeyField(7) float f;
+    @KeyField(8) double d;
+
+    @KeyField(9) Boolean zw;
+    @KeyField(10) Character cw;
+    @KeyField(11) Byte bw;
+    @KeyField(12) Short sw;
+    @KeyField(13) Integer iw;
+    @KeyField(14) Long lw;
+    @KeyField(15) Float fw;
+    @KeyField(16) Double dw;
+
+    @KeyField(17) Date date;
+    @KeyField(18) String str;
+    /*
+    @KeyField(19) BigInteger bigint;
+    @KeyField(20) BigDecimal bigdec;
+    */
+
+    static {
+        EnhancedAccessor.registerClass(null, new Enhanced3());
+    }
+
+    public Object bdbNewInstance() {
+        return new Enhanced3();
+    }
+
+    public Object bdbNewArray(int len) {
+        return new Enhanced3[len];
+    }
+
+    public boolean bdbIsPriKeyFieldNullOrZero() {
+        return false;
+    }
+
+    public void bdbWritePriKeyField(EntityOutput output, Format format) {
+    }
+
+    public void bdbReadPriKeyField(EntityInput input, Format format) {
+    }
+
+    public void bdbWriteSecKeyFields(EntityOutput output) {
+    }
+
+    public void bdbReadSecKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+    }
+
+    public void bdbWriteNonKeyFields(EntityOutput output) {
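+        /* The sorted float/double encodings sort bytewise like numbers. */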
+        output.writeBoolean(z);
+        output.writeChar(c);
+        output.writeByte(b);
+        output.writeShort(s);
+        output.writeInt(i);
+        output.writeLong(l);
+        output.writeSortedFloat(f);
+        output.writeSortedDouble(d);
+
+        output.writeBoolean(zw.booleanValue());
+        output.writeChar(cw.charValue());
+        output.writeByte(bw.byteValue());
+        output.writeShort(sw.shortValue());
+        output.writeInt(iw.intValue());
+        output.writeLong(lw.longValue());
+        output.writeSortedFloat(fw.floatValue());
+        output.writeSortedDouble(dw.doubleValue());
+
+        output.writeLong(date.getTime());
+        output.writeString(str);
+    }
+
+    public void bdbReadNonKeyFields(EntityInput input,
+                                    int startField,
+                                    int endField,
+                                    int superLevel) {
+        z = input.readBoolean();
+        c = input.readChar();
+        b = input.readByte();
+        s = input.readShort();
+        i = input.readInt();
+        l = input.readLong();
+        f = input.readSortedFloat();
+        d = input.readSortedDouble();
+
+        zw = Boolean.valueOf(input.readBoolean());
+        cw = Character.valueOf(input.readChar());
+        bw = Byte.valueOf(input.readByte());
+        sw = Short.valueOf(input.readShort());
+        iw = Integer.valueOf(input.readInt());
+        lw = Long.valueOf(input.readLong());
+        fw = Float.valueOf(input.readSortedFloat());
+        dw = Double.valueOf(input.readSortedDouble());
+
+        date = new Date(input.readLong());
+        str = input.readString();
+    }
+
+    public boolean bdbNullifyKeyField(Object o,
+                                      int field,
+                                      int superLevel,
+                                      boolean isSecField,
+                                      Object keyElement) {
+        // Didn't bother with this one.
+        return false;
+    }
+
+    public Object bdbGetField(Object o,
+                              int field,
+                              int superLevel,
+                              boolean isSecField) {
+        // Didn't bother with this one.
+        return null;
+    }
+
+    public void bdbSetField(Object o,
+                            int field,
+                            int superLevel,
+                            boolean isSecField,
+                            Object value) {
+        // Didn't bother with this one.
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveCase.java b/test/com/sleepycat/persist/test/EvolveCase.java
new file mode 100644
index 0000000000000000000000000000000000000000..6ad3e979aa33ea996fd4337807b902942543a2a1
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveCase.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveCase.java,v 1.13 2008/06/23 19:18:27 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.util.Iterator;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+
+@Persistent
+abstract class EvolveCase {
+
+    static final String STORE_NAME = "foo";
+
+    transient boolean updated;
+
+    Mutations getMutations() {
+        return null;
+    }
+
+    void configure(EntityModel model, StoreConfig config) {
+    }
+
+    String getStoreOpenException() {
+        return null;
+    }
+
+    int getNRecordsExpected() {
+        return 1;
+    }
+
+    void checkUnevolvedModel(EntityModel model, Environment env) {
+    }
+
+    void checkEvolvedModel(EntityModel model,
+                           Environment env,
+                           boolean oldTypesExist) {
+    }
+
+    void writeObjects(EntityStore store)
+        throws DatabaseException {
+    }
+
+    void readObjects(EntityStore store, boolean doUpdate)
+        throws DatabaseException {
+    }
+
+    void readRawObjects(RawStore store,
+                        boolean expectEvolved,
+                        boolean expectUpdated)
+        throws DatabaseException {
+    }
+
+    void copyRawObjects(RawStore rawStore, EntityStore newStore)
+        throws DatabaseException {
+    }
+
+    /**
+     * Checks for equality and prints the entire values, rather than the
+     * abbreviated values that TestCase.assertEquals prints.
+     */
+    static void checkEquals(Object expected, Object got) {
+        if ((expected != null) ? (!expected.equals(got)) : (got != null)) {
+            TestCase.fail("Expected:\n" + expected + "\nBut got:\n" + got);
+        }
+    }
+
+    /**
+     * Asserts that an entity database exists or does not exist.
+     */
+    static void assertDbExists(boolean expectExists,
+                               Environment env,
+                               String entityClassName) {
+        assertDbExists(expectExists, env, entityClassName, null);
+    }
+
+    /**
+     * Checks that an entity class exists or does not exist.
+     */
+    static void checkEntity(boolean exists,
+                            EntityModel model,
+                            Environment env,
+                            String className,
+                            int version,
+                            String secKeyName) {
+        if (exists) {
+            TestCase.assertNotNull(model.getEntityMetadata(className));
+            ClassMetadata meta = model.getClassMetadata(className);
+            TestCase.assertNotNull(meta);
+            TestCase.assertEquals(version, meta.getVersion());
+            TestCase.assertTrue(meta.isEntityClass());
+
+            RawType raw = model.getRawType(className);
+            TestCase.assertNotNull(raw);
+            TestCase.assertEquals(version, raw.getVersion());
+        } else {
+            TestCase.assertNull(model.getEntityMetadata(className));
+            TestCase.assertNull(model.getClassMetadata(className));
+            TestCase.assertNull(model.getRawType(className));
+        }
+
+        assertDbExists(exists, env, className);
+        if (secKeyName != null) {
+            assertDbExists(exists, env, className, secKeyName);
+        }
+    }
+
+    /**
+     * Checks that a non-entity class exists or does not exist.
+     */
+    static void checkNonEntity(boolean exists,
+                               EntityModel model,
+                               Environment env,
+                               String className,
+                               int version) {
+        if (exists) {
+            ClassMetadata meta = model.getClassMetadata(className);
+            TestCase.assertNotNull(meta);
+            TestCase.assertEquals(version, meta.getVersion());
+            TestCase.assertTrue(!meta.isEntityClass());
+
+            RawType raw = model.getRawType(className);
+            TestCase.assertNotNull(raw);
+            TestCase.assertEquals(version, raw.getVersion());
+        } else {
+            TestCase.assertNull(model.getClassMetadata(className));
+            TestCase.assertNull(model.getRawType(className));
+        }
+
+        TestCase.assertNull(model.getEntityMetadata(className));
+        assertDbExists(false, env, className);
+    }
+
+    /**
+     * Asserts that a database exists or does not exist, according to
+     * expectExists.  If keyName is null, checks an entity database.  If
+     * keyName is non-null, checks a secondary database.
+     */
+    static void assertDbExists(boolean expectExists,
+                               Environment env,
+                               String entityClassName,
+                               String keyName) {
+        PersistTestUtils.assertDbExists
+            (expectExists, env, STORE_NAME, entityClassName, keyName);
+    }
+
+    static void checkVersions(EntityModel model, String name, int version) {
+        checkVersions(model, new String[] {name}, new int[] {version});
+    }
+
+    static void checkVersions(EntityModel model,
+                              String name1,
+                              int version1,
+                              String name2,
+                              int version2) {
+        checkVersions
+            (model, new String[] {name1, name2},
+             new int[] {version1, version2});
+    }
+
+    private static void checkVersions(EntityModel model,
+                                      String[] names,
+                                      int[] versions) {
+        List<RawType> all = model.getAllRawTypeVersions(names[0]);
+        TestCase.assertNotNull(all);
+
+        assert names.length == versions.length;
+        TestCase.assertEquals(all.toString(), names.length, all.size());
+
+        Iterator<RawType> iter = all.iterator();
+        for (int i = 0; i < names.length; i += 1) {
+            RawType type = iter.next();
+            TestCase.assertEquals(versions[i], type.getVersion());
+            TestCase.assertEquals(names[i], type.getClassName());
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveClasses.java b/test/com/sleepycat/persist/test/EvolveClasses.java
new file mode 100644
index 0000000000000000000000000000000000000000..59ed150a50466b7c96d821210b5f680d60220835
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveClasses.java
@@ -0,0 +1,6581 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveClasses.java,v 1.26.2.2 2009/03/27 17:12:55 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.math.BigInteger;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Conversion;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Deleter;
+import com.sleepycat.persist.evolve.EntityConverter;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * Nested classes are modified versions of classes of the same name in
+ * EvolveClasses.java.original.  See EvolveTestBase.java for the steps that are
+ * taken to add a new class (test case).
+ *
+ * @author Mark Hayes
+ */
+class EvolveClasses {
+
+    private static final String PREFIX = EvolveClasses.class.getName() + '$';
+    private static final String CASECLS = EvolveCase.class.getName();
+
+    private static RawObject readRaw(RawStore store,
+                                     Object key,
+                                     Object... classVersionPairs)
+        throws DatabaseException {
+
+        return readRaw(store, null, key, classVersionPairs);
+    }
+
+    /**
+     * Reads a raw object and checks its class and superclass names and
+     * versions.  classVersionPairs alternates class name and version,
+     * starting with the entity class and walking up the superclass chain.
+     */
+    private static RawObject readRaw(RawStore store,
+                                     String entityClsName,
+                                     Object key,
+                                     Object... classVersionPairs)
+        throws DatabaseException {
+
+        TestCase.assertNotNull(store);
+        TestCase.assertNotNull(key);
+
+        if (entityClsName == null) {
+            entityClsName = (String) classVersionPairs[0];
+        }
+        PrimaryIndex<Object,RawObject> index =
+            store.getPrimaryIndex(entityClsName);
+        TestCase.assertNotNull(index);
+
+        RawObject obj = index.get(key);
+        TestCase.assertNotNull(obj);
+
+        checkRawType(obj.getType(), classVersionPairs);
+
+        RawObject superObj = obj.getSuper();
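+        /* Walk the superclass chain, checking the remaining pairs. */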
+        for (int i = 2; i < classVersionPairs.length; i += 2) {
+            Object[] a = new Object[classVersionPairs.length - i];
+            System.arraycopy(classVersionPairs, i, a, 0, a.length);
+            TestCase.assertNotNull(superObj);
+            checkRawType(superObj.getType(), a);
+            superObj = superObj.getSuper();
+        }
+
+        return obj;
+    }
+
+    /**
+     * Checks the class names and versions of a raw type and its supertypes.
+     */
+    private static void checkRawType(RawType type,
+                                     Object... classVersionPairs) {
+        TestCase.assertNotNull(type);
+        TestCase.assertNotNull(classVersionPairs);
+        TestCase.assertTrue(classVersionPairs.length % 2 == 0);
+
+        for (int i = 0; i < classVersionPairs.length; i += 2) {
+            String clsName = (String) classVersionPairs[i];
+            int clsVersion = (Integer) classVersionPairs[i + 1];
+            TestCase.assertEquals(clsName, type.getClassName());
+            TestCase.assertEquals(clsVersion, type.getVersion());
+            type = type.getSuperType();
+        }
+        TestCase.assertNull(type);
+    }
+
+    /**
+     * Checks that a raw object contains the specified field values.  Does not
+     * check superclass fields.
+     */
+    private static void checkRawFields(RawObject obj,
+                                       Object... nameValuePairs) {
+        TestCase.assertNotNull(obj);
+        TestCase.assertNotNull(obj.getValues());
+        TestCase.assertNotNull(nameValuePairs);
+        TestCase.assertTrue(nameValuePairs.length % 2 == 0);
+
+        Map<String,Object> values = obj.getValues();
+        TestCase.assertEquals(nameValuePairs.length / 2, values.size());
+
+        for (int i = 0; i < nameValuePairs.length; i += 2) {
+            String name = (String) nameValuePairs[i];
+            Object value = nameValuePairs[i + 1];
+            TestCase.assertEquals(name, value, values.get(name));
+        }
+    }
+
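+    /**
+     * Builds a field name-to-value map from alternating name/value pairs,
+     * used here to construct RawObject instances.
+     */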
+    private static Map<String,Object> makeValues(Object... nameValuePairs) {
+        TestCase.assertTrue(nameValuePairs.length % 2 == 0);
+        Map<String,Object> values = new HashMap<String,Object>();
+        for (int i = 0; i < nameValuePairs.length; i += 2) {
+            values.put((String) nameValuePairs[i], nameValuePairs[i + 1]);
+        }
+        return values;
+    }
+
+    /**
+     * Disallow removing an entity class when no Deleter mutation is specified.
+     */
+    static class DeletedEntity1_ClassRemoved_NoMutation extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "DeletedEntity1_ClassRemoved";
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "skey");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Allow removing an entity class when a Deleter mutation is specified.
+     */
+    static class DeletedEntity2_ClassRemoved_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "DeletedEntity2_ClassRemoved";
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(false, model, env, NAME, 0, "skey");
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 0);
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Disallow removing the Entity annotation when no Deleter mutation is
+     * specified.
+     */
+    static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase {
+
+        private static final String NAME =
+            DeletedEntity3_AnnotRemoved_NoMutation.class.getName();
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "skey");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Allow removing the Entity annotation when a Deleter mutation is
+     * specified.
+     */
+    static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            DeletedEntity4_AnnotRemoved_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(false, model, env, NAME, 0, "skey");
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity4_AnnotRemoved_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity4_AnnotRemoved_WithDeleter",
+                     e.toString());
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Disallow changing the Entity annotation to Persistent when no Deleter
+     * mutation is specified.
+     */
+    @Persistent(version=1)
+    static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase {
+
+        private static final String NAME =
+            DeletedEntity5_EntityToPersist_NoMutation.class.getName();
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 1 Error: @Entity switched to/from @Persistent";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "skey");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Allow changing the Entity annotation to Persistent when a Deleter
+     * mutation is specified.
+     */
+    @Persistent(version=1)
+    static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            DeletedEntity6_EntityToPersist_WithDeleter.class.getName();
+        private static final String NAME2 =
+            Embed_DeletedEntity6_EntityToPersist_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(true, model, env, NAME, 1);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            /* Cannot get the primary index for the former entity class. */
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity6_EntityToPersist_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity6_EntityToPersist_WithDeleter",
+                     e.toString());
+            }
+
+            /* Can embed the now persistent class in another entity class. */
+            PrimaryIndex<Long,
+                         Embed_DeletedEntity6_EntityToPersist_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Long.class,
+                     Embed_DeletedEntity6_EntityToPersist_WithDeleter.class);
+
+            if (doUpdate) {
+                Embed_DeletedEntity6_EntityToPersist_WithDeleter embed =
+                    new Embed_DeletedEntity6_EntityToPersist_WithDeleter();
+                index.put(embed);
+                embed = index.get(embed.key);
+                /* This new type should exist only after update. */
+                Environment env = store.getEnvironment();
+                EntityModel model = store.getModel();
+                checkEntity(true, model, env, NAME2, 0, null);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    @Entity
+    static class Embed_DeletedEntity6_EntityToPersist_WithDeleter {
+
+        @PrimaryKey
+        long key = 99;
+
+        DeletedEntity6_EntityToPersist_WithDeleter embedded =
+            new DeletedEntity6_EntityToPersist_WithDeleter();
+    }
+
+    /**
+     * Disallow removing a Persistent class when no Deleter mutation is
+     * specified, even when the Entity class that embedded the Persistent class
+     * is deleted properly (by removing the Entity annotation in this case).
+     */
+    static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "DeletedPersist1_ClassRemoved";
+
+        private static final String NAME2 =
+            DeletedPersist1_ClassRemoved_NoMutation.class.getName();
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 0, null);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    /**
+     * Allow removing a Persistent class when a Deleter mutation is
+     * specified, and the Entity class that embedded the Persistent class
+     * is also deleted properly (by removing the Entity annotation in this
+     * case).
+     */
+    static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "DeletedPersist2_ClassRemoved";
+        private static final String NAME2 =
+            DeletedPersist2_ClassRemoved_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(false, model, env, NAME, 0);
+            checkEntity(false, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist2_ClassRemoved_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist2_ClassRemoved_WithDeleter",
+                     e.toString());
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    static class DeletedPersist3_AnnotRemoved {
+
+        int f = 123;
+    }
+
+    /**
+     * Disallow removing the Persistent annotation when no Deleter mutation is
+     * specified, even when the Entity class that embedded the Persistent class
+     * is deleted properly (by removing the Entity annotation in this case).
+     */
+    static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist3_AnnotRemoved.class.getName();
+        private static final String NAME2 =
+            DeletedPersist3_AnnotRemoved_NoMutation.class.getName();
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 0, null);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    static class DeletedPersist4_AnnotRemoved {
+
+        int f = 123;
+    }
+
+    /**
+     * Allow removing the Persistent annotation when a Deleter mutation is
+     * specified, and the Entity class that embedded the Persistent class
+     * is also deleted properly (by removing the Entity annotation in this
+     * case).
+     */
+    static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist4_AnnotRemoved.class.getName();
+        private static final String NAME2 =
+            DeletedPersist4_AnnotRemoved_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(false, model, env, NAME, 0);
+            checkEntity(false, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist4_AnnotRemoved_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist4_AnnotRemoved_WithDeleter",
+                     e.toString());
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DeletedPersist5_PersistToEntity {
+
+        @PrimaryKey
+        int key = 99;
+
+        int f = 123;
+    }
+
+    /**
+     * Disallow changing the Persistent annotation to Entity when no Deleter
+     * mutation is specified, even when the Entity class that embedded the
+     * Persistent class is deleted properly (by removing the Entity annotation
+     * in this case).
+     */
+    static class DeletedPersist5_PersistToEntity_NoMutation
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist5_PersistToEntity.class.getName();
+        private static final String NAME2 =
+            DeletedPersist5_PersistToEntity_NoMutation.class.getName();
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 1 Error: @Entity switched to/from @Persistent";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 0, null);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DeletedPersist6_PersistToEntity {
+
+        @PrimaryKey
+        int key = 99;
+
+        int f = 123;
+    }
+
+    /**
+     * Allow changing the Persistent annotation to Entity when a Deleter
+     * mutation is specified, and the Entity class that embedded the Persistent
+     * class is also deleted properly (by removing the Entity annotation in
+     * this case).
+     */
+    static class DeletedPersist6_PersistToEntity_WithDeleter
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist6_PersistToEntity.class.getName();
+        private static final String NAME2 =
+            DeletedPersist6_PersistToEntity_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(false, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            /* Cannot get the primary index for the former entity class. */
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist6_PersistToEntity_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist6_PersistToEntity_WithDeleter",
+                     e.toString());
+            }
+
+            /* Can use the primary index of the class that is now an entity. */
+            PrimaryIndex<Integer,
+                         DeletedPersist6_PersistToEntity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist6_PersistToEntity.class);
+
+            if (doUpdate) {
+                DeletedPersist6_PersistToEntity obj =
+                    new DeletedPersist6_PersistToEntity();
+                index.put(obj);
+                obj = index.get(obj.key);
+                /* This new type should exist only after update. */
+                Environment env = store.getEnvironment();
+                EntityModel model = store.getModel();
+                checkEntity(true, model, env, NAME, 1, null);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
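+            /*
+             * Read the stored record in raw (old-format) form and convert it
+             * to the current class format before writing it to the new store.
+             */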
+            PrimaryIndex<Integer,
+                         DeletedPersist6_PersistToEntity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist6_PersistToEntity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((DeletedPersist6_PersistToEntity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    /**
+     * Disallow renaming an entity class without a Renamer mutation.
+     */
+    @Entity(version=1)
+    static class RenamedEntity1_NewEntityName_NoMutation
+        extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "RenamedEntity1_NewEntityName";
+        private static final String NAME2 =
+            RenamedEntity1_NewEntityName_NoMutation.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "skey");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    /**
+     * Allow renaming an entity class with a Renamer mutation.
+     */
+    @Entity(version=1)
+    static class RenamedEntity2_NewEntityName_WithRenamer
+        extends EvolveCase {
+
+        private static final String NAME =
+            PREFIX + "RenamedEntity2_NewEntityName";
+        private static final String NAME2 =
+            RenamedEntity2_NewEntityName_WithRenamer.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addRenamer(new Renamer(NAME, 0, NAME2));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(false, model, env, NAME, 0, null);
+            checkEntity(true, model, env, NAME2, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,RenamedEntity2_NewEntityName_WithRenamer>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     RenamedEntity2_NewEntityName_WithRenamer.class);
+            RenamedEntity2_NewEntityName_WithRenamer obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.skey);
+
+            SecondaryIndex<Integer,Integer,
+                           RenamedEntity2_NewEntityName_WithRenamer>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            obj = sindex.get(88);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.skey);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,RenamedEntity2_NewEntityName_WithRenamer>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     RenamedEntity2_NewEntityName_WithRenamer.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((RenamedEntity2_NewEntityName_WithRenamer)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", 99, "skey", 88);
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass1_BaseClass
+        extends EvolveCase {
+
+        int f = 123;
+    }
+
+    /**
+     * Disallow deleting a superclass from the hierarchy when the superclass
+     * has persistent fields and no Deleter or Converter is specified.
+     */
+    @Entity
+    static class DeleteSuperclass1_NoMutation
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeleteSuperclass1_BaseClass.class.getName();
+        private static final String NAME2 =
+            DeleteSuperclass1_NoMutation.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 Error: When a superclass is removed from the class hierarchy, the superclass or all of its persistent fields must be deleted with a Deleter: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_BaseClass";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 0, null);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88);
+            checkRawFields(obj.getSuper(), "f", 123);
+            checkRawFields(obj.getSuper().getSuper());
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass2_BaseClass
+        extends EvolveCase {
+
+        int f;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey;
+    }
+
+    /**
+     * Allow deleting a superclass from the hierarchy when the superclass has
+     * persistent fields and a class Converter is specified.  Also check that
+     * the secondary key field in the deleted base class is handled properly.
+     */
+    @Entity(version=1)
+    static class DeleteSuperclass2_WithConverter extends EvolveCase {
+
+        private static final String NAME =
+            DeleteSuperclass2_BaseClass.class.getName();
+        private static final String NAME2 =
+            DeleteSuperclass2_WithConverter.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        int ff;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer skey2;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey3;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
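+            /*
+             * The EntityConverter also names the secondary key ("skey") that
+             * goes away along with the deleted superclass.
+             */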
+            m.addConverter(new EntityConverter
+                (NAME2, 0, new MyConversion(),
+                 Collections.singleton("skey")));
+            return m;
+        }
+
+        @SuppressWarnings("serial")
+        static class MyConversion implements Conversion {
+
+            transient RawType newType;
+
+            public void initialize(EntityModel model) {
+                newType = model.getRawType(NAME2);
+                TestCase.assertNotNull(newType);
+            }
+
+            public Object convert(Object fromValue) {
+                TestCase.assertNotNull(newType);
+                RawObject obj = (RawObject) fromValue;
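+                /*
+                 * Skip the deleted DeleteSuperclass2_BaseClass level so the
+                 * entity's own values sit directly on the EvolveCase
+                 * superclass.
+                 */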
+                RawObject newSuper = obj.getSuper().getSuper();
+                return new RawObject(newType, obj.getValues(), newSuper);
+            }
+
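+            /* The Conversion contract requires overriding equals. */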
+            @Override
+            public boolean equals(Object other) {
+                return other instanceof MyConversion;
+            }
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME2, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkNonEntity(true, model, env, NAME, 0);
+                checkVersions(model, NAME, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass2_WithConverter.class);
+            DeleteSuperclass2_WithConverter obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertSame
+                (EvolveCase.class, obj.getClass().getSuperclass());
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            TestCase.assertEquals(Integer.valueOf(77), obj.skey2);
+            TestCase.assertEquals(66, obj.skey3);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass2_WithConverter.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((DeleteSuperclass2_WithConverter)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields
+                (obj, "key", 99, "ff", 88, "skey2", 77, "skey3", 66);
+            if (expectEvolved) {
+                checkRawFields(obj.getSuper());
+            } else {
+                checkRawFields(obj.getSuper(), "f", 123, "skey", 456);
+                checkRawFields(obj.getSuper().getSuper());
+            }
+            Environment env = store.getEnvironment();
+            assertDbExists(!expectEvolved, env, NAME2, "skey");
+            assertDbExists(true, env, NAME2, "skey3");
+        }
+    }
+
+    static class DeleteSuperclass3_BaseClass
+        extends EvolveCase {
+
+        int f;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey;
+    }
+
+    /**
+     * Allow deleting a superclass from the hierarchy when the superclass
+     * has persistent fields and a class Deleter is specified.  Also check that
+     * the secondary key field in the deleted base class is handled properly.
+     */
+    @Entity(version=1)
+    static class DeleteSuperclass3_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            DeleteSuperclass3_BaseClass.class.getName();
+        private static final String NAME2 =
+            DeleteSuperclass3_WithDeleter.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        int ff;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME2, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkNonEntity(false, model, env, NAME, 0);
+                checkVersions(model, NAME, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass3_WithDeleter.class);
+            DeleteSuperclass3_WithDeleter obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertSame
+                (EvolveCase.class, obj.getClass().getSuperclass());
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass3_WithDeleter.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((DeleteSuperclass3_WithDeleter)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", 99, "ff", 88);
+            if (expectEvolved) {
+                checkRawFields(obj.getSuper());
+            } else {
+                checkRawFields(obj.getSuper(), "f", 123, "skey", 456);
+                checkRawFields(obj.getSuper().getSuper());
+            }
+            Environment env = store.getEnvironment();
+            assertDbExists(!expectEvolved, env, NAME2, "skey");
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass4_BaseClass
+        extends EvolveCase {
+    }
+
+    /**
+     * Allow deleting a superclass from the hierarchy when the superclass
+     * has NO persistent fields.  No mutations are needed.
+     */
+    @Entity(version=1)
+    static class DeleteSuperclass4_NoFields extends EvolveCase {
+
+        private static final String NAME =
+            DeleteSuperclass4_BaseClass.class.getName();
+        private static final String NAME2 =
+            DeleteSuperclass4_NoFields.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME2, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkNonEntity(true, model, env, NAME, 0);
+                checkVersions(model, NAME, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass4_NoFields.class);
+            DeleteSuperclass4_NoFields obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertSame
+                (EvolveCase.class, obj.getClass().getSuperclass());
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass4_NoFields.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((DeleteSuperclass4_NoFields)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", 99, "ff", 88);
+            checkRawFields(obj.getSuper());
+            if (expectEvolved) {
+                TestCase.assertNull(obj.getSuper().getSuper());
+            } else {
+                checkRawFields(obj.getSuper().getSuper());
+            }
+        }
+    }
+
+    @Persistent(version=1)
+    static class DeleteSuperclass5_Embedded {
+
+        int f;
+
+        @Override
+        public String toString() {
+            return "" + f;
+        }
+    }
+
+    /**
+     * Ensure that a superclass at the top of the hierarchy can be deleted.  A
+     * class Deleter is used.
+     */
+    @Entity
+    static class DeleteSuperclass5_Top
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeleteSuperclass5_Top.class.getName();
+        private static final String NAME2 =
+            DeleteSuperclass5_Embedded.class.getName();
+        private static final String NAME3 =
+            PREFIX + "DeleteSuperclass5_Embedded_Base";
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        DeleteSuperclass5_Embedded embed =
+            new DeleteSuperclass5_Embedded();
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME3, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkNonEntity(true, model, env, NAME2, 1);
+            checkNonEntity(false, model, env, NAME3, 0);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkVersions(model, NAME3, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass5_Top>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass5_Top.class);
+            DeleteSuperclass5_Top obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            TestCase.assertEquals(123, obj.embed.f);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass5_Top>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass5_Top.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((DeleteSuperclass5_Top)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedType = store.getModel().getRawType(NAME2);
+            RawObject embedSuper = null;
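+            /*
+             * Before evolution the embedded object still carries its deleted
+             * base class level (holding field g); after evolution that level
+             * is gone entirely.
+             */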
+            if (!expectEvolved) {
+                RawType embedSuperType = store.getModel().getRawType(NAME3);
+                embedSuper = new RawObject
+                    (embedSuperType, makeValues("g", 456), null);
+            }
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), embedSuper);
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88, "embed", embed);
+        }
+    }
+
+    @Persistent
+    static class InsertSuperclass1_BaseClass
+        extends EvolveCase {
+
+        int f = 123;
+    }
+
+    /**
+     * Allow inserting a superclass between two existing classes in the
+     * hierarchy.  No mutations are needed.
+     */
+    @Entity(version=1)
+    static class InsertSuperclass1_Between
+        extends InsertSuperclass1_BaseClass {
+
+        private static final String NAME =
+            InsertSuperclass1_BaseClass.class.getName();
+        private static final String NAME2 =
+            InsertSuperclass1_Between.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 1, null);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass1_Between>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass1_Between.class);
+            InsertSuperclass1_Between obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertSame
+                (InsertSuperclass1_BaseClass.class,
+                 obj.getClass().getSuperclass());
+            TestCase.assertSame
+                (EvolveCase.class,
+                 obj.getClass().getSuperclass().getSuperclass());
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            TestCase.assertEquals(123, obj.f);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass1_Between>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass1_Between.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((InsertSuperclass1_Between)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME2, 1, NAME, 0, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", 99, "ff", 88);
+            if (expectEvolved) {
+                if (expectUpdated) {
+                    checkRawFields(obj.getSuper(), "f", 123);
+                } else {
+                    checkRawFields(obj.getSuper());
+                }
+                checkRawFields(obj.getSuper().getSuper());
+                TestCase.assertNull(obj.getSuper().getSuper().getSuper());
+            } else {
+                checkRawFields(obj.getSuper());
+                TestCase.assertNull(obj.getSuper().getSuper());
+            }
+        }
+    }
+
+    @Persistent
+    static class InsertSuperclass2_Embedded_Base {
+
+        int g = 456;
+    }
+
+    @Persistent(version=1)
+    static class InsertSuperclass2_Embedded
+        extends InsertSuperclass2_Embedded_Base {
+
+        int f;
+    }
+
+    /**
+     * Allow inserting a superclass at the top of the hierarchy.  No mutations
+     * are needed.
+     */
+    @Entity
+    static class InsertSuperclass2_Top
+        extends EvolveCase {
+
+        private static final String NAME =
+            InsertSuperclass2_Top.class.getName();
+        private static final String NAME2 =
+            InsertSuperclass2_Embedded.class.getName();
+        private static final String NAME3 =
+            InsertSuperclass2_Embedded_Base.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        InsertSuperclass2_Embedded embed =
+            new InsertSuperclass2_Embedded();
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkNonEntity(true, model, env, NAME2, 1);
+            checkNonEntity(true, model, env, NAME3, 0);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+            checkVersions(model, NAME3, 0);
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass2_Top>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass2_Top.class);
+            InsertSuperclass2_Top obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.ff);
+            TestCase.assertEquals(123, obj.embed.f);
+            TestCase.assertEquals(456, obj.embed.g);
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass2_Top>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass2_Top.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((InsertSuperclass2_Top)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedType = store.getModel().getRawType(NAME2);
+            RawObject embedSuper = null;
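+            /*
+             * The inserted superclass level exists only after evolution, and
+             * its field g is populated only once the record has been
+             * rewritten.
+             */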
+            if (expectEvolved) {
+                RawType embedSuperType = store.getModel().getRawType(NAME3);
+                Map<String,Object> values =
+                    expectUpdated ? makeValues("g", 456) : makeValues();
+                embedSuper = new RawObject(embedSuperType, values, null);
+            }
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), embedSuper);
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88, "embed", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_PrimitiveToObject
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_PrimitiveToObject.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        String ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 1 Error: Old field type: int is not compatible with the new type: java.lang.String for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88);
+        }
+    }
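+
+    /*
+     * Illustrative sketch (not exercised by these tests): an incompatible
+     * non-key field change such as int -> String, which the
+     * DisallowNonKeyField cases reject without a mutation, could instead be
+     * accepted by registering a field Converter whose Conversion produces
+     * the new value.  The conversion class named here is hypothetical.
+     *
+     *   Mutations m = new Mutations();
+     *   m.addConverter(new Converter(
+     *       DisallowNonKeyField_PrimitiveToObject.class.getName(), 0,
+     *       "ff", new IntToStringConversion()));
+     */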
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_ObjectToPrimitive
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_ObjectToPrimitive.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 1 Error: Old field type: java.lang.String is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", "88");
+        }
+    }
+
+    @Persistent
+    static class MyType {
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MyType;
+        }
+    }
+
+    @Persistent
+    static class MySubtype extends MyType {
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MySubtype;
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_ObjectToSubtype
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_ObjectToSubtype.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        MySubtype ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 1 Error: Old field type: com.sleepycat.persist.test.EvolveClasses$MyType is not compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MySubtype for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawType embedType = store.getModel().getRawType
+                (MyType.class.getName());
+            RawObject embed = new RawObject(embedType, makeValues(), null);
+
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_ObjectToUnrelatedSimple
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_ObjectToUnrelatedSimple.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        String ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: java.lang.String for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_ObjectToUnrelatedOther
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_ObjectToUnrelatedOther.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        MyType ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MyType for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_byte2boolean
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_byte2boolean.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        boolean ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 1 Error: Old field type: byte is not compatible with the new type: boolean for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (byte) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_short2byte
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_short2byte.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        byte ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 1 Error: Old field type: short is not compatible with the new type: byte for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (short) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_int2short
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_int2short.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        short ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 1 Error: Old field type: int is not compatible with the new type: short for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (int) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_long2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_long2int.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 1 Error: Old field type: long is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (long) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_float2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_float2long.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        long ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 1 Error: Old field type: float is not compatible with the new type: long for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (float) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_double2float
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_double2float.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        float ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 1 Error: Old field type: double is not compatible with the new type: float for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (double) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Byte2byte
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Byte2byte.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        byte ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: byte for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (byte) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Character2char
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Character2char.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        char ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: char for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (char) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Short2short
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Short2short.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        short ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: short for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (short) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Integer2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Integer2int.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (int) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Long2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Long2long.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        long ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: long for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (long) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Float2float
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Float2float.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        float ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: float for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (float) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_Double2double
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_Double2double.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        double ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 1 Error: Old field type: java.lang.Double is not compatible with the new type: double for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (double) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_float2BigInt
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_float2BigInt.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        BigInteger ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 1 Error: Old field type: float is not compatible with the new type: java.math.BigInteger for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (float) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowNonKeyField_BigInt2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowNonKeyField_BigInt2long.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        long ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 1 Error: Old field type: java.math.BigInteger is not compatible with the new type: long for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", BigInteger.valueOf(88));
+        }
+    }
+
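+    /*
+     * All of the DisallowNonKeyField cases above fail at store open with
+     * "Mutation is missing", because no mutation was registered for the
+     * changed field.  As a minimal sketch of how a Converter mutation could
+     * make such a non-key field type change (e.g. long to int) legal: the
+     * entity class, store name, and conversion class below are hypothetical
+     * and are not part of this test, which deliberately omits mutations to
+     * provoke the error.
+     *
+     *   static class LongToIntConversion implements Conversion {
+     *       public void initialize(EntityModel model) {}
+     *       public Object convert(Object fromValue) {
+     *           // Non-key simple values are passed in as wrapper objects.
+     *           return Integer.valueOf(((Long) fromValue).intValue());
+     *       }
+     *       @Override
+     *       public boolean equals(Object o) {
+     *           return o instanceof LongToIntConversion;
+     *       }
+     *   }
+     *
+     *   Mutations mutations = new Mutations();
+     *   mutations.addConverter(new Converter
+     *       (SomeEntity.class.getName(), 0, "ff", new LongToIntConversion()));
+     *   StoreConfig config = new StoreConfig();
+     *   config.setAllowCreate(true);
+     *   config.setMutations(mutations);
+     *   EntityStore store = new EntityStore(env, "someStore", config);
+     */
+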
+    @Entity(version=1)
+    static class DisallowSecKeyField_byte2short
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_byte2short.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        short ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (byte) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_char2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_char2int.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (char) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_short2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_short2int.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (short) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_int2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_int2long.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        long ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (int) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_long2float
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_long2float.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        float ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (long) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_float2double
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_float2double.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        double ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (float) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Byte2short2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Byte2short2.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        short ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (byte) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Character2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Character2int.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (char) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Short2int2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Short2int2.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (short) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Integer2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Integer2long.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        long ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: long for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (int) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Long2float2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Long2float2.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        float ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (long) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_Float2double2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_Float2double2.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        double ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", (float) 88);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowSecKeyField_int2BigInt
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowSecKeyField_int2BigInt.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        BigInteger ff;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 1 Error: Old field type: int is not compatible with the new type: java.math.BigInteger for field: ff";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, "ff");
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "ff", 88);
+        }
+    }
+
+    // --- Primary key and composite key class field type changes ---
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_byte2short
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_byte2short.class.getName();
+
+        @PrimaryKey
+        short key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (byte) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_char2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_char2int.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (char) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_short2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_short2int.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (short) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_int2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_int2long.class.getName();
+
+        @PrimaryKey
+        long key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (int) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (int) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_long2float
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_long2float.class.getName();
+
+        @PrimaryKey
+        float key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (long) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_float2double
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_float2double.class.getName();
+
+        @PrimaryKey
+        double key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (float) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Byte2short2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Byte2short2.class.getName();
+
+        @PrimaryKey
+        short key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (byte) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Character2int
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Character2int.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (char) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Short2int2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Short2int2.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (short) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Integer2long
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Integer2long.class.getName();
+
+        @PrimaryKey
+        long key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: long for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (int) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (int) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Long2float2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Long2float2.class.getName();
+
+        @PrimaryKey
+        float key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (long) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Float2double2
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Float2double2.class.getName();
+
+        @PrimaryKey
+        double key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", (float) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class DisallowPriKeyField_Long2BigInt
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowPriKeyField_Long2BigInt.class.getName();
+
+        @PrimaryKey
+        BigInteger key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: java.math.BigInteger for field: key";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawObject obj = readRaw(store, 99L, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99L);
+        }
+    }
+
+    @Persistent(version=1)
+    static class DisallowCompositeKeyField_byte2short_Key {
+
+        @KeyField(1)
+        int f1 = 1;
+
+        @KeyField(2)
+        short f2 = 2;
+
+        @KeyField(3)
+        String f3 = "3";
+    }
+
+    @Entity
+    static class DisallowCompositeKeyField_byte2short
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowCompositeKeyField_byte2short.class.getName();
+        private static final String NAME2 =
+            DisallowCompositeKeyField_byte2short_Key.class.getName();
+
+        @PrimaryKey
+        DisallowCompositeKeyField_byte2short_Key key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 1 Error: Old field type: byte is not compatible with the new type: short for field: f2";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkNonEntity(true, model, env, NAME2, 0);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+            RawType rawKeyType = store.getModel().getRawType(NAME2);
+            RawObject rawKey = new RawObject
+                (rawKeyType,
+                 makeValues("f1", (int) 1, "f2", (byte) 2, "f3", "3"),
+                 null);
+
+            RawObject obj = readRaw(store, rawKey, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", rawKey);
+        }
+    }
+
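+    /*
+     * The two cases below cover the primary key type change that evolution
+     * does permit: replacing a primitive key field with its corresponding
+     * wrapper class, and vice versa.  No mutation is needed for this change,
+     * so these cases define no getStoreOpenException.
+     */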
+    @Entity(version=1)
+    static class AllowPriKeyField_byte2Byte
+        extends EvolveCase {
+
+        private static final String NAME =
+            AllowPriKeyField_byte2Byte.class.getName();
+
+        @PrimaryKey
+        Byte key = 99;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+                index = store.getPrimaryIndex
+                    (Byte.class,
+                     AllowPriKeyField_byte2Byte.class);
+            AllowPriKeyField_byte2Byte obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(Byte.valueOf((byte) 99), obj.key);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+                index = newStore.getPrimaryIndex
+                    (Byte.class,
+                     AllowPriKeyField_byte2Byte.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99);
+            index.put((AllowPriKeyField_byte2Byte)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", (byte) 99);
+        }
+    }
+
+    @Entity(version=1)
+    static class AllowPriKeyField_Byte2byte2
+        extends EvolveCase {
+
+        private static final String NAME =
+            AllowPriKeyField_Byte2byte2.class.getName();
+
+        @PrimaryKey
+        byte key = 99;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+                index = store.getPrimaryIndex
+                    (Byte.class,
+                     AllowPriKeyField_Byte2byte2.class);
+            AllowPriKeyField_Byte2byte2 obj = index.get(key);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals((byte) 99, obj.key);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+                index = newStore.getPrimaryIndex
+                    (Byte.class,
+                     AllowPriKeyField_Byte2byte2.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99);
+            index.put((AllowPriKeyField_Byte2byte2)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+            }
+            checkRawFields(obj, "key", (byte) 99);
+        }
+    }
+
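+    /**
+     * Composite key class with one field for each primitive key type and
+     * each wrapper key type; used as a composite secondary key by
+     * AllowFieldTypeChanges_Base.
+     */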
+    @Persistent(version=1)
+    static class AllowFieldTypeChanges_Key {
+
+        AllowFieldTypeChanges_Key() {
+            this(false);
+        }
+
+        AllowFieldTypeChanges_Key(boolean init) {
+            if (init) {
+                f1 = true;
+                f2 = (byte) 2;
+                f3 = (short) 3;
+                f4 = 4;
+                f5 = 5L;
+                f6 = 6F;
+                f7 = 7D;
+                f8 = (char) 8;
+                f9 = true;
+                f10 = (byte) 10;
+                f11 = (short) 11;
+                f12 = 12;
+                f13 = 13L;
+                f14 = 14F;
+                f15 = 15D;
+                f16 = (char) 16;
+            }
+        }
+
+        @KeyField(1)
+        boolean f1;
+
+        @KeyField(2)
+        byte f2;
+
+        @KeyField(3)
+        short f3;
+
+        @KeyField(4)
+        int f4;
+
+        @KeyField(5)
+        long f5;
+
+        @KeyField(6)
+        float f6;
+
+        @KeyField(7)
+        double f7;
+
+        @KeyField(8)
+        char f8;
+
+        @KeyField(9)
+        Boolean f9;
+
+        @KeyField(10)
+        Byte f10;
+
+        @KeyField(11)
+        Short f11;
+
+        @KeyField(12)
+        Integer f12;
+
+        @KeyField(13)
+        Long f13;
+
+        @KeyField(14)
+        Float f14;
+
+        @KeyField(15)
+        Double f15;
+
+        @KeyField(16)
+        Character f16;
+    }
+
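+    /**
+     * Base class holding the composite secondary key and two fields whose
+     * types are changed by Converter mutations (long to Integer and String
+     * to Long).
+     */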
+    @Persistent(version=1)
+    static class AllowFieldTypeChanges_Base
+        extends EvolveCase {
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        AllowFieldTypeChanges_Key kComposite;
+
+        Integer f_long2Integer;
+        Long f_String2Long;
+    }
+
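+    /*
+     * For reference, an application registers mutations such as those
+     * returned by getMutations() below when opening the store.  A minimal
+     * sketch (the "test" store name and the config and mutations variables
+     * are illustrative only, not part of this harness):
+     *
+     *     StoreConfig config = new StoreConfig();
+     *     config.setAllowCreate(true);
+     *     config.setMutations(mutations);
+     *     EntityStore store = new EntityStore(env, "test", config);
+     */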
+    /**
+     * Allow field type changes: automatic widening among primitive types,
+     * supported widening to wrapper and BigInteger types, and Converter
+     * mutations.  Also tests primary and secondary key field renaming.
+     */
+    @Entity(version=1)
+    static class AllowFieldTypeChanges
+        extends AllowFieldTypeChanges_Base {
+
+        private static final String NAME =
+            AllowFieldTypeChanges.class.getName();
+        private static final String NAME2 =
+            AllowFieldTypeChanges_Base.class.getName();
+        private static final String NAME3 =
+            AllowFieldTypeChanges_Key.class.getName();
+
+        @PrimaryKey
+        Integer pkeyInteger;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Boolean kBoolean;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Byte kByte;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Short kShort;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer kInteger;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Long kLong;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Float kFloat;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Double kDouble;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Character kCharacter;
+
+        short f01;
+        int f02;
+        long f03;
+        float f04;
+        double f06;
+        int f07;
+        long f08;
+        float f09;
+        double f10;
+        int f11;
+        long f12;
+        float f13;
+        double f14;
+        long f15;
+        float f16;
+        double f17;
+        float f18;
+        double f19;
+        double f20;
+
+        Short f21;
+        Integer f22;
+        Long f23;
+        Float f24;
+        Double f26;
+        Integer f27;
+        Long f28;
+        Float f29;
+        Double f30;
+        Integer f31;
+        Long f32;
+        Float f33;
+        Double f34;
+        Long f35;
+        Float f36;
+        Double f37;
+        Float f38;
+        Double f39;
+        Double f40;
+
+        Short f41;
+        Integer f42;
+        Long f43;
+        Float f44;
+        Double f46;
+        Integer f47;
+        Long f48;
+        Float f49;
+        Double f50;
+        Integer f51;
+        Long f52;
+        Float f53;
+        Double f54;
+        Long f55;
+        Float f56;
+        Double f57;
+        Float f58;
+        Double f59;
+        Double f60;
+
+        BigInteger f70;
+        BigInteger f71;
+        BigInteger f72;
+        BigInteger f73;
+        BigInteger f74;
+        BigInteger f75;
+        BigInteger f76;
+        BigInteger f77;
+        BigInteger f78;
+        BigInteger f79;
+
+        int f_long2int;
+        long f_String2long;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addRenamer(new Renamer(NAME, 0, "pkeyint", "pkeyInteger"));
+            m.addRenamer(new Renamer(NAME, 0, "kboolean", "kBoolean"));
+            m.addRenamer(new Renamer(NAME, 0, "kbyte", "kByte"));
+            m.addRenamer(new Renamer(NAME, 0, "kshort", "kShort"));
+            m.addRenamer(new Renamer(NAME, 0, "kint", "kInteger"));
+            m.addRenamer(new Renamer(NAME, 0, "klong", "kLong"));
+            m.addRenamer(new Renamer(NAME, 0, "kfloat", "kFloat"));
+            m.addRenamer(new Renamer(NAME, 0, "kdouble", "kDouble"));
+            m.addRenamer(new Renamer(NAME, 0, "kchar", "kCharacter"));
+            m.addRenamer(new Renamer(NAME2, 0, "kcomposite", "kComposite"));
+
+            Conversion conv1 = new MyConversion1();
+            Conversion conv2 = new MyConversion2();
+
+            m.addConverter(new Converter(NAME, 0, "f_long2int", conv1));
+            m.addConverter(new Converter(NAME, 0, "f_String2long", conv2));
+            m.addConverter(new Converter(NAME2, 0, "f_long2Integer", conv1));
+            m.addConverter(new Converter(NAME2, 0, "f_String2Long", conv2));
+            return m;
+        }
+
+        @SuppressWarnings("serial")
+        static class MyConversion1 implements Conversion {
+
+            public void initialize(EntityModel model) {}
+
+            public Object convert(Object o) {
+                return ((Long) o).intValue();
+            }
+
+            @Override
+            public boolean equals(Object other) { return true; }
+        }
+
+        @SuppressWarnings("serial")
+        static class MyConversion2 implements Conversion {
+
+            public void initialize(EntityModel model) {}
+
+            public Object convert(Object o) {
+                return Long.valueOf((String) o);
+            }
+
+            @Override
+            public boolean equals(Object other) { return true; }
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            checkNonEntity(true, model, env, NAME2, 1);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkVersions(model, NAME3, 1, NAME3, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 1);
+                checkVersions(model, NAME3, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldTypeChanges>
+                index = store.getPrimaryIndex
+                    (Integer.class, AllowFieldTypeChanges.class);
+            AllowFieldTypeChanges obj = index.get((int) 99);
+            checkValues(obj);
+            checkSecondaries(store, index);
+
+            if (doUpdate) {
+                index.put(obj);
+                checkSecondaries(store, index);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldTypeChanges>
+                index = newStore.getPrimaryIndex
+                    (Integer.class, AllowFieldTypeChanges.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((AllowFieldTypeChanges)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        private void checkSecondaries(EntityStore store,
+                                      PrimaryIndex<Integer,
+                                                   AllowFieldTypeChanges>
+                                                   index)
+            throws DatabaseException {
+
+            checkValues(store.getSecondaryIndex
+                (index, Boolean.class, "kBoolean").get(true));
+            checkValues(store.getSecondaryIndex
+                (index, Byte.class, "kByte").get((byte) 77));
+            checkValues(store.getSecondaryIndex
+                (index, Short.class, "kShort").get((short) 66));
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "kInteger").get((int) 55));
+            checkValues(store.getSecondaryIndex
+                (index, Long.class, "kLong").get((long) 44));
+            checkValues(store.getSecondaryIndex
+                (index, Float.class, "kFloat").get((float) 33));
+            checkValues(store.getSecondaryIndex
+                (index, Double.class, "kDouble").get((double) 22));
+            checkValues(store.getSecondaryIndex
+                (index, Character.class, "kCharacter").get((char) 11));
+            checkValues(store.getSecondaryIndex
+                (index, AllowFieldTypeChanges_Key.class, "kComposite").get
+                    (new AllowFieldTypeChanges_Key(true)));
+        }
+
+        private void checkValues(AllowFieldTypeChanges obj) {
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(obj.pkeyInteger, Integer.valueOf(99));
+            TestCase.assertEquals(obj.kBoolean, Boolean.valueOf(true));
+            TestCase.assertEquals(obj.kByte, Byte.valueOf((byte) 77));
+            TestCase.assertEquals(obj.kShort, Short.valueOf((short) 66));
+            TestCase.assertEquals(obj.kInteger, Integer.valueOf(55));
+            TestCase.assertEquals(obj.kLong, Long.valueOf(44));
+            TestCase.assertEquals(obj.kFloat, Float.valueOf(33));
+            TestCase.assertEquals(obj.kDouble, Double.valueOf(22));
+            TestCase.assertEquals(obj.kCharacter, Character.valueOf((char) 11));
+
+            AllowFieldTypeChanges_Key embed = obj.kComposite;
+            TestCase.assertNotNull(embed);
+            TestCase.assertEquals(embed.f1, true);
+            TestCase.assertEquals(embed.f2, (byte) 2);
+            TestCase.assertEquals(embed.f3, (short) 3);
+            TestCase.assertEquals(embed.f4, 4);
+            TestCase.assertEquals(embed.f5, 5L);
+            TestCase.assertEquals(embed.f6, 6F);
+            TestCase.assertEquals(embed.f7, 7D);
+            TestCase.assertEquals(embed.f8, (char) 8);
+            TestCase.assertEquals(embed.f9, Boolean.valueOf(true));
+            TestCase.assertEquals(embed.f10, Byte.valueOf((byte) 10));
+            TestCase.assertEquals(embed.f11, Short.valueOf((short) 11));
+            TestCase.assertEquals(embed.f12, Integer.valueOf(12));
+            TestCase.assertEquals(embed.f13, Long.valueOf(13L));
+            TestCase.assertEquals(embed.f14, Float.valueOf(14F));
+            TestCase.assertEquals(embed.f15, Double.valueOf(15D));
+            TestCase.assertEquals(embed.f16, Character.valueOf((char) 16));
+
+            TestCase.assertEquals(obj.f01, (short) 1);
+            TestCase.assertEquals(obj.f02, (int) 2);
+            TestCase.assertEquals(obj.f03, (long) 3);
+            TestCase.assertEquals(obj.f04, (float) 4);
+            TestCase.assertEquals(obj.f06, (double) 6);
+            TestCase.assertEquals(obj.f07, (int) 7);
+            TestCase.assertEquals(obj.f08, (long) 8);
+            TestCase.assertEquals(obj.f09, (float) 9);
+            TestCase.assertEquals(obj.f10, (double) 10);
+            TestCase.assertEquals(obj.f11, (int) 11);
+            TestCase.assertEquals(obj.f12, (long) 12);
+            TestCase.assertEquals(obj.f13, (float) 13);
+            TestCase.assertEquals(obj.f14, (double) 14);
+            TestCase.assertEquals(obj.f15, 15L);
+            TestCase.assertEquals(obj.f16, 16F);
+            TestCase.assertEquals(obj.f17, 17D);
+            TestCase.assertEquals(obj.f18, (float) 18);
+            TestCase.assertEquals(obj.f19, (double) 19);
+            TestCase.assertEquals(obj.f20, (double) 20);
+
+            TestCase.assertEquals(obj.f21, Short.valueOf((byte) 21));
+            TestCase.assertEquals(obj.f22, Integer.valueOf((byte) 22));
+            TestCase.assertEquals(obj.f23, Long.valueOf((byte) 23));
+            TestCase.assertEquals(obj.f24, Float.valueOf((byte) 24));
+            TestCase.assertEquals(obj.f26, Double.valueOf((byte) 26));
+            TestCase.assertEquals(obj.f27, Integer.valueOf((short) 27));
+            TestCase.assertEquals(obj.f28, Long.valueOf((short) 28));
+            TestCase.assertEquals(obj.f29, Float.valueOf((short) 29));
+            TestCase.assertEquals(obj.f30, Double.valueOf((short) 30));
+            TestCase.assertEquals(obj.f31, Integer.valueOf((char) 31));
+            TestCase.assertEquals(obj.f32, Long.valueOf((char) 32));
+            TestCase.assertEquals(obj.f33, Float.valueOf((char) 33));
+            TestCase.assertEquals(obj.f34, Double.valueOf((char) 34));
+            TestCase.assertEquals(obj.f35, Long.valueOf(35));
+            TestCase.assertEquals(obj.f36, Float.valueOf(36));
+            TestCase.assertEquals(obj.f37, Double.valueOf(37));
+            TestCase.assertEquals(obj.f38, Float.valueOf((long) 38));
+            TestCase.assertEquals(obj.f39, Double.valueOf((long) 39));
+            TestCase.assertEquals(obj.f40, Double.valueOf((float) 40));
+
+            TestCase.assertEquals(obj.f41, Short.valueOf((byte) 41));
+            TestCase.assertEquals(obj.f42, Integer.valueOf((byte) 42));
+            TestCase.assertEquals(obj.f43, Long.valueOf((byte) 43));
+            TestCase.assertEquals(obj.f44, Float.valueOf((byte) 44));
+            TestCase.assertEquals(obj.f46, Double.valueOf((byte) 46));
+            TestCase.assertEquals(obj.f47, Integer.valueOf((short) 47));
+            TestCase.assertEquals(obj.f48, Long.valueOf((short) 48));
+            TestCase.assertEquals(obj.f49, Float.valueOf((short) 49));
+            TestCase.assertEquals(obj.f50, Double.valueOf((short) 50));
+            TestCase.assertEquals(obj.f51, Integer.valueOf((char) 51));
+            TestCase.assertEquals(obj.f52, Long.valueOf((char) 52));
+            TestCase.assertEquals(obj.f53, Float.valueOf((char) 53));
+            TestCase.assertEquals(obj.f54, Double.valueOf((char) 54));
+            TestCase.assertEquals(obj.f55, Long.valueOf(55));
+            TestCase.assertEquals(obj.f56, Float.valueOf(56));
+            TestCase.assertEquals(obj.f57, Double.valueOf(57));
+            TestCase.assertEquals(obj.f58, Float.valueOf((long) 58));
+            TestCase.assertEquals(obj.f59, Double.valueOf((long) 59));
+            TestCase.assertEquals(obj.f60, Double.valueOf((float) 60));
+
+            TestCase.assertEquals(obj.f70, BigInteger.valueOf(70));
+            TestCase.assertEquals(obj.f71, BigInteger.valueOf(71));
+            TestCase.assertEquals(obj.f72, BigInteger.valueOf(72));
+            TestCase.assertEquals(obj.f73, BigInteger.valueOf(73));
+            TestCase.assertEquals(obj.f74, BigInteger.valueOf(74));
+            TestCase.assertEquals(obj.f75, BigInteger.valueOf(75));
+            TestCase.assertEquals(obj.f76, BigInteger.valueOf(76));
+            TestCase.assertEquals(obj.f77, BigInteger.valueOf(77));
+            TestCase.assertEquals(obj.f78, BigInteger.valueOf(78));
+            TestCase.assertEquals(obj.f79, BigInteger.valueOf(79));
+
+            TestCase.assertEquals(obj.f_long2Integer, Integer.valueOf(111));
+            TestCase.assertEquals(obj.f_String2Long, Long.valueOf(222));
+            TestCase.assertEquals(obj.f_long2int, 333);
+            TestCase.assertEquals(obj.f_String2long, 444L);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedType = store.getModel().getRawType(NAME3);
+            RawObject embed = new RawObject
+                (embedType,
+                 makeValues
+                    ("f1", true,
+                     "f2", (byte) 2,
+                     "f3", (short) 3,
+                     "f4", 4,
+                     "f5", 5L,
+                     "f6", 6F,
+                     "f7", 7D,
+                     "f8", (char) 8,
+                     "f9", true,
+                     "f10", (byte) 10,
+                     "f11", (short) 11,
+                     "f12", 12,
+                     "f13", 13L,
+                     "f14", 14F,
+                     "f15", 15D,
+                     "f16", (char) 16),
+                 null);
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME, 1, NAME2, 1, CASECLS, 0);
+                checkRawFields(obj, "pkeyInteger", (int) 99,
+                               "kBoolean", true,
+                               "kByte", (byte) 77,
+                               "kShort", (short) 66,
+                               "kInteger", (int) 55,
+                               "kLong", (long) 44,
+                               "kFloat", (float) 33,
+                               "kDouble", (double) 22,
+                               "kCharacter", (char) 11,
+
+                               "f01", (short) 1,
+                               "f02", (int) 2,
+                               "f03", (long) 3,
+                               "f04", (float) 4,
+                               "f06", (double) 6,
+                               "f07", (int) 7,
+                               "f08", (long) 8,
+                               "f09", (float) 9,
+                               "f10", (double) 10,
+                               "f11", (int) 11,
+                               "f12", (long) 12,
+                               "f13", (float) 13,
+                               "f14", (double) 14,
+                               "f15", 15L,
+                               "f16", 16F,
+                               "f17", 17D,
+                               "f18", (float) 18,
+                               "f19", (double) 19,
+                               "f20", (double) 20,
+
+                               "f21", (short) 21,
+                               "f22", (int) 22,
+                               "f23", (long) 23,
+                               "f24", (float) 24,
+                               "f26", (double) 26,
+                               "f27", (int) 27,
+                               "f28", (long) 28,
+                               "f29", (float) 29,
+                               "f30", (double) 30,
+                               "f31", (int) 31,
+                               "f32", (long) 32,
+                               "f33", (float) 33,
+                               "f34", (double) 34,
+                               "f35", 35L,
+                               "f36", 36F,
+                               "f37", 37D,
+                               "f38", (float) 38,
+                               "f39", (double) 39,
+                               "f40", (double) 40,
+
+                               "f41", (short) 41,
+                               "f42", (int) 42,
+                               "f43", (long) 43,
+                               "f44", (float) 44,
+                               "f46", (double) 46,
+                               "f47", (int) 47,
+                               "f48", (long) 48,
+                               "f49", (float) 49,
+                               "f50", (double) 50,
+                               "f51", (int) 51,
+                               "f52", (long) 52,
+                               "f53", (float) 53,
+                               "f54", (double) 54,
+                               "f55", 55L,
+                               "f56", 56F,
+                               "f57", 57D,
+                               "f58", (float) 58,
+                               "f59", (double) 59,
+                               "f60", (double) 60,
+
+                               "f70", BigInteger.valueOf(70),
+                               "f71", BigInteger.valueOf(71),
+                               "f72", BigInteger.valueOf(72),
+                               "f73", BigInteger.valueOf(73),
+                               "f74", BigInteger.valueOf(74),
+                               "f75", BigInteger.valueOf(75),
+                               "f76", BigInteger.valueOf(76),
+                               "f77", BigInteger.valueOf(77),
+                               "f78", BigInteger.valueOf(78),
+                               "f79", BigInteger.valueOf(79),
+
+                               "f_long2int", 333,
+                               "f_String2long", 444L);
+                checkRawFields(obj.getSuper(),
+                               "kComposite", embed,
+                               "f_long2Integer", 111,
+                               "f_String2Long", 222L);
+            } else {
+                obj = readRaw(store, 99, NAME, 0, NAME2, 0, CASECLS, 0);
+                checkRawFields(obj, "pkeyint", (int) 99,
+                               "kboolean", true,
+                               "kbyte", (byte) 77,
+                               "kshort", (short) 66,
+                               "kint", (int) 55,
+                               "klong", (long) 44,
+                               "kfloat", (float) 33,
+                               "kdouble", (double) 22,
+                               "kchar", (char) 11,
+
+                               "f01", (byte) 1,
+                               "f02", (byte) 2,
+                               "f03", (byte) 3,
+                               "f04", (byte) 4,
+                               "f06", (byte) 6,
+                               "f07", (short) 7,
+                               "f08", (short) 8,
+                               "f09", (short) 9,
+                               "f10", (short) 10,
+                               "f11", (char) 11,
+                               "f12", (char) 12,
+                               "f13", (char) 13,
+                               "f14", (char) 14,
+                               "f15", 15,
+                               "f16", 16,
+                               "f17", 17,
+                               "f18", (long) 18,
+                               "f19", (long) 19,
+                               "f20", (float) 20,
+
+                               "f21", (byte) 21,
+                               "f22", (byte) 22,
+                               "f23", (byte) 23,
+                               "f24", (byte) 24,
+                               "f26", (byte) 26,
+                               "f27", (short) 27,
+                               "f28", (short) 28,
+                               "f29", (short) 29,
+                               "f30", (short) 30,
+                               "f31", (char) 31,
+                               "f32", (char) 32,
+                               "f33", (char) 33,
+                               "f34", (char) 34,
+                               "f35", 35,
+                               "f36", 36,
+                               "f37", 37,
+                               "f38", (long) 38,
+                               "f39", (long) 39,
+                               "f40", (float) 40,
+
+                               "f41", (byte) 41,
+                               "f42", (byte) 42,
+                               "f43", (byte) 43,
+                               "f44", (byte) 44,
+                               "f46", (byte) 46,
+                               "f47", (short) 47,
+                               "f48", (short) 48,
+                               "f49", (short) 49,
+                               "f50", (short) 50,
+                               "f51", (char) 51,
+                               "f52", (char) 52,
+                               "f53", (char) 53,
+                               "f54", (char) 54,
+                               "f55", 55,
+                               "f56", 56,
+                               "f57", 57,
+                               "f58", (long) 58,
+                               "f59", (long) 59,
+                               "f60", (float) 60,
+
+                               "f70", (byte) 70,
+                               "f71", (short) 71,
+                               "f72", (char) 72,
+                               "f73", 73,
+                               "f74", (long) 74,
+                               "f75", (byte) 75,
+                               "f76", (short) 76,
+                               "f77", (char) 77,
+                               "f78", 78,
+                               "f79", (long) 79,
+
+                               "f_long2int", 333L,
+                               "f_String2long", "444");
+
+                checkRawFields(obj.getSuper(),
+                               "kcomposite", embed,
+                               "f_long2Integer", 111L,
+                               "f_String2Long", "222");
+            }
+            Environment env = store.getEnvironment();
+
+            assertDbExists(expectEvolved, env, NAME, "kBoolean");
+            assertDbExists(expectEvolved, env, NAME, "kByte");
+            assertDbExists(expectEvolved, env, NAME, "kShort");
+            assertDbExists(expectEvolved, env, NAME, "kInteger");
+            assertDbExists(expectEvolved, env, NAME, "kLong");
+            assertDbExists(expectEvolved, env, NAME, "kFloat");
+            assertDbExists(expectEvolved, env, NAME, "kDouble");
+            assertDbExists(expectEvolved, env, NAME, "kCharacter");
+            assertDbExists(expectEvolved, env, NAME, "kComposite");
+
+            assertDbExists(!expectEvolved, env, NAME, "kboolean");
+            assertDbExists(!expectEvolved, env, NAME, "kbyte");
+            assertDbExists(!expectEvolved, env, NAME, "kshort");
+            assertDbExists(!expectEvolved, env, NAME, "kint");
+            assertDbExists(!expectEvolved, env, NAME, "klong");
+            assertDbExists(!expectEvolved, env, NAME, "kfloat");
+            assertDbExists(!expectEvolved, env, NAME, "kdouble");
+            assertDbExists(!expectEvolved, env, NAME, "kchar");
+            assertDbExists(!expectEvolved, env, NAME, "kcomposite");
+        }
+    }
+
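+    /**
+     * Conversion that reverses the characters of a String field value.
+     */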
+    @SuppressWarnings("serial")
+    static class ConvertFieldContent_Conversion implements Conversion {
+
+        public void initialize(EntityModel model) {
+        }
+
+        public Object convert(Object fromValue) {
+            String s1 = (String) fromValue;
+            return (new StringBuilder(s1)).reverse().toString();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertFieldContent_Conversion;
+        }
+    }
+
+    @Entity(version=1)
+    static class ConvertFieldContent_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertFieldContent_Entity.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        String f1;
+        String f2;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertFieldContent_Entity.class.getName(), 0,
+                 "f1", new ConvertFieldContent_Conversion());
+            m.addConverter(converter);
+            converter = new Converter
+                (ConvertFieldContent_Entity.class.getName(), 0,
+                 "f2", new ConvertFieldContent_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertFieldContent_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertFieldContent_Entity.class);
+            ConvertFieldContent_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals("43210", obj.f1);
+            TestCase.assertEquals("98765", obj.f2);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertFieldContent_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertFieldContent_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertFieldContent_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj =
+                readRaw(store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectEvolved) {
+                checkRawFields(obj, "key", 99,
+                                    "f1", "43210",
+                                    "f2", "98765");
+            } else {
+                checkRawFields(obj, "key", 99,
+                                    "f1", "01234",
+                                    "f2", "56789");
+            }
+        }
+    }
+
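+    /**
+     * Address class whose zipCode field was a String at version 0 and is
+     * converted to an int by the Conversion below.
+     */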
+    @Persistent(version=1)
+    static class ConvertExample1_Address {
+        String street;
+        String city;
+        String state;
+        int zipCode;
+    }
+
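+    /**
+     * Conversion that changes a field value from a String to an Integer.
+     */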
+    @SuppressWarnings("serial")
+    static class ConvertExample1_Conversion implements Conversion {
+
+        public void initialize(EntityModel model) {
+        }
+
+        public Object convert(Object fromValue) {
+            return Integer.valueOf((String) fromValue);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertExample1_Conversion;
+        }
+    }
+
+    @Entity
+    static class ConvertExample1_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample1_Entity.class.getName();
+        private static final String NAME2 =
+            ConvertExample1_Address.class.getName();
+
+        @PrimaryKey
+        int key = 99;
+
+        ConvertExample1_Address embed;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample1_Address.class.getName(), 0,
+                 "zipCode", new ConvertExample1_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample1_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample1_Entity.class);
+            ConvertExample1_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals("street", obj.embed.street);
+            TestCase.assertEquals("city", obj.embed.city);
+            TestCase.assertEquals("state", obj.embed.state);
+            TestCase.assertEquals(12345, obj.embed.zipCode);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample1_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample1_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample1_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedType = store.getModel().getRawType(NAME2);
+            RawObject embed;
+            if (expectEvolved) {
+                embed = new RawObject
+                    (embedType,
+                     makeValues("street", "street",
+                                "city", "city",
+                                "state", "state",
+                                "zipCode", 12345),
+                     null);
+            } else {
+                embed = new RawObject
+                    (embedType,
+                     makeValues("street", "street",
+                                "city", "city",
+                                "state", "state",
+                                "zipCode", "12345"),
+                     null);
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
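+    /**
+     * Address class created by Example 2's conversion; at version 0 the
+     * Person stored the whole address as a single String.
+     */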
+    @Persistent
+    static class ConvertExample2_Address {
+        String street;
+        String city;
+        String state;
+        int zipCode;
+    }
+
+    @Entity(version=1)
+    static class ConvertExample2_Person
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample2_Person.class.getName();
+        private static final String NAME2 =
+            ConvertExample2_Address.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample2_Address address;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample2_Person.class.getName(), 0,
+                 "address", new ConvertExample2_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample2_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample2_Person.class);
+            ConvertExample2_Person obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.address);
+            TestCase.assertEquals("street", obj.address.street);
+            TestCase.assertEquals("city", obj.address.city);
+            TestCase.assertEquals("state", obj.address.state);
+            TestCase.assertEquals(12345, obj.address.zipCode);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample2_Person>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample2_Person.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample2_Person)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            Object embed;
+            if (expectEvolved) {
+                RawType embedType = store.getModel().getRawType(NAME2);
+                embed = new RawObject
+                    (embedType,
+                     makeValues("street", "street",
+                                "city", "city",
+                                "state", "state",
+                                "zipCode", 12345),
+                     null);
+            } else {
+                embed = "street#city#state#12345";
+            }
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "address", embed);
+        }
+    }
+
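+    /**
+     * Conversion that parses the version 0 address, a single String of the
+     * form "street#city#state#zipCode", into a RawObject with separate
+     * address fields.
+     */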
+    @SuppressWarnings("serial")
+    static class ConvertExample2_Conversion implements Conversion {
+        private transient RawType addressType;
+
+        public void initialize(EntityModel model) {
+            addressType = model.getRawType
+                (ConvertExample2_Address.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+
+            String oldAddress = (String) fromValue;
+            Map<String,Object> addressValues = new HashMap<String,Object>();
+            addressValues.put("street", parseAddress(1, oldAddress));
+            addressValues.put("city", parseAddress(2, oldAddress));
+            addressValues.put("state", parseAddress(3, oldAddress));
+            addressValues.put("zipCode",
+                              Integer.valueOf(parseAddress(4, oldAddress)));
+
+            return new RawObject(addressType, addressValues, null);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertExample2_Conversion;
+        }
+
+        private String parseAddress(int fieldNum, String oldAddress) {
+            StringTokenizer tokens = new StringTokenizer(oldAddress, "#");
+            String field = null;
+            for (int i = 0; i < fieldNum; i += 1) {
+                field = tokens.nextToken();
+            }
+            return field;
+        }
+    }
+
+    @Persistent
+    static class ConvertExample3_Address {
+        String street;
+        String city;
+        String state;
+        int zipCode;
+    }
+
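+    /**
+     * Conversion that moves the address fields of the version 0 Person
+     * into a new embedded ConvertExample3_Address instance.
+     */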
+    @SuppressWarnings("serial")
+    static class ConvertExample3_Conversion implements Conversion {
+        private transient RawType newPersonType;
+        private transient RawType addressType;
+
+        public void initialize(EntityModel model) {
+            newPersonType = model.getRawType
+                (ConvertExample3_Person.class.getName());
+            addressType = model.getRawType
+                (ConvertExample3_Address.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+
+            RawObject person = (RawObject) fromValue;
+            Map<String,Object> personValues = person.getValues();
+            Map<String,Object> addressValues = new HashMap<String,Object>();
+            RawObject address = new RawObject
+                (addressType, addressValues, null);
+
+            addressValues.put("street", personValues.remove("street"));
+            addressValues.put("city", personValues.remove("city"));
+            addressValues.put("state", personValues.remove("state"));
+            addressValues.put("zipCode", personValues.remove("zipCode"));
+            personValues.put("address", address);
+
+            return new RawObject
+                (newPersonType, personValues, person.getSuper());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertExample3_Conversion;
+        }
+    }
+
+    @Entity(version=1)
+    static class ConvertExample3_Person
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample3_Person.class.getName();
+        private static final String NAME2 =
+            ConvertExample3_Address.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample3_Address address;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample3_Person.class.getName(), 0,
+                 new ConvertExample3_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample3_Person.class);
+            ConvertExample3_Person obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.address);
+            TestCase.assertEquals("street", obj.address.street);
+            TestCase.assertEquals("city", obj.address.city);
+            TestCase.assertEquals("state", obj.address.state);
+            TestCase.assertEquals(12345, obj.address.zipCode);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3_Person>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample3_Person.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample3_Person)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectEvolved) {
+                RawType embedType = store.getModel().getRawType(NAME2);
+                Object embed = new RawObject
+                    (embedType,
+                     makeValues("street", "street",
+                                "city", "city",
+                                "state", "state",
+                                "zipCode", 12345),
+                     null);
+                checkRawFields(obj, "key", 99, "address", embed);
+            } else {
+                checkRawFields(obj, "key", 99,
+                                    "street", "street",
+                                    "city", "city",
+                                    "state", "state",
+                                    "zipCode", 12345);
+            }
+        }
+    }
+
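+    /**
+     * Inverse of Example 3: a conversion that flattens the embedded
+     * address object back into individual fields of the Person.
+     */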
+    @SuppressWarnings("serial")
+    static class ConvertExample3Reverse_Conversion implements Conversion {
+        private transient RawType newPersonType;
+
+        public void initialize(EntityModel model) {
+            newPersonType = model.getRawType
+                (ConvertExample3Reverse_Person.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+
+            RawObject person = (RawObject) fromValue;
+            Map<String,Object> personValues = person.getValues();
+            RawObject address = (RawObject) personValues.remove("address");
+            Map<String,Object> addressValues = address.getValues();
+
+            personValues.put("street", addressValues.remove("street"));
+            personValues.put("city", addressValues.remove("city"));
+            personValues.put("state", addressValues.remove("state"));
+            personValues.put("zipCode", addressValues.remove("zipCode"));
+
+            return new RawObject
+                (newPersonType, personValues, person.getSuper());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertExample3Reverse_Conversion;
+        }
+    }
+
+    @Entity(version=1)
+    static class ConvertExample3Reverse_Person
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample3Reverse_Person.class.getName();
+        private static final String NAME2 =
+            PREFIX + "ConvertExample3Reverse_Address";
+
+        @PrimaryKey
+        int key;
+
+        String street;
+        String city;
+        String state;
+        int zipCode;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample3Reverse_Person.class.getName(), 0,
+                 new ConvertExample3Reverse_Conversion());
+            m.addConverter(converter);
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample3Reverse_Person.class);
+            ConvertExample3Reverse_Person obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals("street", obj.street);
+            TestCase.assertEquals("city", obj.city);
+            TestCase.assertEquals("state", obj.state);
+            TestCase.assertEquals(12345, obj.zipCode);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample3Reverse_Person.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample3Reverse_Person)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectEvolved) {
+                checkRawFields(obj, "key", 99,
+                                    "street", "street",
+                                    "city", "city",
+                                    "state", "state",
+                                    "zipCode", 12345);
+            } else {
+                RawType embedType = store.getModel().getRawType(NAME2);
+                Object embed = new RawObject
+                    (embedType,
+                     makeValues("street", "street",
+                                "city", "city",
+                                "state", "state",
+                                "zipCode", 12345),
+                     null);
+                checkRawFields(obj, "key", 99, "address", embed);
+            }
+        }
+    }
+
+    @Persistent(version=1)
+    static class ConvertExample4_A extends ConvertExample4_B {
+    }
+
+    @Persistent(version=1)
+    static class ConvertExample4_B {
+        String name;
+    }
+
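+    /**
+     * Conversion that moves the name field from subclass A to its
+     * superclass B within the same instance.
+     */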
+    @SuppressWarnings("serial")
+    static class Example4_Conversion implements Conversion {
+        private transient RawType newAType;
+        private transient RawType newBType;
+
+        public void initialize(EntityModel model) {
+            newAType = model.getRawType(ConvertExample4_A.class.getName());
+            newBType = model.getRawType(ConvertExample4_B.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+            RawObject oldA = (RawObject) fromValue;
+            RawObject oldB = oldA.getSuper();
+            Map<String,Object> aValues = oldA.getValues();
+            Map<String,Object> bValues = oldB.getValues();
+            bValues.put("name", aValues.remove("name"));
+            RawObject newB = new RawObject(newBType, bValues, oldB.getSuper());
+            RawObject newA = new RawObject(newAType, aValues, newB);
+            return newA;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof Example4_Conversion;
+        }
+    }
+
+    @Entity(version=1)
+    static class ConvertExample4_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample4_Entity.class.getName();
+        private static final String NAME2 =
+            ConvertExample4_A.class.getName();
+        private static final String NAME3 =
+            ConvertExample4_B.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample4_A embed;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample4_A.class.getName(), 0,
+                 new Example4_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkVersions(model, NAME3, 1, NAME3, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 1);
+                checkVersions(model, NAME3, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample4_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample4_Entity.class);
+            ConvertExample4_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals("name", obj.embed.name);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample4_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample4_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample4_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedTypeA = store.getModel().getRawType(NAME2);
+            RawType embedTypeB = store.getModel().getRawType(NAME3);
+            Object embed;
+            if (expectEvolved) {
+                embed = new RawObject(embedTypeA, makeValues(),
+                        new RawObject
+                            (embedTypeB, makeValues("name", "name"), null));
+            } else {
+                embed = new RawObject(embedTypeA, makeValues("name", "name"),
+                        new RawObject
+                            (embedTypeB, makeValues(), null));
+            }
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Persistent(version=1)
+    static class ConvertExample5_Pet {
+        String name;
+    }
+
+    @Persistent
+    static class ConvertExample5_Cat extends ConvertExample5_Pet {
+        int finickyLevel;
+    }
+
+    @Persistent
+    static class ConvertExample5_Dog extends ConvertExample5_Pet {
+        double barkVolume;
+    }
+
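+    /**
+     * Conversion that replaces the flat version 0 Pet, which carried an
+     * isCatNotDog flag plus both finickyLevel and barkVolume fields, with
+     * an instance of the Cat or Dog subtype.
+     */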
+    @SuppressWarnings("serial")
+    static class ConvertExample5_Conversion implements Conversion {
+        private transient RawType newPetType;
+        private transient RawType dogType;
+        private transient RawType catType;
+
+        public void initialize(EntityModel model) {
+            newPetType = model.getRawType(ConvertExample5_Pet.class.getName());
+            dogType = model.getRawType(ConvertExample5_Dog.class.getName());
+            catType = model.getRawType(ConvertExample5_Cat.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+            RawObject pet = (RawObject) fromValue;
+            Map<String,Object> petValues = pet.getValues();
+            Map<String,Object> subTypeValues = new HashMap<String,Object>();
+            Boolean isCat = (Boolean) petValues.remove("isCatNotDog");
+            Integer finickyLevel = (Integer) petValues.remove("finickyLevel");
+            Double barkVolume = (Double) petValues.remove("barkVolume");
+            RawType newSubType;
+            if (isCat) {
+                newSubType = catType;
+                subTypeValues.put("finickyLevel", finickyLevel);
+            } else {
+                newSubType = dogType;
+                subTypeValues.put("barkVolume", barkVolume);
+            }
+            RawObject newPet = new RawObject
+                (newPetType, petValues, pet.getSuper());
+            return new RawObject(newSubType, subTypeValues, newPet);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof ConvertExample5_Conversion;
+        }
+    }
+
+    @Entity(version=1)
+    static class ConvertExample5_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ConvertExample5_Entity.class.getName();
+        private static final String NAME2 =
+            ConvertExample5_Pet.class.getName();
+        private static final String NAME3 =
+            ConvertExample5_Cat.class.getName();
+        private static final String NAME4 =
+            ConvertExample5_Dog.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample5_Cat cat;
+        ConvertExample5_Dog dog;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter
+                (ConvertExample5_Pet.class.getName(), 0,
+                 new ConvertExample5_Conversion());
+            m.addConverter(converter);
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 1, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 1);
+            }
+            checkVersions(model, NAME3, 0);
+            checkVersions(model, NAME4, 0);
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample5_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample5_Entity.class);
+            ConvertExample5_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.cat);
+            TestCase.assertEquals("Jeffry", obj.cat.name);
+            TestCase.assertEquals(999, obj.cat.finickyLevel);
+            TestCase.assertNotNull(obj.dog);
+            TestCase.assertEquals("Nelson", obj.dog.name);
+            TestCase.assertEquals(0.01, obj.dog.barkVolume);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample5_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ConvertExample5_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ConvertExample5_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType petType = store.getModel().getRawType(NAME2);
+            RawObject cat;
+            RawObject dog;
+            if (expectEvolved) {
+                RawType catType = store.getModel().getRawType(NAME3);
+                RawType dogType = store.getModel().getRawType(NAME4);
+                cat = new RawObject(catType, makeValues("finickyLevel", 999),
+                      new RawObject(petType, makeValues("name", "Jeffry"),
+                                    null));
+                dog = new RawObject(dogType, makeValues("barkVolume", 0.01),
+                      new RawObject(petType, makeValues("name", "Nelson"),
+                                    null));
+            } else {
+                cat = new RawObject(petType, makeValues("name", "Jeffry",
+                                                        "isCatNotDog", true,
+                                                        "finickyLevel", 999,
+                                                        "barkVolume", 0.0),
+                                    null);
+                dog = new RawObject(petType, makeValues("name", "Nelson",
+                                                        "isCatNotDog", false,
+                                                        "finickyLevel", 0,
+                                                        "barkVolume", 0.01),
+                                    null);
+            }
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "cat", cat, "dog", dog);
+        }
+    }
+
+    @Persistent(version=1)
+    static class AllowFieldAddDelete_Embed {
+        private String f0 = "0";
+        private String f2;
+        private int f3 = 3;
+        private String f4;
+        private int f5 = 5;
+        private String f8 = "8";
+        private int f9 = 9;
+    }
+
+    @Persistent(version=1)
+    static class AllowFieldAddDelete_Base
+        extends EvolveCase {
+
+        private String f0 = "0";
+        private String f2;
+        private int f3 = 3;
+        private String f4;
+        private int f5 = 5;
+        private String f8 = "8";
+        private int f9 = 9;
+    }
+
+    @Entity(version=1)
+    static class AllowFieldAddDelete
+        extends AllowFieldAddDelete_Base {
+
+        private static final String NAME =
+            AllowFieldAddDelete.class.getName();
+        private static final String NAME2 =
+            AllowFieldAddDelete_Base.class.getName();
+        private static final String NAME3 =
+            AllowFieldAddDelete_Embed.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        AllowFieldAddDelete_Embed embed;
+
+        private String f0 = "0";
+        private String f2;
+        private int f3 = 3;
+        private String f4;
+        private int f5 = 5;
+        private String f8 = "8";
+        private int f9 = 9;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            for (String name : new String[] {NAME, NAME2, NAME3}) {
+                m.addDeleter(new Deleter(name, 0, "f1"));
+                m.addDeleter(new Deleter(name, 0, "f6"));
+                m.addDeleter(new Deleter(name, 0, "f7"));
+            }
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 1, NAME2, 0);
+                checkVersions(model, NAME3, 1, NAME3, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 1);
+                checkVersions(model, NAME3, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldAddDelete>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     AllowFieldAddDelete.class);
+            AllowFieldAddDelete obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            {
+                AllowFieldAddDelete o = obj;
+
+                TestCase.assertNotNull(o);
+                TestCase.assertEquals("0", o.f0);
+                TestCase.assertEquals("2", o.f2);
+                TestCase.assertEquals(3, o.f3);
+                TestCase.assertEquals("4", o.f4);
+                TestCase.assertEquals(5, o.f5);
+                TestCase.assertEquals("8", o.f8);
+                TestCase.assertEquals(9, o.f9);
+            }
+            {
+                AllowFieldAddDelete_Base o = (AllowFieldAddDelete_Base) obj;
+
+                TestCase.assertNotNull(o);
+                TestCase.assertEquals("0", o.f0);
+                TestCase.assertEquals("2", o.f2);
+                TestCase.assertEquals(3, o.f3);
+                TestCase.assertEquals("4", o.f4);
+                TestCase.assertEquals(5, o.f5);
+                TestCase.assertEquals("8", o.f8);
+                TestCase.assertEquals(9, o.f9);
+            }
+            {
+                AllowFieldAddDelete_Embed o = obj.embed;
+
+                TestCase.assertNotNull(o);
+                TestCase.assertEquals("0", o.f0);
+                TestCase.assertEquals("2", o.f2);
+                TestCase.assertEquals(3, o.f3);
+                TestCase.assertEquals("4", o.f4);
+                TestCase.assertEquals(5, o.f5);
+                TestCase.assertEquals("8", o.f8);
+                TestCase.assertEquals(9, o.f9);
+            }
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldAddDelete>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     AllowFieldAddDelete.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((AllowFieldAddDelete)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        static final Object[] fixedFields0 = {
+            "f1", 1,
+            "f2", "2",
+            "f4", "4",
+            "f6", 6,
+            "f7", "7",
+        };
+
+        static final Object[] fixedFields1 = {
+            "f2", "2",
+            "f4", "4",
+        };
+
+        static final Object[] fixedFields2 = {
+            "f0", "0",
+            "f2", "2",
+            "f3", 3,
+            "f4", "4",
+            "f5", 5,
+            "f8", "8",
+            "f9", 9,
+        };
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType baseType = store.getModel().getRawType(NAME2);
+            RawType embedType = store.getModel().getRawType(NAME3);
+
+            Object[] ff;
+            if (expectEvolved) {
+                if (expectUpdated) {
+                    ff = fixedFields2;
+                } else {
+                    ff = fixedFields1;
+                }
+            } else {
+                ff = fixedFields0;
+            }
+            RawObject embed = new RawObject(embedType, makeValues(ff), null);
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0,
+                            NAME2, expectEvolved ? 1 : 0,
+                            CASECLS, 0);
+            checkRaw(obj, ff, "key", 99, "embed", embed);
+            checkRaw(obj.getSuper(), ff);
+        }
+
+        private void checkRaw(RawObject obj,
+                              Object[] fixedFields,
+                              Object... otherFields) {
+            Object[] allFields =
+                new Object[otherFields.length + fixedFields.length];
+            System.arraycopy(otherFields, 0, allFields, 0, otherFields.length);
+            System.arraycopy(fixedFields, 0, allFields,
+                             otherFields.length, fixedFields.length);
+            checkRawFields(obj, allFields);
+        }
+    }
+
+    static class ProxiedClass {
+        int data;
+
+        ProxiedClass(int data) {
+            this.data = data;
+        }
+    }
+
+    @Persistent(version=1, proxyFor=ProxiedClass.class)
+    static class ProxiedClass_Proxy implements PersistentProxy<ProxiedClass> {
+        long data;
+
+        public void initializeProxy(ProxiedClass o) {
+            data = o.data;
+        }
+
+        public ProxiedClass convertProxy() {
+            return new ProxiedClass((int) data);
+        }
+    }
+
+    @Entity
+    static class ProxiedClass_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ProxiedClass_Entity.class.getName();
+        private static final String NAME2 =
+            ProxiedClass_Proxy.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        ProxiedClass embed;
+
+        @Override
+        void configure(EntityModel model, StoreConfig config) {
+            model.registerClass(ProxiedClass_Proxy.class);
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME2, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ProxiedClass_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ProxiedClass_Entity.class);
+            ProxiedClass_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals(88, obj.embed.data);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ProxiedClass_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ProxiedClass_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ProxiedClass_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawType embedType = store.getModel().getRawType(NAME2);
+            RawObject embed;
+            if (expectEvolved) {
+                embed = new RawObject
+                    (embedType, makeValues("data", 88L), null);
+            } else {
+                embed = new RawObject
+                    (embedType, makeValues("data", 88), null);
+            }
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Persistent(proxyFor=StringBuffer.class)
+    static class DisallowChangeProxyFor_Proxy2
+        implements PersistentProxy<StringBuffer> {
+
+        String data;
+
+        public void initializeProxy(StringBuffer o) {
+            data = o.toString();
+        }
+
+        public StringBuffer convertProxy() {
+            return new StringBuffer(data);
+        }
+    }
+
+    @Persistent(proxyFor=StringBuilder.class)
+    static class DisallowChangeProxyFor_Proxy
+        implements PersistentProxy<StringBuilder> {
+
+        String data;
+
+        public void initializeProxy(StringBuilder o) {
+            data = o.toString();
+        }
+
+        public StringBuilder convertProxy() {
+            return new StringBuilder(data);
+        }
+    }
+
+    @Entity
+    static class DisallowChangeProxyFor
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Error when evolving class: java.lang.StringBuffer version: 0 to class: java.lang.StringBuffer version: 0 Error: The proxy class for this type has been changed from: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy to: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy2";
+        }
+
+        @Override
+        void configure(EntityModel model, StoreConfig config) {
+            model.registerClass(DisallowChangeProxyFor_Proxy.class);
+            model.registerClass(DisallowChangeProxyFor_Proxy2.class);
+        }
+    }
+
+    @Persistent
+    static class DisallowDeleteProxyFor_Proxy {
+        String data;
+    }
+
+    @Entity
+    static class DisallowDeleteProxyFor
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: java.lang.StringBuffer version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: java.lang.StringBuffer";
+        }
+    }
+
+    @Persistent(version=1)
+    static class ArrayNameChange_Component_Renamed {
+
+        long data;
+    }
+
+    @Entity
+    static class ArrayNameChange_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            ArrayNameChange_Entity.class.getName();
+        private static final String NAME2 =
+            ArrayNameChange_Component_Renamed.class.getName();
+        private static final String NAME3 =
+            PREFIX + "ArrayNameChange_Component";
+
+        @PrimaryKey
+        int key;
+
+        ArrayNameChange_Component_Renamed[] embed;
+        ArrayNameChange_Component_Renamed embed2;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addRenamer(new Renamer(NAME3, 0, NAME2));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 1, NAME3, 0);
+            } else {
+                checkVersions(model, NAME2, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ArrayNameChange_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     ArrayNameChange_Entity.class);
+            ArrayNameChange_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertNotNull(obj.embed);
+            TestCase.assertEquals(1, obj.embed.length);
+            TestCase.assertEquals(88L, obj.embed[0].data);
+            TestCase.assertSame(obj.embed2, obj.embed[0]);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ArrayNameChange_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     ArrayNameChange_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((ArrayNameChange_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            String compTypeName = expectEvolved ? NAME2 : NAME3;
+            String arrayTypeName = "[L" + compTypeName + ';';
+            RawType compType = store.getModel().getRawType(compTypeName);
+            RawType arrayType = store.getModel().getRawType(arrayTypeName);
+            RawObject embed2;
+            if (expectEvolved) {
+                embed2 = new RawObject
+                    (compType, makeValues("data", 88L), null);
+            } else {
+                embed2 = new RawObject
+                    (compType, makeValues("data", 88), null);
+            }
+            RawObject embed = new RawObject
+                (arrayType, new Object[] { embed2 });
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed, "embed2", embed2);
+        }
+    }
+
+    enum AddEnumConstant_Enum {
+        A, B, C;
+    }
+
+    @Entity(version=1)
+    static class AddEnumConstant_Entity
+        extends EvolveCase {
+
+        private static final String NAME =
+            AddEnumConstant_Entity.class.getName();
+        private static final String NAME2 =
+            AddEnumConstant_Enum.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        AddEnumConstant_Enum e1;
+        AddEnumConstant_Enum e2;
+        AddEnumConstant_Enum e3 = AddEnumConstant_Enum.C;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 0, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AddEnumConstant_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     AddEnumConstant_Entity.class);
+            AddEnumConstant_Entity obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertSame(AddEnumConstant_Enum.A, obj.e1);
+            TestCase.assertSame(AddEnumConstant_Enum.B, obj.e2);
+            TestCase.assertSame(AddEnumConstant_Enum.C, obj.e3);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AddEnumConstant_Entity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     AddEnumConstant_Entity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((AddEnumConstant_Entity)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            RawType enumType = store.getModel().getRawType(NAME2);
+            if (expectUpdated) {
+                checkRawFields(obj, "key", 99,
+                               "e1", new RawObject(enumType, "A"),
+                               "e2", new RawObject(enumType, "B"),
+                               "e3", new RawObject(enumType, "C"));
+            } else {
+                checkRawFields(obj, "key", 99,
+                               "e1", new RawObject(enumType, "A"),
+                               "e2", new RawObject(enumType, "B"));
+            }
+        }
+    }
+
+    enum DeleteEnumConstant_Enum {
+        A, C;
+    }
+
+    /**
+     * For now we don't allow deleting enum values.  This test case has code
+     * for testing conversions, for when we add versioning to enums.
+     */
+    @Entity
+    static class DeleteEnumConstant_NoMutation
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeleteEnumConstant_NoMutation.class.getName();
+        private static final String NAME2 =
+            DeleteEnumConstant_Enum.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        DeleteEnumConstant_Enum e1;
+        DeleteEnumConstant_Enum e2;
+        DeleteEnumConstant_Enum e3;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Incompatible enum type changed detected when evolving class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 Error: Enum values may not be removed: [B]";
+        }
+
+            /*
+            Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 Error: Converter is required when a value is removed from an enum: [B]
+            */
+
+        /*
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            Converter converter = new Converter(NAME2, 0, new MyConversion());
+            m.addConverter(converter);
+            return m;
+        }
+        */
+
+        @SuppressWarnings("serial")
+        static class MyConversion implements Conversion {
+
+            transient RawType newType;
+
+            public void initialize(EntityModel model) {
+                newType = model.getRawType(NAME2);
+                TestCase.assertNotNull(newType);
+            }
+
+            public Object convert(Object fromValue) {
+                TestCase.assertNotNull(newType);
+                RawObject obj = (RawObject) fromValue;
+                String val = obj.getEnum();
+                TestCase.assertNotNull(val);
+                if ("B".equals(val)) {
+                    val = "C";
+                }
+                return new RawObject(newType, val);
+            }
+
+            @Override
+            public boolean equals(Object other) {
+                return other instanceof MyConversion;
+            }
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 0, null);
+            checkVersions(model, NAME, 0);
+            if (oldTypesExist) {
+                checkVersions(model, NAME2, 0, NAME2, 0);
+            } else {
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteEnumConstant_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteEnumConstant_NoMutation.class);
+            DeleteEnumConstant_NoMutation obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertSame(DeleteEnumConstant_Enum.A, obj.e1);
+            TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e2);
+            TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e3);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteEnumConstant_NoMutation>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteEnumConstant_NoMutation.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((DeleteEnumConstant_NoMutation)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            RawType enumType = store.getModel().getRawType(NAME2);
+            if (expectEvolved) {
+                checkRawFields(obj, "key", 99,
+                               "e1", new RawObject(enumType, "A"),
+                               "e2", new RawObject(enumType, "C"),
+                               "e3", new RawObject(enumType, "C"));
+            } else {
+                checkRawFields(obj, "key", 99,
+                               "e1", new RawObject(enumType, "A"),
+                               "e2", new RawObject(enumType, "B"),
+                               "e3", new RawObject(enumType, "C"));
+            }
+        }
+    }
+
+    @Entity
+    static class DisallowChangeKeyRelate
+        extends EvolveCase {
+
+        private static final String NAME =
+            DisallowChangeKeyRelate.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        int skey;
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Change detected in the relate attribute (Relationship) of a secondary key when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 Error: Old key: skey relate: ONE_TO_ONE new key: skey relate: MANY_TO_ONE";
+        }
+    }
+
+    @Entity(version=1)
+    static class AllowChangeKeyMetadata
+        extends EvolveCase {
+
+        private static final String NAME =
+            AllowChangeKeyMetadata.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        /*
+         * Combined fields from version 0 and 1:
+         *  addAnnotation = 88;
+         *  dropField = 77;
+         *  dropAnnotation = 66;
+         *  addField = 55;
+         *  renamedField = 44; // was toBeRenamedField
+         *  aa = 33;
+         *  ff = 22;
+         */
+
+        int aa;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int addAnnotation;
+
+        int dropAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer addField;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int renamedField;
+
+        int ff;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0, "dropField"));
+            m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField",
+                                              "renamedField"));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadata>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     AllowChangeKeyMetadata.class);
+            AllowChangeKeyMetadata obj = index.get(99);
+            checkValues(obj);
+
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "addAnnotation").get(88));
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "renamedField").get(44));
+            if (updated) {
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            } else {
+                TestCase.assertNull(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            }
+
+            if (doUpdate) {
+                obj.addField = 55;
+                index.put(obj);
+                updated = true;
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addAnnotation").get(88));
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadata>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     AllowChangeKeyMetadata.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((AllowChangeKeyMetadata)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        private void checkValues(AllowChangeKeyMetadata obj) {
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.addAnnotation);
+            TestCase.assertEquals(66, obj.dropAnnotation);
+            TestCase.assertEquals(44, obj.renamedField);
+            TestCase.assertEquals(33, obj.aa);
+            TestCase.assertEquals(22, obj.ff);
+            if (updated) {
+                TestCase.assertEquals(Integer.valueOf(55), obj.addField);
+            } else {
+                TestCase.assertNull(obj.addField);
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectUpdated) {
+                checkRawFields(obj, "key", 99,
+                               "addAnnotation", 88,
+                               "dropAnnotation", 66,
+                               "addField", 55,
+                               "renamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            } else if (expectEvolved) {
+                checkRawFields(obj, "key", 99,
+                               "addAnnotation", 88,
+                               "dropAnnotation", 66,
+                               "renamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            } else {
+                checkRawFields(obj, "key", 99,
+                               "addAnnotation", 88,
+                               "dropField", 77,
+                               "dropAnnotation", 66,
+                               "toBeRenamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            }
+            Environment env = store.getEnvironment();
+            assertDbExists(expectEvolved, env, NAME, "addAnnotation");
+            assertDbExists(expectEvolved, env, NAME, "addField");
+            assertDbExists(expectEvolved, env, NAME, "renamedField");
+            assertDbExists(!expectEvolved, env, NAME, "toBeRenamedField");
+            assertDbExists(!expectEvolved, env, NAME, "dropField");
+            assertDbExists(!expectEvolved, env, NAME, "dropAnnotation");
+        }
+    }
+
+    /**
+     * Same test as AllowChangeKeyMetadata but with the secondary keys in an
+     * entity subclass.  [#16253]
+     */
+    @Persistent(version=1)
+    static class AllowChangeKeyMetadataInSubclass
+        extends AllowChangeKeyMetadataEntity {
+
+        private static final String NAME =
+            AllowChangeKeyMetadataInSubclass.class.getName();
+        private static final String NAME2 =
+            AllowChangeKeyMetadataEntity.class.getName();
+
+        /*
+         * Combined fields from version 0 and 1:
+         *  addAnnotation = 88;
+         *  dropField = 77;
+         *  dropAnnotation = 66;
+         *  addField = 55;
+         *  renamedField = 44; // was toBeRenamedField
+         *  aa = 33;
+         *  ff = 22;
+         */
+
+        int aa;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int addAnnotation;
+
+        int dropAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer addField;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int renamedField;
+
+        int ff;
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0, "dropField"));
+            m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField",
+                                              "renamedField"));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(true, model, env, NAME, 1);
+            checkEntity(true, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     AllowChangeKeyMetadataEntity.class);
+            AllowChangeKeyMetadataEntity obj = index.get(99);
+            checkValues(obj);
+
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "addAnnotation").get(88));
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "renamedField").get(44));
+            if (updated) {
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            } else {
+                TestCase.assertNull(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            }
+
+            if (doUpdate) {
+                ((AllowChangeKeyMetadataInSubclass) obj).addField = 55;
+                index.put(obj);
+                updated = true;
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addAnnotation").get(88));
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "addField").get(55));
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     AllowChangeKeyMetadataEntity.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+            index.put((AllowChangeKeyMetadataInSubclass)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        private void checkValues(AllowChangeKeyMetadataEntity objParam) {
+            AllowChangeKeyMetadataInSubclass obj =
+                (AllowChangeKeyMetadataInSubclass) objParam;
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals(88, obj.addAnnotation);
+            TestCase.assertEquals(66, obj.dropAnnotation);
+            TestCase.assertEquals(44, obj.renamedField);
+            TestCase.assertEquals(33, obj.aa);
+            TestCase.assertEquals(22, obj.ff);
+            if (updated) {
+                TestCase.assertEquals(Integer.valueOf(55), obj.addField);
+            } else {
+                TestCase.assertNull(obj.addField);
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, NAME2, 99, NAME, expectEvolved ? 1 : 0,
+                 NAME2, 0, CASECLS, 0);
+            checkRawFields(obj.getSuper(), "key", 99);
+            if (expectUpdated) {
+                checkRawFields(obj,
+                               "addAnnotation", 88,
+                               "dropAnnotation", 66,
+                               "addField", 55,
+                               "renamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            } else if (expectEvolved) {
+                checkRawFields(obj,
+                               "addAnnotation", 88,
+                               "dropAnnotation", 66,
+                               "renamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            } else {
+                checkRawFields(obj,
+                               "addAnnotation", 88,
+                               "dropField", 77,
+                               "dropAnnotation", 66,
+                               "toBeRenamedField", 44,
+                               "aa", 33,
+                               "ff", 22);
+            }
+            Environment env = store.getEnvironment();
+            assertDbExists(expectEvolved, env, NAME2, "addAnnotation");
+            assertDbExists(expectEvolved, env, NAME2, "addField");
+            assertDbExists(expectEvolved, env, NAME2, "renamedField");
+            assertDbExists(!expectEvolved, env, NAME2, "toBeRenamedField");
+            assertDbExists(!expectEvolved, env, NAME2, "dropField");
+            assertDbExists(!expectEvolved, env, NAME2, "dropAnnotation");
+        }
+    }
+
+    @Entity
+    static class AllowChangeKeyMetadataEntity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+    }
+
+    /**
+     * Special case of adding secondaries that caused
+     * IndexOutOfBoundsException. [#15524]
+     */
+    @Entity(version=1)
+    static class AllowAddSecondary
+        extends EvolveCase {
+
+        private static final String NAME =
+            AllowAddSecondary.class.getName();
+
+        @PrimaryKey
+        long key;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int a;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int b;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,AllowAddSecondary>
+                index = store.getPrimaryIndex
+                    (Long.class,
+                     AllowAddSecondary.class);
+            AllowAddSecondary obj = index.get(99L);
+            checkValues(obj);
+
+            checkValues(store.getSecondaryIndex
+                (index, Integer.class, "a").get(1));
+            if (updated) {
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "b").get(3));
+                TestCase.assertNull(store.getSecondaryIndex
+                    (index, Integer.class, "b").get(2));
+            } else {
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "b").get(2));
+                TestCase.assertNull(store.getSecondaryIndex
+                    (index, Integer.class, "b").get(3));
+            }
+
+            if (doUpdate) {
+                obj.b = 3;
+                index.put(obj);
+                updated = true;
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "a").get(1));
+                checkValues(store.getSecondaryIndex
+                    (index, Integer.class, "b").get(3));
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,AllowAddSecondary>
+                index = newStore.getPrimaryIndex
+                    (Long.class,
+                     AllowAddSecondary.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99L);
+            index.put((AllowAddSecondary)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        private void checkValues(AllowAddSecondary obj) {
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99L, obj.key);
+            TestCase.assertEquals(1, obj.a);
+            if (updated) {
+                TestCase.assertEquals(3, obj.b);
+            } else {
+                TestCase.assertEquals(2, obj.b);
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99L, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectUpdated) {
+                checkRawFields(obj, "key", 99L,
+                               "a", 1,
+                               "b", 3);
+            } else {
+                checkRawFields(obj, "key", 99L,
+                               "a", 1,
+                               "b", 2);
+            }
+            Environment env = store.getEnvironment();
+            assertDbExists(expectEvolved, env, NAME, "a");
+            assertDbExists(expectEvolved, env, NAME, "b");
+        }
+    }
+
+    @Entity(version=1)
+    static class FieldAddAndConvert
+        extends EvolveCase {
+
+        private static final String NAME =
+            FieldAddAndConvert.class.getName();
+
+        @PrimaryKey
+        int key;
+
+        private String f0 = "0"; // new field
+        private String f1 = "1"; // converted field
+        private String f2 = "2"; // new field
+        private String f3 = "3"; // converted field
+        private String f4 = "4"; // new field
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addConverter(new Converter(NAME, 0, "f1", new IntToString()));
+            m.addConverter(new Converter(NAME, 0, "f3", new IntToString()));
+            return m;
+        }
+
+        @SuppressWarnings("serial")
+        private static class IntToString implements Conversion {
+
+            public void initialize(EntityModel model) {
+            }
+
+            public Object convert(Object fromValue) {
+                return fromValue.toString();
+            }
+
+            @Override
+            public boolean equals(Object other) {
+                return other instanceof IntToString;
+            }
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,FieldAddAndConvert>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     FieldAddAndConvert.class);
+            FieldAddAndConvert obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+            TestCase.assertEquals("0", obj.f0);
+            TestCase.assertEquals("1", obj.f1);
+            TestCase.assertEquals("2", obj.f2);
+            TestCase.assertEquals("3", obj.f3);
+            TestCase.assertEquals("4", obj.f4);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,FieldAddAndConvert>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     FieldAddAndConvert.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((FieldAddAndConvert)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj = readRaw
+                (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+            if (expectUpdated) {
+                checkRawFields(obj,
+                               "key", 99, 
+                               "f0", "0",
+                               "f1", "1",
+                               "f2", "2",
+                               "f3", "3",
+                               "f4", "4");
+            } else if (expectEvolved) {
+                checkRawFields(obj,
+                               "key", 99, 
+                               "f1", "1",
+                               "f3", "3");
+            } else {
+                checkRawFields(obj,
+                               "key", 99, 
+                               "f1", 1,
+                               "f3", 3);
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveClasses.java.original b/test/com/sleepycat/persist/test/EvolveClasses.java.original
new file mode 100644
index 0000000000000000000000000000000000000000..e6523225d00811e4e465a6346574aa49f46da711
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveClasses.java.original
@@ -0,0 +1,2760 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveClasses.java.original,v 1.17.2.1 2009/03/27 17:12:55 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.math.BigInteger;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawStore;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+/**
+ * Nested classes are original versions of classes of the same name in
+ * EvolveClasses.java.  See EvolveTestBase.java for the steps that are taken to
+ * add a new class (test case).
+ *
+ * @author Mark Hayes
+ */
+class EvolveClasses {
+
+    @Entity
+    static class DeletedEntity1_ClassRemoved extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity1_ClassRemoved> index =
+                store.getPrimaryIndex
+                    (Integer.class, DeletedEntity1_ClassRemoved.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,DeletedEntity1_ClassRemoved>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity1_ClassRemoved.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity1_ClassRemoved.class.getName(), "skey");
+        }
+    }
+
+    @Entity
+    static class DeletedEntity2_ClassRemoved extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity2_ClassRemoved> index =
+                store.getPrimaryIndex
+                    (Integer.class, DeletedEntity2_ClassRemoved.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,DeletedEntity2_ClassRemoved>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity2_ClassRemoved.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity2_ClassRemoved.class.getName(), "skey");
+        }
+    }
+
+    @Entity
+    static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity3_AnnotRemoved_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity3_AnnotRemoved_NoMutation.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,
+                           DeletedEntity3_AnnotRemoved_NoMutation>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity3_AnnotRemoved_NoMutation.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity3_AnnotRemoved_NoMutation.class.getName(),
+                 "skey");
+        }
+    }
+
+    @Entity
+    static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity4_AnnotRemoved_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity4_AnnotRemoved_WithDeleter.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,
+                           DeletedEntity4_AnnotRemoved_WithDeleter>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity4_AnnotRemoved_WithDeleter.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity4_AnnotRemoved_WithDeleter.class.getName(),
+                 "skey");
+        }
+    }
+
+    @Entity
+    static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity5_EntityToPersist_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity5_EntityToPersist_NoMutation.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,
+                           DeletedEntity5_EntityToPersist_NoMutation>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity5_EntityToPersist_NoMutation.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity5_EntityToPersist_NoMutation.class.getName(),
+                 "skey");
+        }
+    }
+
+    @Entity
+    static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedEntity6_EntityToPersist_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedEntity6_EntityToPersist_WithDeleter.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,
+                           DeletedEntity6_EntityToPersist_WithDeleter>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 DeletedEntity6_EntityToPersist_WithDeleter.class.getName());
+            assertDbExists
+                (true, env,
+                 DeletedEntity6_EntityToPersist_WithDeleter.class.getName(),
+                 "skey");
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist1_ClassRemoved {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist1_ClassRemoved embed =
+            new DeletedPersist1_ClassRemoved();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist1_ClassRemoved_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist1_ClassRemoved_NoMutation.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist2_ClassRemoved {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist2_ClassRemoved embed =
+            new DeletedPersist2_ClassRemoved();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist2_ClassRemoved_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist2_ClassRemoved_WithDeleter.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist3_AnnotRemoved {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist3_AnnotRemoved embed =
+            new DeletedPersist3_AnnotRemoved();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist3_AnnotRemoved_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist3_AnnotRemoved_NoMutation.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist4_AnnotRemoved {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist4_AnnotRemoved embed =
+            new DeletedPersist4_AnnotRemoved();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist4_AnnotRemoved_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist4_AnnotRemoved_WithDeleter.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist5_PersistToEntity {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist5_PersistToEntity_NoMutation
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist5_PersistToEntity embed =
+            new DeletedPersist5_PersistToEntity();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist5_PersistToEntity_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist5_PersistToEntity_NoMutation.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeletedPersist6_PersistToEntity {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeletedPersist6_PersistToEntity_WithDeleter
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        DeletedPersist6_PersistToEntity embed =
+            new DeletedPersist6_PersistToEntity();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeletedPersist6_PersistToEntity_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist6_PersistToEntity_WithDeleter.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class RenamedEntity1_NewEntityName
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,RenamedEntity1_NewEntityName>
+                index = store.getPrimaryIndex
+                    (Integer.class, RenamedEntity1_NewEntityName.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,RenamedEntity1_NewEntityName>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 RenamedEntity1_NewEntityName.class.getName());
+            assertDbExists
+                (true, env,
+                 RenamedEntity1_NewEntityName.class.getName(), "skey");
+        }
+    }
+
+    @Entity
+    static class RenamedEntity2_NewEntityName
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,RenamedEntity2_NewEntityName>
+                index = store.getPrimaryIndex
+                    (Integer.class, RenamedEntity2_NewEntityName.class);
+            index.put(this);
+
+            SecondaryIndex<Integer,Integer,RenamedEntity2_NewEntityName>
+                sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+            TestCase.assertNotNull(sindex.get(88));
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            assertDbExists
+                (true, env,
+                 RenamedEntity2_NewEntityName.class.getName());
+            assertDbExists
+                (true, env,
+                 RenamedEntity2_NewEntityName.class.getName(), "skey");
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass1_BaseClass
+        extends EvolveCase {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeleteSuperclass1_NoMutation
+        extends DeleteSuperclass1_BaseClass {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass1_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass1_NoMutation.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass2_BaseClass
+        extends EvolveCase {
+
+        int f = 123;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 456;
+    }
+
+    @Entity
+    static class DeleteSuperclass2_WithConverter
+        extends DeleteSuperclass2_BaseClass {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey2 = 77;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer skey3 = 66;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass2_WithConverter.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass3_BaseClass
+        extends EvolveCase {
+
+        int f = 123;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey = 456;
+    }
+
+    @Entity
+    static class DeleteSuperclass3_WithDeleter
+        extends DeleteSuperclass3_BaseClass {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass3_WithDeleter.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass4_BaseClass
+        extends EvolveCase {
+    }
+
+    @Entity
+    static class DeleteSuperclass4_NoFields
+        extends DeleteSuperclass4_BaseClass {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass4_NoFields.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DeleteSuperclass5_Embedded_Base {
+
+        int g = 456;
+    }
+
+    @Persistent
+    static class DeleteSuperclass5_Embedded
+        extends DeleteSuperclass5_Embedded_Base {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class DeleteSuperclass5_Top
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        DeleteSuperclass5_Embedded embed =
+            new DeleteSuperclass5_Embedded();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteSuperclass5_Top>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSuperclass5_Top.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class InsertSuperclass1_Between
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass1_Between>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass1_Between.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class InsertSuperclass2_Embedded {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class InsertSuperclass2_Top
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        InsertSuperclass2_Embedded embed =
+            new InsertSuperclass2_Embedded();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,InsertSuperclass2_Top>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     InsertSuperclass2_Top.class);
+            index.put(this);
+        }
+    }
+
+    /*
+    @Persistent
+    static class RenameFields1_Base
+        extends EvolveCase {
+
+        int f = 123;
+    }
+
+    @Entity
+    static class RenameFields1
+        extends RenameFields1_Base {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,RenameFields1>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     RenameFields1.class);
+            index.put(this);
+        }
+    }
+    */
+
+    @Entity
+    static class DisallowNonKeyField_PrimitiveToObject
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_PrimitiveToObject>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_PrimitiveToObject.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_ObjectToPrimitive
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        String ff = "88";
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_ObjectToPrimitive>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_ObjectToPrimitive.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class MyType {
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MyType;
+        }
+    }
+
+    @Persistent
+    static class MySubtype extends MyType {
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MySubtype;
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_ObjectToSubtype
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        MyType ff = new MyType();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_ObjectToSubtype>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_ObjectToSubtype.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_ObjectToUnrelatedSimple
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Integer ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_ObjectToUnrelatedSimple>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_ObjectToUnrelatedSimple.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_ObjectToUnrelatedOther
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Integer ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_ObjectToUnrelatedOther>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_ObjectToUnrelatedOther.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_byte2boolean
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        byte ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_byte2boolean>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_byte2boolean.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_short2byte
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        short ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_short2byte>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_short2byte.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_int2short
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_int2short>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_int2short.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_long2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        long ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_long2int>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_long2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_float2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        float ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_float2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_float2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_double2float
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        double ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_double2float>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_double2float.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Byte2byte
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Byte ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Byte2byte>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Byte2byte.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Character2char
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Character ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Character2char>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Character2char.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Short2short
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Short ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Short2short>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Short2short.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Integer2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Integer ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Integer2int>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Integer2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Long2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Long ff = 88L;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Long2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Long2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Float2float
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Float ff = 88F;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Float2float>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Float2float.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_Double2double
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        Double ff = 88D;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_Double2double>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_Double2double.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_float2BigInt
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        float ff = 88F;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_float2BigInt>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_float2BigInt.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowNonKeyField_BigInt2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        BigInteger ff = BigInteger.valueOf(88);
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowNonKeyField_BigInt2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowNonKeyField_BigInt2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_byte2short
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        byte ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_byte2short>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_byte2short.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_char2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        char ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_char2int>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_char2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_short2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        short ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_short2int>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_short2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_int2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_int2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_int2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_long2float
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        long ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_long2float>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_long2float.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_float2double
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        float ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_float2double>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_float2double.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Byte2short2
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Byte ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Byte2short2>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Byte2short2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Character2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Character ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Character2int>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Character2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Short2int2
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Short ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Short2int2>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Short2int2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Integer2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Integer ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Integer2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Integer2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Long2float2
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Long ff = 88L;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Long2float2>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Long2float2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_Float2double2
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        Float ff = 88F;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_Float2double2>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_Float2double2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowSecKeyField_int2BigInt
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int ff = 88;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowSecKeyField_int2BigInt>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowSecKeyField_int2BigInt.class);
+            index.put(this);
+        }
+    }
+
+    // --
+
+    @Entity
+    static class DisallowPriKeyField_byte2short
+        extends EvolveCase {
+
+        @PrimaryKey
+        byte key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,DisallowPriKeyField_byte2short>
+                index = store.getPrimaryIndex
+                    (Byte.class,
+                     DisallowPriKeyField_byte2short.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_char2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        char key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Character,DisallowPriKeyField_char2int>
+                index = store.getPrimaryIndex
+                    (Character.class,
+                     DisallowPriKeyField_char2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_short2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        short key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Short,DisallowPriKeyField_short2int>
+                index = store.getPrimaryIndex
+                    (Short.class,
+                     DisallowPriKeyField_short2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_int2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowPriKeyField_int2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowPriKeyField_int2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_long2float
+        extends EvolveCase {
+
+        @PrimaryKey
+        long key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,DisallowPriKeyField_long2float>
+                index = store.getPrimaryIndex
+                    (Long.class,
+                     DisallowPriKeyField_long2float.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_float2double
+        extends EvolveCase {
+
+        @PrimaryKey
+        float key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Float,DisallowPriKeyField_float2double>
+                index = store.getPrimaryIndex
+                    (Float.class,
+                     DisallowPriKeyField_float2double.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Byte2short2
+        extends EvolveCase {
+
+        @PrimaryKey
+        Byte key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,DisallowPriKeyField_Byte2short2>
+                index = store.getPrimaryIndex
+                    (Byte.class,
+                     DisallowPriKeyField_Byte2short2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Character2int
+        extends EvolveCase {
+
+        @PrimaryKey
+        Character key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Character,DisallowPriKeyField_Character2int>
+                index = store.getPrimaryIndex
+                    (Character.class,
+                     DisallowPriKeyField_Character2int.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Short2int2
+        extends EvolveCase {
+
+        @PrimaryKey
+        Short key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Short,DisallowPriKeyField_Short2int2>
+                index = store.getPrimaryIndex
+                    (Short.class,
+                     DisallowPriKeyField_Short2int2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Integer2long
+        extends EvolveCase {
+
+        @PrimaryKey
+        Integer key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowPriKeyField_Integer2long>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     DisallowPriKeyField_Integer2long.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Long2float2
+        extends EvolveCase {
+
+        @PrimaryKey
+        Long key = 99L;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,DisallowPriKeyField_Long2float2>
+                index = store.getPrimaryIndex
+                    (Long.class,
+                     DisallowPriKeyField_Long2float2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Float2double2
+        extends EvolveCase {
+
+        @PrimaryKey
+        Float key = 99F;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Float,DisallowPriKeyField_Float2double2>
+                index = store.getPrimaryIndex
+                    (Float.class,
+                     DisallowPriKeyField_Float2double2.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowPriKeyField_Long2BigInt
+        extends EvolveCase {
+
+        @PrimaryKey
+        Long key = 99L;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,DisallowPriKeyField_Long2BigInt>
+                index = store.getPrimaryIndex
+                    (Long.class,
+                     DisallowPriKeyField_Long2BigInt.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class DisallowCompositeKeyField_byte2short_Key {
+
+        @KeyField(1)
+        int f1 = 1;
+
+        @KeyField(2)
+        byte f2 = 2;
+
+        @KeyField(3)
+        String f3 = "3";
+    }
+
+    @Entity
+    static class DisallowCompositeKeyField_byte2short
+        extends EvolveCase {
+
+        @PrimaryKey
+        DisallowCompositeKeyField_byte2short_Key key =
+            new DisallowCompositeKeyField_byte2short_Key();
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<DisallowCompositeKeyField_byte2short_Key,
+                         DisallowCompositeKeyField_byte2short>
+                index = store.getPrimaryIndex
+                    (DisallowCompositeKeyField_byte2short_Key.class,
+                     DisallowCompositeKeyField_byte2short.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class AllowPriKeyField_byte2Byte
+        extends EvolveCase {
+
+        @PrimaryKey
+        byte key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+                index = store.getPrimaryIndex
+                    (Byte.class, AllowPriKeyField_byte2Byte.class);
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class AllowPriKeyField_Byte2byte2
+        extends EvolveCase {
+
+        @PrimaryKey
+        Byte key = 99;
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+                index = store.getPrimaryIndex
+                    (Byte.class, AllowPriKeyField_Byte2byte2.class);
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class AllowFieldTypeChanges_Key {
+
+        AllowFieldTypeChanges_Key() {
+            this(false);
+        }
+
+        AllowFieldTypeChanges_Key(boolean init) {
+            if (init) {
+                f1 = true;
+                f2 = (byte) 2;
+                f3 = (short) 3;
+                f4 = 4;
+                f5 = 5L;
+                f6 = 6F;
+                f7 = 7D;
+                f8 = (char) 8;
+                f9 = true;
+                f10 = (byte) 10;
+                f11 = (short) 11;
+                f12 = 12;
+                f13 = 13L;
+                f14 = 14F;
+                f15 = 15D;
+                f16 = (char) 16;
+            }
+        }
+
+        @KeyField(1)
+        boolean f1;
+
+        @KeyField(2)
+        byte f2;
+
+        @KeyField(3)
+        short f3;
+
+        @KeyField(4)
+        int f4;
+
+        @KeyField(5)
+        long f5;
+
+        @KeyField(6)
+        float f6;
+
+        @KeyField(7)
+        double f7;
+
+        @KeyField(8)
+        char f8;
+
+        @KeyField(9)
+        Boolean f9;
+
+        @KeyField(10)
+        Byte f10;
+
+        @KeyField(11)
+        Short f11;
+
+        @KeyField(12)
+        Integer f12;
+
+        @KeyField(13)
+        Long f13;
+
+        @KeyField(14)
+        Float f14;
+
+        @KeyField(15)
+        Double f15;
+
+        @KeyField(16)
+        Character f16;
+    }
+
+    @Persistent
+    static class AllowFieldTypeChanges_Base
+        extends EvolveCase {
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        AllowFieldTypeChanges_Key kcomposite =
+            new AllowFieldTypeChanges_Key(true);
+
+        long f_long2Integer = 111;
+        String f_String2Long = "222";
+    }
+
+    @Entity
+    static class AllowFieldTypeChanges
+        extends AllowFieldTypeChanges_Base {
+
+        @PrimaryKey
+        int pkeyint = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        boolean kboolean = true;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        byte kbyte = 77;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        short kshort = 66;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int kint = 55;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        long klong = 44;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        float kfloat = 33;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        double kdouble = 22;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        char kchar = 11;
+
+        byte f01;
+        byte f02;
+        byte f03;
+        byte f04;
+        byte f06;
+        short f07;
+        short f08;
+        short f09;
+        short f10;
+        char f11;
+        char f12;
+        char f13;
+        char f14;
+        int f15;
+        int f16;
+        int f17;
+        long f18;
+        long f19;
+        float f20;
+
+        byte f21;
+        byte f22;
+        byte f23;
+        byte f24;
+        byte f26;
+        short f27;
+        short f28;
+        short f29;
+        short f30;
+        char f31;
+        char f32;
+        char f33;
+        char f34;
+        int f35;
+        int f36;
+        int f37;
+        long f38;
+        long f39;
+        float f40;
+
+        Byte f41;
+        Byte f42;
+        Byte f43;
+        Byte f44;
+        Byte f46;
+        Short f47;
+        Short f48;
+        Short f49;
+        Short f50;
+        Character f51;
+        Character f52;
+        Character f53;
+        Character f54;
+        Integer f55;
+        Integer f56;
+        Integer f57;
+        Long f58;
+        Long f59;
+        Float f60;
+
+        byte f70;
+        short f71;
+        char f72;
+        int f73;
+        long f74;
+        Byte f75;
+        Short f76;
+        Character f77;
+        Integer f78;
+        Long f79;
+
+        long f_long2int = 333;
+        String f_String2long = "444";
+
+        private void init() {
+            f01 = (byte) 1;
+            f02 = (byte) 2;
+            f03 = (byte) 3;
+            f04 = (byte) 4;
+            f06 = (byte) 6;
+            f07 = (short) 7;
+            f08 = (short) 8;
+            f09 = (short) 9;
+            f10 = (short) 10;
+            f11 = (char) 11;
+            f12 = (char) 12;
+            f13 = (char) 13;
+            f14 = (char) 14;
+            f15 = 15;
+            f16 = 16;
+            f17 = 17;
+            f18 = (long) 18;
+            f19 = (long) 19;
+            f20 = (float) 20;
+
+            f21 = (byte) 21;
+            f22 = (byte) 22;
+            f23 = (byte) 23;
+            f24 = (byte) 24;
+            f26 = (byte) 26;
+            f27 = (short) 27;
+            f28 = (short) 28;
+            f29 = (short) 29;
+            f30 = (short) 30;
+            f31 = (char) 31;
+            f32 = (char) 32;
+            f33 = (char) 33;
+            f34 = (char) 34;
+            f35 = 35;
+            f36 = 36;
+            f37 = 37;
+            f38 = (long) 38;
+            f39 = (long) 39;
+            f40 = (float) 40;
+
+            f41 = (byte) 41;
+            f42 = (byte) 42;
+            f43 = (byte) 43;
+            f44 = (byte) 44;
+            f46 = (byte) 46;
+            f47 = (short) 47;
+            f48 = (short) 48;
+            f49 = (short) 49;
+            f50 = (short) 50;
+            f51 = (char) 51;
+            f52 = (char) 52;
+            f53 = (char) 53;
+            f54 = (char) 54;
+            f55 = 55;
+            f56 = 56;
+            f57 = 57;
+            f58 = (long) 58;
+            f59 = (long) 59;
+            f60 = (float) 60;
+
+            f70 = (byte) 70;
+            f71 = (short) 71;
+            f72 = (char) 72;
+            f73 = 73;
+            f74 = (long) 74;
+            f75 = (byte) 75;
+            f76 = (short) 76;
+            f77 = (char) 77;
+            f78 = 78;
+            f79 = (long) 79;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldTypeChanges>
+                index = store.getPrimaryIndex
+                    (Integer.class, AllowFieldTypeChanges.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class ConvertFieldContent_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        String f1;
+        String f2;
+
+        private void init() {
+            key = 99;
+            f1 = "01234";
+            f2 = "56789";
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertFieldContent_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertFieldContent_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class ConvertExample1_Address {
+        String street;
+        String city;
+        String state;
+        String zipCode;
+    }
+
+    @Entity
+    static class ConvertExample1_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample1_Address embed;
+
+        private void init() {
+            key = 99;
+            embed = new ConvertExample1_Address();
+            embed.street = "street";
+            embed.city = "city";
+            embed.state = "state";
+            embed.zipCode = "12345";
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample1_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample1_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class ConvertExample2_Person
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        String address;
+
+        private void init() {
+            key = 99;
+            address = "street#city#state#12345";
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample2_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample2_Person.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class ConvertExample3_Person
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        String street;
+        String city;
+        String state;
+        int zipCode;
+
+        private void init() {
+            key = 99;
+            street = "street";
+            city = "city";
+            state = "state";
+            zipCode = 12345;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample3_Person.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class ConvertExample3Reverse_Address {
+        String street;
+        String city;
+        String state;
+        int zipCode;
+    }
+
+    @Entity
+    static class ConvertExample3Reverse_Person
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample3Reverse_Address address;
+
+        private void init() {
+            key = 99;
+            address = new ConvertExample3Reverse_Address();
+            address.street = "street";
+            address.city = "city";
+            address.state = "state";
+            address.zipCode = 12345;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample3Reverse_Person.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class ConvertExample4_A extends ConvertExample4_B {
+        String name;
+    }
+
+    @Persistent
+    static class ConvertExample4_B {
+    }
+
+    @Entity
+    static class ConvertExample4_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample4_A embed;
+
+        private void init() {
+            key = 99;
+            embed = new ConvertExample4_A();
+            embed.name = "name";
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample4_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample4_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class ConvertExample5_Pet {
+        String name;
+        boolean isCatNotDog;
+        int finickyLevel;
+        double barkVolume;
+    }
+
+    @Entity
+    static class ConvertExample5_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ConvertExample5_Pet cat;
+        ConvertExample5_Pet dog;
+
+        private void init() {
+            key = 99;
+            cat = new ConvertExample5_Pet();
+            cat.name = "Jeffry";
+            cat.isCatNotDog = true;
+            cat.finickyLevel = 999;
+            dog = new ConvertExample5_Pet();
+            dog.name = "Nelson";
+            dog.isCatNotDog = false;
+            dog.barkVolume = 0.01;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ConvertExample5_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ConvertExample5_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class AllowFieldAddDelete_Embed {
+        private int f1 = 1;
+        private String f2 = "2";
+        private String f4 = "4";
+        private int f6 = 6;
+        private String f7 = "7";
+    }
+
+    @Persistent
+    static class AllowFieldAddDelete_Base
+        extends EvolveCase {
+
+        private int f1 = 1;
+        private String f2 = "2";
+        private String f4 = "4";
+        private int f6 = 6;
+        private String f7 = "7";
+    }
+
+    @Entity
+    static class AllowFieldAddDelete
+        extends AllowFieldAddDelete_Base {
+
+        @PrimaryKey
+        int key;
+
+        AllowFieldAddDelete_Embed embed;
+
+        private int f1 = 1;
+        private String f2 = "2";
+        private String f4 = "4";
+        private int f6 = 6;
+        private String f7 = "7";
+
+        private void init() {
+            key = 99;
+            embed = new AllowFieldAddDelete_Embed();
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowFieldAddDelete>
+                index = store.getPrimaryIndex
+                    (Integer.class, AllowFieldAddDelete.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    static class ProxiedClass {
+        int data;
+
+        ProxiedClass(int data) {
+            this.data = data;
+        }
+    }
+
+    @Persistent(proxyFor=ProxiedClass.class)
+    static class ProxiedClass_Proxy implements PersistentProxy<ProxiedClass> {
+        int data;
+
+        public void initializeProxy(ProxiedClass o) {
+            data = o.data;
+        }
+
+        public ProxiedClass convertProxy() {
+            return new ProxiedClass(data);
+        }
+    }
+
+    @Entity
+    static class ProxiedClass_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ProxiedClass embed;
+
+        private void init() {
+            key = 99;
+            embed = new ProxiedClass(88);
+        }
+
+        @Override
+        void configure(EntityModel model, StoreConfig config) {
+            model.registerClass(ProxiedClass_Proxy.class);
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ProxiedClass_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ProxiedClass_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent(proxyFor=StringBuffer.class)
+    static class DisallowChangeProxyFor_Proxy
+        implements PersistentProxy<StringBuffer> {
+
+        String data;
+
+        public void initializeProxy(StringBuffer o) {
+            data = o.toString();
+        }
+
+        public StringBuffer convertProxy() {
+            return new StringBuffer(data);
+        }
+    }
+
+    @Entity
+    static class DisallowChangeProxyFor
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        private void init() {
+            key = 99;
+        }
+
+        @Override
+        void configure(EntityModel model, StoreConfig config) {
+            model.registerClass(DisallowChangeProxyFor_Proxy.class);
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowChangeProxyFor>
+                index = store.getPrimaryIndex
+                    (Integer.class, DisallowChangeProxyFor.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent(proxyFor=StringBuffer.class)
+    static class DisallowDeleteProxyFor_Proxy
+        implements PersistentProxy<StringBuffer> {
+
+        String data;
+
+        public void initializeProxy(StringBuffer o) {
+            data = o.toString();
+        }
+
+        public StringBuffer convertProxy() {
+            return new StringBuffer(data);
+        }
+    }
+
+    @Entity
+    static class DisallowDeleteProxyFor
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        private void init() {
+            key = 99;
+        }
+
+        @Override
+        void configure(EntityModel model, StoreConfig config) {
+            model.registerClass(DisallowDeleteProxyFor_Proxy.class);
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowDeleteProxyFor>
+                index = store.getPrimaryIndex
+                    (Integer.class, DisallowDeleteProxyFor.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Persistent
+    static class ArrayNameChange_Component {
+
+        int data;
+    }
+
+    @Entity
+    static class ArrayNameChange_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        ArrayNameChange_Component[] embed;
+        ArrayNameChange_Component embed2;
+
+        private void init() {
+            key = 99;
+            embed2 = new ArrayNameChange_Component();
+            embed2.data = 88;
+            embed = new ArrayNameChange_Component[] { embed2 };
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,ArrayNameChange_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, ArrayNameChange_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    enum AddEnumConstant_Enum {
+        A, B;
+    }
+
+    @Entity
+    static class AddEnumConstant_Entity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        AddEnumConstant_Enum e1;
+        AddEnumConstant_Enum e2;
+
+        private void init() {
+            key = 99;
+            e1 = AddEnumConstant_Enum.A;
+            e2 = AddEnumConstant_Enum.B;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AddEnumConstant_Entity>
+                index = store.getPrimaryIndex
+                    (Integer.class, AddEnumConstant_Entity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    enum DeleteEnumConstant_Enum {
+        A, B, C;
+    }
+
+    @Entity
+    static class DeleteEnumConstant_NoMutation
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        DeleteEnumConstant_Enum e1;
+        DeleteEnumConstant_Enum e2;
+        DeleteEnumConstant_Enum e3;
+
+        private void init() {
+            key = 99;
+            e1 = DeleteEnumConstant_Enum.A;
+            e2 = DeleteEnumConstant_Enum.B;
+            e3 = DeleteEnumConstant_Enum.C;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DeleteEnumConstant_NoMutation>
+                index = store.getPrimaryIndex
+                    (Integer.class, DeleteEnumConstant_NoMutation.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class DisallowChangeKeyRelate
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int skey;
+
+        private void init() {
+            key = 99;
+            skey = 88;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,DisallowChangeKeyRelate>
+                index = store.getPrimaryIndex
+                    (Integer.class, DisallowChangeKeyRelate.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class AllowChangeKeyMetadata
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        int aa;
+
+        int addAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int dropField;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int dropAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int toBeRenamedField;
+
+        int ff;
+
+        private void init() {
+            key = 99;
+            addAnnotation = 88;
+            dropField = 77;
+            dropAnnotation = 66;
+            toBeRenamedField = 44;
+            aa = 33;
+            ff = 22;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadata>
+                index = store.getPrimaryIndex
+                    (Integer.class, AllowChangeKeyMetadata.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    /** [#16253] */
+    @Persistent
+    static class AllowChangeKeyMetadataInSubclass
+        extends AllowChangeKeyMetadataEntity {
+
+        int aa;
+
+        int addAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int dropField;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int dropAnnotation;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        int toBeRenamedField;
+
+        int ff;
+
+        private void init() {
+            key = 99;
+            addAnnotation = 88;
+            dropField = 77;
+            dropAnnotation = 66;
+            toBeRenamedField = 44;
+            aa = 33;
+            ff = 22;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+                index = store.getPrimaryIndex
+                    (Integer.class, AllowChangeKeyMetadataEntity.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    @Entity
+    static class AllowChangeKeyMetadataEntity
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+    }
+
+    /** [#15524] */
+    @Entity
+    static class AllowAddSecondary
+        extends EvolveCase {
+
+        @PrimaryKey
+        long key;
+
+        int a;
+        int b;
+
+        private void init() {
+            key = 99;
+            a = 1;
+            b = 2;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Long,AllowAddSecondary>
+                index = store.getPrimaryIndex
+                    (Long.class, AllowAddSecondary.class);
+            init();
+            index.put(this);
+        }
+    }
+
+    /** [#15797] */
+    @Entity
+    static class FieldAddAndConvert
+        extends EvolveCase {
+
+        @PrimaryKey
+        int key;
+
+        private int f1 = 1;
+        private int f3 = 3;
+
+        private void init() {
+            key = 99;
+        }
+
+        @Override
+        void writeObjects(EntityStore store)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer,FieldAddAndConvert>
+                index = store.getPrimaryIndex
+                    (Integer.class, FieldAddAndConvert.class);
+            init();
+            index.put(this);
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveTest.java b/test/com/sleepycat/persist/test/EvolveTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e449ab97a6419627b80c12739846d4bed9b61ce2
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveTest.java
@@ -0,0 +1,250 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveTest.java,v 1.13.2.1 2009/03/27 17:12:55 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.io.IOException;
+
+import junit.framework.Test;
+
+import com.sleepycat.persist.evolve.EvolveConfig;
+import com.sleepycat.persist.evolve.EvolveEvent;
+import com.sleepycat.persist.evolve.EvolveListener;
+import com.sleepycat.persist.evolve.EvolveStats;
+import com.sleepycat.persist.impl.PersistCatalog;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Runs part two of the EvolveTest.  This part is run with the new/updated
+ * version of EvolveClasses in the classpath.  It uses the environment and
+ * store created by EvolveTestInit.  It verifies that it can read/write/evolve
+ * objects serialized using the old class format, and that it can create new
+ * objects with the new class format.
+ *
+ * @author Mark Hayes
+ */
+public class EvolveTest extends EvolveTestBase {
+
+    /* Toggle to use listener every other test case. */
+    private static boolean useEvolveListener;
+
+    public static Test suite()
+        throws Exception {
+
+        return getSuite(EvolveTest.class);
+    }
+
+    private int evolveNRead;
+    private int evolveNConverted;
+
+    boolean useEvolvedClass() {
+        return true;
+    }
+
+    @Override
+    public void setUp()
+        throws IOException {
+
+        /* Copy the log files created by EvolveTestInit. */
+        envHome = getTestInitHome(true /*evolved*/);
+        envHome.mkdirs();
+        SharedTestUtils.emptyDir(envHome);
+        SharedTestUtils.copyFiles(getTestInitHome(false /*evolved*/), envHome);
+    }
+
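+    /**
+     * Checks lazy evolution: formats are evolved when the store is opened
+     * read-write, but stored objects are rewritten in the evolved format
+     * only when they are updated.
+     */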
+    public void testLazyEvolve()
+        throws Exception {
+
+        openEnv();
+
+        /*
+         * Open in raw mode to check unevolved raw objects and formats.  This
+         * is possible whether or not we can open the store further below to
+         * evolve formats without errors.
+         */
+        openRawStore();
+        caseObj.checkUnevolvedModel(rawStore.getModel(), env);
+        caseObj.readRawObjects
+            (rawStore, false /*expectEvolved*/, false /*expectUpdated*/);
+        closeRawStore();
+
+        if (openStoreReadWrite()) {
+
+            /*
+             * When opening read-write, formats are evolved lazily.  Check by
+             * reading evolved objects.
+             */
+            caseObj.checkEvolvedModel
+                (store.getModel(), env, true /*oldTypesExist*/);
+            caseObj.readObjects(store, false /*doUpdate*/);
+            closeStore();
+
+            /*
+             * Read raw objects again to check that the evolved objects are
+             * returned even though the stored objects were not evolved.
+             */
+            openRawStore();
+            caseObj.checkEvolvedModel
+                (rawStore.getModel(), env, true /*oldTypesExist*/);
+            caseObj.readRawObjects
+                (rawStore, true /*expectEvolved*/, false /*expectUpdated*/);
+            closeRawStore();
+
+            /*
+             * Open read-only to ensure that the catalog does not need to
+             * change (evolve formats) unnecessarily.
+             */
+            PersistCatalog.expectNoClassChanges = true;
+            try {
+                openStoreReadOnly();
+            } finally {
+                PersistCatalog.expectNoClassChanges = false;
+            }
+            caseObj.checkEvolvedModel
+                (store.getModel(), env, true /*oldTypesExist*/);
+            caseObj.readObjects(store, false /*doUpdate*/);
+            closeStore();
+
+            /*
+             * Open read-write to update objects and store them in evolved
+             * format.
+             */
+            openStoreReadWrite();
+            caseObj.checkEvolvedModel
+                (store.getModel(), env, true /*oldTypesExist*/);
+            caseObj.readObjects(store, true /*doUpdate*/);
+            caseObj.checkEvolvedModel
+                (store.getModel(), env, true /*oldTypesExist*/);
+            closeStore();
+
+            /*
+             * Check raw objects again after the evolved objects were stored.
+             */
+            openRawStore();
+            caseObj.checkEvolvedModel
+                (rawStore.getModel(), env, true /*oldTypesExist*/);
+            caseObj.readRawObjects
+                (rawStore, true /*expectEvolved*/, true /*expectUpdated*/);
+            closeRawStore();
+        }
+
+        closeAll();
+    }
+
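+    /**
+     * Checks eager evolution: EntityStore.evolve converts all unevolved
+     * entities up front, so subsequent reads encounter no unevolved formats.
+     */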
+    public void testEagerEvolve()
+        throws Exception {
+
+        /* If the store cannot be opened, this test is not appropriate. */
+        if (caseObj.getStoreOpenException() != null) {
+            return;
+        }
+
+        EvolveConfig config = new EvolveConfig();
+
+        /*
+         * Use listener every other time to ensure that the stats are returned
+         * correctly when no listener is configured. [#17024]
+         */
+        useEvolveListener = !useEvolveListener;
+        if (useEvolveListener) {
+            config.setEvolveListener(new EvolveListener() {
+                public boolean evolveProgress(EvolveEvent event) {
+                    EvolveStats stats = event.getStats();
+                    evolveNRead = stats.getNRead();
+                    evolveNConverted = stats.getNConverted();
+                    return true;
+                }
+            });
+        }
+
+        openEnv();
+
+        openStoreReadWrite();
+
+        /*
+         * Evolve and check that the expected number of entities are read and
+         * converted.
+         */
+        int nExpected = caseObj.getNRecordsExpected();
+        evolveNRead = 0;
+        evolveNConverted = 0;
+        PersistCatalog.unevolvedFormatsEncountered = false;
+        EvolveStats stats = store.evolve(config);
+        if (nExpected > 0) {
+            assertTrue(PersistCatalog.unevolvedFormatsEncountered);
+        }
+        assertEquals(nExpected, stats.getNRead());
+        assertEquals(nExpected, stats.getNConverted());
+        assertTrue(stats.getNConverted() >= stats.getNRead());
+        if (useEvolveListener) {
+            assertEquals(evolveNRead, stats.getNRead());
+            assertEquals(evolveNConverted, stats.getNConverted());
+        }
+
+        /* Evolve again and expect that no entities are converted. */
+        evolveNRead = 0;
+        evolveNConverted = 0;
+        PersistCatalog.unevolvedFormatsEncountered = false;
+        stats = store.evolve(config);
+        assertTrue(!PersistCatalog.unevolvedFormatsEncountered);
+        assertEquals(0, stats.getNRead());
+        assertEquals(0, stats.getNConverted());
+        if (useEvolveListener) {
+            assertEquals(0, evolveNRead);
+            assertEquals(0, evolveNConverted);
+        }
+
+        /* Ensure that we can read all entities without evolution. */
+        PersistCatalog.unevolvedFormatsEncountered = false;
+        caseObj.readObjects(store, false /*doUpdate*/);
+        assertTrue(!PersistCatalog.unevolvedFormatsEncountered);
+
+        /*
+         * When automatic deletion of unused types is implemented in the
+         * future, the oldTypesExist parameters below should be changed to
+         * false.
+         */
+
+        /* With the store still open, check the model and try an update. */
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        caseObj.readObjects(store, true /*doUpdate*/);
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        closeStore();
+
+        /* Open read-only and double check that everything is OK. */
+        openStoreReadOnly();
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        caseObj.readObjects(store, false /*doUpdate*/);
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        closeStore();
+
+        /* Check raw objects. */
+        openRawStore();
+        caseObj.checkEvolvedModel
+            (rawStore.getModel(), env, true /*oldTypesExist*/);
+        caseObj.readRawObjects
+            (rawStore, true /*expectEvolved*/, true /*expectUpdated*/);
+
+        /*
+         * Test copying raw objects to a new store via convertRawObject.  In
+         * this test we can pass false for oldTypesExist because newStore
+         * starts with the new/evolved class model.
+         */
+        openNewStore();
+        caseObj.copyRawObjects(rawStore, newStore);
+        caseObj.readObjects(newStore, true /*doUpdate*/);
+        caseObj.checkEvolvedModel
+            (newStore.getModel(), env, false /*oldTypesExist*/);
+        closeNewStore();
+        closeRawStore();
+
+        closeAll();
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveTestBase.java b/test/com/sleepycat/persist/test/EvolveTestBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..171ffa87561451f91f356dc4c88e8316c77b0641
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveTestBase.java
@@ -0,0 +1,426 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveTestBase.java,v 1.15.2.1 2009/03/27 17:12:55 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Base class for EvolveTest and EvolveTestInit.
+ *
+ * @author Mark Hayes
+ */
+public abstract class EvolveTestBase extends TestCase {
+
+    /*
+     * When adding an evolve test class, three places need to be changed:
+     * 1) Add the unmodified class to EvolveClass.java.original.
+     * 2) Add the modified class to EvolveClass.java.
+     * 3) Add the class name to the ALL list below as a pair of strings.  The
+     * first string in each pair is the name of the original class, and the
+     * second string is the name of the evolved class or null if the evolved
+     * name is the same as the original.  The index in the list identifies a
+     * test case, and the class at that position identifies the old and new
+     * class to use for the test.
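+     *
+     * For example, the pair
+     *     "DeletedEntity1_ClassRemoved",
+     *     "DeletedEntity1_ClassRemoved_NoMutation",
+     * names a case whose evolved class was renamed, while the pair
+     *     "DeletedEntity3_AnnotRemoved_NoMutation",
+     *     null,
+     * names a case whose evolved class keeps the original name.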
+     */
+    private static final String[] ALL = {
+        "DeletedEntity1_ClassRemoved",
+        "DeletedEntity1_ClassRemoved_NoMutation",
+        "DeletedEntity2_ClassRemoved",
+        "DeletedEntity2_ClassRemoved_WithDeleter",
+        "DeletedEntity3_AnnotRemoved_NoMutation",
+        null,
+        "DeletedEntity4_AnnotRemoved_WithDeleter",
+        null,
+        "DeletedEntity5_EntityToPersist_NoMutation",
+        null,
+        "DeletedEntity6_EntityToPersist_WithDeleter",
+        null,
+        "DeletedPersist1_ClassRemoved_NoMutation",
+        null,
+        "DeletedPersist2_ClassRemoved_WithDeleter",
+        null,
+        "DeletedPersist3_AnnotRemoved_NoMutation",
+        null,
+        "DeletedPersist4_AnnotRemoved_WithDeleter",
+        null,
+        "DeletedPersist5_PersistToEntity_NoMutation",
+        null,
+        "DeletedPersist6_PersistToEntity_WithDeleter",
+        null,
+        "RenamedEntity1_NewEntityName",
+        "RenamedEntity1_NewEntityName_NoMutation",
+        "RenamedEntity2_NewEntityName",
+        "RenamedEntity2_NewEntityName_WithRenamer",
+        "DeleteSuperclass1_NoMutation",
+        null,
+        "DeleteSuperclass2_WithConverter",
+        null,
+        "DeleteSuperclass3_WithDeleter",
+        null,
+        "DeleteSuperclass4_NoFields",
+        null,
+        "DeleteSuperclass5_Top",
+        null,
+        "InsertSuperclass1_Between",
+        null,
+        "InsertSuperclass2_Top",
+        null,
+        "DisallowNonKeyField_PrimitiveToObject",
+        null,
+        "DisallowNonKeyField_ObjectToPrimitive",
+        null,
+        "DisallowNonKeyField_ObjectToSubtype",
+        null,
+        "DisallowNonKeyField_ObjectToUnrelatedSimple",
+        null,
+        "DisallowNonKeyField_ObjectToUnrelatedOther",
+        null,
+        "DisallowNonKeyField_byte2boolean",
+        null,
+        "DisallowNonKeyField_short2byte",
+        null,
+        "DisallowNonKeyField_int2short",
+        null,
+        "DisallowNonKeyField_long2int",
+        null,
+        "DisallowNonKeyField_float2long",
+        null,
+        "DisallowNonKeyField_double2float",
+        null,
+        "DisallowNonKeyField_Byte2byte",
+        null,
+        "DisallowNonKeyField_Character2char",
+        null,
+        "DisallowNonKeyField_Short2short",
+        null,
+        "DisallowNonKeyField_Integer2int",
+        null,
+        "DisallowNonKeyField_Long2long",
+        null,
+        "DisallowNonKeyField_Float2float",
+        null,
+        "DisallowNonKeyField_Double2double",
+        null,
+        "DisallowNonKeyField_float2BigInt",
+        null,
+        "DisallowNonKeyField_BigInt2long",
+        null,
+        "DisallowSecKeyField_byte2short",
+        null,
+        "DisallowSecKeyField_char2int",
+        null,
+        "DisallowSecKeyField_short2int",
+        null,
+        "DisallowSecKeyField_int2long",
+        null,
+        "DisallowSecKeyField_long2float",
+        null,
+        "DisallowSecKeyField_float2double",
+        null,
+        "DisallowSecKeyField_Byte2short2",
+        null,
+        "DisallowSecKeyField_Character2int",
+        null,
+        "DisallowSecKeyField_Short2int2",
+        null,
+        "DisallowSecKeyField_Integer2long",
+        null,
+        "DisallowSecKeyField_Long2float2",
+        null,
+        "DisallowSecKeyField_Float2double2",
+        null,
+        "DisallowSecKeyField_int2BigInt",
+        null,
+        "DisallowPriKeyField_byte2short",
+        null,
+        "DisallowPriKeyField_char2int",
+        null,
+        "DisallowPriKeyField_short2int",
+        null,
+        "DisallowPriKeyField_int2long",
+        null,
+        "DisallowPriKeyField_long2float",
+        null,
+        "DisallowPriKeyField_float2double",
+        null,
+        "DisallowPriKeyField_Byte2short2",
+        null,
+        "DisallowPriKeyField_Character2int",
+        null,
+        "DisallowPriKeyField_Short2int2",
+        null,
+        "DisallowPriKeyField_Integer2long",
+        null,
+        "DisallowPriKeyField_Long2float2",
+        null,
+        "DisallowPriKeyField_Float2double2",
+        null,
+        "DisallowPriKeyField_Long2BigInt",
+        null,
+        "DisallowCompositeKeyField_byte2short",
+        null,
+        "AllowPriKeyField_Byte2byte2",
+        null,
+        "AllowPriKeyField_byte2Byte",
+        null,
+        "AllowFieldTypeChanges",
+        null,
+        "ConvertFieldContent_Entity",
+        null,
+        "ConvertExample1_Entity",
+        null,
+        "ConvertExample2_Person",
+        null,
+        "ConvertExample3_Person",
+        null,
+        "ConvertExample3Reverse_Person",
+        null,
+        "ConvertExample4_Entity",
+        null,
+        "ConvertExample5_Entity",
+        null,
+        "AllowFieldAddDelete",
+        null,
+        "ProxiedClass_Entity",
+        null,
+        "DisallowChangeProxyFor",
+        null,
+        "DisallowDeleteProxyFor",
+        null,
+        "ArrayNameChange_Entity",
+        null,
+        "AddEnumConstant_Entity",
+        null,
+        "DeleteEnumConstant_NoMutation",
+        null,
+        "DisallowChangeKeyRelate",
+        null,
+        "AllowChangeKeyMetadata",
+        null,
+        "AllowChangeKeyMetadataInSubclass",
+        null,
+        "AllowAddSecondary",
+        null,
+        "FieldAddAndConvert",
+        null,
+    };
+
+    File envHome;
+    Environment env;
+    EntityStore store;
+    RawStore rawStore;
+    EntityStore newStore;
+    String caseClsName;
+    Class caseCls;
+    EvolveCase caseObj;
+    String caseLabel;
+
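+    /**
+     * Builds a suite containing every test method in testClass once for each
+     * original/evolved class pair in ALL.
+     */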
+    static TestSuite getSuite(Class testClass)
+        throws Exception {
+
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < ALL.length; i += 2) {
+            String originalClsName = ALL[i];
+            String evolvedClsName = ALL[i + 1];
+            if (evolvedClsName == null) {
+                evolvedClsName = originalClsName;
+            }
+            TestSuite baseSuite = new TestSuite(testClass);
+            Enumeration e = baseSuite.tests();
+            while (e.hasMoreElements()) {
+                EvolveTestBase test = (EvolveTestBase) e.nextElement();
+                test.init(originalClsName, evolvedClsName);
+                suite.addTest(test);
+            }
+        }
+        return suite;
+    }
+
+    private void init(String originalClsName,
+                      String evolvedClsName) 
+        throws Exception {
+
+        String caseClsName = useEvolvedClass() ?
+            evolvedClsName : originalClsName;
+        caseClsName = "com.sleepycat.persist.test.EvolveClasses$" +
+                      caseClsName;
+
+        this.caseClsName = caseClsName;
+        this.caseCls = Class.forName(caseClsName);
+        this.caseObj = (EvolveCase) caseCls.newInstance();
+        this.caseLabel = evolvedClsName;
+    }
+
+    abstract boolean useEvolvedClass();
+
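+    /**
+     * Returns the per-case environment directory under the "testevolvedir"
+     * system property: {testevolvedir}/original/{caseLabel} or
+     * {testevolvedir}/evolved/{caseLabel}.
+     */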
+    File getTestInitHome(boolean evolved) {
+        return new File
+            (System.getProperty("testevolvedir"),
+             (evolved ? "evolved" : "original") + '/' + caseLabel);
+    }
+
+    @Override
+    public void tearDown() {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(caseLabel + '-' + getName());
+
+        if (env != null) {
+            try {
+                closeAll();
+            } catch (Throwable e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        envHome = null;
+        env = null;
+        store = null;
+        caseCls = null;
+        caseObj = null;
+        caseLabel = null;
+
+        /* Do not delete log files; they are used by the 2nd phase of the test. */
+    }
+
+    void openEnv()
+        throws IOException, DatabaseException {
+
+        EnvironmentConfig config = TestEnv.TXN.getConfig();
+        config.setAllowCreate(true);
+        env = new Environment(envHome, config);
+    }
+
+    /**
+     * Returns true if the store was opened successfully.  Returns false if
+     * the store could not be opened because an exception was expected -- this
+     * is not a test failure, but no further tests for an EntityStore may be
+     * run.
+     */
+    private boolean openStore(StoreConfig config)
+        throws Exception {
+
+        config.setTransactional(true);
+        config.setMutations(caseObj.getMutations());
+
+        EntityModel model = new AnnotationModel();
+        config.setModel(model);
+        caseObj.configure(model, config);
+
+        String expectException = caseObj.getStoreOpenException();
+        try {
+            store = new EntityStore(env, EvolveCase.STORE_NAME, config);
+            if (expectException != null) {
+                fail("Expected: " + expectException);
+            }
+        } catch (Exception e) {
+            if (expectException != null) {
+                //e.printStackTrace();
+                EvolveCase.checkEquals(expectException, e.toString());
+                return false;
+            } else {
+                throw e;
+            }
+        }
+        return true;
+    }
+
+    boolean openStoreReadOnly()
+        throws Exception {
+
+        StoreConfig config = new StoreConfig();
+        config.setReadOnly(true);
+        return openStore(config);
+    }
+
+    boolean openStoreReadWrite()
+        throws Exception {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(true);
+        return openStore(config);
+    }
+
+    void openRawStore()
+        throws DatabaseException {
+
+        rawStore = new RawStore(env, EvolveCase.STORE_NAME, null);
+    }
+
+    void closeStore()
+        throws DatabaseException {
+
+        if (store != null) {
+            store.close();
+            store = null;
+        }
+    }
+
+    void openNewStore()
+        throws Exception {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(true);
+        config.setTransactional(true);
+
+        EntityModel model = new AnnotationModel();
+        config.setModel(model);
+        caseObj.configure(model, config);
+
+        newStore = new EntityStore(env, "new", config);
+    }
+
+    void closeNewStore()
+        throws DatabaseException {
+
+        if (newStore != null) {
+            newStore.close();
+            newStore = null;
+        }
+    }
+
+    void closeRawStore()
+        throws DatabaseException {
+
+        if (rawStore != null) {
+            rawStore.close();
+            rawStore = null;
+        }
+    }
+
+    void closeEnv()
+        throws DatabaseException {
+
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    void closeAll()
+        throws DatabaseException {
+
+        closeStore();
+        closeRawStore();
+        closeNewStore();
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveTestInit.java b/test/com/sleepycat/persist/test/EvolveTestInit.java
new file mode 100644
index 0000000000000000000000000000000000000000..70d35b9e137a2453999708ab29cac6386e1b9821
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveTestInit.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: EvolveTestInit.java,v 1.7 2008/02/05 23:28:28 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.io.IOException;
+
+import junit.framework.Test;
+
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Runs part one of the EvolveTest.  This part is run with the old/original
+ * version of EvolveClasses in the classpath.  It creates a fresh environment
+ * and store containing instances of the original class.  When EvolveTest is
+ * run, it will read/write/evolve these objects from the store created here.
+ *
+ * @author Mark Hayes
+ */
+public class EvolveTestInit extends EvolveTestBase {
+
+    public static Test suite()
+        throws Exception {
+
+        return getSuite(EvolveTestInit.class);
+    }
+
+    boolean useEvolvedClass() {
+        return false;
+    }
+
+    @Override
+    public void setUp()
+        throws IOException {
+
+        envHome = getTestInitHome(false /*evolved*/);
+        envHome.mkdirs();
+        SharedTestUtils.emptyDir(envHome);
+    }
+
+    public void testInit()
+        throws Exception {
+
+        openEnv();
+        if (!openStoreReadWrite()) {
+            fail();
+        }
+        caseObj.writeObjects(store);
+        caseObj.checkUnevolvedModel(store.getModel(), env);
+        closeAll();
+    }
+}
diff --git a/test/com/sleepycat/persist/test/ForeignKeyTest.java b/test/com/sleepycat/persist/test/ForeignKeyTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..8fee59e85cd781ec3c01673d9545b9d48dba4929
--- /dev/null
+++ b/test/com/sleepycat/persist/test/ForeignKeyTest.java
@@ -0,0 +1,324 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: ForeignKeyTest.java,v 1.10 2008/02/05 23:28:28 mark Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.DeleteAction.ABORT;
+import static com.sleepycat.persist.model.DeleteAction.CASCADE;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.DeleteAction;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
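+ * Tests that the onRelatedEntityDelete actions (ABORT, NULLIFY and CASCADE)
+ * declared via @SecondaryKey(relatedEntity=...) are enforced, using both the
+ * Entity2 base class and its Entity3 subclass.
+ *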
+ * @author Mark Hayes
+ */
+public class ForeignKeyTest extends TxnTestCase {
+
+    private static final DeleteAction[] ACTIONS = {
+        ABORT,
+        NULLIFY,
+        CASCADE,
+    };
+
+    private static final String[] ACTION_LABELS = {
+        "ABORT",
+        "NULLIFY",
+        "CASCADE",
+    };
+
+    public static Test suite() {
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < ACTIONS.length; i += 1) {
+            for (int j = 0; j < 2; j++) {
+                TestSuite txnSuite = txnTestSuite
+                    (ForeignKeyTest.class, null, null);
+                Enumeration e = txnSuite.tests();
+                while (e.hasMoreElements()) {
+                    ForeignKeyTest test = (ForeignKeyTest) e.nextElement();
+                    test.onDelete = ACTIONS[i];
+                    test.onDeleteLabel = ACTION_LABELS[i];
+                    test.useSubclass = (j == 0);
+                    test.useSubclassLabel =
+                        (j == 0) ? "UseSubclass" : "UseBaseclass";
+                    suite.addTest(test);
+                }
+            }
+        }
+        return suite;
+    }
+
+    private EntityStore store;
+    private PrimaryIndex<String,Entity1> pri1;
+    private PrimaryIndex<String,Entity2> pri2;
+    private SecondaryIndex<String,String,Entity1> sec1;
+    private SecondaryIndex<String,String,Entity2> sec2;
+    private DeleteAction onDelete;
+    private String onDeleteLabel;
+    private boolean useSubclass;
+    private String useSubclassLabel;
+    
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        setName(getName() + '-' + onDeleteLabel + "-" + useSubclassLabel);
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        config.setTransactional(envConfig.getTransactional());
+
+        store = new EntityStore(env, "test", config);
+
+        pri1 = store.getPrimaryIndex(String.class, Entity1.class);
+        sec1 = store.getSecondaryIndex(pri1, String.class, "sk");
+        pri2 = store.getPrimaryIndex(String.class, Entity2.class);
+        sec2 = store.getSecondaryIndex
+            (pri2, String.class, "sk_" + onDeleteLabel);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+    }
+
+    public void testForeignKeys()
+        throws Exception {
+
+        open();
+        Transaction txn = txnBegin();
+
+        Entity1 o1 = new Entity1("pk1", "sk1");
+        assertNull(pri1.put(txn, o1));
+
+        assertEquals(o1, pri1.get(txn, "pk1", null));
+        assertEquals(o1, sec1.get(txn, "sk1", null));
+
+        Entity2 o2 = (useSubclass ?
+                      new Entity3("pk2", "pk1", onDelete) :
+                      new Entity2("pk2", "pk1", onDelete));
+        assertNull(pri2.put(txn, o2));
+
+        assertEquals(o2, pri2.get(txn, "pk2", null));
+        assertEquals(o2, sec2.get(txn, "pk1", null));
+
+        txnCommit(txn);
+        txn = txnBegin();
+
+        /*
+         * pri1 contains o1 with primary key "pk1" and index key "sk1".
+         *
+         * pri2 contains o2 with primary key "pk2" and foreign key "pk1",
+         * which is the primary key of pri1.
+         */
+        if (onDelete == ABORT) {
+
+            /* Test that we abort trying to delete a referenced key. */
+
+            try {
+                pri1.delete(txn, "pk1");
+                fail();
+            } catch (DatabaseException expected) {
+                txnAbort(txn);
+                txn = txnBegin();
+            }
+
+            /*
+             * Test that we can put a record into store2 with a null foreign
+             * key value.
+             */
+            o2 = (useSubclass ?
+                  new Entity3("pk2", null, onDelete) :
+                  new Entity2("pk2", null, onDelete));
+            assertNotNull(pri2.put(txn, o2));
+            assertEquals(o2, pri2.get(txn, "pk2", null));
+
+            /*
+             * The index2 record should have been deleted since the key was
+             * set to null above.
+             */
+            assertNull(sec2.get(txn, "pk1", null));
+
+            /*
+             * Test that now we can delete the record in store1, since it is
+             * no longer referenced.
+             */
+            assertNotNull(pri1.delete(txn, "pk1"));
+            assertNull(pri1.get(txn, "pk1", null));
+            assertNull(sec1.get(txn, "sk1", null));
+
+        } else if (onDelete == NULLIFY) {
+
+            /* Delete the referenced key. */
+            assertNotNull(pri1.delete(txn, "pk1"));
+            assertNull(pri1.get(txn, "pk1", null));
+            assertNull(sec1.get(txn, "sk1", null));
+
+            /*
+             * The store2 record should still exist, but should have a null
+             * secondary key since it was nullified.
+             */
+            o2 = pri2.get(txn, "pk2", null);
+            assertNotNull(o2);
+            assertEquals("pk2", o2.pk);
+            assertEquals(null, o2.getSk(onDelete));
+
+        } else if (onDelete == CASCADE) {
+
+            /* Delete the referenced key. */
+            assertNotNull(pri1.delete(txn, "pk1"));
+            assertNull(pri1.get(txn, "pk1", null));
+            assertNull(sec1.get(txn, "sk1", null));
+
+            /* The store2 record should also have been deleted. */
+            assertNull(pri2.get(txn, "pk2", null));
+            assertNull(sec2.get(txn, "pk1", null));
+
+        } else {
+            throw new IllegalStateException();
+        }
+
+        /*
+         * Test that a foreign key value that is not present in the foreign
+         * store may not be used.  "pk2" is not in store1 in this case.
+         */
+        Entity2 o3 = (useSubclass ?
+                      new Entity3("pk3", "pk2", onDelete) :
+                      new Entity2("pk3", "pk2", onDelete));
+        try {
+            pri2.put(txn, o3);
+            fail();
+        } catch (DatabaseException expected) {
+        }
+
+        txnCommit(txn);
+        close();
+    }
+
+    @Entity
+    static class Entity1 {
+
+        @PrimaryKey
+        String pk;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        String sk;
+
+        private Entity1() {}
+
+        Entity1(String pk, String sk) {
+            this.pk = pk;
+            this.sk = sk;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            Entity1 o = (Entity1) other;
+            return nullOrEqual(pk, o.pk) &&
+                   nullOrEqual(sk, o.sk);
+        }
+    }
+
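+    /**
+     * Entity2 declares one secondary key field per DeleteAction; the
+     * constructor sets only the field for the action under test, so each
+     * test case exercises a single foreign key constraint.
+     */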
+    @Entity
+    static class Entity2 {
+
+        @PrimaryKey
+        String pk;
+
+        @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+                                         onRelatedEntityDelete=ABORT)
+        String sk_ABORT;
+
+        @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+                                         onRelatedEntityDelete=CASCADE)
+        String sk_CASCADE;
+
+        @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+                                         onRelatedEntityDelete=NULLIFY)
+        String sk_NULLIFY;
+
+        private Entity2() {}
+
+        Entity2(String pk, String sk, DeleteAction action) {
+            this.pk = pk;
+            switch (action) {
+            case ABORT:
+                sk_ABORT = sk;
+                break;
+            case CASCADE:
+                sk_CASCADE = sk;
+                break;
+            case NULLIFY:
+                sk_NULLIFY = sk;
+                break;
+            default:
+                throw new IllegalArgumentException();
+            }
+        }
+
+        String getSk(DeleteAction action) {
+            switch (action) {
+            case ABORT:
+                return sk_ABORT;
+            case CASCADE:
+                return sk_CASCADE;
+            case NULLIFY:
+                return sk_NULLIFY;
+            default:
+                throw new IllegalArgumentException();
+            }
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            Entity2 o = (Entity2) other;
+            return nullOrEqual(pk, o.pk) &&
+                   nullOrEqual(sk_ABORT, o.sk_ABORT) &&
+                   nullOrEqual(sk_CASCADE, o.sk_CASCADE) &&
+                   nullOrEqual(sk_NULLIFY, o.sk_NULLIFY);
+        }
+    }
+
+    @Persistent
+    static class Entity3 extends Entity2 {
+        Entity3() {}
+
+        Entity3(String pk, String sk, DeleteAction action) {
+            super(pk, sk, action);
+        }
+    }
+
+    static boolean nullOrEqual(Object o1, Object o2) {
+        if (o1 == null) {
+            return o2 == null;
+        } else {
+            return o1.equals(o2);
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/IndexTest.java b/test/com/sleepycat/persist/test/IndexTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..770f3f1abfdac0dfbd2550cd77ca03d6d87fdd47
--- /dev/null
+++ b/test/com/sleepycat/persist/test/IndexTest.java
@@ -0,0 +1,864 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: IndexTest.java,v 1.19.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import junit.framework.Test;
+
+import com.sleepycat.collections.MapEntryParameter;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests EntityIndex and EntityCursor in all their permutations.
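+ * Each test inserts N_RECORDS entities whose secondary keys are derived from
+ * the primary key (see the per-test javadoc) and then verifies the indexes
+ * through both the typed and raw APIs.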
+ *
+ * @author Mark Hayes
+ */
+public class IndexTest extends TxnTestCase {
+
+    private static final int N_RECORDS = 5;
+    private static final int THREE_TO_ONE = 3;
+
+    public static Test suite() {
+        return txnTestSuite(IndexTest.class, null,
+                            null);
+                            //new String[] { TxnTestCase.TXN_NULL});
+    }
+
+    private EntityStore store;
+    private PrimaryIndex<Integer,MyEntity> primary;
+    private SecondaryIndex<Integer,Integer,MyEntity> oneToOne;
+    private SecondaryIndex<Integer,Integer,MyEntity> manyToOne;
+    private SecondaryIndex<Integer,Integer,MyEntity> oneToMany;
+    private SecondaryIndex<Integer,Integer,MyEntity> manyToMany;
+    private RawStore rawStore;
+    private RawType entityType;
+    private PrimaryIndex<Object,RawObject> primaryRaw;
+    private SecondaryIndex<Object,Object,RawObject> oneToOneRaw;
+    private SecondaryIndex<Object,Object,RawObject> manyToOneRaw;
+    private SecondaryIndex<Object,Object,RawObject> oneToManyRaw;
+    private SecondaryIndex<Object,Object,RawObject> manyToManyRaw;
+
+    /**
+     * Opens the store.
+     */
+    private void open()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        config.setTransactional(envConfig.getTransactional());
+
+        store = new EntityStore(env, "test", config);
+
+        primary = store.getPrimaryIndex(Integer.class, MyEntity.class);
+        oneToOne =
+            store.getSecondaryIndex(primary, Integer.class, "oneToOne");
+        manyToOne =
+            store.getSecondaryIndex(primary, Integer.class, "manyToOne");
+        oneToMany =
+            store.getSecondaryIndex(primary, Integer.class, "oneToMany");
+        manyToMany =
+            store.getSecondaryIndex(primary, Integer.class, "manyToMany");
+
+        assertNotNull(primary);
+        assertNotNull(oneToOne);
+        assertNotNull(manyToOne);
+        assertNotNull(oneToMany);
+        assertNotNull(manyToMany);
+
+        rawStore = new RawStore(env, "test", config);
+        String clsName = MyEntity.class.getName();
+        entityType = rawStore.getModel().getRawType(clsName);
+        assertNotNull(entityType);
+
+        primaryRaw = rawStore.getPrimaryIndex(clsName);
+        oneToOneRaw = rawStore.getSecondaryIndex(clsName, "oneToOne");
+        manyToOneRaw = rawStore.getSecondaryIndex(clsName, "manyToOne");
+        oneToManyRaw = rawStore.getSecondaryIndex(clsName, "oneToMany");
+        manyToManyRaw = rawStore.getSecondaryIndex(clsName, "manyToMany");
+
+        assertNotNull(primaryRaw);
+        assertNotNull(oneToOneRaw);
+        assertNotNull(manyToOneRaw);
+        assertNotNull(oneToManyRaw);
+        assertNotNull(manyToManyRaw);
+    }
+
+    /**
+     * Closes the store.
+     */
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        store = null;
+        rawStore.close();
+        rawStore = null;
+    }
+
+    /**
+     * The store must be closed before closing the environment.
+     */
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (rawStore != null) {
+                rawStore.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        try {
+            if (store != null) {
+                store.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        store = null;
+        rawStore = null;
+        super.tearDown();
+    }
+
+    /**
+     * Primary keys: {0, 1, 2, 3, 4}
+     */
+    public void testPrimary()
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expected =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            SortedSet<Integer> values = new TreeSet<Integer>();
+            values.add(priKey);
+            expected.put(priKey, values);
+        }
+
+        open();
+        addEntities(primary);
+        checkIndex(primary, expected, keyGetter, entityGetter);
+        checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+        /* Close and reopen, then recheck indices. */
+        close();
+        open();
+        checkIndex(primary, expected, keyGetter, entityGetter);
+        checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+        /* Check primary delete, last key first for variety. */
+        for (int priKey = N_RECORDS - 1; priKey >= 0; priKey -= 1) {
+            boolean useRaw = ((priKey & 1) != 0);
+            Transaction txn = txnBegin();
+            if (useRaw) {
+                primaryRaw.delete(txn, priKey);
+            } else {
+                primary.delete(txn, priKey);
+            }
+            txnCommit(txn);
+            expected.remove(priKey);
+            checkIndex(primary, expected, keyGetter, entityGetter);
+        }
+        checkAllEmpty();
+
+        /* Check PrimaryIndex put operations. */
+        MyEntity e;
+        Transaction txn = txnBegin();
+        /* put() */
+        e = primary.put(txn, new MyEntity(1));
+        assertNull(e);
+        e = primary.get(txn, 1, null);
+        assertEquals(1, e.key);
+        /* putNoReturn() */
+        primary.putNoReturn(txn, new MyEntity(2));
+        e = primary.get(txn, 2, null);
+        assertEquals(2, e.key);
+        /* putNoOverwrite */
+        assertTrue(!primary.putNoOverwrite(txn, new MyEntity(1)));
+        assertTrue(!primary.putNoOverwrite(txn, new MyEntity(2)));
+        assertTrue(primary.putNoOverwrite(txn, new MyEntity(3)));
+        e = primary.get(txn, 3, null);
+        assertEquals(3, e.key);
+        txnCommit(txn);
+        close();
+    }
+
+    /**
+     * { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 }
+     */
+    public void testOneToOne()
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expected =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            SortedSet<Integer> values = new TreeSet<Integer>();
+            values.add(priKey);
+            Integer secKey = (-priKey);
+            expected.put(secKey, values);
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(oneToOne, oneToOneRaw, expected);
+        checkDelete(oneToOne, oneToOneRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:0, 1:1, 2:2, 3:0, 4:1 }
+     */
+    public void testManyToOne()
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expected =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            Integer secKey = priKey % THREE_TO_ONE;
+            SortedSet<Integer> values = expected.get(secKey);
+            if (values == null) {
+                values = new TreeSet<Integer>();
+                expected.put(secKey, values);
+            }
+            values.add(priKey);
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(manyToOne, manyToOneRaw, expected);
+        checkDelete(manyToOne, manyToOneRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:{}, 1:{5}, 2:{10,11}, 3:{15,16,17}, 4:{20,21,22,23} }
+     */
+    public void testOneToMany()
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expected =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            for (int i = 0; i < priKey; i += 1) {
+                Integer secKey = (N_RECORDS * priKey) + i;
+                SortedSet<Integer> values = expected.get(secKey);
+                if (values == null) {
+                    values = new TreeSet<Integer>();
+                    expected.put(secKey, values);
+                }
+                values.add(priKey);
+            }
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(oneToMany, oneToManyRaw, expected);
+        checkDelete(oneToMany, oneToManyRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3} }
+     */
+    public void testManyToMany()
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expected =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            for (int i = 0; i < priKey; i += 1) {
+                Integer secKey = i;
+                SortedSet<Integer> values = expected.get(secKey);
+                if (values == null) {
+                    values = new TreeSet<Integer>();
+                    expected.put(secKey, values);
+                }
+                values.add(priKey);
+            }
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(manyToMany, manyToManyRaw, expected);
+        checkDelete(manyToMany, manyToManyRaw, expected);
+        close();
+    }
+
+    private void addEntities(PrimaryIndex<Integer,MyEntity> primary)
+        throws DatabaseException {
+
+        Transaction txn = txnBegin();
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            MyEntity prev = primary.put(txn, new MyEntity(priKey));
+            assertNull(prev);
+        }
+        txnCommit(txn);
+    }
+
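+    /**
+     * Deletes each secondary key in turn via SecondaryIndex.delete, checking
+     * the remaining mapping after each delete, and finally deletes all
+     * primary records via the raw store and checks that all indexes are
+     * empty.
+     */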
+    private void checkDelete(SecondaryIndex<Integer,Integer,MyEntity> index,
+                             SecondaryIndex<Object,Object,RawObject> indexRaw,
+                             SortedMap<Integer,SortedSet<Integer>> expected)
+        throws DatabaseException {
+
+        SortedMap<Integer,SortedSet<Integer>> expectedSubIndex =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
+        while (expected.size() > 0) {
+            Integer delSecKey = expected.firstKey();
+            SortedSet<Integer> deletedPriKeys = expected.remove(delSecKey);
+            for (SortedSet<Integer> priKeys : expected.values()) {
+                priKeys.removeAll(deletedPriKeys);
+            }
+            Transaction txn = txnBegin();
+            boolean deleted = index.delete(txn, delSecKey);
+            assertEquals(deleted, !deletedPriKeys.isEmpty());
+            deleted = index.delete(txn, delSecKey);
+            assertTrue(!deleted);
+            assertNull(index.get(txn, delSecKey, null));
+            txnCommit(txn);
+            checkSecondary(index, indexRaw, expected);
+        }
+
+        /*
+         * Delete remaining records so that the primary index is empty.  Use
+         * the RawStore for variety.
+         */
+        Transaction txn = txnBegin();
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            primaryRaw.delete(txn, priKey);
+        }
+        txnCommit(txn);
+        checkAllEmpty();
+    }
+
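+    /**
+     * Checks a secondary index, its keysIndex, and a subIndex for each
+     * secondary key against the expected secKey-to-priKeys mapping, using
+     * both the typed and raw stores.
+     */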
+    private void checkSecondary(SecondaryIndex<Integer,Integer,MyEntity> index,
+                                SecondaryIndex<Object,Object,RawObject>
+                                indexRaw,
+                                SortedMap<Integer,SortedSet<Integer>> expected)
+        throws DatabaseException {
+
+        checkIndex(index, expected, keyGetter, entityGetter);
+        checkIndex(index.keysIndex(), expected, keyGetter, keyGetter);
+
+        checkIndex(indexRaw, expected, rawKeyGetter, rawEntityGetter);
+        checkIndex(indexRaw.keysIndex(), expected, rawKeyGetter, rawKeyGetter);
+
+        SortedMap<Integer,SortedSet<Integer>> expectedSubIndex =
+            new TreeMap<Integer,SortedSet<Integer>>();
+
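+        /*
+         * The subIndex for a secondary key contains its matching entities
+         * keyed by primary key, so each expected value set is a singleton.
+         */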
+        for (Integer secKey : expected.keySet()) {
+            expectedSubIndex.clear();
+            for (Integer priKey : expected.get(secKey)) {
+                SortedSet<Integer> values = new TreeSet<Integer>();
+                values.add(priKey);
+                expectedSubIndex.put(priKey, values);
+            }
+            checkIndex(index.subIndex(secKey),
+                       expectedSubIndex,
+                       keyGetter,
+                       entityGetter);
+            checkIndex(indexRaw.subIndex(secKey),
+                       expectedSubIndex,
+                       rawKeyGetter,
+                       rawEntityGetter);
+        }
+    }
+
+    private <K,V> void checkIndex(EntityIndex<K,V> index,
+                                  SortedMap<Integer,SortedSet<Integer>>
+                                  expected,
+                                  Getter<K> kGetter,
+                                  Getter<V> vGetter)
+        throws DatabaseException {
+
+        SortedMap<K,V> map = index.sortedMap();
+
+        Transaction txn = txnBegin();
+        for (int i : expected.keySet()) {
+            K k = kGetter.fromInt(i);
+            SortedSet<Integer> dups = expected.get(i);
+            if (dups.isEmpty()) {
+
+                /* EntityIndex */
+                V v = index.get(txn, k, null);
+                assertNull(v);
+                assertTrue(!index.contains(txn, k, null));
+
+                /* Map/Collection */
+                v = map.get(i);
+                assertNull(v);
+                assertTrue(!map.containsKey(i));
+            } else {
+                int j = dups.first();
+
+                /* EntityIndex */
+                V v = index.get(txn, k, null);
+                assertNotNull(v);
+                assertEquals(j, vGetter.getKey(v));
+                assertTrue(index.contains(txn, k, null));
+
+                /* Map/Collection */
+                v = map.get(i);
+                assertNotNull(v);
+                assertEquals(j, vGetter.getKey(v));
+                assertTrue(map.containsKey(i));
+                assertTrue("" + i + ' ' + j + ' ' + v + ' ' + map,
+                           map.containsValue(v));
+                assertTrue(map.keySet().contains(i));
+                assertTrue(map.values().contains(v));
+                assertTrue
+                    (map.entrySet().contains(new MapEntryParameter(i, v)));
+            }
+        }
+        txnCommit(txn);
+
+        int keysSize = expandKeySize(expected);
+        int valuesSize = expandValueSize(expected);
+
+        /* EntityIndex.count */
+        assertEquals("keysSize=" + keysSize, (long) valuesSize, index.count());
+
+        /* Map/Collection size */
+        assertEquals(valuesSize, map.size());
+        assertEquals(valuesSize, map.values().size());
+        assertEquals(valuesSize, map.entrySet().size());
+        assertEquals(keysSize, map.keySet().size());
+
+        /* Map/Collection isEmpty */
+        assertEquals(valuesSize == 0, map.isEmpty());
+        assertEquals(valuesSize == 0, map.values().isEmpty());
+        assertEquals(valuesSize == 0, map.entrySet().isEmpty());
+        assertEquals(keysSize == 0, map.keySet().isEmpty());
+
+        txn = txnBeginCursor();
+
+        /* Unconstrained cursors. */
+        checkCursor
+            (index.keys(txn, null),
+             map.keySet(), true,
+             expandKeys(expected), kGetter);
+        checkCursor
+            (index.entities(txn, null),
+             map.values(), false,
+             expandValues(expected), vGetter);
+
+        /* Range cursors. */
+        if (expected.isEmpty()) {
+            checkOpenRanges(txn, 0, index, expected, kGetter, vGetter);
+            checkClosedRanges(txn, 0, 1, index, expected, kGetter, vGetter);
+        } else {
+            int firstKey = expected.firstKey();
+            int lastKey = expected.lastKey();
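+            /* Also check ranges that extend one key beyond each end. */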
+            for (int i = firstKey - 1; i <= lastKey + 1; i += 1) {
+                checkOpenRanges(txn, i, index, expected, kGetter, vGetter);
+                int j = i + 1;
+                if (j < lastKey + 1) {
+                    checkClosedRanges
+                        (txn, i, j, index, expected, kGetter, vGetter);
+                }
+            }
+        }
+
+        txnCommit(txn);
+    }
+
+    private <K,V> void checkOpenRanges(Transaction txn, int i,
+                                       EntityIndex<K,V> index,
+                                       SortedMap<Integer,SortedSet<Integer>>
+                                       expected,
+                                       Getter<K> kGetter,
+                                       Getter<V> vGetter)
+        throws DatabaseException {
+
+        SortedMap<K,V> map = index.sortedMap();
+        SortedMap<Integer,SortedSet<Integer>> rangeExpected;
+        K k = kGetter.fromInt(i);
+        K kPlusOne = kGetter.fromInt(i + 1);
+
+        /* Head range exclusive. */
+        rangeExpected = expected.headMap(i);
+        checkCursor
+            (index.keys(txn, null, false, k, false, null),
+             map.headMap(k).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, null, false, k, false, null),
+             map.headMap(k).values(), false,
+             expandValues(rangeExpected), vGetter);
+
+        /* Head range inclusive. */
+        rangeExpected = expected.headMap(i + 1);
+        checkCursor
+            (index.keys(txn, null, false, k, true, null),
+             map.headMap(kPlusOne).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, null, false, k, true, null),
+             map.headMap(kPlusOne).values(), false,
+             expandValues(rangeExpected), vGetter);
+
+        /* Tail range exclusive. */
+        rangeExpected = expected.tailMap(i + 1);
+        checkCursor
+            (index.keys(txn, k, false, null, false, null),
+             map.tailMap(kPlusOne).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, k, false, null, false, null),
+             map.tailMap(kPlusOne).values(), false,
+             expandValues(rangeExpected), vGetter);
+
+        /* Tail range inclusive. */
+        rangeExpected = expected.tailMap(i);
+        checkCursor
+            (index.keys(txn, k, true, null, false, null),
+             map.tailMap(k).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, k, true, null, false, null),
+             map.tailMap(k).values(), false,
+             expandValues(rangeExpected), vGetter);
+    }
+
+    private <K,V> void checkClosedRanges(Transaction txn, int i, int j,
+                                         EntityIndex<K,V> index,
+                                         SortedMap<Integer,SortedSet<Integer>>
+                                         expected,
+                                         Getter<K> kGetter,
+                                         Getter<V> vGetter)
+        throws DatabaseException {
+
+        SortedMap<K,V> map = index.sortedMap();
+        SortedMap<Integer,SortedSet<Integer>> rangeExpected;
+        K k = kGetter.fromInt(i);
+        K kPlusOne = kGetter.fromInt(i + 1);
+        K l = kGetter.fromInt(j);
+        K lPlusOne = kGetter.fromInt(j + 1);
+
+        /* Sub range exclusive. */
+        rangeExpected = expected.subMap(i + 1, j);
+        checkCursor
+            (index.keys(txn, k, false, l, false, null),
+             map.subMap(kPlusOne, l).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, k, false, l, false, null),
+             map.subMap(kPlusOne, l).values(), false,
+             expandValues(rangeExpected), vGetter);
+
+        /* Sub range inclusive. */
+        rangeExpected = expected.subMap(i, j + 1);
+        checkCursor
+            (index.keys(txn, k, true, l, true, null),
+             map.subMap(k, lPlusOne).keySet(), true,
+             expandKeys(rangeExpected), kGetter);
+        checkCursor
+            (index.entities(txn, k, true, l, true, null),
+             map.subMap(k, lPlusOne).values(), false,
+             expandValues(rangeExpected), vGetter);
+    }
+
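+    /**
+     * Returns the expected key ordering: for each secondary key, the key
+     * repeated once per duplicate primary record.
+     */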
+    private List<List<Integer>>
+        expandKeys(SortedMap<Integer,SortedSet<Integer>> map) {
+
+        List<List<Integer>> list = new ArrayList<List<Integer>>();
+        for (Integer key : map.keySet()) {
+            SortedSet<Integer> values = map.get(key);
+            List<Integer> dups = new ArrayList<Integer>();
+            for (int i = 0; i < values.size(); i += 1) {
+                dups.add(key);
+            }
+            list.add(dups);
+        }
+        return list;
+    }
+
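+    /**
+     * Returns the expected value ordering: for each secondary key, its
+     * primary keys in ascending order.
+     */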
+    private List<List<Integer>>
+        expandValues(SortedMap<Integer,SortedSet<Integer>> map) {
+
+        List<List<Integer>> list = new ArrayList<List<Integer>>();
+        for (SortedSet<Integer> values : map.values()) {
+            list.add(new ArrayList<Integer>(values));
+        }
+        return list;
+    }
+
+    private int expandKeySize(SortedMap<Integer,SortedSet<Integer>> map) {
+
+        int size = 0;
+        for (SortedSet<Integer> values : map.values()) {
+            if (values.size() > 0) {
+                size += 1;
+            }
+        }
+        return size;
+    }
+
+    private int expandValueSize(SortedMap<Integer,SortedSet<Integer>> map) {
+
+        int size = 0;
+        for (SortedSet<Integer> values : map.values()) {
+            size += values.size();
+        }
+        return size;
+    }
+
+    private <T> void checkCursor(EntityCursor<T> cursor,
+                                 Collection<T> collection,
+                                 boolean collectionIsKeySet,
+                                 List<List<Integer>> expected,
+                                 Getter<T> getter)
+        throws DatabaseException {
+
+        boolean first;
+        boolean firstDup;
+        Iterator<T> iterator = collection.iterator();
+
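+        /*
+         * Forward scan with next().  For value collections, the collection
+         * iterator is checked in parallel.
+         */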
+        for (List<Integer> dups : expected) {
+            for (int i : dups) {
+                T o = cursor.next();
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                /* Value iterator over duplicates. */
+                if (!collectionIsKeySet) {
+                    assertTrue(iterator.hasNext());
+                    o = iterator.next();
+                    assertNotNull(o);
+                    assertEquals(i, getter.getKey(o));
+                }
+            }
+        }
+
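+        /* Forward scan with first(), next() and nextDup(). */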
+        first = true;
+        for (List<Integer> dups : expected) {
+            firstDup = true;
+            for (int i : dups) {
+                T o = first ? cursor.first()
+                            : (firstDup ? cursor.next() : cursor.nextDup());
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                first = false;
+                firstDup = false;
+            }
+        }
+
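+        /*
+         * Forward scan over distinct keys with first() and nextNoDup().  For
+         * key sets, the collection iterator is checked in parallel.
+         */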
+        first = true;
+        for (List<Integer> dups : expected) {
+            if (!dups.isEmpty()) {
+                int i = dups.get(0);
+                T o = first ? cursor.first() : cursor.nextNoDup();
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                /* Key iterator over non-duplicates. */
+                if (collectionIsKeySet) {
+                    assertTrue(iterator.hasNext());
+                    o = iterator.next();
+                    assertNotNull(o);
+                    assertEquals(i, getter.getKey(o));
+                }
+                first = false;
+            }
+        }
+
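+        /* Build the expected lists in reverse order for the backward scans. */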
+        List<List<Integer>> reversed = new ArrayList<List<Integer>>();
+        for (List<Integer> dups : expected) {
+            ArrayList<Integer> reversedDups = new ArrayList<Integer>(dups);
+            Collections.reverse(reversedDups);
+            reversed.add(reversedDups);
+        }
+        Collections.reverse(reversed);
+
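+        /* Backward scan with last() and prev(). */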
+        first = true;
+        for (List<Integer> dups : reversed) {
+            for (int i : dups) {
+                T o = first ? cursor.last() : cursor.prev();
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                first = false;
+            }
+        }
+
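+        /* Backward scan with last(), prev() and prevDup(). */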
+        first = true;
+        for (List<Integer> dups : reversed) {
+            firstDup = true;
+            for (int i : dups) {
+                T o = first ? cursor.last()
+                            : (firstDup ? cursor.prev() : cursor.prevDup());
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                first = false;
+                firstDup = false;
+            }
+        }
+
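+        /* Backward scan over distinct keys with last() and prevNoDup(). */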
+        first = true;
+        for (List<Integer> dups : reversed) {
+            if (!dups.isEmpty()) {
+                int i = dups.get(0);
+                T o = first ? cursor.last() : cursor.prevNoDup();
+                assertNotNull(o);
+                assertEquals(i, getter.getKey(o));
+                first = false;
+            }
+        }
+
+        cursor.close();
+    }
+
+    private void checkAllEmpty()
+        throws DatabaseException {
+
+        checkEmpty(primary);
+        checkEmpty(oneToOne);
+        checkEmpty(oneToMany);
+        checkEmpty(manyToOne);
+        checkEmpty(manyToMany);
+    }
+
+    private <K,V> void checkEmpty(EntityIndex<K,V> index)
+        throws DatabaseException {
+
+        EntityCursor<K> keys = index.keys();
+        assertNull(keys.next());
+        assertTrue(!keys.iterator().hasNext());
+        keys.close();
+        EntityCursor<V> entities = index.entities();
+        assertNull(entities.next());
+        assertTrue(!entities.iterator().hasNext());
+        entities.close();
+    }
+
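+    /**
+     * Converts between int keys and the typed or raw representations used by
+     * the indexes under test.
+     */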
+    private interface Getter<T> {
+        int getKey(T o);
+        T fromInt(int i);
+    }
+
+    private static Getter<MyEntity> entityGetter =
+               new Getter<MyEntity>() {
+        public int getKey(MyEntity o) {
+            return o.key;
+        }
+        public MyEntity fromInt(int i) {
+            throw new UnsupportedOperationException();
+        }
+    };
+
+    private static Getter<Integer> keyGetter =
+               new Getter<Integer>() {
+        public int getKey(Integer o) {
+            return o;
+        }
+        public Integer fromInt(int i) {
+            return Integer.valueOf(i);
+        }
+    };
+
+    private static Getter<RawObject> rawEntityGetter =
+               new Getter<RawObject>() {
+        public int getKey(RawObject o) {
+            Object val = o.getValues().get("key");
+            return ((Integer) val).intValue();
+        }
+        public RawObject fromInt(int i) {
+            throw new UnsupportedOperationException();
+        }
+    };
+
+    private static Getter<Object> rawKeyGetter =
+               new Getter<Object>() {
+        public int getKey(Object o) {
+            return ((Integer) o).intValue();
+        }
+        public Object fromInt(int i) {
+            return Integer.valueOf(i);
+        }
+    };
+
+    @Entity
+    private static class MyEntity {
+
+        @PrimaryKey
+        private int key;
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        private int oneToOne;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private int manyToOne;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Set<Integer> oneToMany = new TreeSet<Integer>();
+
+        @SecondaryKey(relate=MANY_TO_MANY)
+        private Set<Integer> manyToMany = new TreeSet<Integer>();
+
+        private MyEntity() {}
+
+        private MyEntity(int key) {
+
+            /* example keys: {0, 1, 2, 3, 4} */
+            this.key = key;
+
+            /* { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 } */
+            oneToOne = -key;
+
+            /* { 0:0, 1:1, 2:2, 3:0, 4:1 } */
+            manyToOne = key % THREE_TO_ONE;
+
+            /* { 0:{}, 1:{10}, 2:{20,21}, 3:{30,31,32}, 4:{40,41,42,43} } */
+            for (int i = 0; i < key; i += 1) {
+                oneToMany.add((N_RECORDS * key) + i);
+            }
+
+            /* { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3} } */
+            for (int i = 0; i < key; i += 1) {
+                manyToMany.add(i);
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "MyEntity " + key;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/JoinTest.java b/test/com/sleepycat/persist/test/JoinTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..ea863d085507cf991004c6a2af235d6717befc43
--- /dev/null
+++ b/test/com/sleepycat/persist/test/JoinTest.java
@@ -0,0 +1,174 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: JoinTest.java,v 1.8.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityJoin;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.ForwardCursor;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * @author Mark Hayes
+ */
+public class JoinTest extends TxnTestCase {
+
+    private static final int N_RECORDS = 5;
+
+    public static Test suite() {
+        return txnTestSuite(JoinTest.class, null, null);
+    }
+
+    private EntityStore store;
+    private PrimaryIndex<Integer,MyEntity> primary;
+    private SecondaryIndex<Integer,Integer,MyEntity> sec1;
+    private SecondaryIndex<Integer,Integer,MyEntity> sec2;
+    private SecondaryIndex<Integer,Integer,MyEntity> sec3;
+
+    /**
+     * Opens the store.
+     */
+    private void open()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        config.setTransactional(envConfig.getTransactional());
+
+        store = new EntityStore(env, "test", config);
+
+        primary = store.getPrimaryIndex(Integer.class, MyEntity.class);
+        sec1 = store.getSecondaryIndex(primary, Integer.class, "k1");
+        sec2 = store.getSecondaryIndex(primary, Integer.class, "k2");
+        sec3 = store.getSecondaryIndex(primary, Integer.class, "k3");
+    }
+
+    /**
+     * Closes the store.
+     */
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+    }
+
+    public void testJoin()
+        throws DatabaseException {
+
+        open();
+
+        /*
+         * Primary keys: {   0,   1,   2,   3,   4 }
+         * Secondary k1: { 0:0, 0:1, 0:2, 0:3, 0:4 }
+         * Secondary k2: { 0:0, 1:1, 0:2, 1:3, 0:4 }
+         * Secondary k3: { 0:0, 1:1, 2:2, 0:3, 1:4 }
+         */
+        Transaction txn = txnBegin();
+        for (int i = 0; i < N_RECORDS; i += 1) {
+            MyEntity e = new MyEntity(i, 0, i % 2, i % 3);
+            boolean ok = primary.putNoOverwrite(txn, e);
+            assertTrue(ok);
+        }
+        txnCommit(txn);
+
+        /*
+         * k1, k2, k3, -> { primary keys }
+         * -1 means don't include the key in the join.
+         */
+        doJoin( 0,  0,  0, new int[] { 0 });
+        doJoin( 0,  0,  1, new int[] { 4 });
+        doJoin( 0,  0, -1, new int[] { 0, 2, 4 });
+        doJoin(-1,  1,  1, new int[] { 1 });
+        doJoin(-1,  2,  2, new int[] { });
+        doJoin(-1, -1,  2, new int[] { 2 });
+
+        close();
+    }
+
+    private void doJoin(int k1, int k2, int k3, int[] expectKeys)
+        throws DatabaseException {
+
+        List<Integer> expect = new ArrayList<Integer>();
+        for (int i : expectKeys) {
+            expect.add(i);
+        }
+        EntityJoin join = new EntityJoin(primary);
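+        /* A key of -1 means no condition is added for that index. */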
+        if (k1 >= 0) {
+            join.addCondition(sec1, k1);
+        }
+        if (k2 >= 0) {
+            join.addCondition(sec2, k2);
+        }
+        if (k3 >= 0) {
+            join.addCondition(sec3, k3);
+        }
+        List<Integer> found;
+        Transaction txn = txnBegin();
+
+        /* Keys */
+        found = new ArrayList<Integer>();
+        ForwardCursor<Integer> keys = join.keys(txn, null);
+        for (int i : keys) {
+            found.add(i);
+        }
+        keys.close();
+        assertEquals(expect, found);
+
+        /* Entities */
+        found = new ArrayList<Integer>();
+        ForwardCursor<MyEntity> entities = join.entities(txn, null);
+        for (MyEntity e : entities) {
+            found.add(e.id);
+        }
+        entities.close();
+        assertEquals(expect, found);
+
+        txnCommit(txn);
+    }
+
+    @Entity
+    private static class MyEntity {
+        @PrimaryKey
+        int id;
+        @SecondaryKey(relate=MANY_TO_ONE)
+        int k1;
+        @SecondaryKey(relate=MANY_TO_ONE)
+        int k2;
+        @SecondaryKey(relate=MANY_TO_ONE)
+        int k3;
+
+        private MyEntity() {}
+
+        MyEntity(int id, int k1, int k2, int k3) {
+            this.id = id;
+            this.k1 = k1;
+            this.k2 = k2;
+            this.k3 = k3;
+        }
+
+        @Override
+        public String toString() {
+            return "MyEntity " + id + ' ' + k1 + ' ' + k2 + ' ' + k3;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/NegativeTest.java b/test/com/sleepycat/persist/test/NegativeTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..396b782e018ad6b197d12f8f48b99ca1c8a0f782
--- /dev/null
+++ b/test/com/sleepycat/persist/test/NegativeTest.java
@@ -0,0 +1,476 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: NegativeTest.java,v 1.19.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import java.math.BigDecimal;
+import java.util.Collection;
+import java.util.ArrayList;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import junit.framework.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Negative tests.
+ *
+ * @author Mark Hayes
+ */
+public class NegativeTest extends TxnTestCase {
+
+    public static Test suite() {
+        return txnTestSuite(NegativeTest.class, null, null);
+    }
+
+    private EntityStore store;
+
+    private void open()
+        throws DatabaseException {
+
+        open(null);
+    }
+
+    private void open(Class<ProxyExtendsEntity> clsToRegister)
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        config.setTransactional(envConfig.getTransactional());
+
+        if (clsToRegister != null) {
+            AnnotationModel model = new AnnotationModel();
+            model.registerClass(clsToRegister);
+            config.setModel(model);
+        }
+
+        store = new EntityStore(env, "test", config);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        store = null;
+    }
+
+    @Override
+    public void tearDown()
+        throws Exception {
+
+        if (store != null) {
+            try {
+                store.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+            store = null;
+        }
+        super.tearDown();
+    }
+
+    public void testBadKeyClass1()
+        throws DatabaseException {
+
+        open();
+        try {
+            store.getPrimaryIndex(BadKeyClass1.class, UseBadKeyClass1.class);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().indexOf("@KeyField") >= 0);
+        }
+        close();
+    }
+
+    /** Missing @KeyField in composite key class. */
+    @Persistent
+    static class BadKeyClass1 {
+
+        private int f1;
+    }
+
+    @Entity
+    static class UseBadKeyClass1 {
+
+        @PrimaryKey
+        private BadKeyClass1 f1 = new BadKeyClass1();
+
+        @SecondaryKey(relate=ONE_TO_ONE)
+        private BadKeyClass1 f2 = new BadKeyClass1();
+    }
+    
+    public void testBadSequenceKeys() 
+        throws DatabaseException {
+
+        open();
+        try {
+            store.getPrimaryIndex(Boolean.class, BadSequenceKeyEntity1.class);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().indexOf
+                ("Type not allowed for sequence") >= 0);
+        }
+        try {
+            store.getPrimaryIndex(BadSequenceKeyEntity2.Key.class,
+                     BadSequenceKeyEntity2.class);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().indexOf
+                ("Type not allowed for sequence") >= 0);
+        }
+        try {
+            store.getPrimaryIndex(BadSequenceKeyEntity3.Key.class,
+                     BadSequenceKeyEntity3.class);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().indexOf
+                ("A composite key class used with a sequence may contain " +
+                 "only a single integer key field")>= 0);
+        }
+        close();
+    }
+    
+    /** Boolean not allowed for sequence key. */
+    @Entity
+    static class BadSequenceKeyEntity1 {
+
+        @PrimaryKey(sequence="X")
+        private boolean key;
+    }
+    
+    /** Composite key with non-integer field not allowed for sequence key. */
+    @Entity
+    static class BadSequenceKeyEntity2 {
+
+        @PrimaryKey(sequence="X")
+        private Key key;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            boolean key;
+        }
+    }
+    
+    /** Composite key with multiple key fields not allowed for sequence key. */
+    @Entity
+    static class BadSequenceKeyEntity3 {
+
+        @PrimaryKey(sequence="X")
+        private Key key;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            int key;
+            @KeyField(2)
+            int key2;
+        }
+    }
+    
+    /**
+     * A proxied object may not currently contain a field that references the
+     * parent proxy.  [#15815]
+     */
+    public void testProxyNestedRef() 
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer,ProxyNestedRef> index = store.getPrimaryIndex
+            (Integer.class, ProxyNestedRef.class);
+        ProxyNestedRef entity = new ProxyNestedRef();
+        entity.list.add(entity.list);
+        try {
+            index.put(entity);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().indexOf
+                ("Cannot embed a reference to a proxied object") >= 0);
+        }
+        close();
+    }
+
+    @Entity
+    static class ProxyNestedRef {
+
+        @PrimaryKey
+        private int key;
+
+        ArrayList<Object> list = new ArrayList<Object>();
+    }
+
+    /**
+     * Disallow primary keys on entity subclasses.  [#15757]
+     */
+    public void testEntitySubclassWithPrimaryKey()
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer,EntitySuperClass> index = store.getPrimaryIndex
+            (Integer.class, EntitySuperClass.class);
+        EntitySuperClass e1 = new EntitySuperClass(1, "one");
+        index.put(e1);
+        assertEquals(e1, index.get(1));
+        EntitySubClass e2 = new EntitySubClass(2, "two", "foo", 9);
+        try {
+            index.put(e2);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains
+                ("PrimaryKey may not appear on an Entity subclass"));
+        }
+        assertEquals(e1, index.get(1));
+        close();
+    }
+
+    @Entity
+    static class EntitySuperClass {
+
+        @PrimaryKey
+        private int x;
+
+        private String y;
+
+        EntitySuperClass(int x, String y) {
+            assert y != null;
+            this.x = x;
+            this.y = y;
+        }
+
+        private EntitySuperClass() {}
+
+        @Override
+        public String toString() {
+            return "x=" + x + " y=" + y;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof EntitySuperClass) {
+                EntitySuperClass o = (EntitySuperClass) other;
+                return x == o.x && y.equals(o.y);
+            } else {
+                return false;
+            }
+        }
+    }
+
+    @Persistent
+    static class EntitySubClass extends EntitySuperClass {
+
+        @PrimaryKey
+        private String foo;
+
+        private int z;
+
+        EntitySubClass(int x, String y, String foo, int z) {
+            super(x, y);
+            assert foo != null;
+            this.foo = foo;
+            this.z = z;
+        }
+
+        private EntitySubClass() {}
+
+        @Override
+        public String toString() {
+            return super.toString() + " z=" + z;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof EntitySubClass) {
+                EntitySubClass o = (EntitySubClass) other;
+                return super.equals(o) && z == o.z;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    /**
+     * Disallow embedded entity classes and subclasses.  [#16077]
+     */
+    public void testEmbeddedEntity()
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer,EmbeddingEntity> index = store.getPrimaryIndex
+            (Integer.class, EmbeddingEntity.class);
+        EmbeddingEntity e1 = new EmbeddingEntity(1, null);
+        index.put(e1);
+        assertEquals(e1, index.get(1));
+
+        EmbeddingEntity e2 =
+            new EmbeddingEntity(2, new EntitySuperClass(2, "two"));
+        try {
+            index.put(e2);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains
+                ("References to entities are not allowed"));
+        }
+
+        EmbeddingEntity e3 = new EmbeddingEntity
+            (3, new EmbeddedEntitySubClass(3, "three", "foo", 9));
+        try {
+            index.put(e3);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.toString(), e.getMessage().contains
+                ("References to entities are not allowed"));
+        }
+
+        assertEquals(e1, index.get(1));
+        close();
+    }
+
+    @Entity
+    static class EmbeddingEntity {
+
+        @PrimaryKey
+        private int x;
+
+        private EntitySuperClass y;
+
+        EmbeddingEntity(int x, EntitySuperClass y) {
+            this.x = x;
+            this.y = y;
+        }
+
+        private EmbeddingEntity() {}
+
+        @Override
+        public String toString() {
+            return "x=" + x + " y=" + y;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof EmbeddingEntity) {
+                EmbeddingEntity o = (EmbeddingEntity) other;
+                return x == o.x && 
+                       ((y == null) ? (o.y == null) : y.equals(o.y));
+            } else {
+                return false;
+            }
+        }
+    }
+
+    @Persistent
+    static class EmbeddedEntitySubClass extends EntitySuperClass {
+
+        private String foo;
+
+        private int z;
+
+        EmbeddedEntitySubClass(int x, String y, String foo, int z) {
+            super(x, y);
+            assert foo != null;
+            this.foo = foo;
+            this.z = z;
+        }
+
+        private EmbeddedEntitySubClass() {}
+
+        @Override
+        public String toString() {
+            return super.toString() + " z=" + z;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof EmbeddedEntitySubClass) {
+                EmbeddedEntitySubClass o = (EmbeddedEntitySubClass) other;
+                return super.equals(o) && z == o.z;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    /**
+     * Disallow SecondaryKey collection with no type parameter. [#15950]
+     */
+    public void testTypelessKeyCollection()
+        throws DatabaseException {
+
+        open();
+        try {
+            store.getPrimaryIndex
+                (Integer.class, TypelessKeyCollectionEntity.class);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.toString(), e.getMessage().contains
+                ("Collection typed secondary key field must have a " +
+                 "single generic type argument and a wildcard or type " +
+                 "bound is not allowed"));
+        }
+        close();
+    }
+
+    @Entity
+    static class TypelessKeyCollectionEntity {
+
+        @PrimaryKey
+        private int x;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        private Collection keys = new ArrayList();
+
+        TypelessKeyCollectionEntity(int x) {
+            this.x = x;
+        }
+
+        private TypelessKeyCollectionEntity() {}
+    }
+
+    /**
+     * Disallow a persistent proxy that extends an entity.  [#15950]
+     */
+    public void testProxyEntity()
+        throws DatabaseException {
+
+        try {
+            open(ProxyExtendsEntity.class);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.toString(), e.getMessage().contains
+                ("A proxy may not be an entity"));
+        }
+    }
+
+    @Persistent(proxyFor=BigDecimal.class)
+    static class ProxyExtendsEntity
+        extends EntitySuperClass
+        implements PersistentProxy<BigDecimal> {
+
+        private String rep;
+
+        public BigDecimal convertProxy() {
+            return new BigDecimal(rep);
+        }
+
+        public void initializeProxy(BigDecimal o) {
+            rep = o.toString();
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/OperationTest.java b/test/com/sleepycat/persist/test/OperationTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..92a8d371d22aac17da176d4e9ce4cc0a8738ea20
--- /dev/null
+++ b/test/com/sleepycat/persist/test/OperationTest.java
@@ -0,0 +1,1119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: OperationTest.java,v 1.23.2.4 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.DeleteAction.CASCADE;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.Test;
+
+/* <!-- begin JE only --> */
+import com.sleepycat.je.CacheMode;
+/* <!-- end JE only --> */
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.impl.Store;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.NotPersistent;
+import com.sleepycat.persist.model.NotTransient;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests misc store and index operations that are not tested by IndexTest.
+ *
+ * @author Mark Hayes
+ */
+public class OperationTest extends TxnTestCase {
+
+    private static final String STORE_NAME = "test";
+ 
+    public static Test suite() {
+        return txnTestSuite(OperationTest.class, null, null);
+    }
+
+    private EntityStore store;
+
+    private void openReadOnly()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setReadOnly(true);
+        open(config);
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        open(config);
+    }
+
+    private void open(StoreConfig config)
+        throws DatabaseException {
+
+        config.setTransactional(envConfig.getTransactional());
+        store = new EntityStore(env, STORE_NAME, config);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        store = null;
+    }
+
+    /**
+     * The store must be closed before closing the environment.
+     */
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (store != null) {
+                store.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        store = null;
+        super.tearDown();
+    }
+
+    public void testReadOnly()
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer,SharedSequenceEntity1> priIndex =
+            store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+        Transaction txn = txnBegin();
+        SharedSequenceEntity1 e = new SharedSequenceEntity1();
+        priIndex.put(txn, e);
+        assertEquals(1, e.key);
+        txnCommit(txn);
+        close();
+
+        /*
+         * Check that we can open the store read-only and read the records
+         * written above.
+         */
+        openReadOnly();
+        priIndex =
+            store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+        e = priIndex.get(1);
+        assertNotNull(e);
+        close();
+    }
+
+    /* <!-- begin JE only --> */
+    public void testGetStoreNames()
+        throws DatabaseException {
+
+        open();
+        close();
+        Set<String> names = EntityStore.getStoreNames(env);
+        assertEquals(1, names.size());
+        assertEquals("test", names.iterator().next());
+    }
+    /* <!-- end JE only --> */
+
+    /* <!-- begin JE only --> */
+    public void testCacheMode()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        Transaction txn = txnBeginCursor();
+
+        MyEntity e = new MyEntity();
+        e.priKey = 1;
+        e.secKey = 1;
+        priIndex.put(txn, e);
+
+        EntityCursor<MyEntity> entities = priIndex.entities(txn, null);
+
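+        /*
+         * A new cursor starts in CacheMode.DEFAULT; setCacheMode changes the
+         * mode used for subsequent operations.
+         */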
+        assertSame(CacheMode.DEFAULT, entities.getCacheMode());
+        e = entities.first();
+        assertNotNull(e);
+        assertSame(CacheMode.DEFAULT, entities.getCacheMode());
+        entities.setCacheMode(CacheMode.KEEP_HOT);
+        assertSame(CacheMode.KEEP_HOT, entities.getCacheMode());
+        e = entities.first();
+        assertNotNull(e);
+        assertSame(CacheMode.KEEP_HOT, entities.getCacheMode());
+        entities.setCacheMode(CacheMode.UNCHANGED);
+        entities.update(e);
+        entities.setCacheMode(CacheMode.UNCHANGED);
+
+        entities.close();
+        txnCommit(txn);
+        close();
+    }
+    /* <!-- end JE only --> */
+
+    public void testUninitializedCursor()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        Transaction txn = txnBeginCursor();
+
+        MyEntity e = new MyEntity();
+        e.priKey = 1;
+        e.secKey = 1;
+        priIndex.put(txn, e);
+
+        EntityCursor<MyEntity> entities = priIndex.entities(txn, null);
+        try {
+            entities.nextDup();
+            fail();
+        } catch (IllegalStateException expected) {}
+        try {
+            entities.prevDup();
+            fail();
+        } catch (IllegalStateException expected) {}
+        try {
+            entities.current();
+            fail();
+        } catch (IllegalStateException expected) {}
+        try {
+            entities.delete();
+            fail();
+        } catch (IllegalStateException expected) {}
+        try {
+            entities.update(e);
+            fail();
+        } catch (IllegalStateException expected) {}
+        try {
+            entities.count();
+            fail();
+        } catch (IllegalStateException expected) {}
+
+        entities.close();
+        txnCommit(txn);
+        close();
+    }
+
+    public void testCursorCount()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        Transaction txn = txnBeginCursor();
+
+        MyEntity e = new MyEntity();
+        e.priKey = 1;
+        e.secKey = 1;
+        priIndex.put(txn, e);
+
+        EntityCursor<MyEntity> cursor = secIndex.entities(txn, null);
+        cursor.next();
+        assertEquals(1, cursor.count());
+        cursor.close();
+
+        e.priKey = 2;
+        priIndex.put(txn, e);
+        cursor = secIndex.entities(txn, null);
+        cursor.next();
+        assertEquals(2, cursor.count());
+        cursor.close();
+
+        txnCommit(txn);
+        close();
+    }
+
+    public void testCursorUpdate()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        Transaction txn = txnBeginCursor();
+
+        Integer k;
+        MyEntity e = new MyEntity();
+        e.priKey = 1;
+        e.secKey = 2;
+        priIndex.put(txn, e);
+
+        /* update() with primary entity cursor. */
+        EntityCursor<MyEntity> entities = priIndex.entities(txn, null);
+        e = entities.next();
+        assertNotNull(e);
+        assertEquals(1, e.priKey);
+        assertEquals(Integer.valueOf(2), e.secKey);
+        e.secKey = null;
+        assertTrue(entities.update(e));
+        e = entities.current();
+        assertNotNull(e);
+        assertEquals(1, e.priKey);
+        assertEquals(null, e.secKey);
+        e.secKey = 3;
+        assertTrue(entities.update(e));
+        e = entities.current();
+        assertNotNull(e);
+        assertEquals(1, e.priKey);
+        assertEquals(Integer.valueOf(3), e.secKey);
+        entities.close();
+
+        /* update() with primary keys cursor. */
+        EntityCursor<Integer> keys = priIndex.keys(txn, null);
+        k = keys.next();
+        assertNotNull(k);
+        assertEquals(Integer.valueOf(1), k);
+        try {
+            keys.update(2);
+            fail();
+        } catch (UnsupportedOperationException expected) {
+        }
+        keys.close();
+
+        /* update() with secondary entity cursor. */
+        entities = secIndex.entities(txn, null);
+        e = entities.next();
+        assertNotNull(e);
+        assertEquals(1, e.priKey);
+        assertEquals(Integer.valueOf(3), e.secKey);
+        try {
+            entities.update(e);
+            fail();
+        } catch (UnsupportedOperationException expected) {
+        } catch (IllegalArgumentException expectedForDbCore) {
+        }
+        entities.close();
+
+        /* update() with secondary keys cursor. */
+        keys = secIndex.keys(txn, null);
+        k = keys.next();
+        assertNotNull(k);
+        assertEquals(Integer.valueOf(3), k);
+        try {
+            keys.update(k);
+            fail();
+        } catch (UnsupportedOperationException expected) {
+        }
+        keys.close();
+
+        txnCommit(txn);
+        close();
+    }
+
+    public void testCursorDelete()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        Transaction txn = txnBeginCursor();
+
+        /* delete() with primary and secondary entities cursor. */
+
+        for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) {
+
+            MyEntity e = new MyEntity();
+            e.priKey = 1;
+            e.secKey = 1;
+            priIndex.put(txn, e);
+            e.priKey = 2;
+            priIndex.put(txn, e);
+
+            EntityCursor<MyEntity> cursor = index.entities(txn, null);
+
+            e = cursor.next();
+            assertNotNull(e);
+            assertEquals(1, e.priKey);
+            e = cursor.current();
+            assertNotNull(e);
+            assertEquals(1, e.priKey);
+            assertTrue(cursor.delete());
+            assertTrue(!cursor.delete());
+            assertNull(cursor.current());
+
+            e = cursor.next();
+            assertNotNull(e);
+            assertEquals(2, e.priKey);
+            e = cursor.current();
+            assertNotNull(e);
+            assertEquals(2, e.priKey);
+            assertTrue(cursor.delete());
+            assertTrue(!cursor.delete());
+            assertNull(cursor.current());
+
+            e = cursor.next();
+            assertNull(e);
+
+            if (index == priIndex) {
+                e = new MyEntity();
+                e.priKey = 2;
+                e.secKey = 1;
+                assertTrue(!cursor.update(e));
+            }
+
+            cursor.close();
+        }
+
+        /* delete() with primary and secondary keys cursor. */
+
+        for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) {
+
+            MyEntity e = new MyEntity();
+            e.priKey = 1;
+            e.secKey = 1;
+            priIndex.put(txn, e);
+            e.priKey = 2;
+            priIndex.put(txn, e);
+
+            EntityCursor<Integer> cursor = index.keys(txn, null);
+
+            Integer k = cursor.next();
+            assertNotNull(k);
+            assertEquals(1, k.intValue());
+            k = cursor.current();
+            assertNotNull(k);
+            assertEquals(1, k.intValue());
+            assertTrue(cursor.delete());
+            assertTrue(!cursor.delete());
+            assertNull(cursor.current());
+
+            int expectKey = (index == priIndex) ? 2 : 1;
+            k = cursor.next();
+            assertNotNull(k);
+            assertEquals(expectKey, k.intValue());
+            k = cursor.current();
+            assertNotNull(k);
+            assertEquals(expectKey, k.intValue());
+            assertTrue(cursor.delete());
+            assertTrue(!cursor.delete());
+            assertNull(cursor.current());
+
+            k = cursor.next();
+            assertNull(k);
+
+            cursor.close();
+        }
+
+        txnCommit(txn);
+        close();
+    }
+
+    public void testDeleteFromSubIndex()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        Transaction txn = txnBegin();
+        MyEntity e = new MyEntity();
+        e.secKey = 1;
+        e.priKey = 1;
+        priIndex.put(txn, e);
+        e.priKey = 2;
+        priIndex.put(txn, e);
+        e.priKey = 3;
+        priIndex.put(txn, e);
+        e.priKey = 4;
+        priIndex.put(txn, e);
+        txnCommit(txn);
+
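+        /*
+         * All four entities share secondary key 1, so subIndex(1) exposes
+         * them keyed by primary key.
+         */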
+        EntityIndex<Integer,MyEntity> subIndex = secIndex.subIndex(1);
+        txn = txnBeginCursor();
+        e = subIndex.get(txn, 1, null);
+        assertEquals(1, e.priKey);
+        assertEquals(Integer.valueOf(1), e.secKey);
+        e = subIndex.get(txn, 2, null);
+        assertEquals(2, e.priKey);
+        assertEquals(Integer.valueOf(1), e.secKey);
+        e = subIndex.get(txn, 3, null);
+        assertEquals(3, e.priKey);
+        assertEquals(Integer.valueOf(1), e.secKey);
+        e = subIndex.get(txn, 5, null);
+        assertNull(e);
+
+        boolean deleted = subIndex.delete(txn, 1);
+        assertTrue(deleted);
+        assertNull(subIndex.get(txn, 1, null));
+        assertNotNull(subIndex.get(txn, 2, null));
+
+        EntityCursor<MyEntity> cursor = subIndex.entities(txn, null);
+        boolean saw4 = false;
+        for (MyEntity e2 = cursor.first(); e2 != null; e2 = cursor.next()) {
+            if (e2.priKey == 3) {
+                cursor.delete();
+            }
+            if (e2.priKey == 4) {
+                saw4 = true;
+            }
+        }
+        cursor.close();
+        assertTrue(saw4);
+        assertNull(subIndex.get(txn, 1, null));
+        assertNull(subIndex.get(txn, 3, null));
+        assertNotNull(subIndex.get(txn, 2, null));
+        assertNotNull(subIndex.get(txn, 4, null));
+
+        txnCommit(txn);
+        close();
+    }
+
+    @Entity
+    static class MyEntity {
+
+        @PrimaryKey
+        private int priKey;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private Integer secKey;
+
+        private MyEntity() {}
+    }
+
+    public void testSharedSequence()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,SharedSequenceEntity1> priIndex1 =
+            store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+
+        PrimaryIndex<Integer,SharedSequenceEntity2> priIndex2 =
+            store.getPrimaryIndex(Integer.class, SharedSequenceEntity2.class);
+
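+        /*
+         * Both primary indexes draw keys from the same sequence, so the
+         * assigned keys are 1, 2, 3 and 4 in put order.
+         */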
+        Transaction txn = txnBegin();
+        SharedSequenceEntity1 e1 = new SharedSequenceEntity1();
+        SharedSequenceEntity2 e2 = new SharedSequenceEntity2();
+        priIndex1.put(txn, e1);
+        assertEquals(1, e1.key);
+        priIndex2.putNoOverwrite(txn, e2);
+        assertEquals(Integer.valueOf(2), e2.key);
+        e1.key = 0;
+        priIndex1.putNoOverwrite(txn, e1);
+        assertEquals(3, e1.key);
+        e2.key = null;
+        priIndex2.put(txn, e2);
+        assertEquals(Integer.valueOf(4), e2.key);
+        txnCommit(txn);
+
+        close();
+    }
+
+    @Entity
+    static class SharedSequenceEntity1 {
+
+        @PrimaryKey(sequence="shared")
+        private int key;
+    }
+
+    @Entity
+    static class SharedSequenceEntity2 {
+
+        @PrimaryKey(sequence="shared")
+        private Integer key;
+    }
+
+    public void testSeparateSequence()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,SeparateSequenceEntity1> priIndex1 =
+            store.getPrimaryIndex
+                (Integer.class, SeparateSequenceEntity1.class);
+
+        PrimaryIndex<Integer,SeparateSequenceEntity2> priIndex2 =
+            store.getPrimaryIndex
+                (Integer.class, SeparateSequenceEntity2.class);
+
+        Transaction txn = txnBegin();
+        SeparateSequenceEntity1 e1 = new SeparateSequenceEntity1();
+        SeparateSequenceEntity2 e2 = new SeparateSequenceEntity2();
+        priIndex1.put(txn, e1);
+        assertEquals(1, e1.key);
+        priIndex2.putNoOverwrite(txn, e2);
+        assertEquals(Integer.valueOf(1), e2.key);
+        e1.key = 0;
+        priIndex1.putNoOverwrite(txn, e1);
+        assertEquals(2, e1.key);
+        e2.key = null;
+        priIndex2.put(txn, e2);
+        assertEquals(Integer.valueOf(2), e2.key);
+        txnCommit(txn);
+
+        close();
+    }
+
+    @Entity
+    static class SeparateSequenceEntity1 {
+
+        @PrimaryKey(sequence="seq1")
+        private int key;
+    }
+
+    @Entity
+    static class SeparateSequenceEntity2 {
+
+        @PrimaryKey(sequence="seq2")
+        private Integer key;
+    }
+    
+    public void testCompositeSequence() 
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<CompositeSequenceEntity1.Key,CompositeSequenceEntity1>
+            priIndex1 =
+            store.getPrimaryIndex
+                (CompositeSequenceEntity1.Key.class,
+                 CompositeSequenceEntity1.class);
+
+        PrimaryIndex<CompositeSequenceEntity2.Key,CompositeSequenceEntity2>
+            priIndex2 =
+            store.getPrimaryIndex
+                (CompositeSequenceEntity2.Key.class,
+                 CompositeSequenceEntity2.class);
+
+        Transaction txn = txnBegin();
+        CompositeSequenceEntity1 e1 = new CompositeSequenceEntity1();
+        CompositeSequenceEntity2 e2 = new CompositeSequenceEntity2();
+        priIndex1.put(txn, e1);
+        assertEquals(1, e1.key.key);
+        priIndex2.putNoOverwrite(txn, e2);
+        assertEquals(Integer.valueOf(1), e2.key.key);
+        e1.key = null;
+        priIndex1.putNoOverwrite(txn, e1);
+        assertEquals(2, e1.key.key);
+        e2.key = null;
+        priIndex2.put(txn, e2);
+        assertEquals(Integer.valueOf(2), e2.key.key);
+        txnCommit(txn);
+
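+        /*
+         * The composite keys compare in reverse natural order, so the
+         * cursors return key 2 before key 1.
+         */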
+        EntityCursor<CompositeSequenceEntity1> c1 = priIndex1.entities();
+        e1 = c1.next();
+        assertEquals(2, e1.key.key);
+        e1 = c1.next();
+        assertEquals(1, e1.key.key);
+        e1 = c1.next();
+        assertNull(e1);
+        c1.close();
+
+        EntityCursor<CompositeSequenceEntity2> c2 = priIndex2.entities();
+        e2 = c2.next();
+        assertEquals(Integer.valueOf(2), e2.key.key);
+        e2 = c2.next();
+        assertEquals(Integer.valueOf(1), e2.key.key);
+        e2 = c2.next();
+        assertNull(e2);
+        c2.close();
+
+        close();
+    }
+
+    @Entity
+    static class CompositeSequenceEntity1 {
+
+        @Persistent
+        static class Key implements Comparable<Key> {
+
+            @KeyField(1)
+            private int key;
+
+            public int compareTo(Key o) {
+                /* Reverse the natural order. */
+                return o.key - key;
+            }
+        }
+
+        @PrimaryKey(sequence="seq1")
+        private Key key;
+    }
+
+    @Entity
+    static class CompositeSequenceEntity2 {
+
+        @Persistent
+        static class Key implements Comparable<Key> {
+
+            @KeyField(1)
+            private Integer key;
+
+            public int compareTo(Key o) {
+                /* Reverse the natural order. */
+                return o.key - key;
+            }
+        }
+
+        @PrimaryKey(sequence="seq2")
+        private Key key;
+    }
+
+    /**
+     * When opening read-only, secondaries are not opened when the primary is
+     * opened, causing a different code path to be used for opening
+     * secondaries.  For a RawStore in particular, this caused an unreported
+     * NullPointerException in JE 3.0.12.  No SR was created because the use
+     * case is very obscure; the problem was discovered by code inspection.
+     */
+    public void testOpenRawStoreReadOnly()
+        throws DatabaseException {
+
+        open();
+        store.getPrimaryIndex(Integer.class, MyEntity.class);
+        close();
+
+        StoreConfig config = new StoreConfig();
+        config.setReadOnly(true);
+        config.setTransactional(envConfig.getTransactional());
+        RawStore rawStore = new RawStore(env, "test", config);
+
+        String clsName = MyEntity.class.getName();
+        rawStore.getSecondaryIndex(clsName, "secKey");
+
+        rawStore.close();
+    }
+
+    /**
+     * When opening an X_TO_MANY secondary that has a persistent key class,
+     * the key class was not recognized as being persistent if it had not
+     * been referenced before getSecondaryIndex was called.  This was a bug
+     * in JE 3.0.12, reported on OTN.  [#15103]
+     */
+    public void testToManyKeyClass()
+        throws DatabaseException {
+
+        open();
+
+        PrimaryIndex<Integer,ToManyKeyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+        SecondaryIndex<ToManyKey,Integer,ToManyKeyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+
+        priIndex.put(new ToManyKeyEntity());
+        secIndex.get(new ToManyKey());
+
+        close();
+    }
+
+    /**
+     * Test a fix for a bug where opening a TO_MANY secondary index would
+     * fail with "IllegalArgumentException: Wrong secondary key class: ..."
+     * when the store was opened read-only.  [#15156]
+     */
+    public void testToManyReadOnly()
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer,ToManyKeyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+        priIndex.put(new ToManyKeyEntity());
+        close();
+
+        openReadOnly();
+        priIndex = store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+        SecondaryIndex<ToManyKey,Integer,ToManyKeyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+        secIndex.get(new ToManyKey());
+        close();
+    }
+
+    @Persistent
+    static class ToManyKey {
+
+        @KeyField(1)
+        int value = 99;
+    }
+
+    @Entity
+    static class ToManyKeyEntity {
+
+        @PrimaryKey
+        int key = 88;
+
+        @SecondaryKey(relate=ONE_TO_MANY)
+        Set<ToManyKey> key2;
+
+        ToManyKeyEntity() {
+            key2 = new HashSet<ToManyKey>();
+            key2.add(new ToManyKey());
+        }
+    }
+
+    /* <!-- begin JE only --> */
+    public void testDeferredWrite()
+        throws DatabaseException {
+
+        if (envConfig.getTransactional()) {
+            /* Deferred write cannot be used with transactions. */
+            return;
+        }
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setDeferredWrite(true);
+        storeConfig.setAllowCreate(true);
+        open(storeConfig);
+        assertTrue(store.getConfig().getDeferredWrite());
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        DatabaseConfig dbConfig = priIndex.getDatabase().getConfig();
+        assertTrue(dbConfig.getDeferredWrite());
+        dbConfig = secIndex.getDatabase().getConfig();
+        assertTrue(dbConfig.getDeferredWrite());
+
+        MyEntity e = new MyEntity();
+        e.priKey = 1;
+        e.secKey = 1;
+        priIndex.put(e);
+
+        EntityCursor<MyEntity> cursor = secIndex.entities();
+        cursor.next();
+        assertEquals(1, cursor.count());
+        cursor.close();
+
+        e.priKey = 2;
+        priIndex.put(e);
+        cursor = secIndex.entities();
+        cursor.next();
+        assertEquals(2, cursor.count());
+        cursor.close();
+
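+        /*
+         * Records each database passed to onSync and checks that the log is
+         * flushed exactly once.
+         */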
+        class MySyncHook implements Store.SyncHook {
+
+            boolean gotFlush;
+            List<Database> synced = new ArrayList<Database>();
+
+            public void onSync(Database db, boolean flushLog) {
+                synced.add(db);
+                if (flushLog) {
+                    assertTrue(!gotFlush);
+                    gotFlush = true;
+                }
+            }
+        }
+
+        MySyncHook hook = new MySyncHook();
+        Store.setSyncHook(hook);
+        store.sync();
+        assertTrue(hook.gotFlush);
+        assertEquals(2, hook.synced.size());
+        assertTrue(hook.synced.contains(priIndex.getDatabase()));
+        assertTrue(hook.synced.contains(secIndex.getDatabase()));
+
+        close();
+    }
+    /* <!-- end JE only --> */
+
+    /* <!-- begin JE only --> */
+    public void testTemporary()
+        throws DatabaseException {
+
+        if (envConfig.getTransactional()) {
+            /* Temporary cannot be used with transactions. */
+            return;
+        }
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setTemporary(true);
+        storeConfig.setAllowCreate(true);
+        open(storeConfig);
+        assertTrue(store.getConfig().getTemporary());
+
+        PrimaryIndex<Integer,MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+        PrimaryIndex<Integer,SharedSequenceEntity1> priIndex1 =
+            store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+
+        /* All temporary databases exist before closing. */
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, MyEntity.class.getName(), null);
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, MyEntity.class.getName(), "secKey");
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, SharedSequenceEntity1.class.getName(),
+             null);
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, "com.sleepycat.persist.formats", null);
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, "com.sleepycat.persist.sequences", null);
+
+        close();
+
+        /* All temporary databases are deleted after closing. */
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, MyEntity.class.getName(), null);
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, MyEntity.class.getName(), "secKey");
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, SharedSequenceEntity1.class.getName(),
+             null);
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, "com.sleepycat.persist.formats", null);
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, "com.sleepycat.persist.sequences", null);
+    }
+    /* <!-- end JE only --> */
+
+    /**
+     * When Y is opened and X has a key with relatedEntity=Y.class, X should
+     * be opened automatically.  If X is not opened, foreign key constraints
+     * will not be enforced. [#15358]
+     */
+    public void testAutoOpenRelatedEntity()
+        throws DatabaseException {
+
+        PrimaryIndex<Integer,RelatedY> priY;
+        PrimaryIndex<Integer,RelatedX> priX;
+
+        /* Opening X should create (and open) Y and enforce constraints. */
+        open();
+        priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+        PersistTestUtils.assertDbExists
+            (true, env, STORE_NAME, RelatedY.class.getName(), null);
+        try {
+            priX.put(new RelatedX());
+            fail();
+        } catch (DatabaseException e) {
+            assertTrue
+                ("" + e.getMessage(), (e.getMessage().indexOf
+                  ("foreign key not allowed: it is not present") >= 0) ||
+                 (e.getMessage().indexOf("DB_FOREIGN_CONFLICT") >= 0));
+        }
+        priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+        priY.put(new RelatedY());
+        priX.put(new RelatedX());
+        close();
+
+        /* Delete should cascade even when X is not opened explicitly. */
+        open();
+        priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+        assertEquals(1, priY.count());
+        priY.delete(88);
+        assertEquals(0, priY.count());
+        priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+        assertEquals(0, priX.count()); /* Failed prior to [#15358] fix. */
+        close();
+    }
+
+    @Entity
+    static class RelatedX {
+
+        @PrimaryKey
+        int key = 99;
+
+        @SecondaryKey(relate=ONE_TO_ONE,
+                      relatedEntity=RelatedY.class,
+                      onRelatedEntityDelete=CASCADE)
+        int key2 = 88;
+
+        RelatedX() {
+        }
+    }
+
+    @Entity
+    static class RelatedY {
+
+        @PrimaryKey
+        int key = 88;
+
+        RelatedY() {
+        }
+    }
+
+    public void testSecondaryBulkLoad1()
+        throws DatabaseException {
+
+        doSecondaryBulkLoad(true);
+    }
+
+    public void testSecondaryBulkLoad2()
+        throws DatabaseException {
+
+        doSecondaryBulkLoad(false);
+    }
+
+    private void doSecondaryBulkLoad(boolean closeAndOpenNormally)
+        throws DatabaseException {
+
+        PrimaryIndex<Integer,RelatedX> priX;
+        PrimaryIndex<Integer,RelatedY> priY;
+        SecondaryIndex<Integer,Integer,RelatedX> secX;
+
+        /* Open priX with SecondaryBulkLoad=true. */
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(true);
+        config.setSecondaryBulkLoad(true);
+        open(config);
+
+        /* Getting priX should not create the secondary index. */
+        priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+        PersistTestUtils.assertDbExists
+            (false, env, STORE_NAME, RelatedX.class.getName(), "key2");
+
+        /* We can put records that violate the secondary key constraint. */
+        priX.put(new RelatedX());
+
+        if (closeAndOpenNormally) {
+            /* Open normally and the secondary will be populated. */
+            close();
+            open();
+            try {
+                /* Before adding the foreign key, the constraint is violated. */
+                priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+            } catch (DatabaseException e) {
+                assertTrue(e.toString(),
+                           e.toString().contains("foreign key not allowed"));
+            }
+            /* Add the foreign key to avoid the constraint error. */
+            priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+            priY.put(new RelatedY());
+            priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+            PersistTestUtils.assertDbExists
+                (true, env, STORE_NAME, RelatedX.class.getName(), "key2");
+            secX = store.getSecondaryIndex(priX, Integer.class, "key2");
+        } else {
+            /* Get secondary index explicitly and it will be populated. */
+            try {
+                /* Before adding the foreign key, the constraint is violated. */
+                secX = store.getSecondaryIndex(priX, Integer.class, "key2");
+            } catch (DatabaseException e) {
+                assertTrue(e.toString(),
+                           e.toString().contains("foreign key not allowed"));
+            }
+            /* Add the foreign key. */
+            priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+            priY.put(new RelatedY());
+            secX = store.getSecondaryIndex(priX, Integer.class, "key2");
+            PersistTestUtils.assertDbExists
+                (true, env, STORE_NAME, RelatedX.class.getName(), "key2");
+        }
+
+        RelatedX x = secX.get(88);
+        assertNotNull(x);
+        close();
+    }
+
+    public void testPersistentFields()
+        throws DatabaseException {
+
+        open();
+        PrimaryIndex<Integer, PersistentFields> pri =
+            store.getPrimaryIndex(Integer.class, PersistentFields.class);
+        PersistentFields o1 = new PersistentFields(-1, 1, 2, 3, 4, 5, 6);
+        assertNull(pri.put(o1));
+        PersistentFields o2 = pri.get(-1);
+        assertNotNull(o2);
+        assertEquals(0, o2.transient1);
+        assertEquals(0, o2.transient2);
+        assertEquals(0, o2.transient3);
+        assertEquals(4, o2.persistent1);
+        assertEquals(5, o2.persistent2);
+        assertEquals(6, o2.persistent3);
+        close();
+    }
+
+    @Entity
+    static class PersistentFields {
+
+        @PrimaryKey int key;
+
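+        /* Not stored: the transient keyword, @NotPersistent, or both. */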
+        transient int transient1;
+        @NotPersistent int transient2;
+        @NotPersistent transient int transient3;
+
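+        /* Stored: plain fields and fields marked @NotTransient. */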
+        int persistent1;
+        @NotTransient int persistent2;
+        @NotTransient transient int persistent3;
+
+        PersistentFields(int k,
+                         int t1,
+                         int t2,
+                         int t3,
+                         int p1,
+                         int p2,
+                         int p3) {
+            key = k;
+            transient1 = t1;
+            transient2 = t2;
+            transient3 = t3;
+            persistent1 = p1;
+            persistent2 = p2;
+            persistent3 = p3;
+        }
+
+        private PersistentFields() {}
+    }
+
+    /**
+     * When a primary or secondary has a persistent key class, the key class
+     * was not recognized as being persistent when getPrimaryConfig,
+     * getSecondaryConfig, or getSubclassIndex was called, if that key class
+     * was not previously referenced.  All three cases are tested by calling
+     * getSecondaryConfig.  This was a bug in JE 3.3.69, reported on OTN.
+     * [#16407]
+     */
+    public void testKeyClassInitialization()
+        throws DatabaseException {
+
+        open();
+        store.getSecondaryConfig(ToManyKeyEntity.class, "key2");
+        close();
+    }
+}
diff --git a/test/com/sleepycat/persist/test/PersistTestUtils.java b/test/com/sleepycat/persist/test/PersistTestUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..d9b181c9be2886b2022b82326de68684e033a604
--- /dev/null
+++ b/test/com/sleepycat/persist/test/PersistTestUtils.java
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: PersistTestUtils.java,v 1.6 2008/02/18 17:11:41 mark Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.io.FileNotFoundException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+/* <!-- begin JE only --> */
+import com.sleepycat.je.DatabaseNotFoundException;
+/* <!-- end JE only --> */
+import com.sleepycat.je.Environment;
+
+class PersistTestUtils {
+
+    /**
+     * Asserts that a database does or does not exist, according to
+     * expectExists.  If keyName is null, checks an entity database.  If
+     * keyName is non-null, checks a secondary database.
+     */
+    static void assertDbExists(boolean expectExists,
+                               Environment env,
+                               String storeName,
+                               String entityClassName,
+                               String keyName) {
+        String fileName;
+        String dbName;
+        if (DbCompat.SEPARATE_DATABASE_FILES) {
+            fileName = storeName + '-' + entityClassName;
+            if (keyName != null) {
+                fileName += "-" + keyName;
+            }
+            dbName = null;
+        } else {
+            fileName = null;
+            dbName = "persist#" + storeName + '#' + entityClassName;
+            if (keyName != null) {
+                dbName += "#" + keyName;
+            }
+        }
+        boolean exists;
+        try {
+            DatabaseConfig config = new DatabaseConfig();
+            config.setReadOnly(true);
+            Database db = DbCompat.openDatabase
+                (env, null/*txn*/, fileName, dbName, config);
+            db.close();
+            exists = true;
+        /* <!-- begin JE only --> */
+        } catch (DatabaseNotFoundException e) {
+            exists = false;
+        /* <!-- end JE only --> */
+        } catch (FileNotFoundException e) {
+            exists = false;
+        } catch (Exception e) {
+            /* Any other exception means the DB does exist. */
+            exists = true;
+        }
+        if (expectExists != exists) {
+            TestCase.fail
+                ((expectExists ? "Does not exist: " : "Does exist: ") +
+                 dbName);
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/SequenceTest.java b/test/com/sleepycat/persist/test/SequenceTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..65496f8d2114e71f28478eee23c37ccb5838a8b2
--- /dev/null
+++ b/test/com/sleepycat/persist/test/SequenceTest.java
@@ -0,0 +1,477 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SequenceTest.java,v 1.5.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class SequenceTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+        SharedTestUtils.emptyDir(envHome);
+    }
+
+    public void tearDown()
+        throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        try {
+            SharedTestUtils.emptyDir(envHome);
+        } catch (Error e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+        env = null;
+    }
+
+    public void testSequenceKeys()
+        throws Exception {
+
+        Class[] classes = {
+            SequenceEntity_Long.class,
+            SequenceEntity_Integer.class,
+            SequenceEntity_Short.class,
+            SequenceEntity_Byte.class,
+            SequenceEntity_tlong.class,
+            SequenceEntity_tint.class,
+            SequenceEntity_tshort.class,
+            SequenceEntity_tbyte.class,
+            SequenceEntity_Long_composite.class,
+            SequenceEntity_Integer_composite.class,
+            SequenceEntity_Short_composite.class,
+            SequenceEntity_Byte_composite.class,
+            SequenceEntity_tlong_composite.class,
+            SequenceEntity_tint_composite.class,
+            SequenceEntity_tshort_composite.class,
+            SequenceEntity_tbyte_composite.class,
+        };
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        EntityStore store = new EntityStore(env, "foo", storeConfig);
+
+        long seq = 0;
+
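+        /*
+         * Every entity class uses the sequence named "X", so assigned keys
+         * increase monotonically across the loop.
+         */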
+        for (int i = 0; i < classes.length; i += 1) {
+            Class entityCls = classes[i];
+            SequenceEntity entity = (SequenceEntity) entityCls.newInstance();
+            Class keyCls = entity.getKeyClass();
+
+            PrimaryIndex<Object,SequenceEntity> index =
+                store.getPrimaryIndex(keyCls, entityCls);
+            index.putNoReturn(entity);
+            seq += 1;
+            assertEquals(seq, entity.getKey());
+
+            index.putNoReturn(entity);
+            assertEquals(seq, entity.getKey());
+
+            entity.nullifyKey();
+            index.putNoReturn(entity);
+            seq += 1;
+            assertEquals(seq, entity.getKey());
+        }
+
+        store.close();
+        env.close();
+        env = null;
+    }
+
+    interface SequenceEntity {
+        Class getKeyClass();
+        long getKey();
+        void nullifyKey();
+    }
+
+    @Entity
+    static class SequenceEntity_Long implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Long priKey;
+
+        public Class getKeyClass() {
+            return Long.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Integer implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Integer priKey;
+
+        public Class getKeyClass() {
+            return Integer.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Short implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Short priKey;
+
+        public Class getKeyClass() {
+            return Short.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Byte implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Byte priKey;
+
+        public Class getKeyClass() {
+            return Byte.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tlong implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        long priKey;
+
+        public Class getKeyClass() {
+            return Long.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = 0;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tint implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        int priKey;
+
+        public Class getKeyClass() {
+            return Integer.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = 0;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tshort implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        short priKey;
+
+        public Class getKeyClass() {
+            return Short.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = 0;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tbyte implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        byte priKey;
+
+        public Class getKeyClass() {
+            return Byte.class;
+        }
+
+        public long getKey() {
+            return priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = 0;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Long_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            Long priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Integer_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            Integer priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Short_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            Short priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_Byte_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            Byte priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tlong_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            long priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tint_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            int priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tshort_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            short priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+
+    @Entity
+    static class SequenceEntity_tbyte_composite implements SequenceEntity {
+
+        @PrimaryKey(sequence="X")
+        Key priKey;
+
+        @Persistent
+        static class Key {
+            @KeyField(1)
+            byte priKey;
+        }
+
+        public Class getKeyClass() {
+            return Key.class;
+        }
+
+        public long getKey() {
+            return priKey.priKey;
+        }
+
+        public void nullifyKey() {
+            priKey = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/SubclassIndexTest.java b/test/com/sleepycat/persist/test/SubclassIndexTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d200ea7e1d6b62a38ecb7db9b9afbc765f093ae6
--- /dev/null
+++ b/test/com/sleepycat/persist/test/SubclassIndexTest.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SubclassIndexTest.java,v 1.9.2.3 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+public class SubclassIndexTest extends TestCase {
+
+    private File envHome;
+    private Environment env;
+    private EntityStore store;
+
+    public void setUp()
+        throws IOException {
+
+        envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+        SharedTestUtils.emptyDir(envHome);
+    }
+
+    public void tearDown()
+        throws IOException {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        try {
+            //SharedTestUtils.emptyDir(envHome);
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+        env = null;
+    }
+
+    private void open()
+        throws IOException, DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        store = new EntityStore(env, "foo", storeConfig);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        store = null;
+        env.close();
+        env = null;
+    }
+
+    public void testSubclassIndex()
+        throws IOException, DatabaseException {
+
+        open();
+
+        PrimaryIndex<String, Employee> employeesById =
+            store.getPrimaryIndex(String.class, Employee.class);
+
+        employeesById.put(new Employee("1"));
+        employeesById.put(new Manager("2", "a"));
+        employeesById.put(new Manager("3", "a"));
+        employeesById.put(new Manager("4", "b"));
+
+        Employee e;
+        Manager m;
+
+        e = employeesById.get("1");
+        assertNotNull(e);
+        assertTrue(!(e instanceof Manager));
+
+        /* Ensure DB exists BEFORE calling getSubclassIndex. [#15247] */
+        PersistTestUtils.assertDbExists
+            (true, env, "foo", Employee.class.getName(), "dept");
+
+        /* Normal use: Subclass index for a key in the subclass. */
+        SecondaryIndex<String, String, Manager> managersByDept =
+            store.getSubclassIndex
+                (employeesById, Manager.class, String.class, "dept");
+
+        m = managersByDept.get("a");
+        assertNotNull(m);
+        assertEquals("2", m.id);
+
+        m = managersByDept.get("b");
+        assertNotNull(m);
+        assertEquals("4", m.id);
+
+        EntityCursor<Manager> managers = managersByDept.entities();
+        try {
+            m = managers.next();
+            assertNotNull(m);
+            assertEquals("2", m.id);
+            m = managers.next();
+            assertNotNull(m);
+            assertEquals("3", m.id);
+            m = managers.next();
+            assertNotNull(m);
+            assertEquals("4", m.id);
+            m = managers.next();
+            assertNull(m);
+        } finally {
+            managers.close();
+        }
+
+        /* Getting a subclass index for the entity class is also allowed. */
+        store.getSubclassIndex
+            (employeesById, Employee.class, String.class, "other");
+
+        /* Getting a subclass index for a base class key is not allowed. */
+        try {
+            store.getSubclassIndex
+                (employeesById, Manager.class, String.class, "other");
+            fail();
+        } catch (IllegalArgumentException expected) {
+        }
+
+        close();
+    }
+
+    public void testAddSecKey()
+        throws IOException, DatabaseException {
+
+        open();
+        PrimaryIndex<String, Employee> employeesById =
+            store.getPrimaryIndex(String.class, Employee.class);
+        employeesById.put(new Employee("1"));
+        assertTrue(!hasEntityKey("dept"));
+        close();
+
+        open();
+        employeesById = store.getPrimaryIndex(String.class, Employee.class);
+        assertTrue(!hasEntityKey("dept"));
+        employeesById.put(new Manager("2", "a"));
+        assertTrue(hasEntityKey("dept"));
+        close();
+
+        open();
+        assertTrue(hasEntityKey("dept"));
+        close();
+
+        open();
+        employeesById = store.getPrimaryIndex(String.class, Employee.class);
+        assertTrue(!hasEntityKey("salary"));
+        employeesById.put(new SalariedManager("3", "a", "111"));
+        assertTrue(hasEntityKey("salary"));
+        close();
+
+        open();
+        assertTrue(hasEntityKey("dept"));
+        assertTrue(hasEntityKey("salary"));
+        close();
+    }
+
+    private boolean hasEntityKey(String keyName) {
+        return store.getModel().
+               getRawType(Employee.class.getName()).
+               getEntityMetadata().
+               getSecondaryKeys().
+               keySet().
+               contains(keyName);
+    }
+
+    @Entity
+    private static class Employee {
+
+        @PrimaryKey
+        String id;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        String other;
+
+        Employee(String id) {
+            this.id = id;
+        }
+
+        private Employee() {}
+    }
+
+    @Persistent
+    private static class Manager extends Employee {
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        String dept;
+
+        Manager(String id, String dept) {
+            super(id);
+            this.dept = dept;
+        }
+
+        private Manager() {}
+    }
+
+    @Persistent
+    private static class SalariedManager extends Manager {
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        String salary;
+
+        SalariedManager(String id, String dept, String salary) {
+            super(id, dept);
+            this.salary = salary;
+        }
+
+        private SalariedManager() {}
+    }
+}
diff --git a/test/com/sleepycat/persist/test/TestVersionCompatibility.java b/test/com/sleepycat/persist/test/TestVersionCompatibility.java
new file mode 100644
index 0000000000000000000000000000000000000000..f72414733db2e0335bbf974aac79c97dba9da3f2
--- /dev/null
+++ b/test/com/sleepycat/persist/test/TestVersionCompatibility.java
@@ -0,0 +1,123 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000,2008 Oracle.  All rights reserved.
+ *
+ * $Id: TestVersionCompatibility.java,v 1.5 2008/01/07 14:29:16 cwl Exp $
+ */
+package com.sleepycat.persist.test;
+
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+/**
+ * Test that the catalog and data records created with a different version of
+ * the DPL are compatible with this version.  This test is run as follows:
+ *
+ * 1) Run EvolveTest with version X of JE.  For example:
+ *
+ *    cd /jeX
+ *    ant -Dtestcase=com.sleepycat.persist.test.EvolveTest test
+ * or
+ *    ant -Dsuite=persist/test test
+ * or
+ *    ant test
+ *
+ * Step (1) leaves the log files from all tests in the testevolve directory.
+ *
+ * 2) Run TestVersionCompatibility with version Y of JE, passing the JE
+ * testevolve directory from step (1).  For example:
+ *
+ *    cd /jeY
+ *    ant -Dtestcase=com.sleepycat.persist.test.TestVersionCompatibility \
+ *        -Dunittest.testevolvedir=/jeX/build/test/testevolve \
+ *        test
+ *
+ * Currently there are 2 sets of X and Y that can be tested, one set for the
+ * CVS branch and one for the CVS trunk:
+ *
+ *  CVS     Version X   Version Y
+ *  branch  je-3_2_56   je-3_2_57 or greater
+ *  trunk   je-3_3_41   je-3_3_42 or greater 
+ *
+ * This test is not run as part of the regular JE test suite, because the
+ * class name does not end with Test.  It must be run separately as described
+ * above.
+ *
+ * @author Mark Hayes
+ */
+public class TestVersionCompatibility extends EvolveTestBase {
+
+    public static Test suite()
+        throws Exception {
+
+        /*
+         * Run TestVersionCompatibility tests first to check previously evolved
+         * data without changing it.  Then run the EvolveTest to try evolving
+         * it.
+         */
+        TestSuite suite = new TestSuite();
+        Enumeration e = getSuite(TestVersionCompatibility.class).tests();
+        while (e.hasMoreElements()) {
+            EvolveTestBase test = (EvolveTestBase) e.nextElement();
+            if (test.getTestInitHome(true /*evolved*/).exists()) {
+                suite.addTest(test);
+            }
+        }
+        e = getSuite(EvolveTest.class).tests();
+        while (e.hasMoreElements()) {
+            EvolveTestBase test = (EvolveTestBase) e.nextElement();
+            if (test.getTestInitHome(true /*evolved*/).exists()) {
+                suite.addTest(test);
+            }
+        }
+        return suite;
+    }
+
+    boolean useEvolvedClass() {
+        return true;
+    }
+
+    @Override
+    public void setUp()
+        throws IOException {
+
+        envHome = getTestInitHome(true /*evolved*/);
+    }
+
+    public void testPreviouslyEvolved()
+        throws Exception {
+
+        /* If the store cannot be opened, this test is not appropriate. */
+        if (caseObj.getStoreOpenException() != null) {
+            return;
+        }
+
+        /* The update occurred previously. */
+        caseObj.updated = true;
+
+        openEnv();
+
+        /* Open read-only and double check that everything is OK. */
+        openStoreReadOnly();
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        caseObj.readObjects(store, false /*doUpdate*/);
+        caseObj.checkEvolvedModel
+            (store.getModel(), env, true /*oldTypesExist*/);
+        closeStore();
+
+        /* Check raw objects. */
+        openRawStore();
+        caseObj.checkEvolvedModel
+            (rawStore.getModel(), env, true /*oldTypesExist*/);
+        caseObj.readRawObjects
+            (rawStore, true /*expectEvolved*/, true /*expectUpdated*/);
+        closeRawStore();
+
+        closeAll();
+    }
+}
diff --git a/test/com/sleepycat/util/test/ExceptionWrapperTest.java b/test/com/sleepycat/util/test/ExceptionWrapperTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..1b682b952dd9f3db0b963a22ba6225e38a06c733
--- /dev/null
+++ b/test/com/sleepycat/util/test/ExceptionWrapperTest.java
@@ -0,0 +1,144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ExceptionWrapperTest.java,v 1.18.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.IOExceptionWrapper;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class ExceptionWrapperTest extends TestCase {
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(ExceptionWrapperTest.class);
+        return suite;
+    }
+
+    public ExceptionWrapperTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("ExceptionWrapperTest." + getName());
+    }
+
+    public void testIOWrapper()
+        throws Exception {
+
+        try {
+            throw new IOExceptionWrapper(new RuntimeException("msg"));
+        } catch (IOException e) {
+            Exception ee = ExceptionUnwrapper.unwrap(e);
+            assertTrue(ee instanceof RuntimeException);
+            assertEquals("msg", ee.getMessage());
+
+            Throwable t = ExceptionUnwrapper.unwrapAny(e);
+            assertTrue(t instanceof RuntimeException);
+            assertEquals("msg", t.getMessage());
+        }
+    }
+
+    public void testRuntimeWrapper()
+        throws Exception {
+
+        try {
+            throw new RuntimeExceptionWrapper(new IOException("msg"));
+        } catch (RuntimeException e) {
+            Exception ee = ExceptionUnwrapper.unwrap(e);
+            assertTrue(ee instanceof IOException);
+            assertEquals("msg", ee.getMessage());
+
+            Throwable t = ExceptionUnwrapper.unwrapAny(e);
+            assertTrue(t instanceof IOException);
+            assertEquals("msg", t.getMessage());
+        }
+    }
+
+    public void testErrorWrapper()
+        throws Exception {
+
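+        /* unwrap rethrows an Error cause; unwrapAny returns it instead. */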
+        try {
+            throw new RuntimeExceptionWrapper(new Error("msg"));
+        } catch (RuntimeException e) {
+            try {
+                ExceptionUnwrapper.unwrap(e);
+                fail();
+            } catch (Error ee) {
+                assertTrue(ee instanceof Error);
+                assertEquals("msg", ee.getMessage());
+            }
+
+            Throwable t = ExceptionUnwrapper.unwrapAny(e);
+            assertTrue(t instanceof Error);
+            assertEquals("msg", t.getMessage());
+        }
+    }
+
+    /**
+     * Generates a stack trace for a nested exception and checks the output
+     * for the nested exception.
+     */
+    public void testStackTrace() {
+
+        /* Nested stack traces are not available in Java 1.3. */
+        String version = System.getProperty("java.version");
+        if (version.startsWith("1.3.")) {
+            return;
+        }
+
+        Exception ex = new Exception("some exception");
+        String causedBy = "Caused by: java.lang.Exception: some exception";
+
+        try {
+            throw new RuntimeExceptionWrapper(ex);
+        } catch (RuntimeException e) {
+            StringWriter sw = new StringWriter();
+            e.printStackTrace(new PrintWriter(sw));
+            String s = sw.toString();
+            assertTrue(s.indexOf(causedBy) != -1);
+        }
+
+        try {
+            throw new IOExceptionWrapper(ex);
+        } catch (IOException e) {
+            StringWriter sw = new StringWriter();
+            e.printStackTrace(new PrintWriter(sw));
+            String s = sw.toString();
+            assertTrue(s.indexOf(causedBy) != -1);
+        }
+    }
+}
diff --git a/test/com/sleepycat/util/test/FastOutputStreamTest.java b/test/com/sleepycat/util/test/FastOutputStreamTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b38434396d248d233e36740a5d9c459dcebcc3d6
--- /dev/null
+++ b/test/com/sleepycat/util/test/FastOutputStreamTest.java
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: FastOutputStreamTest.java,v 1.7.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class FastOutputStreamTest extends TestCase {
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(FastOutputStreamTest.class);
+        return suite;
+    }
+
+    public FastOutputStreamTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("FastOutputStreamTest." + getName());
+    }
+
+    public void testBufferSizing()
+        throws Exception {
+
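+        /*
+         * Each overflow is expected to grow the buffer to (2 * size) + 1, as
+         * the assertions below verify.
+         */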
+        FastOutputStream fos = new FastOutputStream();
+        assertEquals
+            (FastOutputStream.DEFAULT_INIT_SIZE, fos.getBufferBytes().length);
+
+        /* Write X+1 bytes, expect array size 2X+1 */
+        fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]);
+        assertEquals
+            ((FastOutputStream.DEFAULT_INIT_SIZE * 2) + 1,
+             fos.getBufferBytes().length);
+
+        /* Write X+1 bytes, expect array size 4X+3 = (2(2X+1) + 1) */
+        fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]);
+        assertEquals
+            ((FastOutputStream.DEFAULT_INIT_SIZE * 4) + 3,
+             fos.getBufferBytes().length);
+    }
+}
diff --git a/test/com/sleepycat/util/test/PackedIntegerTest.java b/test/com/sleepycat/util/test/PackedIntegerTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..602d49ae82855e8d8fb79427e0535368363c758a
--- /dev/null
+++ b/test/com/sleepycat/util/test/PackedIntegerTest.java
@@ -0,0 +1,191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: PackedIntegerTest.java,v 1.6.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import com.sleepycat.util.PackedInteger;
+import junit.framework.Test;
+import junit.framework.TestCase;
+
+public class PackedIntegerTest extends TestCase {
+    static final long V119 = 119L;
+    static final long MAX_1 = 0xFFL;
+    static final long MAX_2 = 0xFFFFL;
+    static final long MAX_3 = 0xFFFFFFL;
+    static final long MAX_4 = 0xFFFFFFFFL;
+    static final long MAX_5 = 0xFFFFFFFFFFL;
+    static final long MAX_6 = 0xFFFFFFFFFFFFL;
+    static final long MAX_7 = 0xFFFFFFFFFFFFFFL;
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite() {
+
+        return new PackedIntegerTest();
+    }
+
+    public PackedIntegerTest() {
+
+        super("PackedIntegerTest");
+    }
+
+    public void runTest() {
+
+        /* Packed int tests. */
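+        /*
+         * Values in [-119, 119] are written in a single byte; the ranges
+         * below exercise the boundaries where the encoded length changes.
+         */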
+
+        testIntRange(-V119, V119, 1);
+
+        testIntRange(-MAX_1 - V119, -1 - V119, 2);
+        testIntRange(1 + V119, MAX_1 + V119, 2);
+
+        testIntRange(-MAX_2 - V119, -MAX_2 + 99, 3);
+        testIntRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3);
+        testIntRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3);
+        testIntRange(MAX_2 - 99, MAX_2 + V119, 3);
+
+        testIntRange(-MAX_3 - V119, -MAX_3 + 99, 4);
+        testIntRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4);
+        testIntRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4);
+        testIntRange(MAX_3 - 99, MAX_3 + V119, 4);
+
+        testIntRange(Integer.MIN_VALUE, Integer.MIN_VALUE + 99, 5);
+        testIntRange(Integer.MAX_VALUE - 99, Integer.MAX_VALUE, 5);
+
+        /* Packed long tests. */
+
+        testLongRange(-V119, V119, 1);
+
+        testLongRange(-MAX_1 - V119, -1 - V119, 2);
+        testLongRange(1 + V119, MAX_1 + V119, 2);
+
+        testLongRange(-MAX_2 - V119, -MAX_2 + 99, 3);
+        testLongRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3);
+        testLongRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3);
+        testLongRange(MAX_2 - 99, MAX_2 + V119, 3);
+
+        testLongRange(-MAX_3 - V119, -MAX_3 + 99, 4);
+        testLongRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4);
+        testLongRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4);
+        testLongRange(MAX_3 - 99, MAX_3 + V119, 4);
+
+        testLongRange(-MAX_4 - V119, -MAX_4 + 99, 5);
+        testLongRange(-MAX_3 - V119 - 99, -MAX_3 - V119 - 1, 5);
+        testLongRange(MAX_3 + V119 + 1, MAX_3 + V119 + 99, 5);
+        testLongRange(MAX_4 - 99, MAX_4 + V119, 5);
+
+        testLongRange(-MAX_5 - V119, -MAX_5 + 99, 6);
+        testLongRange(-MAX_4 - V119 - 99, -MAX_4 - V119 - 1, 6);
+        testLongRange(MAX_4 + V119 + 1, MAX_4 + V119 + 99, 6);
+        testLongRange(MAX_5 - 99, MAX_5 + V119, 6);
+
+        testLongRange(-MAX_6 - V119, -MAX_6 + 99, 7);
+        testLongRange(-MAX_5 - V119 - 99, -MAX_5 - V119 - 1, 7);
+        testLongRange(MAX_5 + V119 + 1, MAX_5 + V119 + 99, 7);
+        testLongRange(MAX_6 - 99, MAX_6 + V119, 7);
+
+        testLongRange(-MAX_7 - V119, -MAX_7 + 99, 8);
+        testLongRange(-MAX_6 - V119 - 99, -MAX_6 - V119 - 1, 8);
+        testLongRange(MAX_6 + V119 + 1, MAX_6 + V119 + 99, 8);
+        testLongRange(MAX_7 - 99, MAX_7 + V119, 8);
+
+        testLongRange(Long.MIN_VALUE, Long.MIN_VALUE + 99, 9);
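+        /*
+         * Long.MAX_VALUE itself is excluded so that the loop counter in
+         * testLongRange does not wrap around.
+         */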
+        testLongRange(Long.MAX_VALUE - 99, Long.MAX_VALUE - 1, 9);
+    }
+
+    private void testIntRange(long firstValue,
+                              long lastValue,
+                              int bytesExpected) {
+
+        byte[] buf = new byte[1000];
+        int off = 0;
+
+        for (long longI = firstValue; longI <= lastValue; longI += 1) {
+            int i = (int) longI;
+            int before = off;
+            off = PackedInteger.writeInt(buf, off, i);
+            int bytes = off - before;
+            if (bytes != bytesExpected) {
+                fail("output of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+            bytes = PackedInteger.getWriteIntLength(i);
+            if (bytes != bytesExpected) {
+                fail("count of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+        }
+
+        off = 0;
+
+        for (long longI = firstValue; longI <= lastValue; longI += 1) {
+            int i = (int) longI;
+            int bytes = PackedInteger.getReadIntLength(buf, off);
+            if (bytes != bytesExpected) {
+                fail("count of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+            int value = PackedInteger.readInt(buf, off);
+            if (value != i) {
+                fail("input of value=" + i + " but got=" + value);
+            }
+            off += bytes;
+        }
+    }
+
+    private void testLongRange(long firstValue,
+                               long lastValue,
+                               int bytesExpected) {
+
+        byte[] buf = new byte[2000];
+        int off = 0;
+
+        for (long longI = firstValue; longI <= lastValue; longI += 1) {
+            long i = longI;
+            int before = off;
+            off = PackedInteger.writeLong(buf, off, i);
+            int bytes = off - before;
+            if (bytes != bytesExpected) {
+                fail("output of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+            bytes = PackedInteger.getWriteLongLength(i);
+            if (bytes != bytesExpected) {
+                fail("count of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+        }
+
+        off = 0;
+
+        for (long longI = firstValue; longI <= lastValue; longI += 1) {
+            long i = longI;
+            int bytes = PackedInteger.getReadLongLength(buf, off);
+            if (bytes != bytesExpected) {
+                fail("count of value=" + i + " bytes=" + bytes +
+                     " bytesExpected=" + bytesExpected);
+            }
+            long value = PackedInteger.readLong(buf, off);
+            if (value != i) {
+                fail("input of value=" + i + " but got=" + value);
+            }
+            off += bytes;
+        }
+    }
+}
diff --git a/test/com/sleepycat/util/test/SharedTestUtils.java b/test/com/sleepycat/util/test/SharedTestUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..e19351d14e4b38f5afd0557275ca4903704fb4c5
--- /dev/null
+++ b/test/com/sleepycat/util/test/SharedTestUtils.java
@@ -0,0 +1,192 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: SharedTestUtils.java,v 1.1.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.DatabaseConfig;
+
+/**
+ * Test utility methods shared by JE and DB core tests.  The collections and
+ * persist package tests are used in both JE and DB core.
+ */
+public class SharedTestUtils {
+
+    /* Common system properties for running tests */
+    public static String DEST_DIR = "testdestdir";
+    public static String NO_SYNC = "txnnosync";
+    public static String LONG_TEST =  "longtest";
+
+    public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig();
+    static {
+        DBCONFIG_CREATE.setAllowCreate(true);
+    }
+
+    private static File getTestDir() {
+        String dir = System.getProperty(DEST_DIR);
+        if (dir == null || dir.length() == 0) {
+            throw new IllegalArgumentException
+                ("System property must be set to test data directory: " +
+                 DEST_DIR);
+        }
+        return new File(dir);
+    }
+
+    /**
+     * @return true if long running tests are enabled via setting the system
+     * property longtest=true.
+     */
+    public static boolean runLongTests() {
+        String longTestProp =  System.getProperty(LONG_TEST);
+        if ((longTestProp != null)  &&
+            longTestProp.equalsIgnoreCase("true")) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    public static void printTestName(String name) {
+        // don't want verbose printing for now
+        // System.out.println(name);
+    }
+
+    public static File getExistingDir(String name)
+        throws IOException {
+
+        File dir = new File(getTestDir(), name);
+        if (!dir.exists() || !dir.isDirectory()) {
+            throw new IllegalStateException(
+                    "Not an existing directory: " + dir);
+        }
+        return dir;
+    }
+
+    public static File getNewDir()
+        throws IOException {
+
+        return getNewDir("test-dir");
+    }
+
+    public static void emptyDir(File dir)
+        throws IOException {
+
+        if (dir.isDirectory()) {
+            String[] files = dir.list();
+            if (files != null) {
+                for (int i = 0; i < files.length; i += 1) {
+                    new File(dir, files[i]).delete();
+                }
+            }
+        } else {
+            dir.delete();
+            dir.mkdirs();
+        }
+    }
+
+    public static File getNewDir(String name)
+        throws IOException {
+
+        File dir = new File(getTestDir(), name);
+        emptyDir(dir);
+        return dir;
+    }
+
+    public static File getNewFile()
+        throws IOException {
+
+        return getNewFile("test-file");
+    }
+
+    public static File getNewFile(String name)
+        throws IOException {
+
+        return getNewFile(getTestDir(), name);
+    }
+
+    public static File getNewFile(File dir, String name)
+        throws IOException {
+
+        File file = new File(dir, name);
+        file.delete();
+        return file;
+    }
+
+    public static boolean copyResource(Class cls, String fileName, File toDir)
+        throws IOException {
+
+        InputStream in = cls.getResourceAsStream("testdata/" + fileName);
+        if (in == null) {
+            return false;
+        }
+        in = new BufferedInputStream(in);
+        File file = new File(toDir, fileName);
+        OutputStream out = new FileOutputStream(file);
+        out = new BufferedOutputStream(out);
+        int c;
+        while ((c = in.read()) >= 0) out.write(c);
+        in.close();
+        out.close();
+        return true;
+    }
+
+    public static String qualifiedTestName(TestCase test) {
+
+        String s = test.getClass().getName();
+        int i = s.lastIndexOf('.');
+        if (i >= 0) {
+            s = s.substring(i + 1);
+        }
+        return s + '.' + test.getName();
+    }
+
+    /**
+     * Copies all files in fromDir to toDir.  Does not copy subdirectories.
+     */
+    public static void copyFiles(File fromDir, File toDir)
+        throws IOException {
+
+        String[] names = fromDir.list();
+        if (names != null) {
+            for (int i = 0; i < names.length; i += 1) {
+                File fromFile = new File(fromDir, names[i]);
+                if (fromFile.isDirectory()) {
+                    continue;
+                }
+                File toFile = new File(toDir, names[i]);
+                int len = (int) fromFile.length();
+                byte[] data = new byte[len];
+                FileInputStream fis = null;
+                FileOutputStream fos = null;
+                try {
+                    fis = new FileInputStream(fromFile);
+                    fos = new FileOutputStream(toFile);
+                    /* read() may return short counts, so loop until full. */
+                    int off = 0;
+                    while (off < len) {
+                        int n = fis.read(data, off, len - off);
+                        if (n < 0) {
+                            break;
+                        }
+                        off += n;
+                    }
+                    fos.write(data, 0, off);
+                } finally {
+                    if (fis != null) {
+                        fis.close();
+                    }
+                    if (fos != null) {
+                        fos.close();
+                    }
+                }
+            }
+        }
+    }
+}
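
The helpers above all resolve paths under the directory named by the "testdestdir" system property; a minimal sketch of driving them directly, assuming an illustrative scratch path, might be:

    import java.io.File;

    import com.sleepycat.util.test.SharedTestUtils;

    public class SharedTestUtilsExample {
        public static void main(String[] args) throws Exception {
            /* Normally supplied on the command line as -Dtestdestdir=<dir>. */
            System.setProperty(SharedTestUtils.DEST_DIR, "build/test/scratch");

            File dir = SharedTestUtils.getNewDir("example-test");      // created empty
            File file = SharedTestUtils.getNewFile(dir, "sample.bin"); // deleted if present
            System.out.println(dir + " / " + file);
        }
    }
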
diff --git a/test/com/sleepycat/util/test/TestEnv.java b/test/com/sleepycat/util/test/TestEnv.java
new file mode 100644
index 0000000000000000000000000000000000000000..6faaaa2568bb4dca52c243c56524be3bf3657673
--- /dev/null
+++ b/test/com/sleepycat/util/test/TestEnv.java
@@ -0,0 +1,144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TestEnv.java,v 1.1.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * @author Mark Hayes
+ */
+public class TestEnv {
+
+    public static final TestEnv BDB;
+    public static final TestEnv CDB;
+    public static final TestEnv TXN;
+    static {
+        EnvironmentConfig config;
+
+        config = newEnvConfig();
+        BDB = new TestEnv("bdb", config);
+
+        if (DbCompat.CDB) {
+            config = newEnvConfig();
+            DbCompat.setInitializeCDB(config, true);
+            CDB = new TestEnv("cdb", config);
+        } else {
+            CDB = null;
+        }
+
+        config = newEnvConfig();
+        config.setTransactional(true);
+        DbCompat.setInitializeLocking(config, true);
+        TXN = new TestEnv("txn", config);
+    }
+
+    private static EnvironmentConfig newEnvConfig() {
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setTxnNoSync(Boolean.getBoolean(SharedTestUtils.NO_SYNC));
+        if (DbCompat.MEMORY_SUBSYSTEM) {
+            DbCompat.setInitializeCache(config, true);
+        }
+        return config;
+    }
+
+    public static final TestEnv[] ALL;
+    static {
+        if (DbCompat.CDB) {
+            ALL = new TestEnv[] { BDB, CDB, TXN };
+        } else {
+            ALL = new TestEnv[] { BDB, TXN };
+        }
+    }
+
+    private String name;
+    private EnvironmentConfig config;
+
+    protected TestEnv(String name, EnvironmentConfig config) {
+
+        this.name = name;
+        this.config = config;
+    }
+
+    public String getName() {
+
+        return name;
+    }
+
+    public EnvironmentConfig getConfig() {
+        return config;
+    }
+
+    void copyConfig(EnvironmentConfig copyToConfig) {
+        DbCompat.setInitializeCache
+            (copyToConfig, DbCompat.getInitializeCache(config));
+        DbCompat.setInitializeLocking
+            (copyToConfig, DbCompat.getInitializeLocking(config));
+        DbCompat.setInitializeCDB
+            (copyToConfig, DbCompat.getInitializeCDB(config));
+        copyToConfig.setTransactional(config.getTransactional());
+    }
+
+    public boolean isTxnMode() {
+
+        return config.getTransactional();
+    }
+
+    public boolean isCdbMode() {
+
+        return DbCompat.getInitializeCDB(config);
+    }
+
+    public Environment open(String testName)
+        throws IOException, DatabaseException {
+
+        return open(testName, true);
+    }
+
+    public Environment open(String testName, boolean create)
+        throws IOException, DatabaseException {
+
+        config.setAllowCreate(create);
+        /* OLDEST deadlock detection on DB matches the use of timeouts on JE.*/
+        DbCompat.setLockDetectModeOldest(config);
+        File dir = getDirectory(testName, create);
+        return newEnvironment(dir, config);
+    }
+
+    /**
+     * Is overridden in XACollectionTest.
+     */
+    protected Environment newEnvironment(File dir, EnvironmentConfig config)
+        throws DatabaseException, IOException {
+
+        return new Environment(dir, config);
+    }
+
+    public File getDirectory(String testName)
+        throws IOException {
+
+        return getDirectory(testName, true);
+    }
+
+    public File getDirectory(String testName, boolean create)
+        throws IOException {
+
+        if (create) {
+            return SharedTestUtils.getNewDir(testName);
+        } else {
+            return SharedTestUtils.getExistingDir(testName);
+        }
+    }
+}
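
Tests that want to run against every environment mode typically iterate the constants above; a minimal sketch (the test name "example" is arbitrary, and -Dtestdestdir must be set as described for SharedTestUtils) might be:

    import com.sleepycat.je.Environment;
    import com.sleepycat.util.test.TestEnv;

    public class TestEnvExample {
        public static void main(String[] args) throws Exception {
            for (TestEnv testEnv : TestEnv.ALL) {   // { BDB, TXN } under JE
                Environment env = testEnv.open("example");
                try {
                    System.out.println(testEnv.getName() +
                                       " txn=" + testEnv.isTxnMode() +
                                       " cdb=" + testEnv.isCdbMode());
                } finally {
                    env.close();
                }
            }
        }
    }
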
diff --git a/test/com/sleepycat/util/test/TxnTestCase.java b/test/com/sleepycat/util/test/TxnTestCase.java
new file mode 100644
index 0000000000000000000000000000000000000000..1ddb0d2d24710a247aa48206e374e516ad03f622
--- /dev/null
+++ b/test/com/sleepycat/util/test/TxnTestCase.java
@@ -0,0 +1,223 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: TxnTestCase.java,v 1.1.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Enumeration;
+
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+/**
+ * Permutes test cases over three transaction types: null (non-transactional),
+ * auto-commit, and user (explicit).
+ *
+ * <p>Overrides runTest, setUp and tearDown to open/close the environment and to
+ * set up protected members for use by test cases.</p>
+ *
+ * <p>If a subclass needs to override setUp or tearDown, the overridden method
+ * should call super.setUp or super.tearDown.</p>
+ *
+ * <p>When writing a test case based on this class, write it as if a user txn
+ * were always used: call txnBegin, txnCommit and txnAbort for all write
+ * operations.  Use the isTransactional protected field for setup of a database
+ * config.</p>
+ */
+public abstract class TxnTestCase extends TestCase {
+
+    public static final String TXN_NULL = "txn-null";
+    public static final String TXN_AUTO = "txn-auto";
+    public static final String TXN_USER = "txn-user";
+
+    protected File envHome;
+    protected Environment env;
+    protected EnvironmentConfig envConfig;
+    protected String txnType;
+    protected boolean isTransactional;
+
+    /**
+     * Returns a txn test suite.  If txnTypes is null, all three types are run.
+     */
+    public static TestSuite txnTestSuite(Class testCaseClass,
+                                         EnvironmentConfig envConfig,
+                                         String[] txnTypes) {
+        if (txnTypes == null) {
+            txnTypes = new String[] { TxnTestCase.TXN_NULL,
+                                      TxnTestCase.TXN_USER,
+                                      TxnTestCase.TXN_AUTO };
+        }
+        if (envConfig == null) {
+            envConfig = new EnvironmentConfig();
+            envConfig.setAllowCreate(true);
+        }
+        TestSuite suite = new TestSuite();
+        for (int i = 0; i < txnTypes.length; i += 1) {
+            TestSuite baseSuite = new TestSuite(testCaseClass);
+            Enumeration e = baseSuite.tests();
+            while (e.hasMoreElements()) {
+                TxnTestCase test = (TxnTestCase) e.nextElement();
+                test.txnInit(envConfig, txnTypes[i]);
+                suite.addTest(test);
+            }
+        }
+        return suite;
+    }
+
+    private void txnInit(EnvironmentConfig envConfig, String txnType) {
+
+        this.envConfig = envConfig;
+        this.txnType = txnType;
+        isTransactional = (txnType != TXN_NULL);
+    }
+
+    public void setUp()
+        throws Exception {
+
+        envHome = SharedTestUtils.getNewDir();
+    }
+
+    public void runTest()
+        throws Throwable {
+
+        openEnv();
+        super.runTest();
+        closeEnv();
+    }
+
+    public void tearDown()
+        throws Exception {
+
+        /* Set test name for reporting; cannot be done in the ctor or setUp. */
+        setName(txnType + ':' + getName());
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Throwable e) {
+                System.out.println("tearDown: " + e);
+            }
+            env = null;
+        }
+
+        try {
+            SharedTestUtils.emptyDir(envHome);
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+    }
+
+    /**
+     * Closes the environment and sets the env field to null.
+     * Used for closing and reopening the environment.
+     */
+    public void closeEnv()
+        throws DatabaseException {
+
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /**
+     * Opens the environment based on the txnType for this test case.
+     * Used for closing and reopening the environment.
+     */
+    public void openEnv()
+        throws IOException, DatabaseException {
+
+        if (txnType == TXN_NULL) {
+            TestEnv.BDB.copyConfig(envConfig);
+            env = new Environment(envHome, envConfig);
+        } else if (txnType == TXN_AUTO) {
+            TestEnv.TXN.copyConfig(envConfig);
+            env = new Environment(envHome, envConfig);
+        } else if (txnType == TXN_USER) {
+            TestEnv.TXN.copyConfig(envConfig);
+            env = new Environment(envHome, envConfig);
+        } else {
+            assert false;
+        }
+    }
+
+    /**
+     * Begin a txn if in TXN_USER mode; otherwise return null.
+     */
+    protected Transaction txnBegin()
+        throws DatabaseException {
+
+        return txnBegin(null, null);
+    }
+
+    /**
+     * Begin a txn if in TXN_USER mode; otherwise return null.
+     */
+    protected Transaction txnBegin(Transaction parentTxn,
+                                   TransactionConfig config)
+        throws DatabaseException {
+
+        if (txnType == TXN_USER) {
+            return env.beginTransaction(parentTxn, config);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null.
+     */
+    protected Transaction txnBeginCursor()
+        throws DatabaseException {
+
+        return txnBeginCursor(null, null);
+    }
+
+    /**
+     * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null.
+     */
+    protected Transaction txnBeginCursor(Transaction parentTxn,
+                                         TransactionConfig config)
+        throws DatabaseException {
+
+        if (txnType == TXN_USER || txnType == TXN_AUTO) {
+            return env.beginTransaction(parentTxn, config);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Commit a txn if non-null.
+     */
+    protected void txnCommit(Transaction txn)
+        throws DatabaseException {
+
+        if (txn != null) {
+            txn.commit();
+        }
+    }
+
+    /**
+     * Abort a txn if non-null.
+     */
+    protected void txnAbort(Transaction txn)
+        throws DatabaseException {
+
+        if (txn != null) {
+            txn.abort();
+        }
+    }
+}
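
To make the javadoc above concrete, a subclass usually exposes a suite() that calls txnTestSuite and writes each test as if a user transaction were always present; a minimal sketch with illustrative class and database names (not part of the patch) is:

    import junit.framework.Test;

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Transaction;
    import com.sleepycat.util.test.TxnTestCase;

    public class ExampleTxnTest extends TxnTestCase {

        public static Test suite() {
            /* Permutes testPut over null, auto-commit and user transactions. */
            return txnTestSuite(ExampleTxnTest.class, null, null);
        }

        public void testPut() throws Exception {
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setTransactional(isTransactional);
            Database db = env.openDatabase(null, "example", dbConfig);
            try {
                Transaction txn = txnBegin();   // null unless in TXN_USER mode
                db.put(txn, new DatabaseEntry(new byte[] { 1 }),
                       new DatabaseEntry(new byte[] { 2 }));
                txnCommit(txn);                 // no-op when txn is null
            } finally {
                db.close();
            }
        }
    }
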
diff --git a/test/com/sleepycat/util/test/UtfTest.java b/test/com/sleepycat/util/test/UtfTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8333f830e3927289938532e3813de7775c5d580
--- /dev/null
+++ b/test/com/sleepycat/util/test/UtfTest.java
@@ -0,0 +1,168 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: UtfTest.java,v 1.21.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.DataOutputStream;
+import java.util.Arrays;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.UtfOps;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class UtfTest extends TestCase {
+
+    public static void main(String[] args)
+        throws Exception {
+
+        junit.framework.TestResult tr =
+            junit.textui.TestRunner.run(suite());
+        if (tr.errorCount() > 0 ||
+            tr.failureCount() > 0) {
+            System.exit(1);
+        } else {
+            System.exit(0);
+        }
+    }
+
+    public static Test suite()
+        throws Exception {
+
+        TestSuite suite = new TestSuite(UtfTest.class);
+        return suite;
+    }
+
+    public UtfTest(String name) {
+
+        super(name);
+    }
+
+    public void setUp() {
+
+        SharedTestUtils.printTestName("UtfTest." + getName());
+    }
+
+    /**
+     * Compares the UtfOps implementation to the java.io.DataOutputStream
+     * (and by implication DataInputStream) implementation, character for
+     * character in the full Unicode set.
+     */
+    public void testMultibyte()
+        throws Exception {
+
+        char c = 0;
+        byte[] buf = new byte[10];
+        byte[] javaBuf = new byte[10];
+        char[] cArray = new char[1];
+        FastOutputStream javaBufStream = new FastOutputStream(javaBuf);
+        DataOutputStream javaOutStream = new DataOutputStream(javaBufStream);
+
+        try {
+            for (int cInt = Character.MIN_VALUE; cInt <= Character.MAX_VALUE;
+                 cInt += 1) {
+                c = (char) cInt;
+                cArray[0] = c;
+                int byteLen = UtfOps.getByteLength(cArray);
+
+                javaBufStream.reset();
+                javaOutStream.writeUTF(new String(cArray));
+                int javaByteLen = javaBufStream.size() - 2;
+
+                if (byteLen != javaByteLen) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps size " + byteLen +
+                         " != JavaIO size " + javaByteLen);
+                }
+
+                Arrays.fill(buf, (byte) 0);
+                UtfOps.charsToBytes(cArray, 0, buf, 0, 1);
+
+                if (byteLen == 1 && buf[0] == (byte) 0xff) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " was encoded as FF, which is reserved for null");
+                }
+
+                for (int i = 0; i < byteLen; i += 1) {
+                    if (buf[i] != javaBuf[i + 2]) {
+                        fail("Character 0x" + Integer.toHexString(c) +
+                             " byte offset " + i +
+                             " UtfOps byte " + Integer.toHexString(buf[i]) +
+                             " != JavaIO byte " +
+                             Integer.toHexString(javaBuf[i + 2]));
+                    }
+                }
+
+                int charLen = UtfOps.getCharLength(buf, 0, byteLen);
+                if (charLen != 1) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps char len " + charLen +
+                         " but should be one");
+                }
+
+                cArray[0] = (char) 0;
+                int len = UtfOps.bytesToChars(buf, 0, cArray, 0, byteLen,
+                                              true);
+                if (len != byteLen) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps bytesToChars(w/byteLen) len " + len +
+                         " but should be " + byteLen);
+                }
+
+                if (cArray[0] != c) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps bytesToChars(w/byteLen) char " +
+                         Integer.toHexString(cArray[0]));
+                }
+
+                cArray[0] = (char) 0;
+                len = UtfOps.bytesToChars(buf, 0, cArray, 0, 1, false);
+                if (len != byteLen) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps bytesToChars(w/charLen) len " + len +
+                         " but should be " + byteLen);
+                }
+
+                if (cArray[0] != c) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps bytesToChars(w/charLen) char " +
+                         Integer.toHexString(cArray[0]));
+                }
+
+                String s = new String(cArray, 0, 1);
+                byte[] sBytes = UtfOps.stringToBytes(s);
+                if (sBytes.length != byteLen) {
+                    fail("Character 0x" + Integer.toHexString(c) +
+                         " UtfOps stringToBytes() len " + sBytes.length +
+                         " but should be " + byteLen);
+                }
+
+                for (int i = 0; i < byteLen; i += 1) {
+                    if (sBytes[i] != javaBuf[i + 2]) {
+                        fail("Character 0x" + Integer.toHexString(c) +
+                             " byte offset " + i +
+                             " UtfOps byte " + Integer.toHexString(sBytes[i]) +
+                             " != JavaIO byte " +
+                             Integer.toHexString(javaBuf[i + 2]));
+                    }
+                }
+            }
+        } catch (Exception e) {
+            System.out.println("Character 0x" + Integer.toHexString(c) +
+                               " exception occurred");
+            throw e;
+        }
+    }
+}
+
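
The "- 2" and "i + 2" offsets in testMultibyte come from DataOutputStream.writeUTF prefixing its modified-UTF-8 output with a two-byte length; a small standalone comparison of the two encoders for a single arbitrary character (a sketch only, not part of the patch) is:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.util.Arrays;

    import com.sleepycat.util.UtfOps;

    public class UtfCompareExample {
        public static void main(String[] args) throws Exception {
            char[] chars = { '\u00e9' };    // arbitrary two-byte UTF-8 character

            /* UtfOps encodes the characters directly. */
            byte[] utfBytes = new byte[UtfOps.getByteLength(chars)];
            UtfOps.charsToBytes(chars, 0, utfBytes, 0, 1);

            /* writeUTF prefixes a two-byte length, hence the offsets above. */
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            new DataOutputStream(bos).writeUTF(new String(chars));
            byte[] javaBytes = bos.toByteArray();

            byte[] javaBody = Arrays.copyOfRange(javaBytes, 2, javaBytes.length);
            System.out.println(Arrays.equals(utfBytes, javaBody));   // expected: true
        }
    }
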
diff --git a/test/je.properties b/test/je.properties
new file mode 100644
index 0000000000000000000000000000000000000000..c4a2554ba520cf0227456f373b8eac87f9388f91
--- /dev/null
+++ b/test/je.properties
@@ -0,0 +1,64 @@
+# Property file for unit test usage. Usually, all 
+# unit tests should run w/out a je.properties file, so
+# the test can have total control over its environment.
+# It may be useful to use a property file when debugging.
+# This file should always be checked in with all properties
+# commented out.
+# $Id: je.properties,v 1.48 2008/04/18 22:57:38 mark Exp $
+
+# Settings for permutations of unit testing:
+#je.sharedCache=true
+#je.evictor.lruOnly=false
+#je.evictor.forcedYield=false
+#je.env.forcedYield=true
+#je.log.useNIO=true
+#je.log.directNIO=true
+#je.log.chunkedNIO=4096
+#je.cleaner.threads=3
+#je.log.checksumRead=false
+#je.checkpointer.highPriority=true
+
+# Setting je.txn.serializableIsolation=true here would cause all unit tests
+# to run at the Serializable isolation level, regardless of the isolation
+# level set in code via EnvironmentConfig.  Not all tests work under
+# serializable isolation, however; tests that exercise the other three
+# isolation degrees would fail.  Instead, run "ant test
+# -DisolationLevel=serializable": that allows test code to override the
+# setting where necessary (by turning serializable isolation back off via
+# EnvironmentConfig), so tests that set a different isolation level are not
+# affected.
+#je.txn.serializableIsolation=true
+
+#je.txn.deadlockStackTrace=true
+
+#java.util.logging.ConsoleHandler.on=true
+#java.util.logging.FileHandler.on=true
+#java.util.logging.level=INFO
+
+#je.env.runINCompressor=true
+#je.compressor.deadlockRetry=3
+#je.compressor.lockTimeout=5000
+
+#je.env.runEvictor=true
+#je.maxMemory defaults to 93% of the JVM max memory unless specified
+#je.maxMemory=256000
+#je.evictor.nodeScanPercentage=25
+#je.evictor.evictionBatchPercentage=25
+
+#je.env.runCheckpointer=true
+#je.checkpointer.deadlockRetry=3
+
+#je.verify.tree.dump=true
+#je.verify.inlist=true
+#je.verify.throw=false
+
+#je.env.runCleaner=true
+#je.cleaner.deadlockRetry=3
+#je.cleaner.lockTimeout=5000
+#je.cleaner.expunge=false
+#je.cleaner.cluster=true
+
+#je.env.backgroundReadLimit=50
+#je.env.backgroundReadSleep=50000
+#je.env.backgroundWriteLimit=1
+#je.env.backgroundWriteSleep=500000
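
For one-off debugging runs, the same parameters listed above can be set programmatically through EnvironmentConfig.setConfigParam instead of editing this checked-in file; a minimal sketch, with an assumed scratch directory and two of the parameters shown above:

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class ConfigParamExample {
        public static void main(String[] args) throws Exception {
            File home = new File("build/test/envhome");   // hypothetical scratch dir
            home.mkdirs();

            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            /* Same parameters as the commented-out lines in je.properties. */
            config.setConfigParam("je.cleaner.threads", "3");
            config.setConfigParam("je.txn.deadlockStackTrace", "true");

            Environment env = new Environment(home, config);
            env.close();
        }
    }
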
diff --git a/test/standalone/BigDW.java b/test/standalone/BigDW.java
new file mode 100644
index 0000000000000000000000000000000000000000..fbb657dde8bc5a9c214b693a8dbe470251be0956
--- /dev/null
+++ b/test/standalone/BigDW.java
@@ -0,0 +1,436 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: BigDW.java,v 1.4.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+import java.io.File;
+import java.math.BigInteger;
+import java.security.SecureRandom;
+import java.util.Arrays;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
+/**
+ * A large database with random key distribution has lots of IN waste,
+ * especially if records are small; this creates a worst-case scenario for the
+ * cleaner and also possibly for the evictor.  Simulate such an application and
+ * measure how well the cleaner and evictor keep up.
+ *
+ * Some commonly used command lines for running this program are:
+ *
+ *   # Init new DB, causes duplicates to be created and deleted [#15588]
+ *   java BigDW -h HOME -init -dupdel
+ *
+ * Each transaction does the following in "grow" mode.  In "no grow" mode, it
+ * does one less insert, keeping the total number of keys constant.
+ *
+ *   2 inserts, 1 delete, 1 update, 10 reads
+ *
+ * The delete and update operations include a read to find the record.
+ *
+ */
+public class BigDW implements Runnable {
+
+    private String homeDir = "tmp";
+    private Environment env;
+    private Database refDB;
+    private Database testDB;
+    private boolean done;
+    private int nDeadlocks;
+    private boolean init;
+    private boolean verbose;
+    private boolean dupDel;
+    private int nTransactions;
+    private int nMaxTransactions = 20000;
+    private int nThreads = 4;
+
+    private int keySize = 10;
+    private int dataSize = 10;
+    private int nReadsPerWrite = 1;
+    private int maxRetries = 100;
+    private float totalSecs;
+    private float throughput;
+    private SecureRandom random = new SecureRandom();
+    private long startTime;
+    private long time;
+    private long mainCacheSize = 20000000;
+
+    public static void main(String args[]) {
+        try {
+            new BigDW().run(args);
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            System.exit(1);
+        }
+    }
+    
+    /* Output command-line input arguments to log. */
+    private void printArgs(String[] args) {
+        System.out.print("\nCommand line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+    }
+
+    private void usage(String error) {
+
+        if (error != null) {
+            System.err.println(error);
+        }
+        System.err.println
+            ("java " + getClass().getName() + '\n' +
+             "      [-h <homeDir>] [-v] [-init] [-dupdel]\n" +
+             "      [-txns <maxTxns>]\n");
+        System.exit(1);
+    }
+    
+    private void run(String args[]) throws Exception {
+        
+        try {
+            if (args.length == 0) {
+                throw new IllegalArgumentException();
+            }
+            /* Parse command-line input arguments. */
+            for (int i = 0; i < args.length; i += 1) {
+                String arg = args[i];
+                boolean moreArgs = i < args.length - 1;
+                if (arg.equals("-v")) {
+                    verbose = true;
+                } else if (arg.equals("-dupdel")) {
+                    dupDel = true;
+                } else if (arg.equals("-h") && moreArgs) {
+                    homeDir = args[++i];
+                } else if (arg.equals("-init")) {
+                    init = true;
+                } else if (arg.equals("-txns") && moreArgs) {
+                    nMaxTransactions = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-threads") && moreArgs) {
+                    nThreads = Integer.parseInt(args[++i]);
+                } else {
+                    usage("Unknown arg: " + arg);
+                }
+            }
+            printArgs(args);
+        } catch (IllegalArgumentException e) {
+            usage("IllegalArguments! ");
+            e.printStackTrace();
+            System.exit(1);
+        }
+
+
+        openEnv();
+        startTime = System.currentTimeMillis();
+
+        Thread[] threads = new Thread[nThreads];
+        for (int i = 0; i < nThreads; i += 1) {
+            threads[i] = new Thread(this);
+            threads[i].start();
+            Thread.sleep(1000); /* Stagger threads. */
+        }
+        for (int i = 0; i < nThreads; i += 1) {
+            if (threads[i] != null) {
+                threads[i].join();
+            }
+        }
+
+        time = System.currentTimeMillis();
+        closeEnv();
+
+        totalSecs = (float) (time - startTime) / 1000;
+        throughput = (float) nTransactions / totalSecs;
+        if (verbose) {
+            System.out.println("\nTotal seconds: " + totalSecs +
+                               " txn/sec: " + throughput);
+        }
+    }
+
+    private void openEnv() throws Exception {
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(init);
+        envConfig.setCacheSize(mainCacheSize);
+        env = new Environment(new File(homeDir), envConfig);
+        
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(init);
+        dbConfig.setExclusiveCreate(init);
+        dbConfig.setSortedDuplicates(dupDel);
+        refDB = env.openDatabase(null, "BigDWRef", dbConfig);
+
+        dbConfig.setDeferredWrite(true);
+        testDB = env.openDatabase(null, "BigDWTest", dbConfig);
+
+        compare();
+    }
+
+    private void closeEnv()
+        throws Exception {
+
+        refDB.close();
+        testDB.sync();
+        testDB.close();
+        env.close();
+    }
+
+    public void run() {
+
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        byte[] lastInsertKey = null;
+
+        while (!done) {
+
+            /* JE-only begin */
+            try {
+                
+                /* Perform the transaction. */
+                for (int retry = 0;; retry += 1) {
+                    Cursor refCursor = refDB.openCursor(null, null);
+                    Cursor testCursor = testDB.openCursor(null, null);
+
+                    try {
+                        if (init) {
+                            key.setData(lastInsertKey);
+                            insert(refCursor, testCursor, key, data);
+                            lastInsertKey = copyData(key);
+                        }
+                        
+                        /* Insert */
+                        key.setData(lastInsertKey);
+                        insert(refCursor, testCursor, key, data);
+                        lastInsertKey = copyData(key);
+                
+                        /* Dup-key insert. */
+                        byte[] dupDataBA = copyData(data);
+                        for (int i = 0; i < 5; i++) {
+                            dupDataBA[0]++;
+                            DatabaseEntry dupData =
+                                new DatabaseEntry(dupDataBA);
+                            OperationStatus status1 =
+                                refCursor.put(key, dupData);
+                            @SuppressWarnings("unused")
+                            boolean insertDone1 = checkInsertStatus(status1);
+                            if (status1 != OperationStatus.SUCCESS) {
+                                throw new RuntimeException("insert1 " +
+                                                           status1);
+                            }
+                            OperationStatus status2 =
+                                testCursor.put(key, dupData);
+                            if (status2 != OperationStatus.SUCCESS) {
+                                throw new RuntimeException("insert2 " +
+                                                           status2);
+                            }
+                            @SuppressWarnings("unused")
+                            boolean insertDone2 = checkInsertStatus(status2);
+                        }
+
+                        /* Delete */
+                        getRandom(refCursor, "BigDWRef",
+                                  testCursor, "BigDWTest",
+                                  key, data, LockMode.RMW);
+                        DatabaseEntry dummy1 = new DatabaseEntry();
+                        DatabaseEntry dummy2 = new DatabaseEntry();
+                        while (refCursor.delete() ==
+                               OperationStatus.SUCCESS &&
+                               refCursor.getNextDup
+                               (dummy1, dummy2, null) ==
+                               OperationStatus.SUCCESS) {
+                        }
+                        while (testCursor.delete() ==
+                               OperationStatus.SUCCESS &&
+                               testCursor.getNextDup
+                               (dummy1, dummy2, null) ==
+                               OperationStatus.SUCCESS) {
+                        }
+                                
+                        /* Read */
+                        for (int i = 0; i < nReadsPerWrite; i += 1) {
+                            getRandom(refCursor, "BigDWRef",
+                                      testCursor, "BigDWTest",
+                                      key, data, LockMode.RMW);
+                        }
+                        refCursor.close();
+                        testCursor.close();
+                        nTransactions += 1;
+                        if (nMaxTransactions != 0 &&
+                            nTransactions >= nMaxTransactions) {
+                            done = true;
+                        }
+                        break;
+                    } catch (DeadlockException e) {
+                        refCursor.close();
+                        testCursor.close();
+                        if (retry >= maxRetries) {
+                            throw e;
+                        }
+                        /* Break deadlock cycle with a small sleep. */
+                        Thread.sleep(5);
+                        nDeadlocks += 1;
+                    }
+                } /* for */
+
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+
+    private void checkStatus(OperationStatus status)
+        throws Exception {
+        if (status != OperationStatus.SUCCESS) {
+            throw new Exception("problemStatus = " + status);
+        }
+    }
+
+    private void compare()
+        throws Exception {
+
+        DatabaseEntry refKey = new DatabaseEntry();
+        DatabaseEntry refData = new DatabaseEntry();
+        DatabaseEntry testKey = new DatabaseEntry();
+        DatabaseEntry testData = new DatabaseEntry();
+
+        Cursor refCursor = refDB.openCursor(null, null);
+        Cursor testCursor = testDB.openCursor(null, null);
+
+        System.out.println("Compare starts");
+        try {
+            while (refCursor.getNext(refKey, refData, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                checkStatus(testCursor.getNext(testKey, testData,
+                                               LockMode.DEFAULT));
+
+                if (!Arrays.equals(refKey.getData(),
+                                   testKey.getData())) {
+                    throw new Exception("Keys don't match");
+                }
+
+                if (!Arrays.equals(refData.getData(),
+                                   testData.getData())) {
+                    throw new Exception("Data don't match");
+                }
+            }
+
+            if (testCursor.getNext(testKey, testData, LockMode.DEFAULT) !=
+                OperationStatus.NOTFOUND) {
+                throw new Exception("testCursor has extra data");
+            }
+        } finally {
+            refCursor.close();
+            testCursor.close();
+        }
+        System.out.println("Compare ends");
+    }
+
+    private void insert(Cursor c1, Cursor c2,
+                        DatabaseEntry key, DatabaseEntry data)
+        throws DatabaseException {
+
+        makeData(data);
+        boolean insertDone1 = false;
+        while (!insertDone1) {
+            makeInsertKey(key);
+            OperationStatus status1 = c1.putNoOverwrite(key, data);
+            insertDone1 = checkInsertStatus(status1);
+            OperationStatus status2 = c2.putNoOverwrite(key, data);
+            boolean insertDone2 = checkInsertStatus(status2);
+            assert insertDone1 == insertDone2 :
+                "status1=" + status1 +
+                " status2=" + status2;
+        }
+    }
+
+    private boolean checkInsertStatus(OperationStatus status)
+        throws DatabaseException {
+
+        if (status == OperationStatus.KEYEXIST) {
+            System.out.println("****** Duplicate random key.");
+            return false; // try again.
+        } else {
+            if (status != OperationStatus.SUCCESS) {
+                System.out.println
+                    ("Unexpected return value from insert(): " + status);
+            }
+            return true; // end one way or another
+        }
+    }
+
+    private void getRandom(Cursor c1, String db1,
+                           Cursor c2, String db2,
+                           DatabaseEntry key, DatabaseEntry data,
+                           LockMode lockMode)
+        throws DatabaseException {
+
+        makeRandomKey(key);
+        getRandomWork(c1, db1, key, data, lockMode);
+        getRandomWork(c2, db2, key, data, lockMode);
+    }
+
+    private void getRandomWork(Cursor c,
+                               String dbName,
+                               DatabaseEntry key,
+                               DatabaseEntry data,
+                               LockMode lockMode)
+        throws DatabaseException {
+
+        OperationStatus status = c.getSearchKeyRange(key, data, lockMode);
+        if (status == OperationStatus.NOTFOUND) {
+            status = c.getLast(key, data, lockMode);
+            if (status != OperationStatus.SUCCESS) {
+                System.out.println
+                    ("Unexpected return value from " + dbName +
+                     ".getRandomWork(): " + status);
+            }
+        }
+    }
+
+    private void makeInsertKey(DatabaseEntry key)
+        throws DatabaseException {
+
+        if (key.getData() != null) {
+            BigInteger num = new BigInteger(copyData(key));
+            num = num.add(BigInteger.ONE);
+            key.setData(num.toByteArray());
+        } else {
+            makeRandomKey(key);
+        }
+    }
+
+    private void makeRandomKey(DatabaseEntry key) {
+        byte[] bytes = new byte[keySize];
+        random.nextBytes(bytes);
+        key.setData(bytes);
+    }
+
+    private void makeData(DatabaseEntry data) {
+
+        byte[] bytes = new byte[dataSize];
+        for (int i = 0; i < bytes.length; i += 1) {
+            bytes[i] = (byte) i;
+        }
+        data.setData(bytes);
+    }
+
+    private byte[] copyData(DatabaseEntry data) {
+
+        byte[] buf = new byte[data.getSize()];
+        System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length);
+        return buf;
+    }
+}
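
Stripped of the workload, the pattern BigDW exercises is a deferred-write database whose changes are flushed with Database.sync() before close; a minimal sketch (directory and database names are illustrative, not taken from the test) is:

    import java.io.File;

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class DeferredWriteSketch {
        public static void main(String[] args) throws Exception {
            File home = new File("tmp");                  // same default home as BigDW
            home.mkdirs();
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            Environment env = new Environment(home, envConfig);

            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            dbConfig.setDeferredWrite(true);              // changes may not reach the log until sync()
            Database db = env.openDatabase(null, "DWSketch", dbConfig);

            db.put(null, new DatabaseEntry(new byte[] { 1 }),
                   new DatabaseEntry(new byte[] { 2 }));

            db.sync();                                    // flush deferred writes, as BigDW does before close
            db.close();
            env.close();
        }
    }
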
diff --git a/test/standalone/BigRandom.java b/test/standalone/BigRandom.java
new file mode 100644
index 0000000000000000000000000000000000000000..0f4a352468cfdd27500e72c09c635518524c8b35
--- /dev/null
+++ b/test/standalone/BigRandom.java
@@ -0,0 +1,598 @@
+import java.io.File;
+import java.math.BigInteger;
+import java.security.SecureRandom;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * A large database with random key distribution has lots of IN waste,
+ * especially if records are small; this creates a worst-case scenario for the
+ * cleaner and also possibly for the evictor.  Simulate such an application and
+ * measure how well the cleaner and evictor keep up.
+ *
+ * Some commonly used command lines for running this program are:
+ *
+ *   # Init new DB, then do updates forever.
+ *   java BigRandom -h HOME -init
+ *
+ *   # Do updates on an existing DB forever.
+ *   java BigRandom -h HOME
+ *
+ *   # Init new DB, then stop and print total rate (MOST COMMON OPTION)
+ *   java BigRandom -h HOME -initonly
+ *
+ *   # -locality N adds locality of reference for N transactions.
+ *   java BigRandom -h HOME -initonly -locality 5
+ *
+ *   # -nosync speeds things up quite a bit
+ *   java BigRandom -h HOME -initonly -locality 5 -nosync
+ *
+ * Each transaction does the following in "grow" mode.  In "no grow" mode, it
+ * does one less insert, keeping the total number of keys constant.
+ *
+ *   2 inserts, 1 delete, 1 update, 10 reads
+ *
+ * The delete and update operations include a read to find the record.
+ *
+ * Every operation uses a random key, unless the -locality option is used.  If
+ * "-locality 100" is specified, each thread will perform 100 transactions by
+ * incrementing the insertion key rather than generating a random number.  Then
+ * a random number is generated as the next starting key.  This is done per
+ * thread, so each thread will be working in a different key area.
+ */
+public class BigRandom implements Runnable {
+
+    private String homeDir = "tmp";
+    private Environment env;
+    private Database db;
+    private boolean done;
+    private int nDeadlocks;
+    private boolean init;
+    private boolean initOnly;
+    private boolean fastInit;
+    private boolean verbose;
+    private boolean sequentialKeys;
+    private boolean noSync;
+    private int nMaxKeys = 10000000;
+    private long nKeys;
+    private long sequence;
+    private int nTransactions;
+    private int nMaxTransactions;
+    private int nThreads = 4;
+    private int oneThreadKeys;
+    private long traceInterval = 10000; // 10 seconds
+    private boolean preload;
+    private int maxLocalKeyTxns;
+    private int keySize = 10;
+    private int dataSize = 10;
+    private int nReadsPerWrite = 10;
+    private int maxRetries = 100;
+    private SecureRandom random = new SecureRandom();
+    private long startTime;
+    private long priorTime = startTime;
+    private int priorTxns;
+    private int[] tpTxns = new int[120]; // 120 * 10 sec = ~20 minutes worth
+    private long[] tpMillis = new long[tpTxns.length];
+    private int tpIndex = tpTxns.length - 1;
+    private int tpMaxIndex;
+    private long tpTotalTxns;
+    private long tpTotalMillis;
+    private int thisTxns;
+    private int thisSecs;
+    private int thisTp;
+    private int avgTp;
+    private long time;
+    private int totalSecs;
+
+    public static void main(String args[]) {
+        try {
+            new BigRandom().run(args);
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            System.exit(1);
+        }
+    }
+
+    private void run(String args[])
+        throws Exception {
+
+        for (int i = 0; i < args.length; i += 1) {
+            String arg = args[i];
+            boolean moreArgs = i < args.length - 1;
+            if (arg.equals("-v")) {
+                verbose = true;
+            } else if (arg.equals("-seq")) {
+                sequentialKeys = true;
+            } else if (arg.equals("-nosync")) {
+                noSync = true;
+            } else if (arg.equals("-h") && moreArgs) {
+                homeDir = args[++i];
+            } else if (arg.equals("-preload")) {
+                preload = true;
+            } else if (arg.equals("-init")) {
+                init = true;
+            } else if (arg.equals("-initonly")) {
+                init = true;
+                initOnly = true;
+            } else if (arg.equals("-fastinit")) {
+                init = true;
+                fastInit = true;
+                initOnly = true;
+            } else if (arg.equals("-keys") && moreArgs) {
+                nMaxKeys = Integer.parseInt(args[++i]);
+            } else if (arg.equals("-txns") && moreArgs) {
+                nMaxTransactions = Integer.parseInt(args[++i]);
+            } else if (arg.equals("-threads") && moreArgs) {
+                nThreads = Integer.parseInt(args[++i]);
+            } else if (arg.equals("-onethreadkeys") && moreArgs) {
+                oneThreadKeys = Integer.parseInt(args[++i]);
+            } else if (arg.equals("-locality") && moreArgs) {
+                maxLocalKeyTxns = Integer.parseInt(args[++i]);
+            } else {
+                usage("Unknown arg: " + arg);
+            }
+        }
+        openEnv();
+        printArgs(args);
+        printLegend();
+        if (sequentialKeys) {
+            sequence = getLastSequence();
+        }
+        if (preload) {
+            doPreload();
+        }
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+        startTime = System.currentTimeMillis();
+        priorTime = startTime;
+
+        Thread[] threads = new Thread[nThreads];
+        if (oneThreadKeys > 0) {
+            threads[0] = new Thread(this);
+            threads[0].start();
+        } else {
+            for (int i = 0; i < nThreads; i += 1) {
+                threads[i] = new Thread(this);
+                threads[i].start();
+                Thread.sleep(1000); /* Stagger threads. */
+            }
+        }
+
+        while (!done) {
+            Thread.sleep(traceInterval);
+            calcThroughput();
+            /* JE-only begin */
+            EnvironmentStats stats = env.getStats(statsConfig);
+            MemoryBudget mb =
+                DbInternal.envGetEnvironmentImpl(env).getMemoryBudget();
+            int inListSize =
+                DbInternal.envGetEnvironmentImpl(env).getInMemoryINs().
+                getSize();
+            System.out.println("\nsec: " + totalSecs + ',' + thisSecs +
+                               " txn: " + thisTxns + ',' +
+                               thisTp + ',' + avgTp +
+                               " keys: " + nKeys +
+                               " dlck: " + nDeadlocks +
+                               " buf: " +
+                               stats.getNNotResident() + ',' +
+                               stats.getNCacheMiss() +
+                               "\ncleaner: " +
+                               stats.getNCleanerEntriesRead() + ',' +
+                               stats.getNCleanerRuns() + ',' +
+                               stats.getNCleanerDeletions() + ',' +
+                               stats.getCleanerBacklog() +
+                               " evict: " +
+                               stats.getNBINsStripped() + ',' +
+                               stats.getNNodesExplicitlyEvicted() + ',' +
+                               mb.getCacheMemoryUsage() + ',' +
+                               inListSize +
+                               " ckpt: " +
+                               stats.getNCheckpoints() + ',' +
+                               stats.getNFullINFlush() + ',' +
+                               stats.getNFullBINFlush() + ',' +
+                               stats.getNDeltaINFlush());
+            /* JE-only end */
+            nDeadlocks = 0;
+
+            if (oneThreadKeys > 0 && oneThreadKeys >= nKeys) {
+                for (int i = 1; i < nThreads; i += 1) {
+                    threads[i] = new Thread(this);
+                    threads[i].start();
+                    Thread.sleep(1000); /* Stagger threads. */
+                }
+                oneThreadKeys = 0;
+            }
+        }
+
+        for (int i = 0; i < nThreads; i += 1) {
+            if (threads[i] != null) {
+                threads[i].join();
+            }
+        }
+
+        time = System.currentTimeMillis();
+        totalSecs = (int) ((time - startTime) / 1000);
+        System.out.println("\nTotal seconds: " + totalSecs +
+                           " txn/sec: " + (nTransactions / totalSecs));
+        closeEnv();
+    }
+
+    private void calcThroughput() {
+
+        time = System.currentTimeMillis();
+        totalSecs = (int) ((time - startTime) / 1000);
+        int txns = nTransactions;
+        thisTxns = txns - priorTxns;
+        int thisMillis = (int) (time - priorTime);
+        thisSecs = thisMillis / 1000;
+        thisTp = thisTxns / thisSecs;
+
+        tpIndex += 1;
+        if (tpIndex == tpTxns.length) {
+            tpIndex = 0;
+        }
+        tpTotalTxns += thisTxns;
+        tpTotalTxns -= tpTxns[tpIndex];
+        tpTotalMillis += thisMillis;
+        tpTotalMillis -= tpMillis[tpIndex];
+        tpTxns[tpIndex] = thisTxns;
+        tpMillis[tpIndex] = thisMillis;
+        if (tpMaxIndex < tpTxns.length) {
+            tpMaxIndex = tpIndex + 1;
+        }
+        avgTp = (int) ((tpTotalTxns / (tpTotalMillis / 1000)));
+
+        priorTxns = txns;
+        priorTime = time;
+    }
+
+    private void printArgs(String[] args)
+        throws DatabaseException {
+
+        System.out.print("Command line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+        System.out.println();
+        System.out.println("Environment configuration:");
+        System.out.println(env.getConfig());
+        System.out.println();
+    }
+
+    private void printLegend() {
+
+        /* JE-only begin */
+        System.out.println(
+            "Legend:\n" +
+            "sec:   <totalSeconds>,<runSeconds>\n" +
+            "txn:   <txns>,<txnPerSec>,<runningAvgTxnPerSec>\n" +
+            "keys:  <totalKeys>\n" +
+            "dlck:  <deadlocks>\n" +
+            "buf:   <notResident>,<cacheMisses>\n" +
+            "clean: <entriesRead>,<filesCleaned>,<filesDeleted>,<backlog>\n" +
+            "evict: <binsStripped>,<nodesEvicted>,<cacheSize>,<INListSize>\n" +
+            "ckpt:  <checkpointsStarted>,<fullINs>,<fullBINs>,<deltaINs>");
+        /* JE-only end */
+    }
+
+    private void usage(String error) {
+
+        if (error != null) {
+            System.err.println(error);
+        }
+        System.err.println
+            ("java " + getClass().getName() + '\n' +
+             "      [-h <homeDir>] [-v] [-init | -initonly | -fastinit]\n" +
+             "      [-keys <maxKeys>] [-txns <maxTxns>] [-seq]\n" +
+             "      [-threads <appThreads>] [-onethreadkeys <nKeys>]\n" +
+             "      [-locality <nTxns>] [-nosync] [-preload]");
+        System.exit(2);
+    }
+
+    private void openEnv() throws Exception {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(init);
+        if (noSync) {
+            envConfig.setTxnNoSync(true);
+        }
+        long startTime = System.currentTimeMillis();
+        env = new Environment(new File(homeDir), envConfig);
+        long endTime = System.currentTimeMillis();
+        System.out.println("Recovery time: " + ((endTime - startTime) / 1000));
+        System.out.println();
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(init);
+        dbConfig.setExclusiveCreate(init);
+	dbConfig.setTransactional(true);
+        /* JE-only begin */
+        db = env.openDatabase(null, "BigRandom", dbConfig);
+        /* JE-only end */
+    }
+
+    private void closeEnv()
+	throws DatabaseException {
+
+	db.close();
+        env.close();
+    }
+
+    public void run() {
+
+        /*
+         * The key is reused over multiple loop iterations for computing a
+         * local insertion key, so it must be instantiated outside the loop.
+         * In makeInsertKey a local insertion key is created by adding one to
+         * the last key accessed.
+         */
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        int localKeyTxns = 0;
+        byte[] lastInsertKey = null;
+        OperationStatus status;
+
+        while (!done) {
+
+            try {
+                /*
+                 * When using local keys, only the first insert will be with a
+                 * random key, and only if we've exceeded the maximum number of
+                 * local key transactions.  When not using local keys, all keys
+                 * are randomly generated.
+                 */
+                boolean useLocalKeys = maxLocalKeyTxns > 0;
+                boolean insertRandomKey = true;
+                if (useLocalKeys) {
+                    if (localKeyTxns < maxLocalKeyTxns) {
+                        insertRandomKey = false;
+                        localKeyTxns += 1;
+                    } else {
+                        localKeyTxns = 0;
+                    }
+                }
+
+                /* Perform the transaction. */
+                for (int retry = 0;; retry += 1) {
+                    Transaction txn = env.beginTransaction(null, null);
+                    Cursor c = db.openCursor(txn, null);
+                    try {
+                        boolean addedKey = false;
+                        if (init && nKeys < nMaxKeys) {
+                            key.setData(lastInsertKey);
+                            insert(c, key, data, insertRandomKey);
+                            lastInsertKey = copyData(key);
+                            insertRandomKey = !useLocalKeys;
+                            addedKey = true;
+                        }
+                        if (!fastInit) {
+                            /* Insert. */
+                            key.setData(lastInsertKey);
+                            insert(c, key, data, insertRandomKey);
+                            lastInsertKey = copyData(key);
+                            if (useLocalKeys) {
+                                /* Update the following key. */
+                                status = c.getNext(key, data, LockMode.RMW);
+                                if (status == OperationStatus.SUCCESS) {
+                                    c.putCurrent(data);
+                                    /* Delete the following key. */
+                                    status = c.getNext
+                                        (key, data, LockMode.RMW);
+                                    if (status == OperationStatus.SUCCESS) {
+                                        c.delete();
+                                    }
+                                }
+                                /* Read.  Use RMW to avoid deadlocks. */
+                                for (int i = 0; i < nReadsPerWrite; i += 1) {
+                                    c.getNext(key, data, LockMode.RMW);
+                                }
+                            } else {
+                                /* Update */
+                                getRandom(c, key, data, LockMode.RMW);
+                                c.putCurrent(data);
+                                /* Delete */
+                                getRandom(c, key, data, LockMode.RMW);
+                                c.delete();
+                                /* Read */
+                                for (int i = 0; i < nReadsPerWrite; i += 1) {
+                                    getRandom(c, key, data, null);
+                                }
+                            }
+                        }
+                        c.close();
+                        txn.commit();
+                        nTransactions += 1;
+                        if (addedKey) {
+                            nKeys += 1;
+                        }
+                        if (initOnly && nKeys >= nMaxKeys) {
+                            done = true;
+                        }
+                        if (nMaxTransactions != 0 &&
+                            nTransactions >= nMaxTransactions) {
+                            done = true;
+                        }
+                        break;
+                    } catch (DeadlockException e) {
+                        c.close();
+                        txn.abort();
+                        if (retry >= maxRetries) {
+                            throw e;
+                        }
+                        /* Break deadlock cycle with a small sleep. */
+                        Thread.sleep(5);
+                        nDeadlocks += 1;
+                    }
+                }
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+
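+    /**
+     * Insert a new record, generating a fresh key and retrying if the
+     * chosen key already exists.
+     */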
+    private void insert(Cursor c, DatabaseEntry key, DatabaseEntry data,
+                        boolean insertRandomKey)
+        throws DatabaseException {
+
+        makeData(data);
+        while (true) {
+            makeInsertKey(c, key, insertRandomKey);
+            OperationStatus status = c.putNoOverwrite(key, data);
+            if (status == OperationStatus.KEYEXIST) {
+                if (sequentialKeys) {
+                    System.out.println("****** Duplicate sequential key.");
+                } else if (insertRandomKey) {
+                    System.out.println("****** Duplicate random key.");
+                } else {
+                    System.out.println("****** Duplicate local key.");
+                }
+            } else {
+                if (status != OperationStatus.SUCCESS) {
+                    System.out.println
+                        ("Unexpected return value from insert(): " + status);
+                }
+                break;
+            }
+        }
+    }
+
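+    /**
+     * Position the cursor on a pseudo-random record: search for a random
+     * key and fall back to the last record when no greater key exists.
+     */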
+    private void getRandom(Cursor c, DatabaseEntry key, DatabaseEntry data,
+                           LockMode lockMode)
+        throws DatabaseException {
+
+        makeRandomKey(key);
+        OperationStatus status = c.getSearchKeyRange(key, data, lockMode);
+        if (status == OperationStatus.NOTFOUND) {
+            status = c.getLast(key, data, lockMode);
+            if (status != OperationStatus.SUCCESS) {
+                System.out.println
+                    ("Unexpected return value from getRandom(): " + status);
+            }
+        }
+    }
+
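+    /**
+     * Return the highest sequential key currently in the database, or zero
+     * if the database is empty.  Only valid when sequential keys are used.
+     */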
+    private long getLastSequence()
+        throws DatabaseException {
+
+        if (!sequentialKeys) throw new IllegalStateException();
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        Cursor c = db.openCursor(null, null);
+        try {
+            OperationStatus status = c.getLast(key, data, null);
+            if (status == OperationStatus.SUCCESS) {
+                TupleInput in = new TupleInput(key.getData(),
+                                               key.getOffset(),
+                                               key.getSize());
+                return in.readLong();
+            } else {
+                return 0;
+            }
+        } finally {
+            c.close();
+        }
+    }
+
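+    /**
+     * Warm the cache by reading every record with READ_UNCOMMITTED and
+     * report the elapsed time.
+     */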
+    private void doPreload()
+        throws DatabaseException {
+
+        System.out.println("Preloading");
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+        Cursor c = db.openCursor(null, null);
+        try {
+            long startTime = System.currentTimeMillis();
+            int count = 0;
+            while (c.getNext(key, data, LockMode.READ_UNCOMMITTED) ==
+                   OperationStatus.SUCCESS) {
+                count += 1;
+            }
+            long endTime = System.currentTimeMillis();
+            int seconds = (int) ((endTime - startTime) / 1000);
+            System.out.println
+                ("Preloaded records=" + count + " seconds=" + seconds);
+        } finally {
+            c.close();
+        }
+    }
+
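+    /**
+     * Build a key for insertion: the next value of the shared sequence when
+     * sequential keys are used, the previous local key plus one for local
+     * inserts, or a random key otherwise.
+     */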
+    private void makeInsertKey(Cursor c, DatabaseEntry key,
+                               boolean insertRandomKey)
+        throws DatabaseException {
+
+        if (sequentialKeys) {
+            long val;
+            synchronized (this) {
+                val = ++sequence;
+            }
+            makeLongKey(key, val);
+        } else if (!insertRandomKey && key.getData() != null) {
+            BigInteger num = new BigInteger(copyData(key));
+            num = num.add(BigInteger.ONE);
+            key.setData(num.toByteArray());
+        } else {
+            makeRandomKey(key);
+        }
+    }
+
+    private void makeRandomKey(DatabaseEntry key) {
+
+        if (sequentialKeys) {
+            makeLongKey(key, (long) (random.nextFloat() * sequence));
+        } else {
+            byte[] bytes = new byte[keySize];
+            random.nextBytes(bytes);
+            key.setData(bytes);
+        }
+    }
+
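+    /**
+     * Encode the given long value as a key, zero-padded to keySize bytes.
+     */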
+    private void makeLongKey(DatabaseEntry key, long val) {
+
+        TupleOutput out = new TupleOutput();
+        out.writeLong(val);
+        byte[] pad = new byte[keySize - 8];
+        out.writeFast(pad);
+        if (out.getBufferOffset() != 0 || out.getBufferLength() != keySize) {
+            throw new IllegalStateException();
+        }
+        key.setData(out.getBufferBytes(), 0, keySize);
+    }
+
+    private void makeData(DatabaseEntry data) {
+
+        byte[] bytes = new byte[dataSize];
+        for (int i = 0; i < bytes.length; i += 1) {
+            bytes[i] = (byte) i;
+        }
+        data.setData(bytes);
+    }
+
+    private byte[] copyData(DatabaseEntry data) {
+
+        byte[] buf = new byte[data.getSize()];
+        System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length);
+        return buf;
+    }
+}
diff --git a/test/standalone/CleanWithSmallCache.java b/test/standalone/CleanWithSmallCache.java
new file mode 100644
index 0000000000000000000000000000000000000000..069874f84a52263fc4ecd8f788b241c6ee20e1a2
--- /dev/null
+++ b/test/standalone/CleanWithSmallCache.java
@@ -0,0 +1,534 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005,2008 Oracle.  All rights reserved.
+ *
+ * $Id: CleanWithSmallCache.java,v 1.3.2.1 2009/07/28 20:32:24 mark Exp $
+ */
+
+import java.io.File;
+import java.text.NumberFormat;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+
+/**
+ * Used to test a small cache and log cleaning.  For example, to create a large
+ * set of log files (over 10 GB) that are almost 100% obsolete:
+ *
+ * java -Xmx6m -cp .:before.jar CleanWithSmallCache \
+ *  -records 40000 -key 48 -data 10 -h tmp -random -cache 250k \
+ *  -seconds 2000 -write 10000
+ *
+ * And then to clean that set of logs:
+ *
+ * java -Xmx15m -cp .:before.jar CleanWithSmallCache \
+ *  -records 40000 -key 48 -data 10 -h tmp -random -cache 250k \
+ *  -seconds 22000 -read 10 -clean
+ */
+public class CleanWithSmallCache {
+
+    private static final NumberFormat INT_FORMAT =
+        NumberFormat.getIntegerInstance();
+    private static final NumberFormat NUMBER_FORMAT =
+        NumberFormat.getNumberInstance();
+
+    private File envHome = null;
+    private int cacheSize = 0;
+    private int records = -1;
+    private int keySize = -1;
+    private int dataSize = -1;
+    private int fanout = 128;
+    private int readRate = 0;
+    private int writeRate = 0;
+    private int totalSeconds = 0;
+    private long beginTime = 0;
+    private long endTime = 0;
+    private boolean randomKeys = false;
+    private boolean doClean = false;
+    private boolean fillCache = false;
+    private Random random = new Random(123);
+    private AtomicInteger nReads = new AtomicInteger(0);
+    private AtomicInteger nWrites = new AtomicInteger(0);
+    private boolean programDone = false;
+    private Environment env = null;
+    private Database db = null;
+
+    public static void main(String[] args) {
+        try {
+            System.out.print("Command line: ");
+            for (String s : args) {
+                System.out.print(s);
+                System.out.print(' ');
+            }
+            System.out.println();
+            CleanWithSmallCache test = new CleanWithSmallCache(args);
+            long start = System.currentTimeMillis();
+            System.out.println("Opening environment");
+            test.open();
+            System.out.println("Starting test");
+            test.execute();
+            test.close();
+            long end = System.currentTimeMillis();
+            System.out.println("Time: " + ((end - start) / 1000) + " sec");
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    private CleanWithSmallCache(String[] args) {
+
+        for (int i = 0; i < args.length; i += 1) {
+            String name = args[i];
+            String val = null;
+            if (i < args.length - 1 && !args[i + 1].startsWith("-")) {
+                i += 1;
+                val = args[i];
+            }
+            if (name.equals("-h")) {
+                if (val == null) {
+                    usage("No value after -h");
+                }
+                envHome = new File(val);
+            } else if (name.equals("-cache")) {
+                if (val == null) {
+                    usage("No value after -cache");
+                }
+                boolean mb = false;
+                boolean kb = false;
+                if (val.endsWith("m")) {
+                    mb = true;
+                    val = val.substring(0, val.length() - 1);
+                } else if (val.endsWith("k")) {
+                    kb = true;
+                    val = val.substring(0, val.length() - 1);
+                }
+                try {
+                    cacheSize = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (cacheSize <= 0) {
+                    usage(val + " is not a positive integer");
+                }
+                if (mb) {
+                    cacheSize *= 1024 * 1024;
+                } else if (kb) {
+                    cacheSize *= 1024;
+                }
+            } else if (name.equals("-records")) {
+                if (val == null) {
+                    usage("No value after -records");
+                }
+                try {
+                    records = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (records <= 0) {
+                    usage(val + " is not a positive integer");
+                }
+            } else if (name.equals("-key")) {
+                if (val == null) {
+                    usage("No value after -key");
+                }
+                try {
+                    keySize = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (keySize <= 0) {
+                    usage(val + " is not a positive integer");
+                }
+            } else if (name.equals("-data")) {
+                if (val == null) {
+                    usage("No value after -data");
+                }
+                try {
+                    dataSize = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (dataSize < 0) {
+                    usage(val + " is not a non-negative integer");
+                }
+            } else if (name.equals("-read")) {
+                if (val == null) {
+                    usage("No value after -read");
+                }
+                try {
+                    readRate = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (readRate < 0) {
+                    usage(val + " is not a non-negative integer");
+                }
+            } else if (name.equals("-write")) {
+                if (val == null) {
+                    usage("No value after -write");
+                }
+                try {
+                    writeRate = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (writeRate < 0) {
+                    usage(val + " is not a non-negative integer");
+                }
+            } else if (name.equals("-seconds")) {
+                if (val == null) {
+                    usage("No value after -seconds");
+                }
+                try {
+                    totalSeconds = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (totalSeconds < 0) {
+                    usage(val + " is not a non-negative integer");
+                }
+            } else if (name.equals("-fanout")) {
+                if (val == null) {
+                    usage("No value after -fanout");
+                }
+                try {
+                    fanout = Integer.parseInt(val);
+                } catch (NumberFormatException e) {
+                    usage(val + " is not a number");
+                }
+                if (fanout <= 0) {
+                    usage(val + " is not a positive integer");
+                }
+            } else if (name.equals("-random")) {
+                randomKeys = true;
+            } else if (name.equals("-clean")) {
+                doClean = true;
+            } else if (name.equals("-fillcache")) {
+                fillCache = true;
+            } else {
+                usage("Unknown arg: " + name);
+            }
+        }
+
+        if (envHome == null) {
+            usage("-h not specified");
+        }
+
+        if (cacheSize <= 0) {
+            usage("-cache not specified");
+        }
+
+        if (records <= 0) {
+            usage("-records not specified");
+        }
+
+        if (keySize <= 0) {
+            usage("-key not specified");
+        }
+
+        if (dataSize <= 0) {
+            usage("-data not specified");
+        }
+
+        int maxRecNum;
+        switch (keySize) {
+        case 1:
+            maxRecNum = Byte.MAX_VALUE;
+            break;
+        case 2:
+        case 3:
+            maxRecNum = Short.MAX_VALUE;
+            break;
+        default:
+            maxRecNum = Integer.MAX_VALUE;
+        }
+        if (records > maxRecNum) {
+            usage("-key size too small for number of records");
+        }
+    }
+
+    private void usage(String msg) {
+
+        if (msg != null) {
+            System.out.println(msg);
+        }
+
+        System.out.println
+            ("usage:" +
+             "\njava "  + CleanWithSmallCache.class.getName() +
+             "\n   -h <envHome>" +
+             "\n      # Environment home directory" +
+             "\n   -records <count>" +
+             "\n      # Total records (key/data pairs); required" +
+             "\n   -key <bytes>" +
+             "\n      # Key bytes per record; required" +
+             "\n   -data <bytes>" +
+             "\n      # Data bytes per record; required" +
+             "\n  [-fanout <entries>]" +
+             "\n      # Number of entries per Btree node; default: 128" +
+             "\n  [-read <readsPerSecond>]" +
+             "\n      # Number of read operations per second; default: 0" +
+             "\n  [-write <writesPerSecond>]" +
+             "\n      # Number of write operations per second; default: 0" +
+             "\n  [-random]" +
+             "\n      # Write randomly generated keys;" +
+             "\n      # default: write sequential keys" +
+             "\n  [-seconds <totalSeconds>]" +
+             "\n      # Number of seconds to run; default: 0 or forever" +
+             "\n  [-clean]" +
+             "\n      # Perform log cleaning; default: false" +
+             "\n  [-fillcache]" +
+             "\n      # Artificially fill the cache; default: false");
+
+        System.exit(2);
+    }
+
+    private void open()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        envConfig.setCacheSize(cacheSize);
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setNodeMaxEntries(fanout);
+        db = env.openDatabase(null, "foo", dbConfig);
+
+        if (fillCache) {
+            DbInternal.envGetEnvironmentImpl(env).getMemoryBudget().
+                updateAdminMemoryUsage(cacheSize * 2);
+        }
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        db.close();
+        env.close();
+    }
+
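+    /**
+     * Choose the next record number (random, or sequential wrapping at
+     * "records"), write it into the entry padded to keySize bytes, and
+     * return the chosen record number.
+     */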
+    private int makeKey(int recNum, DatabaseEntry entry) {
+        if (randomKeys) {
+            recNum = random.nextInt(records - 1) + 1;
+        } else {
+            recNum += 1;
+            if (recNum > records) {
+                recNum = 1;
+            }
+        }
+        if (keySize == 1) {
+            entry.setData(new byte[] { (byte) recNum });
+        } else {
+            TupleOutput out = new TupleOutput(new byte[keySize]);
+            int written;
+            if (keySize == 2 || keySize == 3) {
+                out.writeUnsignedShort((short) recNum);
+                written = 2;
+            } else {
+                out.writeUnsignedInt(recNum);
+                written = 4;
+            }
+            while (written < keySize) {
+                out.writeFast(0);
+                written += 1;
+            }
+            TupleBinding.outputToEntry(out, entry);
+        }
+        return recNum;
+    }
+
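+    /**
+     * Start the monitor thread and, depending on the configuration, the
+     * cleaner, writer and reader threads, then wait for them all to finish.
+     */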
+    private void execute()
+        throws InterruptedException, DatabaseException {
+
+        Thread monitor = new Monitor();
+        Thread cleaner = null;
+        if (doClean) {
+            cleaner = new Cleaner();
+        }
+        Thread writer = null;
+        if (writeRate > 0) {
+            writer = new OperationRunner(writeRate, nWrites, new Operation() {
+                public void doOperation(DatabaseEntry key, DatabaseEntry data)
+                    throws DatabaseException {
+                    db.put(null, key, data);
+                }
+            });
+        }
+        Thread reader = null;
+        if (readRate > 0) {
+            reader = new OperationRunner(readRate, nReads, new Operation() {
+                public void doOperation(DatabaseEntry key, DatabaseEntry data)
+                    throws DatabaseException {
+                    Cursor cursor = db.openCursor(null, null);
+                    cursor.getSearchKeyRange(key, data, null);
+                    cursor.close();
+                }
+            });
+        }
+        beginTime = System.currentTimeMillis();
+        if (totalSeconds > 0) {
+            endTime = beginTime + (totalSeconds * 1000L);
+        }
+        monitor.start();
+        if (cleaner != null) {
+            cleaner.start();
+        }
+        if (writer != null) {
+            writer.start();
+        }
+        if (reader != null) {
+            reader.start();
+        }
+        monitor.join();
+        if (cleaner != null) {
+            cleaner.join();
+        }
+        if (writer != null) {
+            writer.join();
+        }
+        if (reader != null) {
+            reader.join();
+        }
+    }
+
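+    /**
+     * Print statistics every 5 seconds and end the run when endTime is
+     * reached, or run indefinitely when no -seconds limit is given.
+     */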
+    private class Monitor extends Thread {
+        public void run() {
+            try {
+                long lastTime = System.currentTimeMillis();
+                while (totalSeconds == 0 || lastTime < endTime) {
+                    Thread.sleep(5000);
+                    long time = System.currentTimeMillis();
+                    printStats(time);
+                    lastTime = time;
+                }
+                programDone = true;
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+
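+    /**
+     * Repeatedly clean the log until no more files can be cleaned, forcing a
+     * checkpoint after each batch, and stop when no progress is made or the
+     * program is done.
+     */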
+    private class Cleaner extends Thread {
+        public void run() {
+            CheckpointConfig forceConfig = new CheckpointConfig();
+            forceConfig.setForce(true);
+            try {
+                boolean cleanedSome;
+                do {
+                    cleanedSome = false;
+                    while (true) {
+                        int nFiles = env.cleanLog();
+                        if (nFiles == 0) {
+                            break;
+                        }
+                        cleanedSome = true;
+                    }
+                    env.checkpoint(forceConfig);
+                } while (cleanedSome && !programDone);
+                programDone = true;
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+
+    private interface Operation {
+        void doOperation(DatabaseEntry key, DatabaseEntry data)
+            throws DatabaseException;
+    }
+
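+    /**
+     * Run the given operation in a loop, throttled to approximately "rate"
+     * operations per second, counting completed operations in nOps.
+     */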
+    private class OperationRunner extends Thread {
+
+        private int rate;
+        private Operation op;
+        private AtomicInteger nOps;
+
+        OperationRunner(int rate, AtomicInteger nOps, Operation op) {
+            this.rate = rate;
+            this.nOps = nOps;
+            this.op = op;
+        }
+
+        public void run() {
+
+            int recNum = 0;
+            int ops = 0;
+            long beforeTime = System.currentTimeMillis();
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+
+            try {
+                while (!programDone) {
+                    recNum = makeKey(recNum, key);
+                    op.doOperation(key, data);
+                    ops += 1;
+                    nOps.incrementAndGet();
+                    if (ops >= rate) {
+                        long afterTime = System.currentTimeMillis();
+                        long interval = afterTime - beforeTime;
+                        if (interval < 1000) {
+                            Thread.sleep(1000 - interval);
+                        }
+                        ops = 0;
+                        beforeTime = afterTime;
+                    }
+                }
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+
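+    /**
+     * Print operation rates since the start of the run along with JE cache
+     * and log statistics, which are cleared on each call.
+     */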
+    private void printStats(long currentTime)
+        throws DatabaseException {
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setClear(true);
+        EnvironmentStats stats = env.getStats(statsConfig);
+
+        float secs = (currentTime - beginTime) / 1000.0f;
+        float writesPerSec = nWrites.get() / secs;
+        float readsPerSec = nReads.get() / secs;
+
+        System.out.println("\nWrites/Sec=" +
+                           NUMBER_FORMAT.format(writesPerSec) +
+                           " Reads/Sec=" +
+                           NUMBER_FORMAT.format(readsPerSec) +
+                           " CacheSize=" +
+                           INT_FORMAT.format(stats.getCacheTotalBytes()) +
+                           " DataSize=" +
+                           INT_FORMAT.format(stats.getDataBytes()) +
+                           " AdminSize=" +
+                           INT_FORMAT.format(stats.getAdminBytes()) +
+                           " LockSize=" +
+                           INT_FORMAT.format(stats.getLockBytes()) +
+                           " NEvictPasses=" +
+                           INT_FORMAT.format(stats.getNEvictPasses()) +
+                           " NCacheMiss=" +
+                           INT_FORMAT.format(stats.getNCacheMiss()) +
+                           " TotalLogSize=" +
+                           INT_FORMAT.format(stats.getTotalLogSize()));
+    }
+}
diff --git a/test/standalone/ClosedDbEviction.java b/test/standalone/ClosedDbEviction.java
new file mode 100644
index 0000000000000000000000000000000000000000..0c40c67e9dc15d8986ce68ec8e9e8f601e26b66c
--- /dev/null
+++ b/test/standalone/ClosedDbEviction.java
@@ -0,0 +1,753 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: ClosedDbEviction.java,v 1.2.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+import java.io.File;
+import java.util.Random;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+
+/**
+ * Applications with a large number of databases randomly open and close
+ * databases as needed.  The mapping tree nodes (roots) of closed databases
+ * are not evicted from the cache immediately.  As such applications run over
+ * time, this can waste cache space and even cause poor performance or an
+ * OutOfMemoryError if the cache overflows.
+ *
+ * This test simulates such a scenario to check the efficiency of eviction of
+ * closed databases (SR 13415) and to make sure that the eviction does not
+ * cause corruption or concurrency bugs:
+ * + Concurrency bugs must not occur when multiple threads are trying to
+ *   close, evict and open a single database.
+ * + The database must open correctly after being closed and evicted.
+ * + Cache budgeting must be done correctly during eviction and re-loading of
+ *   the database after eviction.
+ */
+public class ClosedDbEviction {
+    private static int nDataAccessDbs = 1;
+    private static int nRegularDbs = 100000;
+    private static int nDbRecords = 100;
+    private static int nInitThreads = 8;
+    private static int nContentionThreads = 4;
+    private static int nDbsPerSet = 5;
+    private static int nKeepOpenedDbs = 100;
+    private static int nOps[] = new int[nContentionThreads];
+    private static long nTxnPerRecovery = 1000000L;
+    private static long nTotalTxns = 100000000L;
+    private static boolean verbose = false;
+    private static boolean init = false;
+    private static boolean contention = false;
+    private static boolean evict = false;
+    private static boolean recovery = false;
+    private static boolean runDataAccessThread = true;
+    private static String homeDir = "./tmp";
+    private static Environment env = null;
+    private static Database dataAccessDb = null;
+    private static Database metadataDb = null;
+    private static Database[] openDbList =  new Database[nKeepOpenedDbs];
+    private static Random random = new Random();
+    private static Runtime rt = Runtime.getRuntime();
+
+    public static void main(String[] args) {
+        try {
+            ClosedDbEviction eviction = new ClosedDbEviction();
+            eviction.start(args);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /* Output command-line input arguments to log. */
+    private void printArgs(String[] args) {
+        System.out.print("\nCommand line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+    }
+
+    void start(String[] args) throws DatabaseException, InterruptedException {
+        try {
+            if (args.length == 0) {
+                throw new IllegalArgumentException();
+            }
+
+            /* Parse command-line input arguments. */
+            for (int i = 0; i < args.length; i++) {
+                String arg = args[i];
+                String arg2 = (i < args.length - 1) ? args[i + 1] : null;
+                if (arg.equals("-v")) {
+                    verbose = true;
+                } else if (arg.equals("-h")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    homeDir = args[++i];
+                } else if (arg.equals("-init")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        nRegularDbs = Integer.parseInt(args[++i]);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    init = true;
+                } else if (arg.equals("-contention")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        nTotalTxns = Long.parseLong(args[++i]);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    contention = true;
+                } else if (arg.equals("-evict")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        nTotalTxns = Long.parseLong(args[++i]);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    evict = true;
+                } else if (arg.equals("-recovery")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        nTxnPerRecovery = Long.parseLong(args[++i]);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    recovery = true;
+                } else {
+                    throw new IllegalArgumentException(arg);
+                }
+            }
+            /* Correctness self-check: nTotalTxns >= nTxnPerRecovery. */
+            if (nTotalTxns < nTxnPerRecovery) {
+                System.err.println
+                    ("ERROR: <nTotalTxns> argument should be no smaller " +
+                     "than " + nTxnPerRecovery + "!");
+                System.exit(1);
+            }
+            printArgs(args);
+        } catch (IllegalArgumentException e) {
+            System.out.println
+                ("Usage: ClosedDbEviction [-v] -h <envHome> -init <nDbs>\n" +
+                 "Usage: ClosedDbEviction [-v] -h <envHome> " +
+                 "[-contention <nTotalTxns> | -evict <nTotalTxns>] " +
+                 "[-recovery <nTxnsPerRecovery>]");
+            e.printStackTrace();
+            System.exit(1);
+        }
+
+        try {
+            if (init) {
+                doInit();
+            } else if (contention) {
+                doContention();
+            } else if (evict) {
+                doEvict();
+            } else {
+                System.err.println
+                    ("No test mode specified (-init, -contention or -evict).");
+                System.exit(1);
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /**
+     * Initialize nRegularDbs regular databases, one dataAccessDb and one
+     * metadataDb.
+     */
+    private void doInit() {
+
+        class InitThread extends Thread {
+            public int id;
+            private Environment env = null;
+            private Database db = null;
+
+            /**
+             * Constructor used for initializing databases.
+             */
+            InitThread(int id, Environment env) {
+                this.id = id;
+                this.env = env;
+            }
+
+            public void run() {
+                try {
+                    DatabaseConfig dbConfig = new DatabaseConfig();
+                    dbConfig.setAllowCreate(true);
+                    DatabaseEntry key = new DatabaseEntry();
+                    DatabaseEntry data = new DatabaseEntry();
+                    for (int i = 0;
+                         i <= ((nRegularDbs + nDataAccessDbs) / nInitThreads);
+                         i++) {
+
+                        int dbId = id + (i * nInitThreads);
+                        int totalRecords = nDbRecords;
+                        boolean isDataAccessDb = false;
+                        String dbName = "db" + dbId;
+                        if (dbId <= (nRegularDbs / 10)) {
+                            dbConfig.setDeferredWrite(true);
+                        } else if (dbId >= nRegularDbs) {
+                            if (dbId < (nRegularDbs + nDataAccessDbs)) {
+                                isDataAccessDb = true;
+                                dbName = "dataAccessDb";
+                                totalRecords = 10 * nDbRecords;
+                            } else {
+                                break;
+                            }
+                        }
+                        /* Open the database. */
+                        db = env.openDatabase(null, dbName, dbConfig);
+                        /* Insert totalRecords into database. */
+                        for (int j = 0; j < totalRecords; j++) {
+                            key.setData(Integer.toString(j).getBytes("UTF-8"));
+                            makeData(data, j, isDataAccessDb);
+                            OperationStatus status = db.put(null, key, data);
+                            if (status != OperationStatus.SUCCESS) {
+                                System.err.println
+                                    ("ERROR: failed to insert the #" + j +
+                                     " key/data pair into " +
+                                     db.getDatabaseName());
+                                System.exit(1);
+                            }
+                        }
+                        db.close();
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            }
+
+            /**
+             * Generate the data entry. The dataAccessDb gets a larger (1 KB)
+             * data entry; regular DBs get a data entry equal to
+             * (index + "th-dataEntry").
+             */
+            private void makeData(DatabaseEntry data,
+                                  int index,
+                                  boolean isDataAccessDb) throws Exception {
+
+                assert (data != null) : "makeData: Null data pointer";
+
+                if (isDataAccessDb) {
+                    byte[] bytes = new byte[1024];
+                    for (int i = 0; i < bytes.length; i++) {
+                        bytes[i] = (byte) i;
+                    }
+                    data.setData(bytes);
+                } else {
+                    data.setData((Integer.toString(index) + "th-dataEntry").
+                            getBytes("UTF-8"));
+                }
+            }
+        }
+
+        /*
+         * Initialize "nRegularDbs" regular databases, one dataAccessDb and
+         * one metadataDb according to these rules:
+         * - The "nRegularDbs" databases have dbIds ranging from 0 to
+         *   (nRegularDbs - 1), and each contains "nDbRecords" records.
+         * - 10% of the "nRegularDbs" databases are deferredWrite databases.
+         * - 90% of the "nRegularDbs" databases are ordinary databases.
+         * - The dataAccessDb has "10 * nDbRecords" key/data pairs.
+         * - The metadataDb saves the "nRegularDbs" count for the contention
+         *   test.
+         */
+        try {
+            openEnv(128 * 1024 * 1024);
+            saveMetadata();
+            InitThread[] threads = new InitThread[nInitThreads];
+            long startTime = System.currentTimeMillis();
+            for (int i = 0; i < threads.length; i++) {
+                InitThread t = new InitThread(i, env);
+                t.start();
+                threads[i] = t;
+            }
+            for (int i = 0; i < threads.length; i++) {
+                threads[i].join();
+            }
+            long endTime = System.currentTimeMillis();
+            if (verbose) {
+                float elapsedSeconds =  (endTime - startTime) / 1000f;
+                float throughput = (nRegularDbs * nDbRecords) / elapsedSeconds;
+                System.out.println
+                    ("\nInitialization Statistics Report" +
+                     "\n Run starts at: " + (new java.util.Date(startTime)) +
+                     ", finishes at: " + (new java.util.Date(endTime)) +
+                     "\n Initialized " + nRegularDbs + " databases, " +
+                     "each contains " + nDbRecords + " records." +
+                     "\n Elapsed seconds: " + elapsedSeconds +
+                     ", throughput: " + throughput + " ops/sec.");
+            }
+            closeEnv();
+        } catch (DatabaseException de) {
+            de.printStackTrace();
+            System.exit(1);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /**
+     * Simulate contention to make sure that eviction does not cause
+     * corruption or concurrency bugs.
+     */
+    private void doContention() {
+
+        class ContentionThread extends Thread {
+            public int id;
+            private float dataCheckPossibility = .01f;
+            private long txns;
+            private boolean done = false;
+            private Database currentDb = null;
+            private Database lastOpenedDb = null;
+
+            /**
+             * Constructor for a contention thread.
+             */
+            ContentionThread(int id, long txns) {
+                this.id = id;
+                this.txns = txns;
+            }
+
+            public void run() {
+                try {
+
+                    /* Start dataAccessThread here. */
+                    startDataAccessor();
+
+                    /*
+                     * All contention threads try to open "nDbsPerSet" DBs
+                     * from the same set concurrently.
+                     */
+                    while (!done) {
+                        int dbId = random.nextInt(nDbsPerSet);
+                        currentDb = env.openDatabase(null, "db" + dbId, null);
+                        if (lastOpenedDb != null) {
+                            lastOpenedDb.close();
+                        }
+                        lastOpenedDb = currentDb;
+                        if (random.nextFloat() <= dataCheckPossibility) {
+                            verifyData();
+                        }
+                        nOps[id]++;
+                        if (nOps[id] > txns) {
+                            if (lastOpenedDb != null) {
+                                lastOpenedDb.close();
+                            }
+                            done = true;
+                        }
+                    }
+
+                    /* Stop dataAccessThread here. */
+                    stopDataAccessor();
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            }
+
+            private void startDataAccessor() {
+                runDataAccessThread = true;
+            }
+
+            private void stopDataAccessor() {
+                runDataAccessThread = false;
+            }
+
+            /**
+             * Do the corruption check: just check that the data
+             * that is present looks correct.
+             */
+            private void verifyData() throws Exception {
+                long dbCount = currentDb.count();
+                if (dbCount != nDbRecords) {
+                    System.err.println
+                        ("WARNING: total records in " +
+                         currentDb.getDatabaseName() + ": " + dbCount +
+                         " doesn't meet the expected value: " + nDbRecords);
+                    System.exit(1);
+                } else {
+                    DatabaseEntry key = new DatabaseEntry();
+                    DatabaseEntry data = new DatabaseEntry();
+                    for (int i = 0; i < nDbRecords; i++) {
+                        key.setData(Integer.toString(i).getBytes("UTF-8"));
+                        OperationStatus status =
+                            currentDb.get(null, key, data, LockMode.DEFAULT);
+                        if (status != OperationStatus.SUCCESS) {
+                            System.err.println
+                                ("ERROR: failed to retrieve the #" +
+                                 i + " key/data pair from " +
+                                 currentDb.getDatabaseName());
+                            System.exit(1);
+                        } else if (!(new String(data.getData(), "UTF-8")).
+                                equals((Integer.toString(i) +
+                                        "th-dataEntry"))) {
+                            System.err.println
+                                ("ERROR: current key/data pair:" + i +
+                                 "/" + (new String(data.getData(), "UTF-8")) +
+                                 " doesn't match the expected:" +
+                                 i + "/" + i +"th-dataEntry in " +
+                                 currentDb.getDatabaseName());
+                            System.exit(1);
+                        }
+                    }
+                }
+            }
+        }
+
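+        /**
+         * Continually reads random records from dataAccessDb to keep the
+         * cache full while the contention threads run.
+         */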
+        class DataAccessThread extends Thread {
+
+            public void run() {
+                try {
+                    while (runDataAccessThread) {
+                        /* Access records to fill up cache. */
+                        DatabaseEntry key = new DatabaseEntry();
+                        key.setData(Integer.
+                                    toString(random.nextInt(10 * nDbRecords)).
+                                    getBytes("UTF-8"));
+                        DatabaseEntry data = new DatabaseEntry();
+                        OperationStatus status =
+                            dataAccessDb.get(null, key, data, LockMode.DEFAULT);
+                        if (status != OperationStatus.SUCCESS) {
+                            System.err.println
+                                ("ERROR: failed to retrieve the #" +
+                                 new String(key.getData(), "UTF-8") +
+                                 " key/data pair from dataAccessDb.");
+                            System.exit(1);
+                        }
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            }
+        }
+
+        /*
+         * Simulate contention according to the following rules:
+         * - Several threads repeatedly open and close a set of databases.
+         * - Another thread continually reads records from dataAccessDb to
+         *   fill up the cache.
+         */
+        try {
+            long startTime = System.currentTimeMillis();
+            long txns = nTotalTxns;
+            if (recovery) {
+                txns = nTxnPerRecovery;
+            }
+            for (int loop = 0; loop < nTotalTxns / txns; loop++) {
+                /* Clear nOps[] before each run starts. */
+                for (int i = 0; i < nContentionThreads; i++) {
+                    nOps[i] = 0;
+                }
+                openEnv(1024 * 1024);
+                readMetadata();
+                DataAccessThread dat = new DataAccessThread();
+                ContentionThread[] threads =
+                    new ContentionThread[nContentionThreads];
+                for (int i = 0; i < threads.length; i++) {
+                    ContentionThread t =
+                        new ContentionThread(i, txns);
+                    t.start();
+                    threads[i] = t;
+                }
+                dat.start();
+                for (int i = 0; i < threads.length; i++) {
+                    threads[i].join();
+                }
+                dat.join();
+                if (!checkStats(txns)) {
+                    System.err.println
+                        ("doContention: stats check failed.");
+                    System.exit(1);
+                }
+                closeEnv();
+            }
+            long endTime = System.currentTimeMillis();
+            float elapsedSecs =  (endTime - startTime) / 1000f;
+            float throughput = nTotalTxns / elapsedSecs;
+            if (verbose) {
+                System.out.println
+                    ("\nContention Test Statistics Report" +
+                     "\n Starts at: " + (new java.util.Date(startTime)) +
+                     ", Finishes at: " + (new java.util.Date(endTime)) +
+                     "\n Total operations: " + nTotalTxns +
+                     ", Elapsed seconds: " + elapsedSecs +
+                     ", Throughput: " + throughput + " ops/sec.");
+            }
+        } catch (DatabaseException de) {
+            de.printStackTrace();
+            System.exit(1);
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
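+    /**
+     * Exercise eviction of closed databases: multiple threads repeatedly
+     * open and close randomly chosen databases while keeping a window of
+     * nKeepOpenedDbs databases open, then verify the eviction stats.
+     */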
+    private void doEvict() throws DatabaseException {
+        final int offset = random.nextInt(nRegularDbs - nKeepOpenedDbs);
+
+        class EvictThread extends Thread {
+            public int id;
+            private float dataAccessPossibility = .01f;
+            private long txns = 0;
+            private Database currentDb = null;
+            private Database lastOpenedDb = null;
+
+            /**
+             * Constructor.
+             */
+            public EvictThread(int id, long txns) {
+                this.id = id;
+                this.txns = txns;
+            }
+
+            public void run() {
+                try {
+                    int dbId;
+                    boolean done = false;
+                    while (!done) {
+                        dbId = random.nextInt(nRegularDbs);
+                        if ((0 <= (dbId - offset)) &&
+                                ((dbId - offset) < nKeepOpenedDbs)) {
+
+                            /*
+                             * Keep a randomly selected window of
+                             * nKeepOpenedDbs databases open at a time. The
+                             * dbId ranges from <offset> to
+                             * <offset + nKeepOpenedDbs - 1>.
+                             */
+                            if (openDbList[dbId - offset] == null) {
+                                openDbList[dbId - offset] =
+                                    env.openDatabase(null, "db" + dbId, null);
+                            }
+                        } else {
+                            /* Each thread selects randomly from all DBs. */
+                            currentDb =
+                                env.openDatabase(null, "db" + dbId, null);
+                            if (random.nextFloat() < dataAccessPossibility) {
+                                DatabaseEntry key = new DatabaseEntry();
+                                DatabaseEntry data = new DatabaseEntry();
+                                key.setData(Integer.toString
+                                            (random.nextInt(nDbRecords)).
+                                            getBytes("UTF-8"));
+                                currentDb.get(null, key, data,
+                                              LockMode.DEFAULT);
+                            }
+                            if (lastOpenedDb != null) {
+                                lastOpenedDb.close();
+                            }
+                            lastOpenedDb = currentDb;
+                        }
+                        nOps[id]++;
+                        if (nOps[id] > txns) {
+                            if (lastOpenedDb != null) {
+                                lastOpenedDb.close();
+                            }
+                            /* Close nKeepOpenedDbs before exit. */
+                            for (int i = 0; i < nKeepOpenedDbs; i++) {
+                                currentDb = openDbList[i];
+                                if (currentDb != null) {
+                                    currentDb.close();
+                                    openDbList[i] = null;
+                                }
+                            }
+                            done = true;
+                        }
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            }
+        }
+
+        /*
+         * Exercise eviction of closed databases according to these rules:
+         * - Several threads repeatedly open and close randomly chosen
+         *   databases, occasionally reading a record from the open one.
+         * - A window of nKeepOpenedDbs databases is kept open during the run.
+         */
+        try {
+            long startTime = System.currentTimeMillis();
+            long txns = nTotalTxns;
+            if (recovery) {
+                txns = nTxnPerRecovery;
+            }
+            for (int loop = 0; loop < nTotalTxns / txns; loop++) {
+                /* Clear nOps[] before each run starts. */
+                for (int i = 0; i < nContentionThreads; i++) {
+                    nOps[i] = 0;
+                }
+                openEnv(512 * 1024);
+                readMetadata();
+                EvictThread[] threads = new EvictThread[nContentionThreads];
+                for (int i = 0; i < threads.length; i++) {
+                    EvictThread t = new EvictThread(i, txns);
+                    t.start();
+                    threads[i] = t;
+                }
+                for (int i = 0; i < threads.length; i++) {
+                    threads[i].join();
+                }
+                if (!checkStats(txns)) {
+                    System.err.println("doEvict: stats check failed.");
+                    System.exit(1);
+                }
+                closeEnv();
+            }
+            long endTime = System.currentTimeMillis();
+            if (verbose) {
+                float elapsedSeconds =  (endTime - startTime) / 1000f;
+                float throughput = nTotalTxns / elapsedSeconds;
+                System.out.println
+                    ("\nEviction Test Statistics Report" +
+                     "\n Run starts at: " + (new java.util.Date(startTime)) +
+                     ", finishes at: " + (new java.util.Date(endTime)) +
+                     "\n Total operations: " + nTotalTxns +
+                     ", Elapsed seconds: " + elapsedSeconds +
+                     ", Throughput: " + throughput + " ops/sec.");
+            }
+        } catch (DatabaseException de) {
+            de.printStackTrace();
+            System.exit(1);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /**
+     * Open an Environment.
+     */
+    private void openEnv(long cacheSize) throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setCacheSize(cacheSize);
+        env = new Environment(new File(homeDir), envConfig);
+        if (contention) {
+            dataAccessDb = env.openDatabase(null, "dataAccessDb", null);
+        }
+    }
+
+    /**
+     * Check to see if stats looks correct.
+     */
+    private boolean checkStats(long txns) throws DatabaseException {
+
+        /* Get EnvironmentStats numbers. */
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+        EnvironmentStats stats = env.getStats(statsConfig);
+        long evictedINs = stats.getNNodesExplicitlyEvicted();
+        long evictedRoots = stats.getNRootNodesEvicted();
+        long dataBytes = stats.getDataBytes();
+        /* Check the eviction of INs and ROOTs actually happens. */
+        boolean nodesCheck = (evictedINs > 0);
+        boolean rootsCheck = (evictedRoots > 0);
+        if (verbose) {
+            System.out.printf
+                ("\n\tEviction Statistics(calc txns: %d)%n" +
+                 "                Data     Pass/Fail%n" +
+                 "             ----------  ---------%n" +
+                 "EvictedINs:  %10d  %9S%n" +
+                 "EvictedRoots:%10d  %9S%n" +
+                 "DataBytes:   %10d%n" +
+                 "jvm.maxMem:  %10d%n" +
+                 "jvm.freeMem: %10d%n" +
+                 "jvm.totlMem: %10d%n",
+                 txns, evictedINs, (nodesCheck ? "PASS" : "FAIL"),
+                 evictedRoots, (rootsCheck ? "PASS" : "FAIL"),
+                 dataBytes, rt.maxMemory(), rt.freeMemory(), rt.totalMemory());
+            System.out.println
+                ("The test criteria: EvictedINs > 0, EvictedRoots > 0.");
+        }
+
+        return nodesCheck && rootsCheck;
+    }
+
+    /**
+     * Close the Databases and Environment.
+     */
+    private void closeEnv() throws DatabaseException {
+
+        if (dataAccessDb != null) {
+            dataAccessDb.close();
+        }
+        if (env != null) {
+            env.close();
+        }
+    }
+
+    /**
+     * Store meta-data information into metadataDb.
+     */
+    private void saveMetadata() throws Exception {
+
+        /* Store meta-data information into one additional database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        metadataDb = env.openDatabase(null, "metadataDb", dbConfig);
+        OperationStatus status =
+            metadataDb.put(null,
+                           new DatabaseEntry("nRegularDbs".getBytes("UTF-8")),
+                           new DatabaseEntry(Integer.
+                                             toString(nRegularDbs).
+                                             getBytes("UTF-8")));
+        if (status != OperationStatus.SUCCESS) {
+            System.err.println
+                ("Not able to save info into the metadata database.");
+            System.exit(1);
+        }
+        metadataDb.close();
+    }
+
+    /**
+     * Retrieve meta-data information from metadataDb.
+     */
+    private void readMetadata() throws Exception {
+
+        /* Retrieve meta-data information from metadataDB. */
+        metadataDb = env.openDatabase(null, "metadataDb", null);
+        DatabaseEntry key = new DatabaseEntry("nRegularDbs".getBytes("UTF-8"));
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status =
+            metadataDb.get(null, key, data, LockMode.DEFAULT);
+        if (status != OperationStatus.SUCCESS) {
+            System.err.println
+                ("Couldn't retrieve info from the metadata database.");
+            System.exit(1);
+        }
+        nRegularDbs = Integer.parseInt(new String (data.getData(), "UTF-8"));
+        metadataDb.close();
+    }
+}
diff --git a/test/standalone/EnvSharedCache.java b/test/standalone/EnvSharedCache.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ad41a60e777811b60dabd0d3d52b2ef0b5a0dd5
--- /dev/null
+++ b/test/standalone/EnvSharedCache.java
@@ -0,0 +1,938 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: EnvSharedCache.java,v 1.19.2.2 2010/01/04 15:30:49 cwl Exp $
+ */
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.security.SecureRandom;
+import java.util.Arrays;
+import java.util.Properties;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+
+/**
+ * Typical usage:
+ * # Initialize the DBs
+ * java EnvSharedCache -h HOME -initonly
+ *
+ * # Run updates with two classes of worker threads (different cache size)
+ * java EnvSharedCache -h HOME -shared -cachetest -txns 1000000
+ */
+public class EnvSharedCache implements Runnable {
+
+    private static final int INSERT = 1;
+    private static final int UPDATE = 2;
+    private static final int SELECT = 3;
+    private static boolean verbose = false;
+    private static boolean debug = false;
+    private static boolean openTest = false;
+    private static boolean cacheTest = false;
+    private static boolean sharedTest = false;
+    private static boolean evenTest = false;
+    private static boolean initOnly = false;
+    private static String delimiter = System.getProperty("file.separator");
+    private static String homeDirPrefix = "db";
+    private static StringBuffer inputArgs = new StringBuffer();
+    private static int nEnvs = 4;
+    private static int nThreadsPerEnv = 4;
+    private static int nMaxKeys = 1000000;
+    private static int nMaxTransactions = 100000;
+    private static float nCacheMissThreshold = 0.5f;
+    private static float nCacheSizeThreshold = 0.25f;
+    private static float nThruputThreshold = 0.5f;
+    private Environment[] envs;
+    private Database[] dbs;
+    private EnvironmentStats[] envStats;
+    private SecureRandom random = new SecureRandom();
+    private boolean isSharedCacheRun = false;
+    private int keySize = 10;
+    private int dataSize = 100;
+    private int nRecordsPerThread = 0;
+    private int nDeletesPerThread = 0;
+    private int nInitEnvs = 0;
+    private int nInitThreadsPerEnv = 0;
+    private int[][] nTransactions;
+    private int[][] nInserts;
+    private int[][] nUpdates;
+    private int[][] nDeletes;
+    private int[][] nSelects;
+    private int nReadsPerWrite = 10;
+    private float nThroughput = 0.0f;
+    private long[][] nElapsedTime;
+
+    public static void main(String[] args) {
+        try {
+            /* Parse command-line input arguments. */
+            for (int i = 0; i < args.length; i++) {
+                String arg = args[i];
+                boolean moreArgs = i < args.length - 1;
+                if (arg.equals("-v")) {
+                    verbose = true;
+                } else if (arg.equals("-d")) {
+                    debug = true;
+                } else if (arg.equals("-initonly")) {
+                    initOnly = true;
+                } else if (arg.equals("-opentest")) {
+                    openTest = true;
+                } else if (arg.equals("-cachetest")) {
+                    cacheTest = true;
+                } else if (arg.equals("-eventest")) {
+                    evenTest = true;
+                } else if (arg.equals("-h") && moreArgs) {
+                    homeDirPrefix = args[++i] + delimiter + homeDirPrefix;
+                } else if (arg.equals("-shared")) {
+                    sharedTest = true;
+                } else if (arg.equals("-envs") && moreArgs) {
+                    nEnvs = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-keys") && moreArgs) {
+                    nMaxKeys = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-txns") && moreArgs) {
+                    nMaxTransactions = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-threads") && moreArgs) {
+                    nThreadsPerEnv = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-help")) {
+                    usage(null);
+                    System.exit(0);
+                } else {
+                    usage("Unknown arg: " + arg);
+                    System.exit(1);
+                }
+            }
+            /* Save command-line input arguments. */
+            for (String s : args) {
+                inputArgs.append(" " + s);
+            }
+            System.out.println("\nCommand-line input arguments:\n  "
+                    + inputArgs);
+            /*
+             * If the -shared flag is specified, compare EnvironmentStats
+             * between the sharedCache and nonSharedCache runs to decide
+             * whether the environment shared-cache test passes or fails.
+             */
+            if (sharedTest) {
+                EnvSharedCache nonSharedCacheRun = new EnvSharedCache();
+                nonSharedCacheRun.setSharedCacheRun(false);
+
+                EnvSharedCache sharedCacheRun = new EnvSharedCache();
+                sharedCacheRun.setSharedCacheRun(true);
+
+                System.out.println("Starting non-sharedCache test...");
+                nonSharedCacheRun.startTest();
+                System.out.println("\nStarting sharedCache test...");
+                sharedCacheRun.startTest();
+                /* Compare stats to judge test passes/fails. */
+                if (!verifyResults(nonSharedCacheRun, sharedCacheRun)) {
+                    /* Failed to meet test criteria, exit with error. */
+                    System.exit(1);
+                }
+            } else {
+                new EnvSharedCache().startTest();
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * Print the usage.
+     */
+    private static void usage(String msg) {
+        String usageStr;
+        if (msg != null) {
+            System.err.println(msg);
+        }
+        usageStr = "Usage: java EnvSharedCache\n"
+            + "            [-v] [-d] [-h <homeDirPrefix>]\n"
+            + "            [-envs <numOfEnvs>]\n"
+            + "            [-threads <numOfThreadsPerEnv>]\n"
+            + "            [-keys <numOfKeysPerThread>] [-initonly]\n\n"
+            + "Usage: java EnvSharedCache\n"
+            + "            [-v] [-d] [-h <homeDirPrefix>]\n"
+            + "            [-envs <numOfEnvs>]\n"
+            + "            [-threads <numOfThreadsPerEnv>]\n"
+            + "            [-txns <numOfTransactionsPerThread>]\n"
+            + "            [-cachetest [-shared] [-opentest] [-eventest]]";
+        System.err.println(usageStr);
+    }
+
+    /**
+     * Compare results between non-shared and shared cache run.
+     */
+    public static boolean verifyResults(EnvSharedCache nonSharedCache,
+                                        EnvSharedCache sharedCache) {
+        EnvironmentStats[] nonSharedStatsArray = nonSharedCache.getEnvStats();
+        EnvironmentStats[] sharedStatsArray = sharedCache.getEnvStats();
+        boolean thruputCheck = false;
+        boolean cacheMissCheck = false;
+        boolean cacheSizeCheck = false;
+        boolean overallCheck = true;
+        System.out.println
+            ("\n\n          "
+             + "Multi-Env SharedCache Test Summary Report At: "
+             + new java.util.Date());
+        System.out.println
+            ("                         Non-Shared      Shared       Pass/Fail");
+        System.out.println
+            ("                         ----------    ----------    ----------");
+        /*
+         * Check that the relative throughput difference is within the
+         * given threshold.
+         */
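+        /*
+         * Worked example at the default 0.50 threshold (illustrative
+         * numbers only): 100 txns/sec non-shared vs 60 txns/sec shared
+         * gives |60 - 100| / 100 = 0.40 <= 0.50, so the check passes.
+         */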
+        if (evenTest) {
+            thruputCheck =
+                (Math.abs(sharedCache.nThroughput - nonSharedCache.nThroughput)
+                     / nonSharedCache.nThroughput)
+                <= nThruputThreshold;
+            overallCheck &= thruputCheck;
+        }
+        System.out.printf
+            ("Throughput(%.2f):        %10.2f    %10.2f    %10S%n",
+             nThruputThreshold,
+             nonSharedCache.nThroughput,
+             sharedCache.nThroughput,
+             (evenTest ? (thruputCheck ? "PASS" : "FAIL") : "N/A"));
+        for (int i = 0; i < nEnvs; i++) {
+            EnvironmentStats nonSharedStats = nonSharedStatsArray[i];
+            EnvironmentStats sharedStats = sharedStatsArray[i];
+            System.out.printf("Env(%d)\n", i);
+            /*
+             * Check whether the regular worker's NCacheMiss reduction meets
+             * the given threshold. This check doesn't apply to smallCache
+             * workers.
+             */
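+            /*
+             * E.g. at the default 0.50 threshold, 1,000 non-shared cache
+             * misses allow at most 500 shared-cache misses for a regular
+             * (even-indexed) env (illustrative numbers only).
+             */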
+            if ((!openTest) && (!evenTest) && ((i % 2) != 1)) {
+                cacheMissCheck = sharedStats.getNCacheMiss()
+                    <= (nonSharedStats.getNCacheMiss() * nCacheMissThreshold);
+            } else {
+                cacheMissCheck = true;
+            }
+            overallCheck &= cacheMissCheck;
+            System.out.printf
+                ("        NCacheMiss(%.2f):%10d    %10d    %10S\n",
+                 nCacheMissThreshold,
+                 nonSharedStats.getNCacheMiss(),
+                 sharedStats.getNCacheMiss(),
+                 (!openTest) && (!evenTest)
+                 ? (cacheMissCheck ? "PASS" : "FAIL")
+                 : "N/A");
+            /* For -eventest, check that cache DataBytes agree within the 25% threshold. */
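+            /*
+             * E.g. 100MB non-shared vs 80MB shared DataBytes gives
+             * |80 - 100| / 100 = 0.20 <= 0.25, so the check passes
+             * (illustrative numbers only).
+             */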
+            if (evenTest) {
+                cacheSizeCheck =
+                    ((float) Math.abs(sharedStats.getDataBytes()
+                                - nonSharedStats.getDataBytes())
+                         / nonSharedStats.getDataBytes())
+                    <= nCacheSizeThreshold;
+                overallCheck &= cacheSizeCheck;
+            }
+            System.out.printf
+                ("         DataBytes(%.2f):%10d    %10d    %10S\n",
+                 nCacheSizeThreshold,
+                 nonSharedStats.getDataBytes(),
+                 sharedStats.getDataBytes(),
+                 (evenTest ? (cacheSizeCheck ? "PASS" : "FAIL") : "N/A"));
+            System.out.printf
+                ("             NLogBuffers:%10d    %10d\n",
+                 nonSharedStats.getNLogBuffers(),
+                 sharedStats.getNLogBuffers());
+            System.out.printf
+                ("         LogBuffersBytes:%10d    %10d\n",
+                 nonSharedStats.getBufferBytes(),
+                 sharedStats.getBufferBytes());
+            System.out.printf
+                ("         CacheTotalBytes:%10d    %10d\n",
+                 nonSharedStats.getCacheTotalBytes(),
+                 sharedStats.getCacheTotalBytes());
+            System.out.printf
+                ("            NNotResident:%10d    %10d\n",
+                 nonSharedStats.getNNotResident(),
+                 sharedStats.getNNotResident());
+            System.out.printf
+                ("         NSharedCacheEnv:%10d    %10d\n",
+                 nonSharedStats.getNSharedCacheEnvironments(),
+                 sharedStats.getNSharedCacheEnvironments());
+            System.out.printf
+                ("        SCacheTotalBytes:%10d    %10d\n",
+                 nonSharedStats.getSharedCacheTotalBytes(),
+                 sharedStats.getSharedCacheTotalBytes());
+        }
+        System.out.print("\nThe run is: " + (sharedTest ? "-shared " : "")
+                + (openTest ? "-opentest " : "")
+                + (evenTest ? "-eventest " : "")
+                + "\nThe run is considered as: "
+                + (overallCheck ? "PASS" : "FAIL") + "\n");
+        return overallCheck;
+    }
+
+    /**
+     * Set the isSharedCacheRun flag.
+     */
+    private void setSharedCacheRun(boolean flag) {
+        isSharedCacheRun = flag;
+    }
+
+    /**
+     * Get the envStats.
+     */
+    private EnvironmentStats[] getEnvStats() {
+        return envStats;
+    }
+
+    /**
+     * Precheck if database files exist before starting the run.
+     */
+    private boolean validateHomeDir() {
+        for (int i = 0; i < nEnvs; i++) {
+            File f = new File(homeDirPrefix + i);
+            if (f.isDirectory()) {
+                continue;
+            } else if (initOnly) {
+                f.mkdirs();
+            } else {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private void startTest() throws Exception {
+
+        if (!validateHomeDir()) {
+            System.err.println("ERROR: Invalid HomeDirPrefix!"
+                    + " Please specify a valid HomeDirPrefix parameter"
+                    + " that points to your *.jdb files.");
+            System.exit(1);
+        }
+        /* Read properties from ${DB0}/run.properties file. */
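+        /*
+         * run.properties is a plain java.util.Properties file rewritten at
+         * the end of each run, e.g. (values illustrative only):
+         *   RecordsPerThread=1000
+         *   DeletesPerThread=100
+         *   InitEnvs=4
+         *   InitThreadsPerEnv=4
+         */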
+        File file = new File(homeDirPrefix + "0"
+                + System.getProperty("file.separator") + "run.properties");
+        Properties prop = new Properties();
+        if (file.exists()) {
+            FileInputStream in = new FileInputStream(file);
+            prop.load(in);
+            nRecordsPerThread =
+                Integer.parseInt(prop.getProperty("RecordsPerThread"));
+            nDeletesPerThread =
+                Integer.parseInt(prop.getProperty("DeletesPerThread"));
+            nInitEnvs =
+                Integer.parseInt(prop.getProperty("InitEnvs"));
+            nInitThreadsPerEnv =
+                Integer.parseInt(prop.getProperty("InitThreadsPerEnv"));
+            in.close();
+        }
+        if (initOnly) {
+            nInitEnvs = nEnvs;
+            nInitThreadsPerEnv = nThreadsPerEnv;
+        } else if (nInitEnvs > 0 && nEnvs > nInitEnvs) {
+            System.out.println("Warning: The number of environments"
+                    + " specified here exceeds the number used when the"
+                    + " databases were last initialized.\nAuto-adjusting to"
+                    + " the initialization value: " + nInitEnvs);
+            nEnvs = nInitEnvs;
+        } else if (nInitThreadsPerEnv > 0
+                && nThreadsPerEnv > nInitThreadsPerEnv) {
+            System.out.println("Warning: The number of threads specified"
+                    + " here exceeds the number used when the databases were"
+                    + " last initialized.\nAuto-adjusting to the"
+                    + " initialization value: " + nInitThreadsPerEnv);
+            nThreadsPerEnv = nInitThreadsPerEnv;
+        }
+
+        envs = new Environment[nEnvs];
+        dbs = new Database[nEnvs];
+        envStats = new EnvironmentStats[nEnvs];
+        nInserts = new int[nEnvs][nThreadsPerEnv];
+        nUpdates = new int[nEnvs][nThreadsPerEnv];
+        nDeletes = new int[nEnvs][nThreadsPerEnv];
+        nSelects = new int[nEnvs][nThreadsPerEnv];
+        nTransactions = new int[nEnvs][nThreadsPerEnv];
+        nElapsedTime = new long[nEnvs][nThreadsPerEnv];
+
+        /*
+         * Initialize the Environments and open the Databases. For the
+         * open/close test, each environment is instead opened and closed
+         * around every transaction in the thread's main loop.
+         */
+        if (!openTest) {
+            for (int i = 0; i < nEnvs; i++) {
+                envs[i] = openEnv(i);
+                dbs[i] = openDB(envs[i], i);
+            }
+        }
+
+        /* Create the workers and initialize operation counters. */
+        Thread[][] threads = new Thread[nEnvs][nThreadsPerEnv];
+        for (int i = 0; i < nEnvs; i++) {
+            for (int j = 0; j < nThreadsPerEnv; j++) {
+                nInserts[i][j] = 0;
+                nUpdates[i][j] = 0;
+                nDeletes[i][j] = 0;
+                nSelects[i][j] = 0;
+                nTransactions[i][j] = 0;
+                threads[i][j] =
+                    new Thread(this, Integer.toString(i * nThreadsPerEnv + j));
+                threads[i][j].start();
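+                /* Stagger threads. */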
+                Thread.sleep(100);
+            }
+        }
+
+        /* Wait until all threads have finished. */
+        for (int i = 0; i < nEnvs; i++) {
+            for (int j = 0; j < nThreadsPerEnv; j++) {
+                if (threads[i][j] != null) {
+                    threads[i][j].join();
+                }
+            }
+        }
+
+        if (!openTest) {
+            for (int i = 0; i < nEnvs; i++) {
+                /* Put EnvironmentStats objects into arrays before closing. */
+                envStats[i] = getStats(envs[i], i);
+                closeEnv(envs[i], dbs[i]);
+            }
+        }
+
+        /* Calculate elapsed time, transactions and throughput. */
+        int transactions = 0;
+        long timeMillis = 0;
+        float elapsedSecs = 0.0f;
+        float throughput = 0.0f;
+        for (int i = 0; i < nEnvs; i++) {
+            int inserts = 0, updates = 0, deletes = 0, selects = 0;
+            for (int j = 0; j < nThreadsPerEnv; j++) {
+                inserts += nInserts[i][j];
+                updates += nUpdates[i][j];
+                deletes += nDeletes[i][j];
+                selects += nSelects[i][j];
+                transactions += nTransactions[i][j];
+                timeMillis += nElapsedTime[i][j];
+                elapsedSecs = (float) nElapsedTime[i][j] / 1000;
+                throughput = (float) nTransactions[i][j] / elapsedSecs;
+                if (verbose) {
+                    System.out.printf("%nENV(%d) Thread %d "
+                            + " Running time: %.2f secs Transactions: %d"
+                            + " Throughput: %.2f txns/sec", i, j, elapsedSecs,
+                            nTransactions[i][j], throughput);
+                }
+            }
+            if (verbose) {
+                System.out.println("\nENV(" + i + "): " + inserts + " inserts "
+                        + updates + " updates " + deletes + " deletes "
+                        + selects + " selects ");
+            }
+        }
+        elapsedSecs = (float) timeMillis / (nEnvs * nThreadsPerEnv * 1000);
+        throughput = (float) transactions / elapsedSecs;
+        nThroughput = throughput;
+        System.out.printf("%nAverage elapsed time: %.2f secs"
+                + " Transactions: %d Throughput: %.2f txns/sec%n",
+                elapsedSecs, transactions, throughput);
+
+        /* Create/Update ${DB0}/run.properties file. */
+        FileOutputStream out = new FileOutputStream(file);
+        prop.setProperty("RecordsPerThread", Integer.toString(nRecordsPerThread
+                + nInserts[0][0] - nDeletes[0][0]));
+        prop.setProperty("DeletesPerThread", Integer.toString(nDeletesPerThread
+                + nDeletes[0][0]));
+        prop.setProperty("InitEnvs", Integer.toString(nInitEnvs));
+        prop.setProperty("InitThreadsPerEnv",
+                         Integer.toString(nInitThreadsPerEnv));
+        prop.store(out, "EnvSharedCache test runtime properties."
+                + " Please don't update/remove this file.");
+        out.close();
+    }
+
+    /**
+     * Return (and clear) the cache-related stats for the env.
+     */
+    private EnvironmentStats getStats(Environment env, int envId)
+            throws Exception {
+
+        assert (env != null) : "getStats: Null env pointer";
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+        EnvironmentStats stats = env.getStats(statsConfig);
+        return stats;
+    }
+
+    /**
+     * Open an Environment.
+     */
+    private Environment openEnv(int i) throws Exception {
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
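+        /*
+         * Keep total cache memory comparable across runs: one shared cache
+         * of 10MB * nEnvs here versus nEnvs private 10MB caches otherwise.
+         */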
+        if (isSharedCacheRun) {
+            envConfig.setCacheSize(10000000 * nEnvs);
+            envConfig.setSharedCache(true);
+        } else {
+            envConfig.setCacheSize(10000000);
+            envConfig.setSharedCache(false);
+        }
+        Environment env = new Environment(new File(homeDirPrefix + i),
+                envConfig);
+        return env;
+    }
+
+    /**
+     * Open a Database.
+     */
+    private Database openDB(Environment env, int i) throws Exception {
+
+        assert (env != null) : "openDB: Null env pointer";
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        return env.openDatabase(null, "db" + i, dbConfig);
+    }
+
+    /**
+     * Close the Database and Environment.
+     */
+    private void closeEnv(Environment env, Database db)
+            throws DatabaseException {
+
+        assert (db != null) : "closeEnv: Null db pointer";
+        assert (env != null) : "closeEnv: Null env pointer";
+
+        db.close();
+        env.close();
+    }
+
+    /**
+     * Generate the data.
+     */
+    private void makeData(DatabaseEntry data) {
+
+        assert (data != null) : "makeData: Null data pointer";
+
+        byte[] bytes = new byte[dataSize];
+        for (int i = 0; i < bytes.length; i++) {
+            bytes[i] = (byte) i;
+        }
+        data.setData(bytes);
+    }
+
+    /**
+     * Generate the random data.
+     */
+    private void makeRandomData(DatabaseEntry data) {
+
+        assert (data != null) : "makeRandomData: Null data pointer";
+
+        byte[] bytes = new byte[dataSize];
+        random.nextBytes(bytes);
+        data.setData(bytes);
+    }
+
+    /**
+     * Return a copy of the byte array in data.
+     */
+    private byte[] copyData(DatabaseEntry data) {
+
+        assert (data != null) : "copyData: Null data pointer";
+
+        byte[] buf = new byte[data.getSize()];
+        System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length);
+        return buf;
+    }
+
+    /**
+     * Return a copy of the byte array in data starting at the offset.
+     */
+    private byte[] copyData(DatabaseEntry data, int offset) {
+
+        assert (data != null) : "copyData: Null data pointer";
+
+        byte[] buf = new byte[data.getSize() - offset];
+        System.arraycopy(data.getData(), data.getOffset() + offset,
+                         buf, 0, buf.length);
+        return buf;
+    }
+
+    /**
+     * Generate the insert key with a prefix string.
+     */
+    private void makeInsertKey(Cursor c,
+                               DatabaseEntry key,
+                               String keyPrefix,
+                               boolean smallCache) {
+
+        assert (c != null) : "makeInsertKey: Null cursor pointer";
+        assert (key != null) : "makeInsertKey: Null key pointer";
+        assert (keyPrefix != null) : "makeInsertKey: Null keyPrefix pointer";
+
+        String buf = keyPrefix;
+        int num;
+        if (key.getData() != null) {
+            num = Integer.parseInt
+                (new String(copyData(key, keyPrefix.length())));
+            num++;
+        } else {
+            /*
+             * For the regular working set we define: deletion always
+             * occurs at the first database record, insertion always
+             * appends after the last record, and searches pick a random
+             * record between the first and last.
+             */
+            if (smallCache) {
+                num = nRecordsPerThread;
+            } else {
+                num = nRecordsPerThread + nDeletesPerThread;
+            }
+        }
+        buf += Integer.toString(num);
+        key.setData(buf.getBytes());
+    }
+
+    /**
+     * Insert a record.
+     */
+    private void insert(Cursor c,
+                        DatabaseEntry key,
+                        DatabaseEntry data,
+                        String keyPrefix,
+                        boolean smallCache) throws DatabaseException {
+
+        assert (c != null) : "insert: Null cursor pointer";
+        assert (key != null) : "insert: Null key pointer";
+        assert (data != null) : "insert: Null data pointer";
+
+        makeData(data);
+        boolean done = false;
+        while (!done) {
+            /*
+             * Generate a key that is prefixed with the thread name so each
+             * thread is working on its own data set to reduce deadlocks.
+             */
+            makeInsertKey(c, key, keyPrefix, smallCache);
+            OperationStatus status = c.putNoOverwrite(key, data);
+            if (status == OperationStatus.KEYEXIST) {
+                System.out.println("Duplicate key.");
+            } else {
+                if (status != OperationStatus.SUCCESS) {
+                    System.out.println("Unexpected insert error: " + status);
+                }
+                done = true;
+            }
+        }
+    }
+
+    /**
+     * Generate the search key with a prefix string.
+     */
+    private void makeSearchKey(Cursor c,
+                               DatabaseEntry key,
+                               String keyPrefix,
+                               boolean smallCache,
+                               int offset) {
+
+        assert (c != null) : "makeSearchKey: Null cursor pointer";
+        assert (key != null) : "makeSearchKey: Null key pointer";
+        assert (keyPrefix != null) : "makeSearchKey: Null keyPrefix pointer";
+
+        String buf = keyPrefix;
+        int num;
+        if (smallCache) {
+            num = offset;
+        } else {
+            /*
+             * For the regular working set, we create a random search key
+             * between the current "beginning" and "end" of the database
+             * records.
+             */
+            num = random.nextInt(nRecordsPerThread) + nDeletesPerThread
+                    + offset;
+        }
+        buf += Integer.toString(num);
+        key.setData(buf.getBytes());
+    }
+
+    public void run() {
+        Environment env = null;
+        Database db = null;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry searchKey = new DatabaseEntry();
+        DatabaseEntry searchData = new DatabaseEntry();
+        boolean done = false;
+        boolean smallCache = false;
+        byte[] lastInsertKey = null;
+        Transaction txn = null;
+        Cursor c = null;
+        int nKeys = 0;
+        OperationStatus status;
+
+        String threadName = Thread.currentThread().getName();
+        int envId = Integer.parseInt(threadName) / nThreadsPerEnv;
+        int threadId = Integer.parseInt(threadName) % nThreadsPerEnv;
+        String keyPrefix = threadId + "-";
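+        /*
+         * Keys look like "<threadId>-<seq>", e.g. "2-1500" (illustrative),
+         * so each thread operates on a disjoint key range.
+         */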
+
+        if (verbose) {
+            System.out.println("Thread " + threadId + " started on ENV("
+                    + envId + ")");
+        }
+
+        /* Initialize with start time. */
+        nElapsedTime[envId][threadId] = System.currentTimeMillis();
+
+        /*
+         * Unless this is an evenTest (even workload on each env), we create
+         * two classes of workers to test cache utilization efficiency. One
+         * set simply inserts, updates, and deletes the same record
+         * repeatedly, while the other set works on a larger working set.
+         * The former needs very little cache, so a non-shared cache of the
+         * same size is largely wasted on it.
+         */
+        smallCache = (!evenTest) & ((envId % 2) == 1);
+
+        if (!openTest) {
+            env = envs[envId];
+            db = dbs[envId];
+        }
+
+        while (!done) {
+            try {
+                /* Test the env open/close */
+                if (openTest) {
+                    env = openEnv(envId);
+                    db = openDB(env, envId);
+                }
+
+                txn = env.beginTransaction(null, null);
+                c = db.openCursor(txn, null);
+
+                if (initOnly && nKeys < nMaxKeys) {
+                    insert(c, key, data, keyPrefix, smallCache);
+                    checkCorrectness(INSERT, key, data, keyPrefix, smallCache,
+                            nKeys);
+                    nKeys++;
+                    nInserts[envId][threadId]++;
+                }
+
+                if (!initOnly) {
+                    /* Insert */
+                    if (smallCache) {
+                        /*
+                         * Set key to null so the same key is
+                         * inserted every time.
+                         */
+                        key.setData(null);
+                    }
+                    insert(c, key, data, keyPrefix, smallCache);
+                    if (smallCache) {
+                        checkCorrectness(INSERT, key, data, keyPrefix,
+                                smallCache, nRecordsPerThread);
+                    } else {
+                        checkCorrectness(INSERT, key, data, keyPrefix,
+                                smallCache,
+                                (nRecordsPerThread + nDeletesPerThread
+                                     + nInserts[envId][threadId]));
+                    }
+                    lastInsertKey = copyData(key);
+                    nInserts[envId][threadId]++;
+                    /* Update */
+                    if (smallCache) {
+                        searchKey.setData(lastInsertKey);
+                    } else {
+                        makeSearchKey(c, searchKey, keyPrefix, smallCache,
+                                nDeletes[envId][threadId]);
+                    }
+                    status = c.getSearchKeyRange(searchKey, searchData,
+                            LockMode.DEFAULT);
+                    if (status == OperationStatus.SUCCESS) {
+                        makeRandomData(data);
+                        status = c.putCurrent(data);
+                        if (status == OperationStatus.SUCCESS) {
+                            c.getSearchKey(searchKey, searchData,
+                                    LockMode.DEFAULT);
+                            if (smallCache) {
+                                checkCorrectness(UPDATE, searchKey, searchData,
+                                        keyPrefix, smallCache,
+                                        nRecordsPerThread);
+                            } else {
+                                checkCorrectness(UPDATE, searchKey, searchData,
+                                        keyPrefix, smallCache,
+                                        nUpdates[envId][threadId]);
+                            }
+                            nUpdates[envId][threadId]++;
+                        }
+                        /* Delete */
+                        if (!smallCache) {
+                            String buf = keyPrefix
+                                    + Integer.toString(nDeletesPerThread
+                                            + nDeletes[envId][threadId]);
+                            searchKey.setData(buf.getBytes());
+                            status = c.getSearchKey(searchKey, searchData,
+                                    LockMode.DEFAULT);
+                        }
+                        if (status == OperationStatus.SUCCESS) {
+                            status = c.delete();
+                            if (status == OperationStatus.SUCCESS) {
+                                status = c.getSearchKey(searchKey, searchData,
+                                        LockMode.DEFAULT);
+                                /*
+                                 * Delete correctness check: only verifies
+                                 * that the record no longer exists.
+                                 */
+                                if (status != OperationStatus.NOTFOUND) {
+                                    System.err.println
+                                        ("DELETE Correctness Check Failed: "
+                                         + "key/data pair still exists after "
+                                         + "deletion.");
+                                    System.exit(1);
+                                }
+                                nDeletes[envId][threadId]++;
+                            }
+                        }
+                    }
+                    /* Read */
+                    if (nReadsPerWrite > 0) {
+                        int i;
+                        for (i = 0; i < nReadsPerWrite; i++) {
+                            if (smallCache) {
+                                makeSearchKey(c, searchKey, keyPrefix,
+                                        smallCache, i);
+                                c.getSearchKey(searchKey, searchData,
+                                        LockMode.DEFAULT);
+                                checkCorrectness(SELECT, searchKey, searchData,
+                                        keyPrefix, smallCache, i);
+                            } else {
+                                makeSearchKey(c, searchKey, keyPrefix,
+                                        smallCache, nDeletes[envId][threadId]);
+                                c.getSearchKey(searchKey, searchData,
+                                        LockMode.DEFAULT);
+                                checkCorrectness(SELECT, searchKey, searchData,
+                                        keyPrefix, smallCache,
+                                        nDeletes[envId][threadId]);
+                            }
+
+                            /*
+                             * Call Thread.yield() to mitigate unfair thread
+                             * scheduling, which could otherwise cause the
+                             * throughput check to fail.
+                             */
+                            Thread.yield();
+                        }
+                        nSelects[envId][threadId] += i;
+                    }
+                }
+                c.close();
+                txn.commit();
+                nTransactions[envId][threadId]++;
+                if (initOnly) {
+                    if (nKeys >= nMaxKeys) {
+                        done = true;
+                    }
+                } else if (nMaxTransactions != 0
+                        && nTransactions[envId][threadId] >= nMaxTransactions) {
+                    done = true;
+                }
+                if (done && openTest && (threadId == (nThreadsPerEnv - 1))) {
+                    envStats[envId] = getStats(env, envId);
+                }
+                if (openTest) {
+                    closeEnv(env, db);
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        } // End of while loop.
+
+        /* Calculate elapsed time. */
+        nElapsedTime[envId][threadId] = System.currentTimeMillis()
+                - nElapsedTime[envId][threadId];
+        if (verbose) {
+            System.out.println("Thread " + threadId + " finished on ENV("
+                    + envId + ")");
+        }
+    }
+
+    /**
+     * Operation correctness check.
+     */
+    private void checkCorrectness(int operationType,
+                                  DatabaseEntry key,
+                                  DatabaseEntry data,
+                                  String keyPrefix,
+                                  boolean smallCache,
+                                  int checkNum) {
+
+        assert (key != null) : "checkCorrectness: Null key pointer";
+        assert (keyPrefix != null) : "checkCorrectness: Null keyPrefix pointer";
+
+        String s = new String(key.getData());
+        int num = Integer.parseInt(s.substring(s.indexOf("-") + 1));
+        DatabaseEntry d = new DatabaseEntry();
+        makeData(d);
+        if (operationType == INSERT) {
+            if (num != checkNum) {
+                System.err.println("INSERT Correctness Check Failed: "
+                        + "key value: " + s + " doesn't match checkNum: "
+                        + checkNum + ".");
+                System.exit(1);
+            }
+        } else if (operationType == UPDATE) {
+            if (smallCache && (num != checkNum)) {
+                System.err.println("UPDATE Correctness Check Failed: "
+                        + "key value " + s + " doesn't match checkNum "
+                        + checkNum + ".");
+                System.exit(1);
+            } else if (!smallCache) {
+                if (num < checkNum) {
+                    System.err.println("UPDATE Correctness Check Failed: "
+                            + "key value should be larger than "
+                            + checkNum + ".");
+                    System.exit(1);
+                } else if (num
+                        > (nRecordsPerThread + nDeletesPerThread + checkNum)) {
+                    System.err.println("UPDATE Correctness Check Failed: "
+                            + "key value should be smaller than "
+                            + (nRecordsPerThread + nDeletesPerThread + checkNum)
+                            + ".");
+                    System.exit(1);
+                }
+            } else if (Arrays.equals(data.getData(), d.getData())) {
+                System.err.println("UPDATE Correctness Check Failed: "
+                        + "data value doesn't change.");
+                System.exit(1);
+            }
+        } else if (operationType == SELECT) {
+            if (smallCache && num != checkNum) {
+                System.err.println("SELECT Correctness Check Failed: "
+                        + "key value: " + s + " doesn't match checkNum: "
+                        + checkNum + ".");
+                System.exit(1);
+            } else if (!smallCache) {
+                if (num < checkNum) {
+                    System.err.println("SELECT Correctness Check Failed: "
+                            + "key value should be larger than "
+                            + checkNum + ".");
+                    System.exit(1);
+                } else if (num
+                        > (nRecordsPerThread + nDeletesPerThread + checkNum)) {
+                    System.err.println("SELECT Correctness Check Failed: "
+                            + "key value should be smaller than "
+                            + (nRecordsPerThread + nDeletesPerThread + checkNum)
+                            + ".");
+                    System.exit(1);
+                }
+            }
+        }
+    }
+}
diff --git a/test/standalone/MemoryStress.java b/test/standalone/MemoryStress.java
new file mode 100644
index 0000000000000000000000000000000000000000..76925a88442460ffafdcf36d2ac11c724500955f
--- /dev/null
+++ b/test/standalone/MemoryStress.java
@@ -0,0 +1,430 @@
+import java.io.File;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+import java.util.Iterator;
+import java.util.Random;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.BtreeStats;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.incomp.INCompressor;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.IN;
+
+/**
+ * The original version of this test was written by Brian O'Neill of Amazon,
+ * for SR 11163. It used to get OutOfMemoryError at 720,000 records on Linda's
+ * laptop, on JE 1.5.3.
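+ *
+ * A typical invocation might be (flags are parsed in main below):
+ *   java MemoryStress -h HOME -nThreads 2 -nOps 1000000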
+ */
+public class MemoryStress {
+    private Environment env;
+    private Database db;
+    private StatsConfig statsConfig;
+    private EnvironmentImpl envImpl;
+
+    private DecimalFormat decimalFormat;
+    private NumberFormat numberFormat;
+
+    private int reportingInterval = 10000;
+    private int nextSerialKey = 0;
+
+    private String environmentHome;
+    private int numThreads;
+    private boolean insertDups;
+    private boolean serialKeys;
+    private boolean doDelete;
+    private boolean deleteExisting;
+    private int totalOps = Integer.MAX_VALUE;
+
+    /* accumulated stats */
+    private int totalEvictPasses;
+    private int totalSelected;
+    private int totalScanned;
+    private int totalExEvicted;
+    private int totalStripped;
+    private int totalCkpts;
+    private int totalCleaned;
+    private int totalNotResident;
+    private int totalCacheMiss;
+
+    public static void main(String[] args) {
+        try {
+            MemoryStress ms = new MemoryStress();
+            for (int i = 0; i < args.length; i += 1) {
+                String arg = args[i];
+                String arg2 = (i < args.length - 1) ? args[i + 1] : null;
+                if (arg.equals("-h")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    ms.environmentHome = arg2;
+                    i += 1;
+                } else if (arg.equals("-nThreads")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        ms.numThreads = Integer.parseInt(arg2);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    i += 1;
+                } else if (arg.equals("-nOps")) {
+                    if (arg2 == null) {
+                        throw new IllegalArgumentException(arg);
+                    }
+                    try {
+                        ms.totalOps = Integer.parseInt(arg2);
+                    } catch (NumberFormatException e) {
+                        throw new IllegalArgumentException(arg2);
+                    }
+                    i += 1;
+                } else if (arg.equals("-dups")) {
+                    ms.insertDups = true;
+                } else if (arg.equals("-serial")) {
+                    ms.serialKeys = true;
+                } else if (arg.equals("-delete")) {
+                    ms.doDelete = true;
+                } else if (arg.equals("-deleteExisting")) {
+                    ms.deleteExisting = true;
+                } else {
+                    throw new IllegalArgumentException(arg);
+                }
+            }
+            if (ms.environmentHome == null) {
+                throw new IllegalArgumentException("-h not specified");
+            }
+            ms.run();
+            System.exit(0);
+        } catch (IllegalArgumentException e) {
+            System.out.println(
+                "Usage: MemoryStress -h <envHome> [-nThreads <nThreads>" +
+                "-nOps <nOps> -dups -serial -delete -deleteExisting]");
+            e.printStackTrace();
+            System.exit(2);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    MemoryStress() {
+        decimalFormat = new DecimalFormat();
+        decimalFormat.setMaximumFractionDigits(2);
+        decimalFormat.setMinimumFractionDigits(2);
+
+        numberFormat = NumberFormat.getInstance();
+
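+        /*
+         * setClear(true) makes each getStats call return per-interval
+         * deltas; reportStats accumulates the running totals in the
+         * total* fields.
+         */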
+        statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+    }
+
+    void run()
+        throws DatabaseException, InterruptedException  {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setReadOnly(false);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(true);
+
+        env = new Environment(new File(environmentHome), envConfig);
+
+        EnvironmentConfig seeConfig = env.getConfig();
+        System.out.println("maxMem = " +
+                           numberFormat.format(seeConfig.getCacheSize()));
+        System.out.println(seeConfig);
+        envImpl = DbInternal.envGetEnvironmentImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(insertDups);
+        dbConfig.setTransactional(true);
+        dbConfig.setReadOnly(false);
+        dbConfig.setAllowCreate(true);
+
+        db = env.openDatabase(null, "test", dbConfig);
+
+        Worker[] workers = new Worker[numThreads];
+        for (int i = 0; i < numThreads; i++) {
+            Worker w = new Worker(i, db, totalOps);
+            w.start();
+            workers[i] = w;
+        }
+
+        for (int i = 0; i < numThreads; i++) {
+            workers[i].join();
+        }
+
+        db.close();
+
+        long startTime = System.currentTimeMillis();
+        env.close();
+        String timeStr = numberFormat.format
+            ((System.currentTimeMillis() - startTime)/1e3);
+        System.out.println("Environment.close took " + timeStr + " seconds");
+    }
+
+    private class Worker extends Thread {
+
+        public int id;
+        Database db;
+        private int totalOps;
+
+        Worker(int id, Database db, int totalOps) {
+            this.id = id;
+            this.db = db;
+            this.totalOps = totalOps;
+        }
+
+        public void run() {
+            int count = 0;
+            Random rnd = new Random(4361 + id);
+            byte[] key = new byte[10];
+            byte[] value = new byte[100];
+
+            long start = System.currentTimeMillis();
+
+            DatabaseEntry keyEntry = new DatabaseEntry();
+            DatabaseEntry valueEntry = new DatabaseEntry();
+
+            try {
+                int intervalCount = 0;
+                long intervalStart = start;
+                while (count < totalOps) {
+                    if (deleteExisting) {
+                        Transaction txn = env.beginTransaction(null, null);
+                        Cursor cursor = db.openCursor(txn, null);
+                        OperationStatus status =
+                            cursor.getFirst(keyEntry, valueEntry, null);
+                        if (status == OperationStatus.SUCCESS) {
+                            cursor.delete();
+                        }
+                        cursor.close();
+                        txn.commit();
+                        if (status == OperationStatus.SUCCESS) {
+                            count += 1;
+                        } else {
+                            System.out.println("No more records");
+                            break;
+                        }
+                    } else {
+                        if (serialKeys) {
+                            int keyValue = getNextSerialKey();
+                            IntegerBinding.intToEntry(keyValue, keyEntry);
+                            System.arraycopy(keyEntry.getData(), 0, key, 0, 4);
+                            keyEntry.setData(key);
+                        } else {
+                            rnd.nextBytes(key);
+                            keyEntry.setData(key);
+                        }
+                        rnd.nextBytes(value);
+                        valueEntry.setData(value);
+
+                        db.put(null, keyEntry, valueEntry);
+                        count++;
+                        intervalCount++;
+
+                        if (insertDups) {
+                            for (int i = 0; i < 3; i += 1) {
+                                rnd.nextBytes(value);
+                                valueEntry.setData(value);
+                                db.put(null, keyEntry, valueEntry);
+                                count += 1;
+                            }
+                        }
+
+                        if (doDelete) {
+                            db.delete(null, keyEntry);
+                        }
+                    }
+
+                    if (count % reportingInterval == 0) {
+                        reportStats(id, intervalCount, count,
+                                    intervalStart, start, db);
+                        intervalCount = 0;
+                        intervalStart = System.currentTimeMillis();
+                    }
+                }
+                reportStats(id, intervalCount, count,
+                            intervalStart, start, db);
+            } catch (DatabaseException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private synchronized int getNextSerialKey() {
+        return nextSerialKey++;
+    }
+
+    private void reportStats(int threadId,
+                             int intervalCount,
+                             int count,
+                             long intervalStart,
+                             long start,
+                             Database db)
+        throws DatabaseException {
+
+        long end = System.currentTimeMillis();
+
+        double seconds = (end - start) / 1e3;
+        double intervalSeconds = (end - intervalStart) / 1e3;
+        double rate = intervalCount / intervalSeconds;
+        double totalRate = count / seconds;
+        MemoryBudget mb = envImpl.getMemoryBudget();
+        INList inList = envImpl.getInMemoryINs();
+        INCompressor compressor = envImpl.getINCompressor();
+        EnvironmentStats stats = env.getStats(statsConfig);
+        System.out.println("id=" + threadId +
+                           " " +  numberFormat.format(count) +
+                           " rate=" +
+                           decimalFormat.format(rate) +
+                           " totalRate=" +
+                           decimalFormat.format(totalRate) +
+                           " cache=" +
+                           numberFormat.format(mb.getCacheMemoryUsage()) +
+                           " inList=" +
+                           numberFormat.format(inList.getSize()) +
+                           " passes=" +
+                           stats.getNEvictPasses() +
+                           " sel=" +
+                           numberFormat.format(stats.getNNodesSelected()) +
+                           " scan=" +
+                           numberFormat.format(stats.getNNodesScanned()) +
+                           " evict=" +
+                           numberFormat.format(stats.getNNodesExplicitlyEvicted()) +
+                           " strip=" +
+                           numberFormat.format(stats.getNBINsStripped()) +
+                           " ckpt=" +
+                           stats.getNCheckpoints() +
+                           " clean=" +
+                           stats.getNCleanerRuns() +
+                           " cleanBacklog=" +
+                           stats.getCleanerBacklog() +
+                           " compress=" +
+                           compressor.getBinRefQueueSize() +
+                           " notRes/cmiss=" +
+                           stats.getNNotResident() + "/" +
+                           stats.getNCacheMiss());
+        totalEvictPasses += stats.getNEvictPasses();
+        totalSelected += stats.getNNodesSelected();
+        totalScanned += stats.getNNodesScanned();
+        totalExEvicted += stats.getNNodesExplicitlyEvicted();
+        totalStripped += stats.getNBINsStripped();
+        totalCkpts += stats.getNCheckpoints();
+        totalCleaned += stats.getNCleanerRuns();
+        totalNotResident += stats.getNNotResident();
+        totalCacheMiss += stats.getNCacheMiss();
+        System.out.println("id=" + threadId +
+                           " " +  numberFormat.format(count) +
+                           " totals:" + numberFormat.format(totalEvictPasses) +
+                           " sel=" + numberFormat.format(totalSelected) +
+                           " scan=" + numberFormat.format(totalScanned) +
+                           " evict=" + numberFormat.format(totalExEvicted) +
+                           " strip=" + numberFormat.format(totalStripped) +
+                           " ckpt=" + numberFormat.format(totalCkpts) +
+                           " clean=" + numberFormat.format(totalCleaned) +
+                           " notRes=" + numberFormat.format(totalNotResident) +
+                           " miss=" + numberFormat.format(totalCacheMiss));
+
+        //summarizeINList(inList);
+        //summarizeBtree(db);
+        System.out.println("\n");
+    }
+
+    private void summarizeINList(INList inList)
+        throws DatabaseException {
+
+        int binCount = 0;
+        int binBytes = 0;
+        int inCount = 0;
+        int inBytes = 0;
+
+        Iterator iter = inList.iterator();
+
+        while (iter.hasNext()) {
+            IN theIN = (IN) iter.next();
+            if (theIN instanceof BIN) {
+                binCount++;
+                // binBytes += theIN.computeMemorySize();
+                BIN theBIN = (BIN) theIN;
+                theBIN.evictLNs();
+                binBytes += theIN.getBudgetedMemorySize();
+                /*
+                  for (int i = 0; i < theBIN.getNEntries(); i++) {
+                  if (theBIN.getTarget(i) != null) {
+                  lnCount++;
+                  //	    lnBytes += theBIN.getTarget(i).
+                  //	getMemorySizeIncludedByParent();
+                  }
+                  }
+                */
+            } else if (theIN instanceof IN) {
+                inCount++;
+                inBytes += theIN.getBudgetedMemorySize();
+            } else {
+                System.out.println("non-IN, non-BIN found on INList");
+            }
+        }
+
+        double perBIN = ((double) binBytes) / binCount;
+        double perIN = ((double) inBytes) / inCount;
+
+        System.out.println("INList:" +
+                           " nBINs: " + numberFormat.format(binCount) +
+                           " binBytes (incl LNBytes): " + binBytes +
+                           " perBin: " + numberFormat.format(perBIN) +
+                           " nINs: " + numberFormat.format(inCount) +
+                           " inBytes: " + inBytes +
+                           " perIN: " + numberFormat.format(perIN));
+        //" nLNs: " + numberFormat.format(lnCount));
+        //   " lnBytes (incl in binBytes): " + lnBytes);
+    }
+
+    private void summarizeBtree(Database db)
+        throws DatabaseException {
+
+        StatsConfig dbStatsConfig = new StatsConfig();
+        dbStatsConfig.setFast(false);
+        BtreeStats stats = (BtreeStats) db.getStats(dbStatsConfig);
+        System.out.print("BTreeStats: BINCount=" +
+                         stats.getBottomInternalNodeCount() +
+                         " INCount=" +
+                         stats.getInternalNodeCount() +
+                         " LNCount=" +
+                         stats.getLeafNodeCount() +
+                         " treeDepth=" +
+                         stats.getMainTreeMaxDepth() +
+                         " ");
+        summarizeINsByLevel("IN", stats.getINsByLevel());
+    }
+
+    private void summarizeINsByLevel(String msg, long[] insByLevel) {
+        if (insByLevel != null) {
+            System.out.print(msg + " count by level: ");
+            for (int i = 0; i < insByLevel.length; i++) {
+                long cnt = insByLevel[i];
+                if (cnt != 0) {
+                    System.out.print("[" + i + "," + cnt + "]");
+                }
+            }
+            System.out.print("   ");
+        }
+    }
+}
diff --git a/test/standalone/RemoveDbStress.java b/test/standalone/RemoveDbStress.java
new file mode 100644
index 0000000000000000000000000000000000000000..250aff4ab8ef511a4273774d68fa08c4e5c4e751
--- /dev/null
+++ b/test/standalone/RemoveDbStress.java
@@ -0,0 +1,257 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2010 Oracle.  All rights reserved.
+ *
+ * $Id: RemoveDbStress.java,v 1.2.2.2 2010/01/04 15:30:50 cwl Exp $
+ */
+
+import java.io.File;
+import java.util.Random;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.MemoryBudget;
+
+/**
+ * Make sure no bugs surface when database remove/truncate is called while
+ * the log cleaner and checkpointer threads are interacting with the db.
+ */
+public class RemoveDbStress {
+    private int totalIterations = 500000;
+    private int totalThreads = 4;
+    private String envHome = "./tmp";
+    private Random random = new Random();
+    private Environment env = null;
+
+    public static void main(String[] args) {
+
+        RemoveDbStress stress = new RemoveDbStress();
+        try {
+            stress.run(args);
+        } catch (Exception e) {
+            System.err.println("Error running the test!");
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+
+    /** Kick off the run. */
+    private void run(String[] args) throws Exception {
+
+        for (int i = 0; i < args.length; i += 1) {
+            String arg = args[i];
+            boolean moreArgs = i < args.length - 1;
+            if (arg.equals("-h") && moreArgs) {
+                envHome = args[++i];
+            } else if (arg.equals("-iter") && moreArgs) {
+                totalIterations = Integer.parseInt(args[++i]);
+            } else if (arg.equals("-threads") && moreArgs) {
+                totalThreads = Integer.parseInt(args[++i]);
+            } else {
+                usage("Unknown arg: " + arg);
+            }
+        }
+        openEnv();
+        printArgs(args);
+
+        /*
+         * Perform operations that recreate the problem scenario: database
+         * remove/truncate is called while the log cleaner and checkpointer
+         * threads are interacting with the db.
+         */
+        Worker[] workers = new Worker[totalThreads];
+        for (int i = 0; i < totalThreads; i += 1) {
+            workers[i] = new Worker(i);
+            workers[i].start();
+            Thread.sleep(1000); /* Stagger threads. */
+        }
+        for (int i = 0; i < totalThreads; i += 1) {
+            workers[i].join();
+        }
+        
+        closeEnv();
+    }
+    
+    /** Print usage. */
+    private void usage(String error) {
+
+        if (error != null) {
+            System.err.println(error);
+        }
+        System.err.println
+            ("java " + getClass().getName() + '\n' +
+             "      [-h <homeDir>] [-iter <iterations>] " +
+             "[-threads <appThreads>]\n");
+        System.exit(1);
+    }
+
+    /** Print command line arguments and environment settings to stdout. */
+    private void printArgs(String[] args)
+        throws DatabaseException {
+
+        System.out.print("Command line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+        System.out.println();
+        System.out.println("Environment configuration:");
+        System.out.println(env.getConfig());
+        System.out.println();
+    }
+    
+    /**
+     * Open an Environment.
+     */
+    private void openEnv() throws Exception {
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setAllowCreate(true);
+
+        config.setConfigParam
+            (EnvironmentParams.MAX_MEMORY.getName(),
+             MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING);
+        /* Don't track detail with a tiny cache size. */
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false");
+        config.setConfigParam
+            (EnvironmentParams.CLEANER_BYTES_INTERVAL.getName(),
+             "100");
+        config.setConfigParam
+            (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName(),
+             "100");
+        config.setConfigParam
+            (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL.getName(),
+             "1000000");
+        config.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(),
+                  EnvironmentParams.LOG_MEM_SIZE_MIN_STRING);
+        config.setConfigParam
+            (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "true");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "true");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true");
+        config.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "true");
+        env = new Environment(new File(envHome), config);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+        env.close();
+    }
+    
+    class Worker extends Thread {
+        private int iterations = 0;
+
+        /** The identifier of the current thread. */
+        private int id;
+
+        /**
+         * Creates a new worker thread object.
+         */
+        public Worker(int id) {
+            this.id = id;
+        }
+
+        /**
+         * Repeatedly performs the test operations until the iteration
+         * limit is reached.
+         */
+        public void run() {
+
+            long startTime = System.currentTimeMillis();
+            while (iterations < totalIterations) {
+                try {
+                    doOperations();
+                } catch (Exception e) {
+                    System.err.println
+                        ("Error! " + iterations +
+                         " iterations processed so far.");
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+                iterations += 1;
+                if ((iterations % 1000) == 0) {
+                    System.out.println
+                        (new java.util.Date() + ": Thread " + id +
+                         " finishes " + iterations + " iterations.");
+                }
+            }
+            long endTime = System.currentTimeMillis();
+            float elapsedSec = (float) ((endTime - startTime) / 1e3);
+            float throughput = ((float) totalIterations) / elapsedSec;
+            System.out.println
+                ("Thread " + id + " finishes " + iterations +
+                 " iterations in " + elapsedSec +
+                 " sec, average throughput: " + throughput + " op/sec.");
+        }
+        
+        /**
+         * Perform some insert and delete operations in order to wake up
+         * the checkpointer, cleaner and evictor.
+         */
+        private void doOperations() throws DatabaseException {
+            String dbName = "testDb" + id;
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setSortedDuplicates(true);
+            Database db = env.openDatabase(null, dbName, dbConfig);
+            Cursor cursor = db.openCursor(null, null);
+            doSimpleCursorPutAndDelete(cursor);
+            cursor.close();
+            db.close();
+            
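+            /* Randomly (50/50) either remove or truncate the database. */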
+            if (random.nextFloat() < .5) {
+                env.removeDatabase(null, dbName);
+            } else {
+                env.truncateDatabase(null, dbName, false);
+            }
+        }
+
+        /**
+         * Write and then delete some data to wake up the checkpointer,
+         * cleaner and evictor.
+         */
+        protected void doSimpleCursorPutAndDelete(Cursor cursor)
+            throws DatabaseException {
+            
+            String[] simpleKeyStrings = {
+                    "foo", "bar", "baz", "aaa", "fubar",
+                    "foobar", "quux", "mumble", "froboy" };
+
+            String[] simpleDataStrings = {
+                    "one", "two", "three", "four", "five",
+                    "six", "seven", "eight", "nine" };
+
+            DatabaseEntry foundKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+
+            for (int i = 0; i < simpleKeyStrings.length; i++) {
+                foundKey.setData(simpleKeyStrings[i].getBytes());
+                foundData.setData(simpleDataStrings[i].getBytes());
+                if (cursor.putNoOverwrite(foundKey, foundData) !=
+                    OperationStatus.SUCCESS) {
+                    throw new DatabaseException
+                        ("putNoOverwrite did not return SUCCESS");
+                }
+            }
+
+            OperationStatus status =
+                cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+
+            while (status == OperationStatus.SUCCESS) {
+                cursor.delete();
+                status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
+            }
+        }
+    }
+}